[BE] - cleanup docstring core.benchmark #2033

Merged · 3 commits · Aug 22, 2024
31 changes: 22 additions & 9 deletions habitat-lab/habitat/core/benchmark.py
@@ -3,10 +3,11 @@
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Implements evaluation of ``habitat.Agent`` inside ``habitat.Env``.
"""Habitat Challenge Benchmark class: a generic benchmarking framework for locally or remotely evaluating performance of an Agent.
Implements evaluation of ``habitat.Agent`` inside ``habitat.Env``.
``habitat.Benchmark`` creates a ``habitat.Env`` which is specified through
the ``config_env`` parameter in constructor. The evaluation is task agnostic
and is implemented through metrics defined for ``habitat.EmbodiedTask``.
and implemented through metrics defined for ``habitat.EmbodiedTask``.
"""

import os
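
For context, a minimal usage sketch of the API this docstring describes; the agent class and config path below are placeholders for illustration, not part of this PR:

```python
from habitat.core.agent import Agent
from habitat.core.benchmark import Benchmark


class StopAgent(Agent):
    """Placeholder agent that ends every episode immediately."""

    def reset(self) -> None:
        pass

    def act(self, observations):
        # A real agent would map observations to task actions here;
        # valid action names come from the task config.
        return {"action": "stop"}


# Placeholder config path; point this at a valid benchmark/task config.
benchmark = Benchmark(config_paths="path/to/benchmark_config.yaml")
metrics = benchmark.evaluate(StopAgent(), num_episodes=5)
print(metrics)
```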
@@ -21,12 +22,13 @@


class Benchmark:
r"""Benchmark for evaluating agents in environments."""
"""Generic benchmark class for evaluating agents in environments from config."""

def __init__(
self, config_paths: Optional[str] = None, eval_remote: bool = False
) -> None:
r"""..
"""
Initialize the Env from the provided config.

:param config_paths: file to be used for creating the environment
:param eval_remote: boolean indicating whether evaluation should be run remotely or locally
@@ -39,9 +41,13 @@ def __init__(
else:
self._env = Env(config=config_env)

def remote_evaluate(
self, agent: "Agent", num_episodes: Optional[int] = None
):
def remote_evaluate(self, agent: "Agent") -> Dict[str, float]:
"""
Run remote evaluation with EvalAI for the instantiated Agent and Env. Evaluation runs remotely through a challenge evaluation server to prevent any potential for biased results. Imports some challenge-specific dependencies.

:param agent: The Agent to evaluate.
:return: The results dictionary containing metrics.
"""
# The modules imported below are specific to habitat-challenge remote evaluation.
# These modules are not part of the habitat-lab repository.
import pickle
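
For illustration, a short sketch of how the remote path is selected; the config path is a placeholder, ``agent`` stands for any ``habitat.Agent`` implementation, and the challenge-specific gRPC/EvalAI dependencies are assumed to be available in the evaluation container:

```python
from habitat.core.benchmark import Benchmark

# eval_remote=True skips local Env construction and makes evaluate()
# dispatch to remote_evaluate(), which talks to the challenge evaluation
# server; num_episodes is ignored on the remote path.
benchmark = Benchmark(
    config_paths="path/to/challenge_config.yaml",  # placeholder path
    eval_remote=True,
)
metrics = benchmark.evaluate(agent)  # `agent`: any habitat.Agent implementation
```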
@@ -118,6 +124,13 @@ def remote_ep_over(stub):
def local_evaluate(
self, agent: "Agent", num_episodes: Optional[int] = None
) -> Dict[str, float]:
"""
Run evaluation of an Agent in the Env locally.

:param agent: The Agent to evaluate.
:param num_episodes: The number of episodes to evaluate.
:return: The results dictionary containing metrics.
"""
if num_episodes is None:
num_episodes = len(self._env.episodes)
else:
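
To make the docstring above concrete, here is a simplified sketch of the kind of loop ``local_evaluate`` runs; it is not the exact habitat-lab implementation and assumes all reported metrics are scalar:

```python
from collections import defaultdict
from typing import Dict


def sketch_local_evaluate(env, agent, num_episodes: int) -> Dict[str, float]:
    """Run num_episodes episodes and average the task metrics."""
    agg_metrics: Dict[str, float] = defaultdict(float)
    for _ in range(num_episodes):
        observations = env.reset()
        agent.reset()
        while not env.episode_over:
            action = agent.act(observations)
            observations = env.step(action)
        for name, value in env.get_metrics().items():
            agg_metrics[name] += value  # assumes scalar metric values
    return {name: total / num_episodes for name, total in agg_metrics.items()}
```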
@@ -160,7 +173,7 @@ def local_evaluate(
def evaluate(
self, agent: "Agent", num_episodes: Optional[int] = None
) -> Dict[str, float]:
r"""..
r"""Evaluates the provide agent in the configured environment either locally or remotely and returns the results dictionary with metrics.

:param agent: agent to be evaluated in environment.
:param num_episodes: count of number of episodes for which the
@@ -169,6 +182,6 @@ def evaluate(
"""

if self._eval_remote is True:
return self.remote_evaluate(agent, num_episodes)
return self.remote_evaluate(agent)
else:
return self.local_evaluate(agent, num_episodes)
4 changes: 4 additions & 0 deletions habitat-lab/habitat/core/challenge.py
@@ -11,6 +11,10 @@


class Challenge(Benchmark):
"""
Extends the Benchmark class to run evaluation with the current challenge config and submit results to the remote evaluation server.
"""

def __init__(self, eval_remote=False):
config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
super().__init__(config_paths, eval_remote=eval_remote)
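
For illustration, a minimal sketch of how ``Challenge`` is typically driven; the config path and ``agent`` are placeholders, and inside the official challenge image ``CHALLENGE_CONFIG_FILE`` is already set:

```python
import os

from habitat.core.challenge import Challenge

# Placeholder: the challenge evaluation image normally sets this variable.
os.environ["CHALLENGE_CONFIG_FILE"] = "path/to/challenge_config.yaml"

challenge = Challenge(eval_remote=False)  # True when submitting to the server
metrics = challenge.evaluate(agent)  # `agent`: your habitat.Agent implementation
print(metrics)
```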