Get rid of MultiObjectiveBenchmarkProblem (#2721)
Summary:
Pull Request resolved: #2721

Context:

The only purpose of `MultiObjectiveBenchmarkProblem` is to serve as a type annotation indicating that its `optimization_config` is a `MultiObjectiveOptimizationConfig`. This creates more trouble than it's worth; we can simply check whether the optimization config is multi-objective rather than checking whether the problem is. The same goes for maintaining separate single-objective and multi-objective surrogate problem classes.
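
For illustration, here is a minimal sketch of the pattern this diff relies on. The helper functions are hypothetical and not part of Ax; the imports and attributes used (`optimization_config`, `objective_thresholds`, `assert_is_instance`) all appear in the diff below.

```python
from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from pyre_extensions import assert_is_instance


def is_multi_objective(problem: BenchmarkProblem) -> bool:
    # Hypothetical helper: inspect the optimization config rather than relying on
    # `isinstance(problem, MultiObjectiveBenchmarkProblem)`.
    return isinstance(problem.optimization_config, MultiObjectiveOptimizationConfig)


def reference_point(problem: BenchmarkProblem) -> list[float]:
    # Hypothetical helper: narrow the config type first, then read the objective
    # thresholds, mirroring the updated `test_moo_from_botorch` below.
    opt_config = assert_is_instance(
        problem.optimization_config, MultiObjectiveOptimizationConfig
    )
    return [threshold.bound for threshold in opt_config.objective_thresholds]
```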

With this change, the only two `BenchmarkProblem` classes are `BenchmarkProblem` and `SurrogateBenchmarkProblem`; the latter can be removed in a future PR.
This diff:
* Removes `MultiObjectiveBenchmarkProblem`, and replaces references to it with `BenchmarkProblem`
* Consolidates `SurrogateBenchmarkProblemBase`, `SooSurrogateBenchmarkProblem`, and `MOOSurrogateBenchmarkProblem` into `SurrogateBenchmarkProblem` and replaces references
* Removes some branching logic

Reviewed By: Balandat

Differential Revision: D61869477

fbshipit-source-id: f5a7948d264616bf4a8e7352cf851d68ce303dc6
esantorella authored and facebook-github-bot committed Aug 28, 2024
1 parent 52864e5 commit 36917bc
Showing 6 changed files with 33 additions and 69 deletions.
19 changes: 2 additions & 17 deletions ax/benchmark/benchmark_problem.py
@@ -256,28 +256,13 @@ def create_single_objective_problem_from_botorch(
)


@dataclass(kw_only=True, repr=True)
class MultiObjectiveBenchmarkProblem(BenchmarkProblem):
"""
A `BenchmarkProblem` that supports multiple objectives.
For multi-objective problems, `optimal_value` indicates the maximum
hypervolume attainable with the objective thresholds provided on the
`optimization_config`.
For argument descriptions, see `BenchmarkProblem`.
"""

optimization_config: MultiObjectiveOptimizationConfig


def create_multi_objective_problem_from_botorch(
test_problem_class: type[MultiObjectiveTestProblem],
test_problem_kwargs: dict[str, Any],
# TODO: Figure out whether we should use `lower_is_better` here.
num_trials: int,
observe_noise_sd: bool = False,
) -> MultiObjectiveBenchmarkProblem:
) -> BenchmarkProblem:
"""Create a BenchmarkProblem from a BoTorch BaseTestProblem using specialized
Metrics and Runners. The test problem's result will be computed on the Runner
once per trial and each Metric will retrieve its own result by index.
@@ -337,7 +322,7 @@ def create_multi_objective_problem_from_botorch(
],
)

return MultiObjectiveBenchmarkProblem(
return BenchmarkProblem(
name=name,
search_space=get_continuous_search_space(test_problem._bounds),
optimization_config=optimization_config,
37 changes: 9 additions & 28 deletions ax/benchmark/problems/surrogate.py
@@ -5,48 +5,29 @@

# pyre-strict
"""
Benchmark problems based on surrogates.
Benchmark problem based on surrogate.
These problems might appear to function identically to their non-surrogate
counterparts, `BenchmarkProblem` and `MultiObjectiveBenchmarkProblem`, aside
from the restriction that their runners are of type `SurrogateRunner`. However,
they are treated specially within JSON storage because surrogates cannot be
easily serialized.
This problem class might appear to function identically to its non-surrogate
counterpart, `BenchmarkProblem`, aside from the restriction that its runner is
of type `SurrogateRunner`. However, it is treated specially within JSON storage
because surrogates cannot be easily serialized.
"""

from dataclasses import dataclass, field

from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.benchmark.runners.surrogate import SurrogateRunner
from ax.core.optimization_config import MultiObjectiveOptimizationConfig


@dataclass(kw_only=True)
class SurrogateBenchmarkProblemBase(BenchmarkProblem):
class SurrogateBenchmarkProblem(BenchmarkProblem):
"""
Base class for SOOSurrogateBenchmarkProblem and MOOSurrogateBenchmarkProblem.
Benchmark problem whose `runner` is a `SurrogateRunner`.
Its `runner` is a `SurrogateRunner`, which allows for the surrogate to be
constructed lazily and datasets to be downloaded lazily.
`SurrogateRunner` allows for the surrogate to be constructed lazily and for
datasets to be downloaded lazily.
For argument descriptions, see `BenchmarkProblem`.
"""

runner: SurrogateRunner = field(repr=False)


class SOOSurrogateBenchmarkProblem(SurrogateBenchmarkProblemBase):
pass


@dataclass(kw_only=True)
class MOOSurrogateBenchmarkProblem(SurrogateBenchmarkProblemBase):
"""
Has the same attributes/properties as a `MultiObjectiveBenchmarkProblem`,
but its `runner` is a `SurrogateRunner`, which allows for the surrogate to be
constructed lazily and datasets to be downloaded lazily.
For argument descriptions, see `BenchmarkProblem`.
"""

optimization_config: MultiObjectiveOptimizationConfig
2 changes: 1 addition & 1 deletion ax/benchmark/tests/problems/test_surrogate_problems.py
@@ -31,7 +31,7 @@ def test_repr(self) -> None:
sbp = get_soo_surrogate()

expected_repr = (
"SOOSurrogateBenchmarkProblem(name='test', "
"SurrogateBenchmarkProblem(name='test', "
"optimization_config=OptimizationConfig(objective=Objective(metric_name="
'"branin", '
"minimize=True), "
6 changes: 5 additions & 1 deletion ax/benchmark/tests/test_benchmark_problem.py
@@ -14,6 +14,7 @@
create_single_objective_problem_from_botorch,
)
from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.core.types import ComparisonOp
from ax.utils.common.testutils import TestCase
from ax.utils.common.typeutils import checked_cast
@@ -28,6 +29,7 @@
Cosine8,
)
from hypothesis import given, strategies as st
from pyre_extensions import assert_is_instance


class TestBenchmarkProblem(TestCase):
@@ -198,7 +200,9 @@ def test_moo_from_botorch(self) -> None:

# Test hypervolume
self.assertEqual(branin_currin_problem.optimal_value, test_problem._max_hv)
opt_config = branin_currin_problem.optimization_config
opt_config = assert_is_instance(
branin_currin_problem.optimization_config, MultiObjectiveOptimizationConfig
)
reference_point = [
threshold.bound for threshold in opt_config.objective_thresholds
]
7 changes: 2 additions & 5 deletions ax/storage/json_store/registry.py
@@ -12,10 +12,7 @@
import torch
from ax.benchmark.benchmark_method import BenchmarkMethod
from ax.benchmark.benchmark_metric import BenchmarkMetric
from ax.benchmark.benchmark_problem import (
BenchmarkProblem,
MultiObjectiveBenchmarkProblem,
)
from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
from ax.benchmark.problems.hpo.torchvision import PyTorchCNNTorchvisionParamBasedProblem
from ax.benchmark.runners.botorch_test import (
@@ -338,7 +335,7 @@
"ModelRegistryBase": ModelRegistryBase,
"ModelSpec": ModelSpec,
"MultiObjective": MultiObjective,
"MultiObjectiveBenchmarkProblem": MultiObjectiveBenchmarkProblem,
"MultiObjectiveBenchmarkProblem": BenchmarkProblem, # backward compatibility
"MultiObjectiveOptimizationConfig": MultiObjectiveOptimizationConfig,
"MultiTypeExperiment": MultiTypeExperiment,
"NegativeBraninMetric": NegativeBraninMetric,
31 changes: 14 additions & 17 deletions ax/utils/testing/benchmark_stubs.py
@@ -16,13 +16,9 @@
BenchmarkProblem,
create_multi_objective_problem_from_botorch,
create_single_objective_problem_from_botorch,
MultiObjectiveBenchmarkProblem,
)
from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
from ax.benchmark.problems.surrogate import (
MOOSurrogateBenchmarkProblem,
SOOSurrogateBenchmarkProblem,
)
from ax.benchmark.problems.surrogate import SurrogateBenchmarkProblem
from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
from ax.benchmark.runners.surrogate import SurrogateRunner
from ax.core.experiment import Experiment
@@ -65,10 +61,12 @@ def get_single_objective_benchmark_problem(


def get_multi_objective_benchmark_problem(
observe_noise_sd: bool = False, num_trials: int = 4
) -> MultiObjectiveBenchmarkProblem:
observe_noise_sd: bool = False,
num_trials: int = 4,
test_problem_class: type[BraninCurrin] = BraninCurrin,
) -> BenchmarkProblem:
return create_multi_objective_problem_from_botorch(
test_problem_class=BraninCurrin,
test_problem_class=test_problem_class,
test_problem_kwargs={},
num_trials=num_trials,
observe_noise_sd=observe_noise_sd,
@@ -77,12 +75,11 @@ def get_multi_objective_benchmark_problem(

def get_constrained_multi_objective_benchmark_problem(
observe_noise_sd: bool = False, num_trials: int = 4
) -> MultiObjectiveBenchmarkProblem:
return create_multi_objective_problem_from_botorch(
test_problem_class=ConstrainedBraninCurrin,
test_problem_kwargs={},
num_trials=num_trials,
) -> BenchmarkProblem:
return get_multi_objective_benchmark_problem(
observe_noise_sd=observe_noise_sd,
num_trials=num_trials,
test_problem_class=ConstrainedBraninCurrin,
)


@@ -99,7 +96,7 @@ def get_sobol_benchmark_method() -> BenchmarkMethod:
)


def get_soo_surrogate() -> SOOSurrogateBenchmarkProblem:
def get_soo_surrogate() -> SurrogateBenchmarkProblem:
experiment = get_branin_experiment(with_completed_trial=True)
surrogate = TorchModelBridge(
experiment=experiment,
@@ -123,7 +120,7 @@ def get_soo_surrogate() -> SOOSurrogateBenchmarkProblem:
)
optimization_config = OptimizationConfig(objective=objective)

return SOOSurrogateBenchmarkProblem(
return SurrogateBenchmarkProblem(
name="test",
search_space=experiment.search_space,
optimization_config=optimization_config,
@@ -134,7 +131,7 @@ def get_soo_surrogate() -> SOOSurrogateBenchmarkProblem:
)


def get_moo_surrogate() -> MOOSurrogateBenchmarkProblem:
def get_moo_surrogate() -> SurrogateBenchmarkProblem:
experiment = get_branin_experiment_with_multi_objective(with_completed_trial=True)
surrogate = TorchModelBridge(
experiment=experiment,
@@ -171,7 +168,7 @@ def get_moo_surrogate() -> MOOSurrogateBenchmarkProblem:
],
)
)
return MOOSurrogateBenchmarkProblem(
return SurrogateBenchmarkProblem(
name="test",
search_space=experiment.search_space,
optimization_config=optimization_config,