diff --git a/ax/benchmark/methods/modular_botorch.py b/ax/benchmark/methods/modular_botorch.py
index 8c1dd159053..e1849133efa 100644
--- a/ax/benchmark/methods/modular_botorch.py
+++ b/ax/benchmark/methods/modular_botorch.py
@@ -34,8 +34,6 @@ def get_sobol_botorch_modular_fixed_noise_gp_qnei(
                 },
                 Keys.ACQF_KWARGS: {
                     "prune_baseline": True,
-                    "qmc": True,
-                    "mc_samples": 512,
                 },
             }
         }
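Note: in recent BoTorch, Monte Carlo acquisition functions such as qNEI already default to Sobol quasi-Monte Carlo sampling, so forwarding "qmc" and "mc_samples" through Keys.ACQF_KWARGS is redundant. A minimal sketch of the explicit equivalent, assuming a recent BoTorch where samplers take a sample_shape; the 512-sample Sobol sampler mirrors the values removed here and is believed to match the library default:

```python
# Illustrative only; not part of this diff.
import torch
from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.sampling.normal import SobolQMCNormalSampler

train_X = torch.rand(8, 2, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

# Passing the sampler explicitly; omitting it should yield an
# equivalent qMC sampler by default in recent BoTorch versions.
acqf = qNoisyExpectedImprovement(
    model=model,
    X_baseline=train_X,
    sampler=SobolQMCNormalSampler(sample_shape=torch.Size([512])),
    prune_baseline=True,
)
```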
diff --git a/ax/benchmark/tests/test_benchmark.py b/ax/benchmark/tests/test_benchmark.py
index 28179ff5183..87d102cb7ad 100644
--- a/ax/benchmark/tests/test_benchmark.py
+++ b/ax/benchmark/tests/test_benchmark.py
@@ -14,8 +14,14 @@
 from ax.benchmark.benchmark_method import BenchmarkMethod
 from ax.benchmark.benchmark_problem import SingleObjectiveBenchmarkProblem
 from ax.benchmark.benchmark_result import BenchmarkResult
-from ax.modelbridge.generation_strategy import GenerationStep, GenerationStrategy
-from ax.modelbridge.registry import Models
+from ax.benchmark.methods.gpei_and_moo import get_gpei_default
+from ax.benchmark.methods.modular_botorch import (
+    get_sobol_botorch_modular_default,
+    get_sobol_botorch_modular_fixed_noise_gp_qnehvi,
+    get_sobol_botorch_modular_fixed_noise_gp_qnei,
+    get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp_qnehvi,
+    get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp_qnei,
+)
 from ax.service.utils.scheduler_options import SchedulerOptions
 from ax.storage.json_store.load import load_experiment
 from ax.storage.json_store.save import save_experiment
@@ -25,7 +31,6 @@
     get_multi_objective_benchmark_problem,
     get_single_objective_benchmark_problem,
     get_sobol_benchmark_method,
-    get_sobol_gpei_benchmark_method,
 )
 from ax.utils.testing.core_stubs import get_experiment
 from ax.utils.testing.mock import fast_botorch_optimize
@@ -84,12 +89,11 @@ def test_benchmark_result_invalid_inputs(self) -> None:
             gen_time=0.0,
         )

-    def test_replication_synthetic(self) -> None:
+    def test_replication_synthetic_sobol(self) -> None:
         problem = get_single_objective_benchmark_problem()
+        method = get_sobol_benchmark_method()

-        res = benchmark_replication(
-            problem=problem, method=get_sobol_benchmark_method(), seed=0
-        )
+        res = benchmark_replication(problem=problem, method=method, seed=0)

         self.assertEqual(
             problem.num_trials,
@@ -98,7 +102,51 @@ def test_replication_synthetic(self) -> None:

         self.assertTrue(np.all(res.score_trace <= 100))

-    def test_replication_moo(self) -> None:
+    @fast_botorch_optimize
+    def test_replication_mbm(self) -> None:
+        for method, problem in [
+            (
+                get_sobol_botorch_modular_fixed_noise_gp_qnei(),
+                get_single_objective_benchmark_problem(infer_noise=False, num_trials=6),
+            ),
+            (
+                get_sobol_botorch_modular_fixed_noise_gp_qnehvi(),
+                get_multi_objective_benchmark_problem(infer_noise=False, num_trials=6),
+            ),
+            (
+                get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp_qnei(),
+                get_single_objective_benchmark_problem(num_trials=6),
+            ),
+            (
+                get_sobol_botorch_modular_saas_fully_bayesian_single_task_gp_qnehvi(),
+                get_multi_objective_benchmark_problem(num_trials=6),
+            ),
+        ]:
+            with self.subTest(method=method, problem=problem):
+                res = benchmark_replication(problem=problem, method=method, seed=0)
+                self.assertEqual(
+                    problem.num_trials,
+                    len(not_none(res.experiment).trials),
+                )
+                self.assertTrue(np.all(res.score_trace <= 100))
+
+    @fast_botorch_optimize
+    def test_replication_mbm_default(self) -> None:
+        method = get_sobol_botorch_modular_default()
+        for problem in [
+            get_single_objective_benchmark_problem(infer_noise=False, num_trials=6),
+            get_multi_objective_benchmark_problem(infer_noise=False, num_trials=6),
+            get_single_objective_benchmark_problem(num_trials=6),
+            get_multi_objective_benchmark_problem(num_trials=6),
+        ]:
+            with self.subTest(problem=problem):
+                res = benchmark_replication(problem=problem, method=method, seed=0)
+                self.assertEqual(
+                    problem.num_trials,
+                    len(not_none(res.experiment).trials),
+                )
+
+    def test_replication_moo_sobol(self) -> None:
         problem = get_multi_objective_benchmark_problem()

         res = benchmark_replication(
@@ -139,13 +187,12 @@ def test_benchmark_one_method_problem(self) -> None:
     @fast_botorch_optimize
     def test_benchmark_multiple_problems_methods(self) -> None:
         aggs = benchmark_multiple_problems_methods(
-            problems=[get_single_objective_benchmark_problem()],
-            methods=[get_sobol_benchmark_method(), get_sobol_gpei_benchmark_method()],
+            problems=[get_single_objective_benchmark_problem(num_trials=6)],
+            methods=[get_sobol_benchmark_method(), get_gpei_default()],
             seeds=(0, 1),
         )

         self.assertEqual(len(aggs), 2)
-
         for agg in aggs:
             for col in ["mean", "P25", "P50", "P75"]:
                 self.assertTrue((agg.score_trace[col] <= 100).all())
@@ -157,17 +204,7 @@ def test_timeout(self) -> None:
             num_trials=1000,  # Unachievable num_trials
         )

-        generation_strategy = GenerationStrategy(
-            name="SOBOL+GPEI::default",
-            steps=[
-                GenerationStep(model=Models.SOBOL, num_trials=5, min_trials_observed=5),
-                GenerationStep(
-                    model=Models.GPEI,
-                    num_trials=-1,
-                    max_parallelism=1,
-                ),
-            ],
-        )
+        generation_strategy = get_gpei_default().generation_strategy

         method = BenchmarkMethod(
             name=generation_strategy.name,
diff --git a/ax/core/generator_run.py b/ax/core/generator_run.py
index 65cc5a09698..4ba6a3a33ef 100644
--- a/ax/core/generator_run.py
+++ b/ax/core/generator_run.py
@@ -146,7 +146,10 @@ def __init__(
         if weights is None:
             weights = [1.0 for i in range(len(arms))]
         if len(arms) != len(weights):
-            raise ValueError("Weights and arms must have the same length.")
+            raise ValueError(
+                "Weights and arms must have the same length. Arms have length "
+                f"{len(arms)}, weights have length {len(weights)}."
+            )
         if bridge_kwargs is not None or model_kwargs is not None:
             if model_key is None:
                 raise ValueError(
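Note: the generator_run.py change only enriches the error message with the mismatched lengths. A quick sketch of the resulting behavior, assuming Ax's public Arm/GeneratorRun API:

```python
# Hypothetical repro; not part of this diff.
from ax.core.arm import Arm
from ax.core.generator_run import GeneratorRun

arms = [Arm(parameters={"x": 0.0}), Arm(parameters={"x": 1.0})]
try:
    GeneratorRun(arms=arms, weights=[1.0])  # two arms, one weight
except ValueError as err:
    # Expected: "Weights and arms must have the same length. Arms have
    # length 2, weights have length 1."
    print(err)
```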
diff --git a/ax/models/torch/botorch_modular/acquisition.py b/ax/models/torch/botorch_modular/acquisition.py
index 61da3f5f378..5a077d45f2d 100644
--- a/ax/models/torch/botorch_modular/acquisition.py
+++ b/ax/models/torch/botorch_modular/acquisition.py
@@ -193,7 +193,7 @@ def __init__(
         ]

         # Subset model only to the outcomes we need for the optimization.
-        if self.options.get(Keys.SUBSET_MODEL, True):
+        if self.options.pop(Keys.SUBSET_MODEL, True):
             subset_model_results = subset_model(
                 model=primary_surrogate.model,
                 objective_weights=torch_opt_config.objective_weights,
diff --git a/ax/models/torch/botorch_modular/sebo.py b/ax/models/torch/botorch_modular/sebo.py
index 1431a799e2f..289040158ee 100644
--- a/ax/models/torch/botorch_modular/sebo.py
+++ b/ax/models/torch/botorch_modular/sebo.py
@@ -61,8 +61,8 @@ def __init__(

         tkwargs = {"dtype": surrogate.dtype, "device": surrogate.device}
         options = options or {}
-        self.penalty_name: str = options.get("penalty", "L0_norm")
-        self.target_point: Tensor = options.get("target_point", None)
+        self.penalty_name: str = options.pop("penalty", "L0_norm")
+        self.target_point: Tensor = options.pop("target_point", None)
         if self.target_point is None:
             raise ValueError("please provide target point.")
         self.target_point.to(**tkwargs)  # pyre-ignore
@@ -93,6 +93,8 @@ def __init__(
         )

         # instantiate botorch_acqf_class
+        if not issubclass(botorch_acqf_class, qExpectedHypervolumeImprovement):
+            raise ValueError("botorch_acqf_class must be qEHVI to use SEBO")
         super().__init__(
             surrogates={"sebo": surrogate_f},
             search_space_digest=search_space_digest,
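Note: switching options.get to options.pop in both files appears intended to consume these keys, so that the leftover options forwarded to the acquisition-function input constructor no longer contain entries it does not accept. A toy illustration of the difference:

```python
# Illustrative only; keys here stand in for Keys.SUBSET_MODEL,
# "penalty", and "target_point" in the diff above.
options = {"penalty": "L0_norm", "target_point": [0.0], "cache_root": False}

penalty_name = options.pop("penalty", "L0_norm")
target_point = options.pop("target_point", None)

# With `pop`, only unconsumed keys remain to be forwarded downstream;
# with `get`, the consumed keys would leak into downstream kwargs.
assert options == {"cache_root": False}
```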
diff --git a/ax/models/torch/tests/test_acquisition.py b/ax/models/torch/tests/test_acquisition.py
index b64e03d15a0..c81ce515bd6 100644
--- a/ax/models/torch/tests/test_acquisition.py
+++ b/ax/models/torch/tests/test_acquisition.py
@@ -20,7 +20,11 @@
 from ax.exceptions.core import AxWarning, SearchSpaceExhausted
 from ax.models.torch.botorch_modular.acquisition import Acquisition
 from ax.models.torch.botorch_modular.surrogate import Surrogate
-from ax.models.torch.utils import SubsetModelData
+from ax.models.torch.utils import (
+    _get_X_pending_and_observed,
+    subset_model,
+    SubsetModelData,
+)
 from ax.models.torch_base import TorchOptConfig
 from ax.utils.common.constants import Keys
 from ax.utils.common.testutils import TestCase
@@ -124,7 +128,7 @@ def setUp(self) -> None:
         )
         self.linear_constraints = None
         self.fixed_features = {1: 2.0}
-        self.options = {"best_f": 0.0, "cache_root": False, "prune_baseline": False}
+        self.options = {"cache_root": False, "prune_baseline": False}
         self.inequality_constraints = [
             (torch.tensor([0, 1], **tkwargs), torch.tensor([-1.0, 1.0], **tkwargs), 1)
         ]
@@ -159,41 +163,25 @@ def tearDown(self) -> None:
         # Avoid polluting the registry for other tests.
         ACQF_INPUT_CONSTRUCTOR_REGISTRY.pop(DummyAcquisitionFunction)

-    @mock.patch(f"{ACQUISITION_PATH}._get_X_pending_and_observed")
-    @mock.patch(
-        f"{ACQUISITION_PATH}.subset_model",
-        # pyre-fixme[6]: For 1st param expected `Model` but got `None`.
-        # pyre-fixme[6]: For 5th param expected `Tensor` but got `None`.
-        return_value=SubsetModelData(None, torch.ones(1), None, None, None),
-    )
-    @mock.patch(f"{ACQUISITION_PATH}.get_botorch_objective_and_transform")
-    @mock.patch(
-        f"{CURRENT_PATH}.Acquisition.compute_model_dependencies",
-        return_value={"current_value": 1.2},
-    )
-    @mock.patch(
-        f"{DummyAcquisitionFunction.__module__}.DummyAcquisitionFunction.__init__",
-        return_value=None,
-    )
-    def test_init(
-        self,
-        mock_botorch_acqf_class: Mock,
-        mock_compute_model_deps: Mock,
-        mock_get_objective_and_transform: Mock,
-        mock_subset_model: Mock,
-        mock_get_X: Mock,
-    ) -> None:
+    def test_init_raises_when_missing_acqf_cls(self) -> None:
         with self.assertRaisesRegex(TypeError, ".* missing .* 'botorch_acqf_class'"):
-            # pyre-fixme[20]: Argument `botorch_acqf_class` expected.
+            # pyre-ignore[20]: Argument `botorch_acqf_class` expected.
             Acquisition(
                 surrogates={"surrogate": self.surrogate},
                 search_space_digest=self.search_space_digest,
                 torch_opt_config=self.torch_opt_config,
             )

-        botorch_objective = LinearMCObjective(weights=torch.tensor([1.0]))
-        mock_get_objective_and_transform.return_value = (botorch_objective, None)
-        mock_get_X.return_value = (self.pending_observations[0], self.X[:1])
+    @mock.patch(
+        f"{ACQUISITION_PATH}._get_X_pending_and_observed",
+        wraps=_get_X_pending_and_observed,
+    )
+    @mock.patch(f"{ACQUISITION_PATH}.subset_model", wraps=subset_model)
+    def test_init(
+        self,
+        mock_subset_model: Mock,
+        mock_get_X: Mock,
+    ) -> None:
         acquisition = Acquisition(
             surrogates={"surrogate": self.surrogate},
             search_space_digest=self.search_space_digest,
@@ -224,10 +212,36 @@ def test_init(
             outcome_constraints=self.outcome_constraints,
             objective_thresholds=self.objective_thresholds,
         )
-        mock_subset_model.reset_mock()
-        mock_get_objective_and_transform.reset_mock()
-        self.mock_input_constructor.reset_mock()
-        mock_botorch_acqf_class.reset_mock()
+
+    @mock.patch(f"{ACQUISITION_PATH}._get_X_pending_and_observed")
+    @mock.patch(
+        f"{ACQUISITION_PATH}.subset_model",
+        # pyre-fixme[6]: For 1st param expected `Model` but got `None`.
+        # pyre-fixme[6]: For 5th param expected `Tensor` but got `None`.
+        return_value=SubsetModelData(None, torch.ones(1), None, None, None),
+    )
+    @mock.patch(
+        f"{ACQUISITION_PATH}.get_botorch_objective_and_transform",
+    )
+    @mock.patch(
+        f"{CURRENT_PATH}.Acquisition.compute_model_dependencies",
+        return_value={"eta": 0.1},
+    )
+    @mock.patch(
+        f"{DummyAcquisitionFunction.__module__}.DummyAcquisitionFunction.__init__",
+        return_value=None,
+    )
+    def test_init_with_subset_model_false(
+        self,
+        mock_botorch_acqf_class: Mock,
+        mock_compute_model_deps: Mock,
+        mock_get_objective_and_transform: Mock,
+        mock_subset_model: Mock,
+        mock_get_X: Mock,
+    ) -> None:
+        botorch_objective = LinearMCObjective(weights=torch.tensor([1.0]))
+        mock_get_objective_and_transform.return_value = (botorch_objective, None)
+        mock_get_X.return_value = (self.pending_observations[0], self.X[:1])
         self.options[Keys.SUBSET_MODEL] = False
         with mock.patch(
             f"{ACQUISITION_PATH}.get_outcome_constraint_transforms",
@@ -249,7 +263,7 @@ def test_init(
         self.assertIs(ckwargs["outcome_constraints"], self.outcome_constraints)
         self.assertTrue(torch.equal(ckwargs["X_observed"], self.X[:1]))
         # Check final `acqf` creation
-        model_deps = {Keys.CURRENT_VALUE: 1.2}
+        model_deps = {"eta": 0.1}
         self.mock_input_constructor.assert_called_once()
         mock_botorch_acqf_class.assert_called_once()
         _, ckwargs = self.mock_input_constructor.call_args
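Note: the reworked test_init relies on mock.patch(..., wraps=...): the patched attribute still delegates to the real function, but the Mock records calls for assertions, so the test now exercises the real _get_X_pending_and_observed and subset_model code paths instead of stubbed return values. A self-contained sketch of the pattern:

```python
# Minimal demonstration of mock.patch(..., wraps=...); the real
# function still runs, but calls are recorded for assertions.
from unittest import mock


def double(x: int) -> int:
    return 2 * x


with mock.patch(f"{__name__}.double", wraps=double) as mock_double:
    assert double(3) == 6  # real behavior preserved
    mock_double.assert_called_once_with(3)
```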
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index f7b3cb2e07e..d15c404ddfc 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -30,15 +30,26 @@ def get_benchmark_problem() -> BenchmarkProblem:
     )


-def get_single_objective_benchmark_problem() -> SingleObjectiveBenchmarkProblem:
+def get_single_objective_benchmark_problem(
+    infer_noise: bool = True,
+    num_trials: int = 4,
+) -> SingleObjectiveBenchmarkProblem:
     return SingleObjectiveBenchmarkProblem.from_botorch_synthetic(
-        test_problem_class=Branin, test_problem_kwargs={}, num_trials=4
+        test_problem_class=Branin,
+        test_problem_kwargs={},
+        num_trials=num_trials,
+        infer_noise=infer_noise,
     )


-def get_multi_objective_benchmark_problem() -> MultiObjectiveBenchmarkProblem:
+def get_multi_objective_benchmark_problem(
+    infer_noise: bool = True, num_trials: int = 4
+) -> MultiObjectiveBenchmarkProblem:
     return MultiObjectiveBenchmarkProblem.from_botorch_multi_objective(
-        test_problem_class=BraninCurrin, test_problem_kwargs={}, num_trials=4
+        test_problem_class=BraninCurrin,
+        test_problem_kwargs={},
+        num_trials=num_trials,
+        infer_noise=infer_noise,
     )

@@ -67,6 +78,8 @@ def get_sobol_gpei_benchmark_method() -> BenchmarkMethod:
             num_trials=-1,
             model_kwargs={
                 "surrogate": Surrogate(SingleTaskGP),
+                # TODO: tests should better reflect defaults and not
+                # re-implement this logic.
                 "botorch_acqf_class": qNoisyExpectedImprovement,
             },
             model_gen_kwargs={
@@ -77,8 +90,6 @@
                 },
                 Keys.ACQF_KWARGS: {
                     "prune_baseline": True,
-                    "qmc": True,
-                    "mc_samples": 512,
                 },
             }
         },
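Note: example usage of the now-parametrized stubs, as exercised by the new tests above (a sketch assuming the test helpers on this branch; the defaults preserve the old behavior of four trials with inferred noise):

```python
from ax.utils.testing.benchmark_stubs import (
    get_multi_objective_benchmark_problem,
    get_single_objective_benchmark_problem,
)

# Fixed-noise variants (noise level known, not inferred), short runs:
single = get_single_objective_benchmark_problem(infer_noise=False, num_trials=6)
moo = get_multi_objective_benchmark_problem(infer_noise=False, num_trials=6)

# Defaults match the previous hard-coded stubs: infer_noise=True, num_trials=4.
default_problem = get_single_objective_benchmark_problem()
```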