fix constraint handling in single objective MBM
Summary: Currently, constraints are not used in single-objective acquisition functions (AFs) in MBM due to a name mismatch: MBM passes the raw `outcome_constraints` tensor pair, while the BoTorch acquisition input constructors expect `constraints`, a list of callables produced by `get_outcome_constraint_transforms`.
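A minimal sketch of the conversion this fix applies (the tensor values are illustrative, not taken from the diff): `get_outcome_constraint_transforms` turns the Ax-style `(A, b)` tensor pair into the callables that BoTorch's input constructors consume.

import torch
from botorch.utils.constraints import get_outcome_constraint_transforms

# Ax-style outcome constraints A @ y <= b; here "second outcome <= 0.5".
A = torch.tensor([[0.0, 1.0]])
b = torch.tensor([[0.5]])

# Each returned callable maps a `... x m` tensor of outcome samples to
# constraint slack, with negative values meaning feasible.
constraints = get_outcome_constraint_transforms(outcome_constraints=(A, b))

Y = torch.tensor([[1.0, 0.3], [1.0, 0.9]])  # two samples of m=2 outcomes
print(constraints[0](Y))  # tensor([-0.2000,  0.4000]): first sample feasible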

Reviewed By: SebastianAment

Differential Revision: D48176978

fbshipit-source-id: 2982aeef5d10d36c52b8870c26cfe63ced3f3afd
sdaulton authored and facebook-github-bot committed Aug 9, 2023
1 parent 22a64fb commit ecbb88f
Showing 3 changed files with 163 additions and 58 deletions.
5 changes: 4 additions & 1 deletion ax/models/torch/botorch_modular/acquisition.py
@@ -48,6 +48,7 @@
optimize_acqf_discrete_local_search,
optimize_acqf_mixed,
)
from botorch.utils.constraints import get_outcome_constraint_transforms
from torch import Tensor


@@ -277,7 +278,9 @@ def __init__(
"X_baseline": unique_Xs_observed,
"X_pending": unique_Xs_pending,
"objective_thresholds": objective_thresholds,
"outcome_constraints": outcome_constraints,
"constraints": get_outcome_constraint_transforms(
outcome_constraints=outcome_constraints
),
"target_fidelities": search_space_digest.target_fidelities,
"bounds": search_space_digest.bounds,
**acqf_model_kwarg,
67 changes: 42 additions & 25 deletions ax/models/torch/tests/test_acquisition.py
@@ -38,6 +38,7 @@
)
from botorch.acquisition.objective import LinearMCObjective
from botorch.models.gp_regression import SingleTaskGP
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.testing import MockPosterior
from torch import Tensor
@@ -118,6 +119,9 @@ def setUp(self) -> None:
torch.tensor([[1.0]], **tkwargs),
torch.tensor([[0.5]], **tkwargs),
)
self.constraints = get_outcome_constraint_transforms(
outcome_constraints=self.outcome_constraints
)
self.linear_constraints = None
self.fixed_features = {1: 2.0}
self.options = {"best_f": 0.0, "cache_root": False, "prune_baseline": False}
@@ -225,31 +229,44 @@ def test_init
self.mock_input_constructor.reset_mock()
mock_botorch_acqf_class.reset_mock()
self.options[Keys.SUBSET_MODEL] = False
acquisition = Acquisition(
surrogates={"surrogate": self.surrogate},
search_space_digest=self.search_space_digest,
torch_opt_config=self.torch_opt_config,
botorch_acqf_class=self.botorch_acqf_class,
options=self.options,
)
mock_subset_model.assert_not_called()
# Check `get_botorch_objective_and_transform` kwargs
mock_get_objective_and_transform.assert_called_once()
_, ckwargs = mock_get_objective_and_transform.call_args
self.assertIs(ckwargs["model"], acquisition.surrogates["surrogate"].model)
self.assertIs(ckwargs["objective_weights"], self.objective_weights)
self.assertIs(ckwargs["outcome_constraints"], self.outcome_constraints)
self.assertTrue(torch.equal(ckwargs["X_observed"], self.X[:1]))
# Check final `acqf` creation
model_deps = {Keys.CURRENT_VALUE: 1.2}
self.mock_input_constructor.assert_called_once()
mock_botorch_acqf_class.assert_called_once()
_, ckwargs = self.mock_input_constructor.call_args
self.assertIs(ckwargs["model"], acquisition.surrogates["surrogate"].model)
self.assertIs(ckwargs["objective"], botorch_objective)
self.assertTrue(torch.equal(ckwargs["X_pending"], self.pending_observations[0]))
for k, v in chain(self.options.items(), model_deps.items()):
self.assertEqual(ckwargs[k], v)
with mock.patch(
f"{ACQUISITION_PATH}.get_outcome_constraint_transforms",
return_value=self.constraints,
) as mock_get_outcome_constraint_transforms:
acquisition = Acquisition(
surrogates={"surrogate": self.surrogate},
search_space_digest=self.search_space_digest,
torch_opt_config=self.torch_opt_config,
botorch_acqf_class=self.botorch_acqf_class,
options=self.options,
)
mock_subset_model.assert_not_called()
# Check `get_botorch_objective_and_transform` kwargs
mock_get_objective_and_transform.assert_called_once()
_, ckwargs = mock_get_objective_and_transform.call_args
self.assertIs(ckwargs["model"], acquisition.surrogates["surrogate"].model)
self.assertIs(ckwargs["objective_weights"], self.objective_weights)
self.assertIs(ckwargs["outcome_constraints"], self.outcome_constraints)
self.assertTrue(torch.equal(ckwargs["X_observed"], self.X[:1]))
# Check final `acqf` creation
model_deps = {Keys.CURRENT_VALUE: 1.2}
self.mock_input_constructor.assert_called_once()
mock_botorch_acqf_class.assert_called_once()
_, ckwargs = self.mock_input_constructor.call_args
self.assertIs(ckwargs["model"], acquisition.surrogates["surrogate"].model)
self.assertIs(ckwargs["objective"], botorch_objective)
self.assertTrue(
torch.equal(ckwargs["X_pending"], self.pending_observations[0])
)
for k, v in chain(self.options.items(), model_deps.items()):
self.assertEqual(ckwargs[k], v)
self.assertIs(
ckwargs["constraints"],
self.constraints,
)
mock_get_outcome_constraint_transforms.assert_called_once_with(
outcome_constraints=self.outcome_constraints
)

@mock.patch(f"{ACQUISITION_PATH}.optimize_acqf")
def test_optimize(self, mock_optimize_acqf: Mock) -> None:
149 changes: 117 additions & 32 deletions ax/models/torch/tests/test_model.py
@@ -19,7 +19,10 @@
from ax.models.torch.botorch_modular.acquisition import Acquisition
from ax.models.torch.botorch_modular.model import BoTorchModel, SurrogateSpec
from ax.models.torch.botorch_modular.surrogate import Surrogate
from ax.models.torch.botorch_modular.utils import choose_model_class
from ax.models.torch.botorch_modular.utils import (
choose_model_class,
construct_acquisition_and_optimizer_options,
)
from ax.models.torch.utils import _filter_X_observed
from ax.models.torch_base import TorchOptConfig
from ax.utils.common.constants import Keys
@@ -37,11 +40,13 @@
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.objective import WeightedMCMultiOutputObjective
from botorch.acquisition.objective import GenericMCObjective
from botorch.models.fully_bayesian import SaasFullyBayesianSingleTaskGP
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.gp_regression_fidelity import FixedNoiseMultiFidelityGP
from botorch.models.model import ModelList
from botorch.sampling.normal import SobolQMCNormalSampler
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.datasets import FixedNoiseDataset, SupervisedDataset
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood

@@ -106,11 +111,18 @@ def setUp(self) -> None:
self.optimizer_options = {Keys.NUM_RESTARTS: 40, Keys.RAW_SAMPLES: 1024}
self.model_gen_options = {Keys.OPTIMIZER_KWARGS: self.optimizer_options}
self.objective_weights = torch.tensor([1.0], **tkwargs)
self.outcome_constraints = (
torch.tensor([[1.0]], **tkwargs),
torch.tensor([[-5.0]], **tkwargs),
)
self.moo_objective_weights = torch.tensor([1.0, 1.5, 0.0], **tkwargs)
self.moo_objective_thresholds = torch.tensor(
[0.5, 1.5, float("nan")], **tkwargs
)
self.outcome_constraints = None
self.moo_outcome_constraints = (
torch.tensor([[1.0, 0.0, 0.0]], **tkwargs),
torch.tensor([[-5.0]], **tkwargs),
)
self.linear_constraints = None
self.fixed_features = None
self.pending_observations = None
@@ -136,6 +148,7 @@ def setUp(self) -> None:
self.torch_opt_config,
objective_weights=self.moo_objective_weights,
objective_thresholds=self.moo_objective_thresholds,
outcome_constraints=self.moo_outcome_constraints,
)

def test_init(self) -> None:
@@ -491,12 +504,9 @@ def test_cross_validate(self, mock_fit: Mock) -> None:

@mock.patch(
f"{MODEL_PATH}.construct_acquisition_and_optimizer_options",
return_value=(
ACQ_OPTIONS,
{"num_restarts": 40, "raw_samples": 1024},
),
wraps=construct_acquisition_and_optimizer_options,
)
@mock.patch(f"{CURRENT_PATH}.Acquisition")
@mock.patch(f"{CURRENT_PATH}.Acquisition.optimize")
@mock.patch(f"{MODEL_PATH}.get_rounding_func", return_value="func")
@mock.patch(f"{MODEL_PATH}._to_inequality_constraints", return_value=[])
@mock.patch(
@@ -507,10 +517,18 @@ def test_gen(
mock_choose_botorch_acqf_class: Mock,
mock_inequality_constraints: Mock,
mock_rounding: Mock,
mock_acquisition: Mock,
mock_optimize: Mock,
mock_construct_options: Mock,
) -> None:
mock_acquisition.return_value.optimize.return_value = (
qEI_input_constructor = get_acqf_input_constructor(qExpectedImprovement)
mock_input_constructor = mock.MagicMock(
qEI_input_constructor, side_effect=qEI_input_constructor
)
_register_acqf_input_constructor(
acqf_cls=qExpectedImprovement,
input_constructor=mock_input_constructor,
)
mock_optimize.return_value = (
torch.tensor([1.0]),
torch.tensor([2.0]),
)
@@ -534,10 +552,60 @@
)
# Add search space digest reference to make the model think it's been fit
model._search_space_digest = self.mf_search_space_digest
model.gen(
n=1,
search_space_digest=self.mf_search_space_digest,
torch_opt_config=self.torch_opt_config,
with mock.patch.object(
BoTorchModel,
"_instantiate_acquisition",
wraps=model._instantiate_acquisition,
) as mock_init_acqf:
model.gen(
n=1,
search_space_digest=self.mf_search_space_digest,
torch_opt_config=self.torch_opt_config,
)
# Assert acquisition initialized with expected arguments
mock_init_acqf.assert_called_once_with(
search_space_digest=self.mf_search_space_digest,
torch_opt_config=self.torch_opt_config,
acq_options=self.acquisition_options,
)
ckwargs = mock_input_constructor.call_args[1]
mock_input_constructor.assert_called_once()
m = ckwargs["model"]
self.assertIsInstance(m, SingleTaskGP)
self.assertEqual(m.num_outputs, 1)
training_data = ckwargs["training_data"]
self.assertIsInstance(training_data, SupervisedDataset)
self.assertTrue(torch.equal(training_data.X(), self.Xs[0]))
self.assertTrue(
torch.equal(
training_data.Y(),
torch.cat([ds.Y() for ds in self.block_design_training_data], dim=-1),
)
)
self.assertIsNotNone(ckwargs["constraints"])

self.assertIsNone(
ckwargs["X_pending"],
)
self.assertIsInstance(
ckwargs.get("objective"),
GenericMCObjective,
)
expected_X_baseline = _filter_X_observed(
Xs=[dataset.X() for dataset in self.block_design_training_data],
objective_weights=self.objective_weights,
outcome_constraints=self.outcome_constraints,
bounds=self.search_space_digest.bounds,
linear_constraints=self.linear_constraints,
fixed_features=self.fixed_features,
)
self.assertTrue(
torch.equal(
ckwargs.get("X_baseline"),
# pyre-fixme[6]: For 2nd param expected `Tensor` but got
# `Optional[Tensor]`.
expected_X_baseline,
)
)

# Assert `construct_acquisition_and_optimizer_options` called with kwargs
@@ -548,16 +616,9 @@ def test_gen(
# Assert `choose_botorch_acqf_class` is called
mock_choose_botorch_acqf_class.assert_called_once()
self.assertEqual(model._botorch_acqf_class, qExpectedImprovement)
# Assert `acquisition_class` called with kwargs
mock_acquisition.assert_called_with(
surrogates={Keys.ONLY_SURROGATE: self.surrogate},
botorch_acqf_class=model.botorch_acqf_class,
search_space_digest=self.mf_search_space_digest,
torch_opt_config=self.torch_opt_config,
options=self.acquisition_options,
)

# Assert `optimize` called with kwargs
mock_acquisition.return_value.optimize.assert_called_with(
mock_optimize.assert_called_with(
n=1,
search_space_digest=self.mf_search_space_digest,
inequality_constraints=[],
@@ -566,6 +627,11 @@
optimizer_options=self.optimizer_options,
)

_register_acqf_input_constructor(
acqf_cls=qExpectedImprovement,
input_constructor=qEI_input_constructor,
)

def test_feature_importances(self) -> None:
for botorch_model_class in [SingleTaskGP, SaasFullyBayesianSingleTaskGP]:
surrogate = Surrogate(botorch_model_class=botorch_model_class)
@@ -813,11 +879,29 @@ def test_MOO(self, _) -> None:
self.assertIsInstance(
model.surrogates[Keys.AUTOSET_SURROGATE].model, FixedNoiseGP
)
gen_results = model.gen(
n=1,
search_space_digest=self.mf_search_space_digest,
torch_opt_config=self.moo_torch_opt_config,
subset_outcome_constraints = (
# model is subset since last output is not used
self.moo_outcome_constraints[0][:, :2],
self.moo_outcome_constraints[1],
)
constraints = get_outcome_constraint_transforms(
outcome_constraints=subset_outcome_constraints,
)
with mock.patch(
f"{ACQUISITION_PATH}.get_outcome_constraint_transforms",
# Return the precomputed transforms for the subsetted constraints.
return_value=constraints,
) as mock_get_outcome_constraint_transforms:
gen_results = model.gen(
n=1,
search_space_digest=self.mf_search_space_digest,
torch_opt_config=self.moo_torch_opt_config,
)
mock_get_outcome_constraint_transforms.assert_called_once()
ckwargs = mock_get_outcome_constraint_transforms.call_args[1]
oc = ckwargs["outcome_constraints"]
self.assertTrue(torch.equal(oc[0], subset_outcome_constraints[0]))
self.assertTrue(torch.equal(oc[1], subset_outcome_constraints[1]))
ckwargs = mock_input_constructor.call_args[1]
self.assertIs(model.botorch_acqf_class, qNoisyExpectedHypervolumeImprovement)
mock_input_constructor.assert_called_once()
@@ -844,9 +928,8 @@ def test_MOO(self, _) -> None:
ckwargs["objective_thresholds"], self.moo_objective_thresholds[:2]
)
)
self.assertIsNone(
ckwargs["outcome_constraints"],
)
self.assertIs(ckwargs["constraints"], constraints)

self.assertIsNone(
ckwargs["X_pending"],
)
@@ -860,21 +943,21 @@
)
self.assertTrue(
torch.equal(
mock_input_constructor.call_args[1].get("objective").weights,
ckwargs.get("objective").weights,
self.moo_objective_weights[:2],
)
)
expected_X_baseline = _filter_X_observed(
Xs=[dataset.X() for dataset in self.moo_training_data],
objective_weights=self.moo_objective_weights,
outcome_constraints=self.outcome_constraints,
outcome_constraints=self.moo_outcome_constraints,
bounds=self.search_space_digest.bounds,
linear_constraints=self.linear_constraints,
fixed_features=self.fixed_features,
)
self.assertTrue(
torch.equal(
mock_input_constructor.call_args[1].get("X_baseline"),
ckwargs.get("X_baseline"),
# pyre-fixme[6]: For 2nd param expected `Tensor` but got
# `Optional[Tensor]`.
expected_X_baseline,
@@ -936,6 +1019,8 @@ def test_MOO(self, _) -> None:
self.assertTrue(torch.equal(obj_t[:2], torch.tensor([9.9, 3.3])))
self.assertTrue(np.isnan(obj_t[2].item()))

# test outcome constraints

# Avoid polluting the registry for other tests; re-register correct input
# constructor for qNEHVI.
_register_acqf_input_constructor(
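The test changes above share one pattern: wrap the real BoTorch acqf input constructor in a MagicMock so the kwargs MBM forwards (including the new `constraints` entry) can be asserted on, then re-register the original. A condensed sketch of that pattern, assuming both registry helpers live in `botorch.acquisition.input_constructors` as in current BoTorch (the assertions are illustrative):

from unittest import mock
from botorch.acquisition.input_constructors import (
    _register_acqf_input_constructor,
    get_acqf_input_constructor,
)
from botorch.acquisition.monte_carlo import qExpectedImprovement

# Wrap the real constructor; `side_effect` preserves the real behavior
# while the mock records every call for later assertions.
qei_input_constructor = get_acqf_input_constructor(qExpectedImprovement)
mock_input_constructor = mock.MagicMock(
    qei_input_constructor, side_effect=qei_input_constructor
)
_register_acqf_input_constructor(
    acqf_cls=qExpectedImprovement, input_constructor=mock_input_constructor
)
try:
    ...  # exercise model.gen(...) / Acquisition(...) here
    # ckwargs = mock_input_constructor.call_args[1]
    # assert ckwargs["constraints"] is not None
finally:
    # Re-register the original constructor so other tests see a clean registry.
    _register_acqf_input_constructor(
        acqf_cls=qExpectedImprovement, input_constructor=qei_input_constructor
    )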
