add input constructor for qEUBO
Summary: As titled

Differential Revision: D57130906
ItsMrLin authored and facebook-github-bot committed May 9, 2024
1 parent 07f12b7 commit 4fbf1f0
Showing 2 changed files with 90 additions and 23 deletions.
56 changes: 55 additions & 1 deletion botorch/acquisition/input_constructors.py
@@ -83,7 +83,10 @@
     MCAcquisitionObjective,
     PosteriorTransform,
 )
-from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
+from botorch.acquisition.preference import (
+    AnalyticExpectedUtilityOfBestOption,
+    qExpectedUtilityOfBestOption,
+)
 from botorch.acquisition.risk_measures import RiskMeasureMCObjective
 from botorch.acquisition.utils import (
     compute_best_feasible_objective,
@@ -1328,6 +1331,57 @@ def construct_inputs_analytic_eubo(
     }


+@acqf_input_constructor(qExpectedUtilityOfBestOption)
+def construct_inputs_qeubo(
+    model: Model,
+    pref_model: Optional[Model] = None,
+    sample_multiplier: Optional[float] = 1.0,
+    **kwargs,
+) -> Dict[str, Any]:
+    r"""Construct kwargs for the `qExpectedUtilityOfBestOption` (qEUBO) constructor.
+
+    `model` is the primary model defined over the parameter space. It can be the
+    outcome model in BOPE or the preference model in PBO. `pref_model` is the model
+    defined over the outcome/metric space, which is typically the preference model
+    in BOPE.
+
+    If both model and pref_model exist, we are performing Bayesian Optimization with
+    Preference Exploration (BOPE). When pref_model is None, we are performing
+    preferential BO (PBO).
+
+    Args:
+        model: The outcome model to be used in the acquisition function in BOPE
+            when pref_model exists; otherwise, model is the preference model and
+            we are doing Preferential BO.
+        pref_model: The preference model to be used in preference exploration as in
+            BOPE; if None, we are doing PBO and model is the preference model.
+        sample_multiplier: The scale factor for the single-sample model.
+
+    Returns:
+        A dict mapping kwarg names of the constructor to values.
+    """
+    if pref_model is None:
+        return {
+            "pref_model": model,
+            "outcome_model": None,
+            **kwargs,
+        }
+    else:
+        # Construct a deterministic fixed single sample model from `model`,
+        # i.e., performing EUBO-zeta by default as described in
+        # https://arxiv.org/abs/2203.11382.
+        # Use pref_model.dim instead of model.num_outputs here, as an MTGP's
+        # num_outputs could be tied to the number of tasks.
+        w = torch.randn(pref_model.dim) * sample_multiplier
+        one_sample_outcome_model = FixedSingleSampleModel(model=model, w=w)
+
+        return {
+            "pref_model": pref_model,
+            "outcome_model": one_sample_outcome_model,
+            **kwargs,
+        }


 def get_best_f_analytic(
     training_data: MaybeDict[SupervisedDataset],
     posterior_transform: Optional[PosteriorTransform] = None,
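Aside (not part of the commit): a minimal sketch of how the new constructor might be exercised in both regimes. The `SingleTaskGP`/`PairwiseGP` setup, tensor shapes, and variable names are illustrative assumptions, not code from this diff.

import torch
from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.preference import qExpectedUtilityOfBestOption
from botorch.models import PairwiseGP, SingleTaskGP

# Outcome model over a 3-dim parameter space with 2 outcomes (the BOPE setting).
train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = torch.rand(10, 2, dtype=torch.double)
outcome_model = SingleTaskGP(train_X, train_Y)

# Preference model over the 2-dim outcome space, fit to pairwise comparisons
# (each row lists the winning index first).
outcomes = torch.rand(6, 2, dtype=torch.double)
comparisons = torch.tensor([[0, 1], [2, 3], [4, 5]])
pref_model = PairwiseGP(outcomes, comparisons)

c = get_acqf_input_constructor(qExpectedUtilityOfBestOption)

# BOPE: both models are given; the constructor wraps the outcome model in a
# FixedSingleSampleModel with w drawn over pref_model.dim outcomes.
kwargs = c(model=outcome_model, pref_model=pref_model)
acqf = qExpectedUtilityOfBestOption(**kwargs)

# PBO: pref_model omitted; `model` itself is treated as the preference model.
kwargs_pbo = c(model=pref_model)
acqf_pbo = qExpectedUtilityOfBestOption(**kwargs_pbo)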
57 changes: 35 additions & 22 deletions test/acquisition/test_input_constructors.py
@@ -78,7 +78,10 @@
     LinearMCObjective,
     ScalarizedPosteriorTransform,
 )
-from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
+from botorch.acquisition.preference import (
+    AnalyticExpectedUtilityOfBestOption,
+    qExpectedUtilityOfBestOption,
+)
 from botorch.acquisition.utils import (
     expand_trace_observations,
     project_to_target_fidelity,
@@ -393,7 +396,10 @@ def test_construct_inputs_noisy_ei(self) -> None:
         with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
             c(model=mock_model, training_data=self.multiX_multiY)

-    def test_construct_inputs_constrained_analytic_eubo(self) -> None:
+    def test_construct_inputs_eubo(self) -> None:
+        """Test input constructors for analytic EUBO and MC qEUBO."""
+
+        # Set up
         # create dummy modellist gp
         n = 10
         X = torch.linspace(0, 0.95, n).unsqueeze(dim=-1)
@@ -409,21 +415,40 @@ def test_construct_inputs_constrained_analytic_eubo(self) -> None:
         )
         self.assertEqual(model.num_outputs, 6)

-        c = get_acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
         mock_pref_model = self.mock_model
         # assume we only have a preference model with 2 outcomes
         mock_pref_model.dim = 2
         mock_pref_model.datapoints = torch.tensor([])

-        # test basic construction
-        kwargs = c(model=model, pref_model=mock_pref_model)
-        self.assertIsInstance(kwargs["outcome_model"], FixedSingleSampleModel)
-        self.assertIs(kwargs["pref_model"], mock_pref_model)
-        self.assertIsNone(kwargs["previous_winner"])
-        # test instantiation
-        AnalyticExpectedUtilityOfBestOption(**kwargs)
+        for eubo_acqf in (
+            AnalyticExpectedUtilityOfBestOption,
+            qExpectedUtilityOfBestOption,
+        ):
+            c = get_acqf_input_constructor(eubo_acqf)
+
+            # test basic construction
+            kwargs = c(model=model, pref_model=mock_pref_model)
+            self.assertIsInstance(kwargs["outcome_model"], FixedSingleSampleModel)
+            self.assertIs(kwargs["pref_model"], mock_pref_model)
+            if eubo_acqf is AnalyticExpectedUtilityOfBestOption:
+                self.assertIsNone(kwargs["previous_winner"])
+            # test instantiation
+            eubo_acqf(**kwargs)
+
+            # test sample_multiplier
+            torch.manual_seed(123)
+            kwargs = c(
+                model=model,
+                pref_model=mock_pref_model,
+                sample_multiplier=1e6,
+            )
+            # w by default is drawn from a std normal and very unlikely to be > 10.0
+            self.assertTrue((kwargs["outcome_model"].w.abs() > 10.0).all())
+            # check that w's dimension agrees with the preference model
+            self.assertEqual(kwargs["outcome_model"].w.shape[-1], mock_pref_model.dim)

         # test previous_winner
+        c = get_acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
         previous_winner = torch.randn(mock_pref_model.dim)
         kwargs = c(
             model=model,
@@ -434,18 +459,6 @@ def test_construct_inputs_constrained_analytic_eubo(self) -> None:
         # test instantiation
         AnalyticExpectedUtilityOfBestOption(**kwargs)
-
-        # test sample_multiplier
-        torch.manual_seed(123)
-        kwargs = c(
-            model=model,
-            pref_model=mock_pref_model,
-            sample_multiplier=1e6,
-        )
-        # w by default is drawn from std normal and very unlikely to be > 10.0
-        self.assertTrue((kwargs["outcome_model"].w.abs() > 10.0).all())
-        # Check w has the right dimension that agrees with the preference model
-        self.assertEqual(kwargs["outcome_model"].w.shape[-1], mock_pref_model.dim)


 class TestMCAcquisitionFunctionInputConstructors(InputConstructorBaseTestCase):
     def test_construct_inputs_mc_base(self) -> None:
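Aside (not part of the commit): the w-scaling behavior the test asserts can be reproduced directly against `FixedSingleSampleModel`; the toy `SingleTaskGP` and the shapes below are illustrative assumptions.

import torch
from botorch.models import SingleTaskGP
from botorch.models.deterministic import FixedSingleSampleModel

model = SingleTaskGP(
    torch.rand(8, 3, dtype=torch.double),
    torch.rand(8, 2, dtype=torch.double),
)

# w plays the role of the fixed standard-normal draw; scaling it by a large
# sample_multiplier (1e6 in the test) pushes every entry far beyond 10.0.
w = torch.randn(2, dtype=torch.double) * 1e6
one_sample_model = FixedSingleSampleModel(model=model, w=w)
assert (one_sample_model.w.abs() > 10.0).all()

# The wrapped model is deterministic: it returns the posterior mean plus the
# w-weighted posterior standard deviation, i.e. a fixed single-sample proxy.
vals = one_sample_model(torch.rand(4, 3, dtype=torch.double))
print(vals.shape)  # torch.Size([4, 2])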
