
Commit f561106

Parameterization type fixes; use parameters instead of arms where appropriate

Summary:
Sometimes benchmarking code constructs an Arm from parameters just so the Arm can be passed to a function that expects an Arm but only uses its parameters. This is silly; it's better to have the function accept the parameters directly.
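
As a minimal before/after sketch (`run_benchmark_*` and the float-summing body are hypothetical stand-ins, not code from this diff):

```python
from ax.core.arm import Arm
from ax.core.types import TParameterization

# Before: the function demands an Arm but only ever reads arm.parameters,
# so callers build a throwaway Arm around parameters they already have.
def run_benchmark_before(arm: Arm) -> float:
    return sum(v for v in arm.parameters.values() if isinstance(v, float))

params: TParameterization = {"x0": 0.5, "x1": 1.0}
run_benchmark_before(Arm(parameters=params))  # pointless wrapper

# After: the function accepts the parameterization directly.
def run_benchmark_after(params: TParameterization) -> float:
    return sum(v for v in params.values() if isinstance(v, float))

run_benchmark_after(params)  # no Arm needed
```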

Also updated some method signatures to use `Mapping` to indicate that they do not mutate the parameterization. Unfortunately, this creates the need for Pyre-fixmes when such a `Mapping` is passed to a function or class, such as `Arm`, that does not annotate its arguments as immutable.
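
A sketch of the typing issue (`evaluate` is a hypothetical function; this assumes, as the fixmes in the diff below indicate, that `Arm` annotates `parameters` as a plain mutable `dict`):

```python
from collections.abc import Mapping

from ax.core.arm import Arm
from ax.core.types import TParamValue

def evaluate(params: Mapping[str, TParamValue]) -> None:
    # `Mapping` has no `__setitem__`, so this signature documents, and the
    # type checker enforces, that `params` is not mutated here.

    # But `dict` is a subtype of `Mapping`, not the other way around, so
    # passing this read-only view to a callee annotated with a mutable
    # `dict` is rejected even if the callee never actually mutates it.
    # pyre-fixme[6]: Expected a `dict` but got `Mapping[str, TParamValue]`.
    Arm(parameters=params)
```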

Differential Revision: D63327381
esantorella authored and facebook-github-bot committed Sep 24, 2024
1 parent 97668f7 commit f561106
Showing 6 changed files with 29 additions and 28 deletions.
ax/benchmark/problems/synthetic/hss/jenatton.py (5 changes: 3 additions & 2 deletions)
@@ -5,6 +5,7 @@
 
 # pyre-strict
 
+from collections.abc import Mapping
 from dataclasses import dataclass
 from typing import Optional
 
@@ -19,7 +20,6 @@
 from ax.core.optimization_config import OptimizationConfig
 from ax.core.parameter import ChoiceParameter, ParameterType, RangeParameter
 from ax.core.search_space import HierarchicalSearchSpace
-from ax.core.types import TParameterization
 from pyre_extensions import none_throws
 
 
@@ -60,7 +60,8 @@ class Jenatton(ParamBasedTestProblem):
     optimal_value: float = 0.1
     _is_constrained: bool = False
 
-    def evaluate_true(self, params: TParameterization) -> torch.Tensor:
+    # pyre-fixme[14]: Inconsistent override
+    def evaluate_true(self, params: Mapping[str, float | int | None]) -> torch.Tensor:
         # pyre-fixme: Incompatible parameter type [6]: In call
         # `jenatton_test_function`, for 1st positional argument, expected
         # `Optional[float]` but got `Union[None, bool, float, int, str]`.
ax/benchmark/runners/base.py (6 changes: 3 additions & 3 deletions)
@@ -59,7 +59,7 @@ def __init__(self, search_space_digest: SearchSpaceDigest | None = None) -> None
         else:
             self.target_fidelity_and_task = {}
 
-    def get_Y_true(self, arm: Arm) -> Tensor:
+    def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
         """
         Return the ground truth values for a given arm.
@@ -79,7 +79,7 @@ def evaluate_oracle(self, parameters: Mapping[str, TParamValue]) -> ndarray:
         at the true utility function (which would be unobserved in reality).
         """
         params = {**parameters, **self.target_fidelity_and_task}
-        return self.get_Y_true(arm=Arm(parameters=params)).numpy()
+        return self.get_Y_true(params=params).numpy()
 
     @abstractmethod
     def get_noise_stds(self) -> Union[None, float, dict[str, float]]:
@@ -134,7 +134,7 @@ def run(self, trial: BaseTrial) -> dict[str, Any]:
 
         for arm in trial.arms:
             # Case where we do have a ground truth
-            Y_true = self.get_Y_true(arm)
+            Y_true = self.get_Y_true(arm.parameters)
             if noise_stds is None:
                 # No noise, so just return the true outcome.
                 Ystds[arm.name] = [0.0] * len(Y_true)
ax/benchmark/runners/botorch_test.py (20 changes: 9 additions & 11 deletions)
@@ -7,14 +7,15 @@
 
 import importlib
 from abc import ABC, abstractmethod
+from collections.abc import Mapping
 from dataclasses import dataclass
 from typing import Any, Optional, Union
 
 import torch
 from ax.benchmark.runners.base import BenchmarkRunner
 from ax.core.arm import Arm
 from ax.core.search_space import SearchSpaceDigest
-from ax.core.types import TParameterization
+from ax.core.types import TParamValue
 from ax.utils.common.base import Base
 from ax.utils.common.equality import equality_typechecker
 from ax.utils.common.serialization import TClassDecoderRegistry, TDecoderRegistry
@@ -41,9 +42,9 @@ class ParamBasedTestProblem(ABC):
     negate: bool = False
 
     @abstractmethod
-    def evaluate_true(self, params: TParameterization) -> Tensor: ...
+    def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor: ...
 
-    def evaluate_slack_true(self, params: TParameterization) -> Tensor:
+    def evaluate_slack_true(self, params: Mapping[str, TParamValue]) -> Tensor:
         raise NotImplementedError(
             f"{self.__class__.__name__} does not support constraints."
         )
@@ -243,7 +244,7 @@ def __init__(
             self.test_problem, ConstrainedBaseTestProblem
         )
 
-    def get_Y_true(self, arm: Arm) -> Tensor:
+    def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
         """
         Convert the arm to a tensor and evaluate it on the base test problem.
@@ -252,18 +253,15 @@
         `modified_bounds` in `BotorchTestProblemRunner.__init__` for details.
 
         Args:
-            arm: Arm to evaluate. It will be converted to a
+            params: Parameterization to evaluate. It will be converted to a
                 `batch_shape x d`-dim tensor of point(s) at which to evaluate the
                 test problem.
 
         Returns:
             A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
         """
         X = torch.tensor(
-            [
-                value
-                for _key, value in [*arm.parameters.items()][: self.test_problem.dim]
-            ],
+            [value for _key, value in [*params.items()][: self.test_problem.dim]],
             dtype=torch.double,
         )
 
@@ -322,13 +320,13 @@ def __init__(
         )
         self.test_problem: ParamBasedTestProblem = self.test_problem
 
-    def get_Y_true(self, arm: Arm) -> Tensor:
+    def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
         """Evaluates the test problem.
 
         Returns:
             A `batch_shape x m`-dim tensor of ground truth (noiseless) evaluations.
         """
-        Y_true = self.test_problem.evaluate_true(arm.parameters).view(-1)
+        Y_true = self.test_problem.evaluate_true(params).view(-1)
         # `ParamBasedTestProblem.evaluate_true()` does not negate the outcome
         if self.test_problem.negate:
             Y_true = -Y_true
ax/benchmark/runners/surrogate.py (8 changes: 5 additions & 3 deletions)
@@ -6,11 +6,11 @@
 # pyre-strict
 
 import warnings
+from collections.abc import Mapping
 from typing import Any, Callable, Optional, Union
 
 import torch
 from ax.benchmark.runners.base import BenchmarkRunner
-from ax.core.arm import Arm
 from ax.core.base_trial import BaseTrial, TrialStatus
 from ax.core.observation import ObservationFeatures
 from ax.core.search_space import SearchSpace, SearchSpaceDigest
@@ -95,11 +95,13 @@ def datasets(self) -> list[SupervisedDataset]:
     def get_noise_stds(self) -> Union[None, float, dict[str, float]]:
         return self.noise_stds
 
-    def get_Y_true(self, arm: Arm) -> Tensor:
+    # pyre-fixme[14]: Inconsistent override
+    def get_Y_true(self, params: Mapping[str, float | int]) -> Tensor:
         # We're ignoring the uncertainty predictions of the surrogate model here and
         # use the mean predictions as the outcomes (before potentially adding noise)
         means, _ = self.surrogate.predict(
-            observation_features=[ObservationFeatures(arm.parameters)]
+            # pyre-fixme[6]: params is a Mapping, but ObservationFeatures expects a Dict
+            observation_features=[ObservationFeatures(params)]
         )
         means = [means[name][0] for name in self.outcome_names]
         return torch.tensor(
ax/benchmark/tests/runners/test_botorch_test_problem.py (12 changes: 6 additions & 6 deletions)
@@ -124,11 +124,8 @@ def test_synthetic_runner(self) -> None:
 
             with self.subTest(f"test `get_Y_true()`, {test_description}"):
                 X = torch.rand(1, 6, dtype=torch.double)
-                arm = Arm(
-                    name="0_0",
-                    parameters={f"x{i}": x.item() for i, x in enumerate(X.unbind(-1))},
-                )
-                Y = runner.get_Y_true(arm=arm)
+                params = {f"x{i}": x.item() for i, x in enumerate(X.unbind(-1))}
+                Y = runner.get_Y_true(params=params)
                 if modified_bounds is not None:
                     X_tf = normalize(
                         X, torch.tensor(modified_bounds, dtype=torch.double).T
@@ -152,11 +149,14 @@ def test_synthetic_runner(self) -> None:
                     torch.Size([2]), X.pow(2).sum().item(), dtype=torch.double
                 )
                 self.assertTrue(torch.allclose(Y, expected_Y))
-                oracle = runner.evaluate_oracle(parameters=arm.parameters)
+                oracle = runner.evaluate_oracle(parameters=params)
                 self.assertTrue(np.equal(Y.numpy(), oracle).all())
 
             with self.subTest(f"test `run()`, {test_description}"):
                 trial = Mock(spec=Trial)
+                # pyre-fixme[6]: Incompatible parameter type: params is a
+                #  mutable subtype of the type expected by `Arm`.
+                arm = Arm(name="0_0", parameters=params)
                 trial.arms = [arm]
                 trial.arm = arm
                 trial.index = 0
ax/benchmark/tests/test_benchmark_problem.py (6 changes: 3 additions & 3 deletions)
@@ -76,15 +76,15 @@ def _test_multi_fidelity_or_multi_task(self, fidelity_or_task: str) -> None:
             search_space=SearchSpace(parameters),
             num_trials=3,
         )
-        arm = Arm(parameters={"x0": 1.0, "x1": 0.0, "x2": 0.0})
+        params = {"x0": 1.0, "x1": 0.0, "x2": 0.0}
         at_target = assert_is_instance(
             Branin()
             .evaluate_true(torch.tensor([1.0, 0.0], dtype=torch.double).unsqueeze(0))
             .item(),
             float,
         )
         self.assertAlmostEqual(
-            problem.runner.evaluate_oracle(parameters=arm.parameters)[0],
+            problem.runner.evaluate_oracle(parameters=params)[0],
             at_target,
         )
         # first term: (-(b - 0.1) * (1 - x3) + c - r)^2
@@ -93,7 +93,7 @@ def _test_multi_fidelity_or_multi_task(self, fidelity_or_task: str) -> None:
         t = -5.1 / (4 * math.pi**2) + 5 / math.pi - 6
         expected_change = (t + 0.1) ** 2 - t**2
         self.assertAlmostEqual(
-            problem.runner.get_Y_true(arm=arm).item(),
+            problem.runner.get_Y_true(params=params).item(),
             at_target + expected_change,
         )
 
