Adds test_helpers. Do not use relative imports. (pytorch#2133)
Summary:
Pull Request resolved: pytorch#2133

Moves test utilities that were used in multiple test files to `botorch/utils/test_helpers.py`. This makes it possible to remove all relative imports from the test files; relative imports do not play well with some internal tooling we want to use. Going forward, tests, like the rest of BoTorch, should only use absolute imports.
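
For illustration, the import-style change in a test file looks roughly like this (a sketch; the "before" line mirrors the relative import removed from test_knowledge_gradient.py below):

    # Before: relative import, reaching into a sibling test module.
    from .test_monte_carlo import DummyNonScalarizingPosteriorTransform

    # After: absolute import from the new shared helpers module.
    from botorch.utils.test_helpers import DummyNonScalarizingPosteriorTransform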

Reviewed By: esantorella

Differential Revision: D51767702

fbshipit-source-id: 7d7d32a4e06f8217159a65baa96240c49909b5d0
saitcakmak authored and facebook-github-bot committed Dec 2, 2023
1 parent c14808f commit 504ccea
Showing 15 changed files with 209 additions and 174 deletions.
6 changes: 4 additions & 2 deletions botorch/__init__.py
@@ -30,9 +30,11 @@
 from botorch.utils import manual_seed

 try:
-    from botorch.version import version as __version__
+    # Marking this as a manual import to avoid autodeps complaints
+    # due to imports from non-existent file.
+    from botorch.version import version as __version__  # @manual
 except Exception:  # pragma: no cover
-    __version__ = "Unknown"  # pragma: no cover
+    __version__ = "Unknown"

 logger.info(
     "Turning off `fast_computations` in linear operator and increasing "
170 changes: 170 additions & 0 deletions botorch/utils/test_helpers.py
@@ -0,0 +1,170 @@
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
Dummy classes and other helpers that are used in multiple test files
should be defined here to avoid relative imports.
"""

from __future__ import annotations

import math
from typing import Optional, Tuple

import torch
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.gpytorch import GPyTorchModel
from botorch.models.model import FantasizeMixin, Model
from botorch.models.transforms.outcome import Standardize
from botorch.models.utils import add_output_dim
from botorch.models.utils.assorted import fantasize
from botorch.posteriors.posterior import Posterior
from botorch.utils.datasets import MultiTaskDataset, SupervisedDataset
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods.gaussian_likelihood import (
    FixedNoiseGaussianLikelihood,
    GaussianLikelihood,
)
from gpytorch.means import ConstantMean
from gpytorch.models.exact_gp import ExactGP
from torch import Size, Tensor
from torch.nn.functional import pad


def get_sample_moments(samples: Tensor, sample_shape: Size) -> Tuple[Tensor, Tensor]:
    """Computes the mean and covariance of a set of samples.

    Args:
        samples: A tensor of shape `sample_shape x batch_shape x q`.
        sample_shape: The sample_shape input used while generating the samples using
            the pathwise sampling API.
    """
    sample_dim = len(sample_shape)
    samples = samples.view(-1, *samples.shape[sample_dim:])
    loc = samples.mean(dim=0)
    residuals = (samples - loc).permute(*range(1, samples.ndim), 0)
    return loc, (residuals @ residuals.transpose(-2, -1)) / sample_shape.numel()


def standardize_moments(
    transform: Standardize,
    loc: Tensor,
    covariance_matrix: Tensor,
) -> Tuple[Tensor, Tensor]:
    """Standardizes the loc and covariance_matrix using the mean and standard
    deviations from a Standardize transform.
    """
    m = transform.means.squeeze().unsqueeze(-1)
    s = transform.stdvs.squeeze().reciprocal().unsqueeze(-1)
    loc = s * (loc - m)
    correlation_matrix = s.unsqueeze(-1) * covariance_matrix * s.unsqueeze(-2)
    return loc, correlation_matrix


def gen_multi_task_dataset(
    yvar: Optional[float] = None, **tkwargs
) -> Tuple[MultiTaskDataset, Tuple[Tensor, Tensor, Tensor]]:
    """Constructs a multi-task dataset with two tasks, each with 10 data points."""
    X = torch.linspace(0, 0.95, 10, **tkwargs) + 0.05 * torch.rand(10, **tkwargs)
    X = X.unsqueeze(dim=-1)
    Y1 = torch.sin(X * (2 * math.pi)) + torch.randn_like(X) * 0.2
    Y2 = torch.cos(X * (2 * math.pi)) + torch.randn_like(X) * 0.2
    train_X = torch.cat([pad(X, (1, 0), value=i) for i in range(2)])
    train_Y = torch.cat([Y1, Y2])

    Yvar1 = None if yvar is None else torch.full_like(Y1, yvar)
    Yvar2 = None if yvar is None else torch.full_like(Y2, yvar)
    train_Yvar = None if yvar is None else torch.cat([Yvar1, Yvar2])
    datasets = [
        SupervisedDataset(
            X=train_X[:10],
            Y=Y1,
            Yvar=Yvar1,
            feature_names=["task", "X"],
            outcome_names=["y"],
        ),
        SupervisedDataset(
            X=train_X[10:],
            Y=Y2,
            Yvar=Yvar2,
            feature_names=["task", "X"],
            outcome_names=["y1"],
        ),
    ]
    dataset = MultiTaskDataset(
        datasets=datasets, target_outcome_name="y", task_feature_index=0
    )
    return dataset, (train_X, train_Y, train_Yvar)


def get_pvar_expected(posterior: Posterior, model: Model, X: Tensor, m: int) -> Tensor:
    """Computes the expected variance of a posterior after adding the
    predictive noise from the likelihood.
    """
    X = model.transform_inputs(X)
    lh_kwargs = {}
    if isinstance(model.likelihood, FixedNoiseGaussianLikelihood):
        lh_kwargs["noise"] = model.likelihood.noise.mean().expand(X.shape[:-1])
    if m == 1:
        return model.likelihood(
            posterior.distribution, X, **lh_kwargs
        ).variance.unsqueeze(-1)
    X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape)
    pvar_exp = model.likelihood(model(X_), X_, **lh_kwargs).variance
    return torch.stack([pvar_exp.select(dim=odi, index=i) for i in range(m)], dim=-1)


class DummyNonScalarizingPosteriorTransform(PosteriorTransform):
    scalarize = False

    def evaluate(self, Y):
        pass  # pragma: no cover

    def forward(self, posterior):
        pass  # pragma: no cover


class SimpleGPyTorchModel(GPyTorchModel, ExactGP, FantasizeMixin):
    last_fantasize_flag: bool = False

    def __init__(self, train_X, train_Y, outcome_transform=None, input_transform=None):
        r"""
        Args:
            train_X: A tensor of inputs, passed to self.transform_inputs.
            train_Y: Passed to outcome_transform.
            outcome_transform: Transform applied to train_Y.
            input_transform: A Module that performs the input transformation, passed to
                self.transform_inputs.
        """
        with torch.no_grad():
            transformed_X = self.transform_inputs(
                X=train_X, input_transform=input_transform
            )
        if outcome_transform is not None:
            train_Y, _ = outcome_transform(train_Y)
        self._validate_tensor_args(transformed_X, train_Y)
        train_Y = train_Y.squeeze(-1)
        likelihood = GaussianLikelihood()
        super().__init__(train_X, train_Y, likelihood)
        self.mean_module = ConstantMean()
        self.covar_module = ScaleKernel(RBFKernel())
        if outcome_transform is not None:
            self.outcome_transform = outcome_transform
        if input_transform is not None:
            self.input_transform = input_transform
        self._num_outputs = 1
        self.to(train_X)
        self.transformed_call_args = []

    def forward(self, x):
        self.last_fantasize_flag = fantasize.on()
        if self.training:
            x = self.transform_inputs(x)
            self.transformed_call_args.append(x)
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
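
As a rough usage sketch (not part of this commit; the shapes, dtype, and assertions are illustrative), a test might exercise the relocated helpers like so:

    import torch
    from botorch.utils.test_helpers import gen_multi_task_dataset, get_sample_moments

    # Estimate moments from draws of a known distribution.
    sample_shape = torch.Size([512])
    samples = torch.randn(*sample_shape, 3, 4)  # sample_shape x batch_shape x q
    loc, cov = get_sample_moments(samples, sample_shape)
    assert loc.shape == (3, 4) and cov.shape == (3, 4, 4)

    # Build the two-task dataset consumed by the multi-task model tests.
    dataset, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset(
        yvar=0.05, dtype=torch.double
    )
    assert train_X.shape == (20, 2)  # task index prepended as a feature
    assert train_Yvar.shape == (20, 1)  # fixed noise level for both tasks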
6 changes: 3 additions & 3 deletions botorch/utils/transforms.py
@@ -18,9 +18,9 @@
 from botorch.utils.safe_math import logmeanexp
 from torch import Tensor

-if TYPE_CHECKING:
-    from botorch.acquisition import AcquisitionFunction  # pragma: no cover
-    from botorch.model import Model  # pragma: no cover
+if TYPE_CHECKING:  # pragma: no cover
+    from botorch.acquisition import AcquisitionFunction
+    from botorch.models.model import Model


 def standardize(Y: Tensor) -> Tensor:
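
For context, a minimal sketch (generic Python, not BoTorch code) of the `TYPE_CHECKING` pattern this hunk touches: the guarded imports are only evaluated by static type checkers, never at runtime, so moving `# pragma: no cover` onto the `if` line excludes the whole never-executed block from coverage in one place.

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:  # pragma: no cover
        # Resolved by type checkers only; never imported at runtime.
        from botorch.models.model import Model

    def fit_model(model: Model) -> None:  # annotation resolves lazily
        ...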
6 changes: 5 additions & 1 deletion sphinx/source/utils.rst
@@ -57,12 +57,16 @@ Sampling from GP priors
.. automodule:: botorch.utils.gp_sampling
    :members:


Testing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: botorch.utils.testing
    :members:

Test Helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: botorch.utils.test_helpers
    :members:

Torch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: botorch.utils.torch
3 changes: 1 addition & 2 deletions test/acquisition/test_knowledge_gradient.py
@@ -30,11 +30,10 @@
 from botorch.optim.utils import _filter_kwargs
 from botorch.posteriors.gpytorch import GPyTorchPosterior
 from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
+from botorch.utils.test_helpers import DummyNonScalarizingPosteriorTransform
 from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
 from gpytorch.distributions import MultitaskMultivariateNormal

-from .test_monte_carlo import DummyNonScalarizingPosteriorTransform
-
 NO = "botorch.utils.testing.MockModel.num_outputs"


12 changes: 1 addition & 11 deletions test/acquisition/test_monte_carlo.py
@@ -26,14 +26,14 @@
     ConstrainedMCObjective,
     GenericMCObjective,
     IdentityMCObjective,
-    PosteriorTransform,
     ScalarizedPosteriorTransform,
 )
 from botorch.acquisition.utils import prune_inferior_points
 from botorch.exceptions import BotorchWarning, UnsupportedError
 from botorch.models import SingleTaskGP
 from botorch.sampling.normal import IIDNormalSampler, SobolQMCNormalSampler
 from botorch.utils.low_rank import sample_cached_cholesky
+from botorch.utils.test_helpers import DummyNonScalarizingPosteriorTransform
 from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
 from botorch.utils.transforms import standardize
 from torch import Tensor
@@ -49,16 +49,6 @@ def _sample_forward(self, X):
         pass


-class DummyNonScalarizingPosteriorTransform(PosteriorTransform):
-    scalarize = False
-
-    def evaluate(self, Y):
-        pass  # pragma: no cover
-
-    def forward(self, posterior):
-        pass  # pragma: no cover
-
-
 def infeasible_con(samples: Tensor) -> Tensor:
     return torch.ones_like(samples[..., 0])

3 changes: 1 addition & 2 deletions test/models/test_converter.py
@@ -20,13 +20,12 @@
 )
 from botorch.models.transforms.input import AppendFeatures, Normalize
 from botorch.models.transforms.outcome import Standardize
+from botorch.utils.test_helpers import SimpleGPyTorchModel
 from botorch.utils.testing import BotorchTestCase
 from gpytorch.kernels import RBFKernel
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood

-from .test_gpytorch import SimpleGPyTorchModel
-

 class TestConverters(BotorchTestCase):
     def test_batched_to_model_list(self):
5 changes: 2 additions & 3 deletions test/models/test_fully_bayesian_multitask.py
@@ -42,14 +42,13 @@
 from botorch.utils.multi_objective.box_decompositions.non_dominated import (
     NondominatedPartitioning,
 )
+from botorch.utils.test_helpers import gen_multi_task_dataset
 from botorch.utils.testing import BotorchTestCase
 from gpytorch.kernels import MaternKernel, ScaleKernel
 from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
 from gpytorch.likelihoods.gaussian_likelihood import GaussianLikelihood
 from gpytorch.means import ConstantMean

-from .test_multitask import _gen_multi_task_dataset
-
 EXPECTED_KEYS = [
     "latent_features",
     "mean_module.raw_constant",
@@ -566,7 +565,7 @@ def test_construct_inputs(self):
         for dtype, infer_noise in [(torch.float, False), (torch.double, True)]:
             tkwargs = {"device": self.device, "dtype": dtype}
             task_feature = 0
-            datasets, (train_X, train_Y, train_Yvar) = _gen_multi_task_dataset(
+            datasets, (train_X, train_Y, train_Yvar) = gen_multi_task_dataset(
                 yvar=None if infer_noise else 0.05, **tkwargs
             )

20 changes: 3 additions & 17 deletions test/models/test_gp_regression.py
@@ -17,11 +17,11 @@
 )
 from botorch.models.transforms import Normalize, Standardize
 from botorch.models.transforms.input import InputStandardize
-from botorch.models.utils import add_output_dim
 from botorch.posteriors import GPyTorchPosterior
 from botorch.sampling import SobolQMCNormalSampler
 from botorch.utils.datasets import SupervisedDataset
 from botorch.utils.sampling import manual_seed
+from botorch.utils.test_helpers import get_pvar_expected
 from botorch.utils.testing import _get_random_data, BotorchTestCase
 from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel
 from gpytorch.likelihoods import (
@@ -142,7 +142,7 @@ def test_gp(self, double_only: bool = False):
                 self.assertAllClose(posterior_pred.variance, expected_var)
             else:
                 pvar = posterior_pred.variance
-                pvar_exp = _get_pvar_expected(posterior, model, X, m)
+                pvar_exp = get_pvar_expected(posterior, model, X, m)
                 self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)

             # Tensor valued observation noise.
@@ -176,7 +176,7 @@ def test_gp(self, double_only: bool = False):
                 self.assertAllClose(posterior_pred.variance, expected_var)
             else:
                 pvar = posterior_pred.variance
-                pvar_exp = _get_pvar_expected(posterior, model, X, m)
+                pvar_exp = get_pvar_expected(posterior, model, X, m)
                 self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)

     def test_custom_init(self):
@@ -599,17 +599,3 @@ def test_condition_on_observations(self):
     def test_subset_model(self):
         with self.assertRaises(NotImplementedError):
             super().test_subset_model()
-
-
-def _get_pvar_expected(posterior, model, X, m):
-    X = model.transform_inputs(X)
-    lh_kwargs = {}
-    if isinstance(model.likelihood, FixedNoiseGaussianLikelihood):
-        lh_kwargs["noise"] = model.likelihood.noise.mean().expand(X.shape[:-1])
-    if m == 1:
-        return model.likelihood(
-            posterior.distribution, X, **lh_kwargs
-        ).variance.unsqueeze(-1)
-    X_, odi = add_output_dim(X=X, original_batch_shape=model._input_batch_shape)
-    pvar_exp = model.likelihood(model(X_), X_, **lh_kwargs).variance
-    return torch.stack([pvar_exp.select(dim=odi, index=i) for i in range(m)], dim=-1)
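
A hedged sketch of how the relocated get_pvar_expected helper is used (model setup abbreviated; the data and tolerances here are illustrative, not copied from the test suite):

    import torch
    from botorch.models import SingleTaskGP
    from botorch.utils.test_helpers import get_pvar_expected

    train_X = torch.rand(10, 2, dtype=torch.double)
    train_Y = train_X.sin().sum(dim=-1, keepdim=True)
    model = SingleTaskGP(train_X, train_Y)

    X = torch.rand(4, 2, dtype=torch.double)
    # The posterior with observation noise should match the
    # likelihood-derived predictive variance.
    posterior = model.posterior(X, observation_noise=True)
    pvar_exp = get_pvar_expected(posterior, model, X, m=1)
    assert pvar_exp.shape == (4, 1)
    torch.testing.assert_close(posterior.variance, pvar_exp, rtol=1e-4, atol=1e-5)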
7 changes: 3 additions & 4 deletions test/models/test_gp_regression_mixed.py
@@ -17,6 +17,7 @@
 from botorch.posteriors import GPyTorchPosterior
 from botorch.sampling import SobolQMCNormalSampler
 from botorch.utils.datasets import SupervisedDataset
+from botorch.utils.test_helpers import get_pvar_expected
 from botorch.utils.testing import _get_random_data, BotorchTestCase
 from gpytorch.kernels.kernel import AdditiveKernel, ProductKernel
 from gpytorch.kernels.matern_kernel import MaternKernel
@@ -26,8 +27,6 @@
 from gpytorch.means import ConstantMean
 from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood

-from .test_gp_regression import _get_pvar_expected
-

 class TestMixedSingleTaskGP(BotorchTestCase):
     observed_noise = False
@@ -119,7 +118,7 @@ def test_gp(self):
             self.assertEqual(posterior_pred.mean.shape, expected_shape)
             self.assertEqual(posterior_pred.variance.shape, expected_shape)
             pvar = posterior_pred.variance
-            pvar_exp = _get_pvar_expected(posterior, model, X, m)
+            pvar_exp = get_pvar_expected(posterior, model, X, m)
             self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)

             # test batch evaluation
@@ -133,7 +132,7 @@
             self.assertIsInstance(posterior_pred, GPyTorchPosterior)
             self.assertEqual(posterior_pred.mean.shape, expected_shape)
             pvar = posterior_pred.variance
-            pvar_exp = _get_pvar_expected(posterior, model, X, m)
+            pvar_exp = get_pvar_expected(posterior, model, X, m)
             self.assertAllClose(pvar, pvar_exp, rtol=1e-4, atol=1e-5)

             # test that model converter throws an exception