diff --git a/botorch/models/gp_regression.py b/botorch/models/gp_regression.py
index 4abf32e663..71783f5726 100644
--- a/botorch/models/gp_regression.py
+++ b/botorch/models/gp_regression.py
@@ -40,8 +40,8 @@
 from botorch.models.transforms.outcome import Log, OutcomeTransform
 from botorch.models.utils import validate_input_scaling
 from botorch.models.utils.gpytorch_modules import (
-    get_gaussian_likelihood_with_gamma_prior,
-    get_matern_kernel_with_gamma_prior,
+    get_covar_module_with_dim_scaled_prior,
+    get_gaussian_likelihood_with_lognormal_prior,
     MIN_INFERRED_NOISE_LEVEL,
 )
 from botorch.utils.containers import BotorchContainer
@@ -174,7 +174,7 @@ def __init__(
         )
         if likelihood is None:
             if train_Yvar is None:
-                likelihood = get_gaussian_likelihood_with_gamma_prior(
+                likelihood = get_gaussian_likelihood_with_lognormal_prior(
                     batch_shape=self._aug_batch_shape
                 )
             else:
@@ -190,14 +190,13 @@
             mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
         self.mean_module = mean_module
         if covar_module is None:
-            covar_module = get_matern_kernel_with_gamma_prior(
+            covar_module = get_covar_module_with_dim_scaled_prior(
                 ard_num_dims=transformed_X.shape[-1],
                 batch_shape=self._aug_batch_shape,
             )
             self._subset_batch_dict = {
                 "mean_module.raw_constant": -1,
-                "covar_module.raw_outputscale": -1,
-                "covar_module.base_kernel.raw_lengthscale": -3,
+                "covar_module.raw_lengthscale": -3,
             }
             if train_Yvar is None:
                 self._subset_batch_dict["likelihood.noise_covar.raw_noise"] = -2
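The user-visible effect of the hunks above: a `SingleTaskGP` built without an explicit `covar_module` or `likelihood` now gets a bare dimension-scaled `RBFKernel` (no `ScaleKernel` wrapper) with a `LogNormal` lengthscale prior, and a likelihood with a `LogNormal` noise prior. A minimal sketch of the new defaults, assuming a BoTorch build that includes this patch:

```python
# Sketch of the new SingleTaskGP defaults introduced by this patch
# (assumes a BoTorch version that includes the change).
import torch
from botorch.models import SingleTaskGP
from gpytorch.kernels import RBFKernel
from gpytorch.priors import LogNormalPrior

train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X=train_X, train_Y=train_Y)

# the covar_module is now the kernel itself -- no ScaleKernel wrapper
assert isinstance(model.covar_module, RBFKernel)
assert isinstance(model.covar_module.lengthscale_prior, LogNormalPrior)
# likewise, the default likelihood now carries a LogNormal noise prior
assert isinstance(model.likelihood.noise_covar.noise_prior, LogNormalPrior)
```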
diff --git a/botorch/utils/gp_sampling.py b/botorch/utils/gp_sampling.py
index 008be0d2b1..c5ab09b981 100644
--- a/botorch/utils/gp_sampling.py
+++ b/botorch/utils/gp_sampling.py
@@ -143,10 +143,9 @@ def __init__(
         """
         if not isinstance(kernel, ScaleKernel):
             base_kernel = kernel
-            outputscale = torch.tensor(
-                1.0,
-                dtype=base_kernel.lengthscale.dtype,
-                device=base_kernel.lengthscale.device,
+            outputscale = torch.ones(kernel.batch_shape).to(
+                dtype=kernel.lengthscale.dtype,
+                device=kernel.lengthscale.device,
             )
         else:
             base_kernel = kernel.base_kernel
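The `torch.ones(kernel.batch_shape)` fix matters because a bare kernel passed in here may be batched; a scalar unit outputscale would silently drop the batch dimensions that downstream code expects. A standalone sketch of the shape logic:

```python
# Sketch of why the unit outputscale must track the kernel's batch shape:
# bare (unwrapped) kernels may be batched, and a scalar would not carry the
# per-batch structure downstream.
import torch
from gpytorch.kernels import RBFKernel

kernel = RBFKernel(batch_shape=torch.Size([2]), ard_num_dims=3)
outputscale = torch.ones(kernel.batch_shape).to(
    dtype=kernel.lengthscale.dtype, device=kernel.lengthscale.device
)
assert outputscale.shape == torch.Size([2])  # one unit scale per batch entry
```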
diff --git a/test/models/test_converter.py b/test/models/test_converter.py
index f36d2e6e7e..d380c01fb9 100644
--- a/test/models/test_converter.py
+++ b/test/models/test_converter.py
@@ -23,7 +23,7 @@
 from botorch.models.transforms.outcome import Standardize
 from botorch.utils.test_helpers import SimpleGPyTorchModel
 from botorch.utils.testing import BotorchTestCase
-from gpytorch.kernels import RBFKernel
+from gpytorch.kernels import MaternKernel, RBFKernel
 from gpytorch.likelihoods import GaussianLikelihood
 from gpytorch.likelihoods.gaussian_likelihood import FixedNoiseGaussianLikelihood
 from gpytorch.priors import LogNormalPrior
@@ -134,13 +134,17 @@ def test_model_list_to_batched(self):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # check scalar agreement
         gp2 = SingleTaskGP(train_X, train_Y2)
-        gp2.likelihood.noise_covar.noise_prior.rate.fill_(1.0)
+
+        # modified to check the scalar agreement in a parameter that is accessible
+        # since the error is going to slip through for the non-parametrizable
+        # priors regardless (like the LogNormal)
+        gp2.likelihood.noise_covar.raw_noise_constraint.lower_bound.fill_(1e-3)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
         # check tensor shape agreement
         gp2 = SingleTaskGP(train_X, train_Y2)
-        gp2.covar_module.raw_outputscale = torch.nn.Parameter(
-            torch.tensor([0.0], device=self.device, dtype=dtype)
+        gp2.likelihood.noise_covar.raw_noise = torch.nn.Parameter(
+            torch.tensor([[0.42]], device=self.device, dtype=dtype)
         )
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(ModelListGP(gp1, gp2))
@@ -155,14 +159,15 @@ def test_model_list_to_batched(self):
         with self.assertRaises(NotImplementedError):
             model_list_to_batched(ModelListGP(gp2))
         # test non-default kernel
-        gp1 = SingleTaskGP(train_X, train_Y1, covar_module=RBFKernel())
-        gp2 = SingleTaskGP(train_X, train_Y2, covar_module=RBFKernel())
+        gp1 = SingleTaskGP(train_X, train_Y1, covar_module=MaternKernel())
+        gp2 = SingleTaskGP(train_X, train_Y2, covar_module=MaternKernel())
         list_gp = ModelListGP(gp1, gp2)
         batch_gp = model_list_to_batched(list_gp)
-        self.assertEqual(type(batch_gp.covar_module), RBFKernel)
+        self.assertEqual(type(batch_gp.covar_module), MaternKernel)
         # test error when component GPs have different kernel types
-        gp1 = SingleTaskGP(train_X, train_Y1, covar_module=RBFKernel())
-        gp2 = SingleTaskGP(train_X, train_Y2)
+        # added types for both default and non-default kernels for clarity
+        gp1 = SingleTaskGP(train_X, train_Y1, covar_module=MaternKernel())
+        gp2 = SingleTaskGP(train_X, train_Y2, covar_module=RBFKernel())
         list_gp = ModelListGP(gp1, gp2)
         with self.assertRaises(UnsupportedError):
             model_list_to_batched(list_gp)
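For context, the tensor-shape check exercised above can be reproduced outside the test suite: `model_list_to_batched` compares parameter tensors across submodels and raises `UnsupportedError` on any mismatch. A sketch mirroring the test:

```python
# Sketch mirroring the shape-agreement check above: model_list_to_batched
# refuses to batch submodels whose parameter tensors disagree in shape.
import torch
from botorch.exceptions import UnsupportedError
from botorch.models import ModelListGP, SingleTaskGP
from botorch.models.converter import model_list_to_batched

train_X = torch.rand(10, 2, dtype=torch.double)
gp1 = SingleTaskGP(train_X, torch.rand(10, 1, dtype=torch.double))
gp2 = SingleTaskGP(train_X, torch.rand(10, 1, dtype=torch.double))
gp2.likelihood.noise_covar.raw_noise = torch.nn.Parameter(
    torch.tensor([[0.42]], dtype=torch.double)  # extra dim vs. gp1's raw_noise
)
try:
    model_list_to_batched(ModelListGP(gp1, gp2))
except UnsupportedError:
    pass  # expected: raw_noise shapes differ across the two models
```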
diff --git a/test/models/test_deterministic.py b/test/models/test_deterministic.py
index edb7de22b5..794ef08e78 100644
--- a/test/models/test_deterministic.py
+++ b/test/models/test_deterministic.py
@@ -172,7 +172,7 @@ def test_FixedSingleSampleModel(self):
         post = model.posterior(test_X)
         original_output = post.mean + post.variance.sqrt() * w
         fss_output = fss_model(test_X)
-        self.assertTrue(torch.equal(original_output, fss_output))
+        self.assertAllClose(original_output, fss_output)
 
         self.assertTrue(hasattr(fss_model, "num_outputs"))
 
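`assertAllClose` is the right assertion here because the two outputs are computed along different code paths and only agree up to floating-point round-off; bitwise `torch.equal` is too strict. A plain-torch illustration:

```python
# Why assertAllClose rather than torch.equal: mathematically equal results can
# differ in the last bits when computed along different code paths.
import torch

a = torch.tensor([0.1], dtype=torch.double) + torch.tensor([0.2], dtype=torch.double)
b = torch.tensor([0.3], dtype=torch.double)
assert not torch.equal(a, b)      # off by ~5.6e-17
torch.testing.assert_close(a, b)  # passes within default tolerances
```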
"\n", "import torch\n", "\n", "# use a GPU if available\n", @@ -190,7 +191,7 @@ " if (epoch + 1) % 10 == 0:\n", " print(\n", " f\"Epoch {epoch+1:>3}/{NUM_EPOCHS} - Loss: {loss.item():>4.3f} \"\n", - " f\"lengthscale: {model.covar_module.base_kernel.lengthscale.item():>4.3f} \"\n", + " f\"lengthscale: {model.covar_module.lengthscale.item():>4.3f} \"\n", " f\"noise: {model.likelihood.noise.item():>4.3f}\"\n", " )\n", " optimizer.step()" @@ -215,7 +216,7 @@ "outputs": [], "source": [ "# set model (and likelihood)\n", - "model.eval();" + "model.eval()" ] }, { @@ -309,12 +310,13 @@ } ], "metadata": { + "fileHeader": "", + "fileUid": "29414a8f-010b-41a8-837d-70d9294b809e", + "isAdHoc": false, "kernelspec": { - "display_name": "python3", + "display_name": "Python 3", "language": "python", - "name": "python3" + "name": "bento_kernel_default" } - }, - "nbformat": 4, - "nbformat_minor": 1 + } } diff --git a/tutorials/ibnn_bo.ipynb b/tutorials/ibnn_bo.ipynb index 9b17b40936..72dbf5963d 100644 --- a/tutorials/ibnn_bo.ipynb +++ b/tutorials/ibnn_bo.ipynb @@ -42,10 +42,6 @@ "\n", "import matplotlib.pyplot as plt\n", "import torch\n", - "from torch import nn\n", - "\n", - "from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel\n", - "from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n", "\n", "from botorch import manual_seed\n", "from botorch.acquisition import ExpectedImprovement\n", @@ -56,7 +52,11 @@ "from botorch.optim.optimize import optimize_acqf\n", "from botorch.utils.sampling import draw_sobol_samples\n", "\n", - "warnings.filterwarnings('ignore')\n", + "from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel\n", + "from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n", + "from torch import nn\n", + "\n", + "warnings.filterwarnings(\"ignore\")\n", "\n", "%matplotlib inline\n", "\n", @@ -94,10 +94,13 @@ ], "source": [ "torch.manual_seed(1111)\n", + "\n", + "\n", "def f(x):\n", " x = -(x - 0.15)\n", " return torch.sin(x * (2 * torch.pi)) + torch.sin(x * (2 * torch.pi) * 2)\n", "\n", + "\n", "x = torch.linspace(0, 1, 100).to(dtype).unsqueeze(-1)\n", "true_y = f(x)\n", "\n", @@ -135,7 +138,9 @@ "model.eval()\n", "\n", "# I-BNN with optimized hyperparameters\n", - "model_optimize = SingleTaskGP(train_x, train_y, train_Yvar, covar_module=InfiniteWidthBNNKernel(depth=3))\n", + "model_optimize = SingleTaskGP(\n", + " train_x, train_y, train_Yvar, covar_module=InfiniteWidthBNNKernel(depth=3)\n", + ")\n", "mll = ExactMarginalLogLikelihood(model_optimize.likelihood, model_optimize)\n", "fit_gpytorch_mll(mll)\n", "model_optimize.eval()\n", @@ -144,7 +149,7 @@ "model_matern = SingleTaskGP(train_x, train_y, train_Yvar)\n", "mll_matern = ExactMarginalLogLikelihood(model_matern.likelihood, model_matern)\n", "fit_gpytorch_mll(mll_matern)\n", - "model_matern.eval();" + "model_matern.eval()" ] }, { @@ -162,21 +167,44 @@ "source": [ "def plot_posterior(ax, model, n_draws=5):\n", " with torch.no_grad():\n", - " ax.plot(x.cpu(), true_y.cpu(), linewidth=2, color=\"black\", label=\"True Objective\", linestyle=\"--\")\n", - " ax.scatter(train_x.cpu(), train_y.cpu(), color=\"black\", s=80, label=\"Observations\")\n", + " ax.plot(\n", + " x.cpu(),\n", + " true_y.cpu(),\n", + " linewidth=2,\n", + " color=\"black\",\n", + " label=\"True Objective\",\n", + " linestyle=\"--\",\n", + " )\n", + " ax.scatter(\n", + " train_x.cpu(), train_y.cpu(), color=\"black\", s=80, label=\"Observations\"\n", + " )\n", "\n", " test_x = torch.linspace(0, 1, 
diff --git a/tutorials/constraint_active_search.ipynb b/tutorials/constraint_active_search.ipynb
index 8c36a56c7e..8e9c694920 100644
--- a/tutorials/constraint_active_search.ipynb
+++ b/tutorials/constraint_active_search.ipynb
@@ -249,7 +249,7 @@
     "        return radius * r * z\n",
     "\n",
     "    def _get_base_point_mask(self, X):\n",
-    "        distance_matrix = self.model.models[0].covar_module.base_kernel.covar_dist(\n",
+    "        distance_matrix = self.model.models[0].covar_module.covar_dist(\n",
     "            X, self.base_points\n",
     "        )\n",
     "        return smooth_mask(distance_matrix, self.punchout_radius)\n",
@@ -676,9 +676,18 @@
     "\n",
     "\n",
     "fig, ax = plt.subplots(figsize=(8, 6))\n",
-    "h1 = ax.contourf(Xplt.cpu().numpy(), Yplt.cpu().numpy(), Zplt.cpu().numpy(), 20, cmap=\"Blues\", alpha=0.6)\n",
+    "h1 = ax.contourf(\n",
+    "    Xplt.cpu().numpy(),\n",
+    "    Yplt.cpu().numpy(),\n",
+    "    Zplt.cpu().numpy(),\n",
+    "    20,\n",
+    "    cmap=\"Blues\",\n",
+    "    alpha=0.6,\n",
+    ")\n",
     "fig.colorbar(h1)\n",
-    "ax.contour(Xplt.cpu().numpy(), Yplt.cpu().numpy(), Zplt.cpu().numpy(), [0.55, 0.75], colors=\"k\")\n",
+    "ax.contour(\n",
+    "    Xplt.cpu().numpy(), Yplt.cpu().numpy(), Zplt.cpu().numpy(), [0.55, 0.75], colors=\"k\"\n",
+    ")\n",
     "\n",
     "feasible_inds = (\n",
     "    identify_samples_which_satisfy_constraints(Y, constraints)\n",
@@ -715,10 +724,12 @@
  ],
  "metadata": {
   "fileHeader": "",
+  "fileUid": "cb282d47-f143-4c00-9ae1-b631e97daddb",
+  "isAdHoc": false,
   "kernelspec": {
-   "display_name": "python3",
+   "display_name": "Python 3",
    "language": "python",
-   "name": "python3"
+   "name": "bento_kernel_default"
   },
   "language_info": {
    "codemirror_mode": {
@@ -732,7 +743,5 @@
     "pygments_lexer": "ipython3",
     "version": "3.9.13"
    }
- },
- "nbformat": 4,
- "nbformat_minor": 2
+ }
 }
diff --git a/tutorials/fit_model_with_torch_optimizer.ipynb b/tutorials/fit_model_with_torch_optimizer.ipynb
index 4d46338183..9b7426a9bf 100644
--- a/tutorials/fit_model_with_torch_optimizer.ipynb
+++ b/tutorials/fit_model_with_torch_optimizer.ipynb
@@ -20,6 +20,7 @@
    "outputs": [],
    "source": [
     "import math\n",
+    "\n",
     "import torch\n",
     "\n",
     "# use a GPU if available\n",
@@ -190,7 +191,7 @@
     "        if (epoch + 1) % 10 == 0:\n",
     "            print(\n",
     "                f\"Epoch {epoch+1:>3}/{NUM_EPOCHS} - Loss: {loss.item():>4.3f} \"\n",
-    "                f\"lengthscale: {model.covar_module.base_kernel.lengthscale.item():>4.3f} \"\n",
+    "                f\"lengthscale: {model.covar_module.lengthscale.item():>4.3f} \"\n",
     "                f\"noise: {model.likelihood.noise.item():>4.3f}\"\n",
     "            )\n",
     "        optimizer.step()"
@@ -215,7 +216,7 @@
    "outputs": [],
    "source": [
     "# set model (and likelihood)\n",
-    "model.eval();"
+    "model.eval()"
    ]
   },
   {
@@ -309,12 +310,13 @@
   }
  ],
 "metadata": {
+  "fileHeader": "",
+  "fileUid": "29414a8f-010b-41a8-837d-70d9294b809e",
+  "isAdHoc": false,
  "kernelspec": {
-  "display_name": "python3",
+  "display_name": "Python 3",
   "language": "python",
-  "name": "python3"
+  "name": "bento_kernel_default"
  }
-},
-"nbformat": 4,
-"nbformat_minor": 1
+}
}
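The `covar_dist` fix in `constraint_active_search.ipynb` is a direct consequence of dropping the `ScaleKernel` wrapper: `covar_dist` is defined on every gpytorch `Kernel`, so it is now called on the model's `covar_module` itself. A standalone sketch:

```python
# Standalone sketch of the covar_dist call used in the notebook above;
# covar_dist is defined on every gpytorch Kernel.
import torch
from gpytorch.kernels import RBFKernel

kernel = RBFKernel()
X = torch.rand(5, 2)
base_points = torch.rand(3, 2)
distance_matrix = kernel.covar_dist(X, base_points)  # pairwise distances, 5 x 3
```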
diff --git a/tutorials/ibnn_bo.ipynb b/tutorials/ibnn_bo.ipynb
index 9b17b40936..72dbf5963d 100644
--- a/tutorials/ibnn_bo.ipynb
+++ b/tutorials/ibnn_bo.ipynb
@@ -42,10 +42,6 @@
     "\n",
     "import matplotlib.pyplot as plt\n",
     "import torch\n",
-    "from torch import nn\n",
-    "\n",
-    "from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel\n",
-    "from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n",
     "\n",
     "from botorch import manual_seed\n",
     "from botorch.acquisition import ExpectedImprovement\n",
@@ -56,7 +52,11 @@
     "from botorch.optim.optimize import optimize_acqf\n",
     "from botorch.utils.sampling import draw_sobol_samples\n",
     "\n",
-    "warnings.filterwarnings('ignore')\n",
+    "from gpytorch.kernels import MaternKernel, RBFKernel, ScaleKernel\n",
+    "from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\n",
+    "from torch import nn\n",
+    "\n",
+    "warnings.filterwarnings(\"ignore\")\n",
     "\n",
     "%matplotlib inline\n",
     "\n",
@@ -94,10 +94,13 @@
    ],
    "source": [
     "torch.manual_seed(1111)\n",
+    "\n",
+    "\n",
     "def f(x):\n",
     "    x = -(x - 0.15)\n",
     "    return torch.sin(x * (2 * torch.pi)) + torch.sin(x * (2 * torch.pi) * 2)\n",
     "\n",
+    "\n",
     "x = torch.linspace(0, 1, 100).to(dtype).unsqueeze(-1)\n",
     "true_y = f(x)\n",
     "\n",
@@ -135,7 +138,9 @@
     "model.eval()\n",
     "\n",
     "# I-BNN with optimized hyperparameters\n",
-    "model_optimize = SingleTaskGP(train_x, train_y, train_Yvar, covar_module=InfiniteWidthBNNKernel(depth=3))\n",
+    "model_optimize = SingleTaskGP(\n",
+    "    train_x, train_y, train_Yvar, covar_module=InfiniteWidthBNNKernel(depth=3)\n",
+    ")\n",
     "mll = ExactMarginalLogLikelihood(model_optimize.likelihood, model_optimize)\n",
     "fit_gpytorch_mll(mll)\n",
     "model_optimize.eval()\n",
@@ -144,7 +149,7 @@
     "model_matern = SingleTaskGP(train_x, train_y, train_Yvar)\n",
     "mll_matern = ExactMarginalLogLikelihood(model_matern.likelihood, model_matern)\n",
     "fit_gpytorch_mll(mll_matern)\n",
-    "model_matern.eval();"
+    "model_matern.eval()"
    ]
   },
   {
@@ -162,21 +167,44 @@
    "source": [
     "def plot_posterior(ax, model, n_draws=5):\n",
     "    with torch.no_grad():\n",
-    "        ax.plot(x.cpu(), true_y.cpu(), linewidth=2, color=\"black\", label=\"True Objective\", linestyle=\"--\")\n",
-    "        ax.scatter(train_x.cpu(), train_y.cpu(), color=\"black\", s=80, label=\"Observations\")\n",
+    "        ax.plot(\n",
+    "            x.cpu(),\n",
+    "            true_y.cpu(),\n",
+    "            linewidth=2,\n",
+    "            color=\"black\",\n",
+    "            label=\"True Objective\",\n",
+    "            linestyle=\"--\",\n",
+    "        )\n",
+    "        ax.scatter(\n",
+    "            train_x.cpu(), train_y.cpu(), color=\"black\", s=80, label=\"Observations\"\n",
+    "        )\n",
     "\n",
     "        test_x = torch.linspace(0, 1, 100).to(**tkwargs)\n",
     "        pred_f = model(test_x)\n",
     "\n",
     "        ax.plot(test_x.cpu(), pred_f.mean.cpu(), linewidth=2, label=\"Mean\")\n",
     "        lower, upper = pred_f.confidence_region()\n",
-    "        ax.fill_between(test_x.cpu(), lower.cpu(), upper.cpu(), alpha=0.2, label=r'$\\mu \\pm 2\\sigma$')\n",
+    "        ax.fill_between(\n",
+    "            test_x.cpu(),\n",
+    "            lower.cpu(),\n",
+    "            upper.cpu(),\n",
+    "            alpha=0.2,\n",
+    "            label=r\"$\\mu \\pm 2\\sigma$\",\n",
+    "        )\n",
     "\n",
     "        for i in range(n_draws):\n",
     "            if i == 0:\n",
-    "                ax.plot(test_x.cpu(), pred_f.sample().cpu(), color=\"green\", linewidth=0.5, label=\"Function Draw\")\n",
+    "                ax.plot(\n",
+    "                    test_x.cpu(),\n",
+    "                    pred_f.sample().cpu(),\n",
+    "                    color=\"green\",\n",
+    "                    linewidth=0.5,\n",
+    "                    label=\"Function Draw\",\n",
+    "                )\n",
     "            else:\n",
-    "                ax.plot(test_x.cpu(), pred_f.sample().cpu(), color=\"green\", linewidth=0.5)"
+    "                ax.plot(\n",
+    "                    test_x.cpu(), pred_f.sample().cpu(), color=\"green\", linewidth=0.5\n",
+    "                )"
    ]
   },
   {
@@ -199,22 +227,31 @@
     "fig, axs = plt.subplots(1, 3, figsize=(18, 5))\n",
     "\n",
     "plot_posterior(axs[0], model)\n",
-    "axs[0].set_title(\"I-BNN (Fixed Hypers)\\nWeight Var: %.2f, Bias Var: %.2f\" % \n",
-    "                 (model.covar_module.weight_var.item(), model.covar_module.bias_var.item()), \n",
-    "                 fontsize=20)\n",
+    "axs[0].set_title(\n",
+    "    \"I-BNN (Fixed Hypers)\\nWeight Var: %.2f, Bias Var: %.2f\"\n",
+    "    % (model.covar_module.weight_var.item(), model.covar_module.bias_var.item()),\n",
+    "    fontsize=20,\n",
+    ")\n",
     "axs[0].set_ylim(-7, 8)\n",
     "axs[0].legend()\n",
     "\n",
     "plot_posterior(axs[1], model_optimize)\n",
-    "axs[1].set_title(\"I-BNN (Optimized Hypers)\\nWeight Var: %.2f, Bias Var: %.2f\" % \n",
-    "                 (model_optimize.covar_module.weight_var.item(), model_optimize.covar_module.bias_var.item()),\n",
-    "                 fontsize=20)\n",
+    "axs[1].set_title(\n",
+    "    \"I-BNN (Optimized Hypers)\\nWeight Var: %.2f, Bias Var: %.2f\"\n",
+    "    % (\n",
+    "        model_optimize.covar_module.weight_var.item(),\n",
+    "        model_optimize.covar_module.bias_var.item(),\n",
+    "    ),\n",
+    "    fontsize=20,\n",
+    ")\n",
     "axs[1].set_ylim(-7, 8)\n",
     "\n",
     "plot_posterior(axs[2], model_matern)\n",
-    "axs[2].set_title(\"GP (Matern Kernel)\\nLength Scale: %.2f\" % \n",
-    "                 model_matern.covar_module.base_kernel.lengthscale.item(), \n",
-    "                 fontsize=20)\n",
+    "axs[2].set_title(\n",
+    "    \"GP (RBF Kernel)\\nLength Scale: %.2f\"\n",
+    "    % model_matern.covar_module.lengthscale.item(),\n",
+    "    fontsize=20,\n",
+    ")\n",
     "axs[2].set_ylim(-7, 8)\n",
     "\n",
     "plt.show()"
@@ -256,7 +293,7 @@
     "fig, axs = plt.subplots(1, 4, figsize=(20, 4))\n",
     "\n",
     "for i, ax in enumerate(axs):\n",
-    "    ibnn_kernel = InfiniteWidthBNNKernel(depth=(i+1), device=device)\n",
+    "    ibnn_kernel = InfiniteWidthBNNKernel(depth=(i + 1), device=device)\n",
     "    ibnn_kernel.weight_var = 10.0\n",
     "    ibnn_kernel.bias_var = 2.0\n",
     "\n",
@@ -289,12 +326,12 @@
     "\n",
     "for i, ax in enumerate(axs):\n",
     "    ibnn_kernel = InfiniteWidthBNNKernel(depth=3, device=device)\n",
-    "    ibnn_kernel.weight_var = (i+1) * 5\n",
+    "    ibnn_kernel.weight_var = (i + 1) * 5\n",
     "    ibnn_kernel.bias_var = 2.0\n",
     "\n",
     "    model = SingleTaskGP(train_x, train_y, train_Yvar, covar_module=ibnn_kernel).eval()\n",
     "    plot_posterior(ax, model, n_draws=5)\n",
-    "    ax.set_title(\"Weight Var: %.1f\" % ((i+1) * 5))\n",
+    "    ax.set_title(\"Weight Var: %.1f\" % ((i + 1) * 5))\n",
     "    ax.set_ylim(-10, 10)\n",
     "    if i == 0:\n",
     "        ax.legend()"
@@ -326,7 +363,7 @@
     "\n",
     "    model = SingleTaskGP(train_x, train_y, train_Yvar, covar_module=ibnn_kernel).eval()\n",
     "    plot_posterior(ax, model, n_draws=5)\n",
-    "    ax.set_title(\"Bias Var: %.1f\" % ((i+1) * 5))\n",
+    "    ax.set_title(\"Bias Var: %.1f\" % ((i + 1) * 5))\n",
     "    ax.set_ylim(-5, 6)\n",
     "    if i == 0:\n",
     "        ax.legend()"
@@ -362,12 +399,13 @@
     "            nn.ReLU(),\n",
     "            nn.Linear(50, 50, dtype=torch.float64),\n",
     "            nn.ReLU(),\n",
-    "            nn.Linear(50, 1, dtype=torch.float64)\n",
+    "            nn.Linear(50, 1, dtype=torch.float64),\n",
     "        )\n",
     "\n",
     "    def forward(self, x):\n",
     "        return self.layers(x)\n",
     "\n",
+    "\n",
     "def create_f(input_dims, seed):\n",
     "    # create MLP with weights and biases sampled from N(0, 1)\n",
     "    with manual_seed(seed):\n",
@@ -382,6 +420,7 @@
     "\n",
     "    return f\n",
     "\n",
+    "\n",
     "INPUT_DIMS = 200\n",
     "N_ITERATIONS = 100 if not SMOKE_TEST else 5\n",
     "N_INIT = 50 if not SMOKE_TEST else 2\n",
@@ -405,7 +444,7 @@
    "source": [
     "def generate_initial_data(f, bounds, n, input_dims):\n",
     "    train_x = draw_sobol_samples(bounds=bounds, n=n, q=1).to(**tkwargs)\n",
-    "    train_x = train_x.squeeze(-2) # remove batch dimension\n",
+    "    train_x = train_x.squeeze(-2)  # remove batch dimension\n",
     "    train_y = f(train_x)\n",
     "    return train_x, train_y\n",
     "\n",
@@ -415,9 +454,11 @@
     "    train_y = init_y.clone()\n",
     "\n",
     "    for iteration in range(n_iterations):\n",
-    "        \n",
+    "\n",
     "        # fit model to data\n",
-    "        model = SingleTaskGP(train_x, train_y, outcome_transform=Standardize(m=1), covar_module=kernel)\n",
+    "        model = SingleTaskGP(\n",
+    "            train_x, train_y, outcome_transform=Standardize(m=1), covar_module=kernel\n",
+    "        )\n",
     "        if optimize_hypers:\n",
     "            mll = ExactMarginalLogLikelihood(model.likelihood, model)\n",
     "            fit_gpytorch_mll(mll)\n",
@@ -437,8 +478,7 @@
     "        train_x = torch.cat([train_x, candidate_x])\n",
     "        train_y = torch.cat([train_y, f(candidate_x)])\n",
     "\n",
-    "    return train_x, train_y\n",
-    "    "
+    "    return train_x, train_y"
    ]
   },
   {
"3dc8ac81-18e1-41e8-8c4a-69d3574a9999", + "isAdHoc": false, "kernelspec": { - "display_name": "python3", + "display_name": "Python 3", "language": "python", - "name": "python3" + "name": "bento_kernel_default" }, "language_info": { "codemirror_mode": { @@ -533,7 +600,5 @@ "pygments_lexer": "ipython3", "version": "3.7.11" } - }, - "nbformat": 4, - "nbformat_minor": 2 + } }