Commit

Merge branch 'main' into python_version_39
esantorella committed Jul 25, 2023
2 parents ec5a213 + 71fd34e commit bb9c29b
Showing 30 changed files with 3,100 additions and 227 deletions.
14 changes: 14 additions & 0 deletions botorch/acquisition/__init__.py
@@ -16,6 +16,8 @@
AnalyticAcquisitionFunction,
ConstrainedExpectedImprovement,
ExpectedImprovement,
LogExpectedImprovement,
LogNoisyExpectedImprovement,
NoisyExpectedImprovement,
PosteriorMean,
ProbabilityOfImprovement,
@@ -32,6 +34,11 @@
qKnowledgeGradient,
qMultiFidelityKnowledgeGradient,
)
from botorch.acquisition.logei import (
LogImprovementMCAcquisitionFunction,
qLogExpectedImprovement,
qLogNoisyExpectedImprovement,
)
from botorch.acquisition.max_value_entropy_search import (
MaxValueBase,
qLowerBoundMaxValueEntropy,
@@ -46,6 +53,7 @@
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
SampleReducingMCAcquisitionFunction,
)
from botorch.acquisition.multi_step_lookahead import qMultiStepLookahead
from botorch.acquisition.objective import (
@@ -71,6 +79,8 @@
"AnalyticExpectedUtilityOfBestOption",
"ConstrainedExpectedImprovement",
"ExpectedImprovement",
"LogExpectedImprovement",
"LogNoisyExpectedImprovement",
"FixedFeatureAcquisitionFunction",
"GenericCostAwareUtility",
"InverseCostWeightedUtility",
@@ -85,6 +95,9 @@
"UpperConfidenceBound",
"qAnalyticProbabilityOfImprovement",
"qExpectedImprovement",
"LogImprovementMCAcquisitionFunction",
"qLogExpectedImprovement",
"qLogNoisyExpectedImprovement",
"qKnowledgeGradient",
"MaxValueBase",
"qMultiFidelityKnowledgeGradient",
@@ -104,6 +117,7 @@
"LearnedObjective",
"LinearMCObjective",
"MCAcquisitionFunction",
"SampleReducingMCAcquisitionFunction",
"MCAcquisitionObjective",
"ScalarizedPosteriorTransform",
"get_acquisition_function",
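
The export changes above make the new log-domain improvement criteria (LogExpectedImprovement, LogNoisyExpectedImprovement, qLogExpectedImprovement, qLogNoisyExpectedImprovement) and SampleReducingMCAcquisitionFunction importable directly from botorch.acquisition. As rough orientation only (not part of the commit), here is a minimal sketch of how the newly exported classes might be instantiated; the SingleTaskGP and its random training data are purely illustrative:

import torch

from botorch.acquisition import LogExpectedImprovement, qLogExpectedImprovement
from botorch.models import SingleTaskGP

# Illustrative single-output GP on random data (no fitting, for brevity).
train_X = torch.rand(10, 2, dtype=torch.float64)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

# Analytic log-EI, evaluated on a single candidate (shape: batch x 1 x d).
log_ei = LogExpectedImprovement(model=model, best_f=train_Y.max())
print(log_ei(torch.rand(1, 1, 2, dtype=torch.float64)))

# MC-based q-log-EI for a joint batch of q=3 candidates.
qlog_ei = qLogExpectedImprovement(model=model, best_f=train_Y.max())
print(qlog_ei(torch.rand(1, 3, 2, dtype=torch.float64)))
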
49 changes: 38 additions & 11 deletions botorch/acquisition/fixed_feature.py
@@ -20,6 +20,35 @@
from torch.nn import Module


def get_dtype_of_sequence(values: Sequence[Union[Tensor, float]]) -> torch.dtype:
"""
Return torch.float32 if everything is single-precision and torch.float64
otherwise.
Numbers (non-tensors) are double-precision.
"""

def _is_single(value: Union[Tensor, float]) -> bool:
return isinstance(value, Tensor) and value.dtype == torch.float32

all_single_precision = all(_is_single(value) for value in values)
return torch.float32 if all_single_precision else torch.float64


def get_device_of_sequence(values: Sequence[Union[Tensor, float]]) -> torch.device:
    """
    CPU if everything is on the CPU; CUDA otherwise.
    Numbers (non-tensors) are considered to be on the CPU.
    """

def _is_cuda(value: Union[Tensor, float]) -> bool:
return hasattr(value, "device") and value.device == torch.device("cuda")

any_cuda = any(_is_cuda(value) for value in values)
return torch.device("cuda") if any_cuda else torch.device("cpu")
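
A quick illustration of the two helpers above (a sketch, assuming they are importable from botorch.acquisition.fixed_feature as added here): plain Python numbers count as double precision and as living on the CPU, so a single float in the sequence promotes the common dtype to torch.float64, while any CUDA tensor would promote the common device to CUDA.

import torch
from botorch.acquisition.fixed_feature import (
    get_device_of_sequence,
    get_dtype_of_sequence,
)

# A float32 tensor mixed with a plain float: the float counts as double
# precision, so the common dtype is torch.float64.
mixed = [torch.tensor([0.5], dtype=torch.float32), 1.0]
print(get_dtype_of_sequence(mixed))   # torch.float64
print(get_device_of_sequence(mixed))  # cpu (no CUDA tensors present)

# An all-float32 sequence of tensors stays single precision.
all_f32 = [torch.tensor([0.5]), torch.tensor([0.25])]
print(get_dtype_of_sequence(all_f32))  # torch.float32
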


class FixedFeatureAcquisitionFunction(AcquisitionFunction):
"""A wrapper around AquisitionFunctions to fix a subset of features.
@@ -58,27 +87,25 @@ def __init__(
         """
         Module.__init__(self)
         self.acq_func = acq_function
-        dtype = torch.float
-        device = torch.device("cpu")
         self.d = d
+
         if isinstance(values, Tensor):
             new_values = values.detach().clone()
         else:
+
+            dtype = get_dtype_of_sequence(values)
+            device = get_device_of_sequence(values)
+
             new_values = []
             for value in values:
                 if isinstance(value, Number):
-                    new_values.append(torch.tensor([float(value)]))
+                    value = torch.tensor([value], dtype=dtype)
                 else:
-                    # if any value uses double, use double for all values
-                    # likewise if any value uses cuda, use cuda for all values
-                    dtype = value.dtype if value.dtype == torch.double else dtype
-                    device = value.device if value.device.type == "cuda" else device
                     if value.ndim == 0:  # since we can't broadcast with zero-d tensors
                         value = value.unsqueeze(0)
-                    new_values.append(value.detach().clone())
-            # move all values to same device
-            for i, val in enumerate(new_values):
-                new_values[i] = val.to(dtype=dtype, device=device)
+                    value = value.detach().clone()
+
+                new_values.append(value.to(dtype=dtype, device=device))
 
         # There are 3 cases for when `values` is a `Sequence`.
         # 1) `values` == list of floats as earlier.
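To tie the refactored constructor back to its intended use, here is a brief sketch (illustrative model and data, not part of the commit) of FixedFeatureAcquisitionFunction with a values argument that mixes a Python float and a zero-dimensional tensor, which is exactly the case the new dtype/device helpers normalize:

import torch
from botorch.acquisition import ExpectedImprovement, FixedFeatureAcquisitionFunction
from botorch.models import SingleTaskGP

# Illustrative 3-dimensional problem; the last two features are held fixed.
train_X = torch.rand(8, 3, dtype=torch.float64)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

ei = ExpectedImprovement(model=model, best_f=train_Y.max())

# `values` may mix Python floats and tensors; per the constructor above,
# a common dtype/device is chosen for all of them (floats count as double).
ff_ei = FixedFeatureAcquisitionFunction(
    acq_function=ei,
    d=3,
    columns=[1, 2],
    values=[0.25, torch.tensor(0.75, dtype=torch.float64)],
)

# Evaluate on candidates over the single remaining free dimension.
X_free = torch.rand(4, 1, 1, dtype=torch.float64)
print(ff_ei(X_free).shape)  # torch.Size([4])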
