Commit 21a260c

Merge 24a778d into 04e235d

jduerholt authored Apr 24, 2023
2 parents 04e235d + 24a778d commit 21a260c
Showing 3 changed files with 58 additions and 30 deletions.
7 changes: 1 addition & 6 deletions botorch/generation/gen.py
@@ -213,16 +213,11 @@ def f_np_wrapper(x: np.ndarray, f: Callable):
         return fval
 
     if nonlinear_inequality_constraints:
-        # Make sure `batch_limit` is 1 for now.
-        if not (len(shapeX) == 3 and shapeX[:2] == torch.Size([1, 1])):
-            raise ValueError(
-                "`batch_limit` must be 1 when non-linear inequality constraints "
-                "are given."
-            )
         constraints += make_scipy_nonlinear_inequality_constraints(
             nonlinear_inequality_constraints=nonlinear_inequality_constraints,
             f_np_wrapper=f_np_wrapper,
             x0=x0,
+            shapeX=shapeX,
         )
     x0 = _arrayify(x0)

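With the `batch_limit` guard removed, callers can pass nonlinear inequality constraints together with `batch_limit > 1`. A minimal sketch of the call pattern this enables, mirroring the arguments from the deleted raise-test in `test_optimize.py` below; the acquisition function `acqf` is an assumption, not part of this diff:

```python
import torch
from botorch.optim import optimize_acqf

# Feasible iff nlc(x) >= 0, evaluated on a single point x of shape (d,).
def nlc(x):
    return 4.0 - x.sum()

# `acqf` is an assumed, already-constructed acquisition function.
candidates, acq_values = optimize_acqf(
    acq_function=acqf,
    bounds=torch.tensor([[0.0] * 3, [1.0] * 3]),
    q=1,
    num_restarts=5,
    nonlinear_inequality_constraints=[nlc],
    batch_initial_conditions=torch.rand(5, 1, 3),
    options={"batch_limit": 5},  # raised a ValueError before this change
)
```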
53 changes: 43 additions & 10 deletions botorch/optim/parameter_constraints.py
@@ -312,6 +312,33 @@ def _make_linear_constraints(
return constraints


+def _make_nonlinear_constraints(
+    f_np_wrapper: Callable, nlc: Callable, shapeX: torch.Size
+) -> List[Dict]:
+    """Create SciPy inequality-constraint dicts from the callable `nlc`.
+
+    `nlc` is applied independently to every point of the `b x q` candidate
+    array, producing one `{"type", "fun", "jac"}` dict per point.
+    """
+    shapeX = _validate_linear_constraints_shape_input(shapeX)
+    b, q, _ = shapeX
+    constraints = []
+
+    def get_interpoint_constraint(b: int, q: int, nlc: Callable) -> Callable:
+        return lambda x: nlc(x[b, q])
+
+    for i in range(b):
+        for j in range(q):
+            f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints(
+                f_np_wrapper=f_np_wrapper,
+                nlc=get_interpoint_constraint(b=i, q=j, nlc=nlc),
+            )
+            constraints.append(
+                {
+                    "type": "ineq",
+                    "fun": f_obj,
+                    "jac": f_grad,
+                }
+            )
+
+    return constraints
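For reference, the `{"type", "fun", "jac"}` dictionaries built here follow SciPy's constraint format. A self-contained sketch (a standalone example, not BoTorch code) of how such dicts drive `scipy.optimize.minimize` with SLSQP, which treats `"ineq"` as `fun(x) >= 0`:

```python
import numpy as np
from scipy.optimize import minimize

# One inequality constraint: x[0] + x[1] - 1 >= 0.
constraints = [
    {
        "type": "ineq",
        "fun": lambda x: x[0] + x[1] - 1.0,
        "jac": lambda x: np.array([1.0, 1.0]),
    }
]

res = minimize(
    fun=lambda x: float((x**2).sum()),  # minimize ||x||^2
    x0=np.array([2.0, 2.0]),
    method="SLSQP",
    constraints=constraints,
)
print(res.x)  # approximately [0.5, 0.5], on the constraint boundary
```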


def _generate_unfixed_lin_constraints(
constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
fixed_features: Dict[int, float],
@@ -381,7 +408,7 @@ def _make_f_and_grad_nonlinear_inequality_constraints(
"""
Create callables for objective + grad for the nonlinear inequality constraints.
The Scipy interface requires specifying separate callables and we use caching to
avoid evaluating the same input twice. This caching only works if
avoid evaluating the same input twice. This caching only works if
the returned functions are evaluated on the same input in immediate
sequence (i.e., calling `f_obj(X_1)`, `f_grad(X_1)` will result in a
single forward pass, while `f_obj(X_1)`, `f_grad(X_2)`, `f_obj(X_1)`
will result in three forward passes).
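The caching this docstring describes can be pictured as a one-element cache shared by the two returned callables. A minimal sketch of the pattern (helper names hypothetical, not the BoTorch implementation):

```python
import numpy as np

def make_f_and_grad(fn):
    # fn(X) is assumed to return (objective_value, gradient) in one pass.
    cache = {"X": None, "obj": None, "grad": None}

    def _ensure(X):
        # Recompute only when X differs from the cached input.
        if cache["X"] is None or not np.array_equal(cache["X"], X):
            obj, grad = fn(X)
            cache.update(X=np.copy(X), obj=obj, grad=grad)

    def f_obj(X):
        _ensure(X)
        return cache["obj"]

    def f_grad(X):
        _ensure(X)
        return cache["grad"]

    return f_obj, f_grad
```

Calling `f_obj(X_1)` then `f_grad(X_1)` hits the cache; interleaving different inputs forces a recompute on every call.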
@@ -415,6 +442,7 @@ def make_scipy_nonlinear_inequality_constraints(
     nonlinear_inequality_constraints: List[Callable],
     f_np_wrapper: Callable,
     x0: Tensor,
+    shapeX: torch.Size,
 ) -> List[Dict]:
r"""Generate Scipy nonlinear inequality constraints from callables.
@@ -447,14 +475,19 @@
"`batch_initial_conditions` must satisfy the non-linear inequality "
"constraints."
)
-        f_obj, f_grad = _make_f_and_grad_nonlinear_inequality_constraints(
-            f_np_wrapper=f_np_wrapper, nlc=nlc
-        )
-        scipy_nonlinear_inequality_constraints.append(
-            {
-                "type": "ineq",
-                "fun": f_obj,
-                "jac": f_grad,
-            }
-        )
+        scipy_nonlinear_inequality_constraints += _make_nonlinear_constraints(
+            f_np_wrapper=f_np_wrapper, nlc=nlc, shapeX=shapeX
+        )
return scipy_nonlinear_inequality_constraints
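The error text above reflects that `make_scipy_nonlinear_inequality_constraints` checks the initial conditions for feasibility before optimization starts. A hedged sketch of such a pointwise check (helper name hypothetical):

```python
import torch

def check_x0_feasible(x0: torch.Tensor, nlcs) -> None:
    # x0 has shape (b, q, d); every point must satisfy nlc(x) >= 0.
    points = x0.reshape(-1, x0.shape[-1])
    for nlc in nlcs:
        if any(nlc(x).item() < 0 for x in points):
            raise ValueError(
                "`batch_initial_conditions` must satisfy the non-linear "
                "inequality constraints."
            )
```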
28 changes: 14 additions & 14 deletions test/optim/test_optimize.py
@@ -850,20 +850,20 @@ def nlc4(x):
batch_initial_conditions=4 * torch.ones(1, 1, 3, **tkwargs),
)
-        # Explicitly setting batch_limit to be >1 should raise
-        with self.assertRaisesRegex(
-            ValueError,
-            "`batch_limit` must be 1 when non-linear inequality constraints "
-            "are given.",
-        ):
-            optimize_acqf(
-                acq_function=mock_acq_function,
-                bounds=bounds,
-                q=1,
-                nonlinear_inequality_constraints=[nlc1],
-                batch_initial_conditions=torch.rand(5, 1, 3, **tkwargs),
-                num_restarts=5,
-                options={"batch_limit": 5},
-            )
# If there are non-linear inequality constraints an initial condition
# generator object `ic_generator` must be supplied.
with self.assertRaisesRegex(
