Skip to content

Commit

Permalink
Delete some unneeded tests (#2766)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: #2766

Context: There used to be more `BenchmarkProblem` subclasses, and they used to implement their own `__repr__` methods, so there were tests for the custom repr methods. Now `BenchmarkProblem` and its subclass `SurrogateBenchmarkProblem` get their `__repr__` methods from being data classes. These tests have become annoying because they break with any change to `BenchmarkProblem`, even if just changing the order of arguments.

This PR:
* Removes two `test_repr` methods.

Reviewed By: Balandat

Differential Revision: D62518032

fbshipit-source-id: aa3374645196e7ff922bc13205cbd1095460acef
  • Loading branch information
esantorella authored and facebook-github-bot committed Sep 13, 2024
1 parent acc9df2 commit 81c945f
Show file tree
Hide file tree
Showing 3 changed files with 1 addition and 39 deletions.
17 changes: 0 additions & 17 deletions ax/benchmark/tests/problems/test_surrogate_problems.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,6 @@
class TestSurrogateProblems(TestCase):
def setUp(self) -> None:
super().setUp()
# print max output so errors in 'repr' can be fully shown
self.maxDiff = None

def test_conforms_to_api(self) -> None:
sbp = get_soo_surrogate()
Expand All @@ -26,21 +24,6 @@ def test_conforms_to_api(self) -> None:
mbp = get_moo_surrogate()
self.assertIsInstance(mbp, BenchmarkProblem)

def test_repr(self) -> None:

sbp = get_soo_surrogate()

expected_repr = (
"SurrogateBenchmarkProblem(name='test', "
"optimization_config=OptimizationConfig(objective=Objective(metric_name="
'"branin", '
"minimize=True), "
"outcome_constraints=[]), num_trials=6, "
"observe_noise_stds=True, "
"optimal_value=0.0)"
)
self.assertEqual(repr(sbp), expected_repr)

def test_compute_score_trace(self) -> None:
soo_problem = get_soo_surrogate()
score_trace = compute_score_trace(
Expand Down
21 changes: 0 additions & 21 deletions ax/benchmark/tests/test_benchmark_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,15 +149,6 @@ def test_single_objective_from_botorch(self) -> None:
self.assertEqual(
test_problem.optimization_config.outcome_constraints, []
)
expected_repr = (
"BenchmarkProblem(name='Ackley', "
"optimization_config=OptimizationConfig(objective=Objective("
'metric_name="Ackley", '
"minimize=True), outcome_constraints=[]), "
"num_trials=1, "
"observe_noise_stds=False, "
"optimal_value=0.0)"
)
else:
outcome_constraint = (
test_problem.optimization_config.outcome_constraints[0]
Expand All @@ -166,18 +157,6 @@ def test_single_objective_from_botorch(self) -> None:
self.assertEqual(outcome_constraint.op, ComparisonOp.GEQ)
self.assertFalse(outcome_constraint.relative)
self.assertEqual(outcome_constraint.bound, 0.0)
expected_repr = (
"BenchmarkProblem(name='ConstrainedHartmann', "
"optimization_config=OptimizationConfig(objective=Objective("
'metric_name="ConstrainedHartmann", minimize=True), '
"outcome_constraints=[OutcomeConstraint(constraint_slack_0"
" >= 0.0)]), "
"num_trials=1, "
"observe_noise_stds=False, "
"optimal_value=-3.32237)"
)

self.assertEqual(repr(test_problem), expected_repr)

def _test_constrained_from_botorch(
self,
Expand Down
2 changes: 1 addition & 1 deletion ax/service/utils/best_point.py
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ def get_best_parameters_from_model_predictions_with_trial_index(
except ValueError:
return _gr_to_prediction_with_trial_index(idx, gr)

# If model is not TorchModelBridge, just use the best arm frmo the
# If model is not TorchModelBridge, just use the best arm from the
# last good generator run
if not isinstance(model, TorchModelBridge):
return _gr_to_prediction_with_trial_index(idx, gr)
Expand Down

0 comments on commit 81c945f

Please sign in to comment.