From 8c1a73a7ad0c84406d60e554a3c0a515f28fe604 Mon Sep 17 00:00:00 2001 From: vizier-team Date: Mon, 11 Mar 2024 12:04:46 -0700 Subject: [PATCH] Internal clean up. PiperOrigin-RevId: 614747664 --- .../eagle_strategy_utils_test.py | 2 +- .../eagle_strategy/serialization_test.py | 6 ++++-- .../designers/scalarizing_designer.py | 4 ++-- .../designers/scalarizing_designer_test.py | 2 +- .../optimizers/vectorized_base_test.py | 16 ++++++++++---- .../algorithms/testing/comparator_runner.py | 8 ++++--- .../experimenters/combo_experimenter_test.py | 2 +- .../discretizing_experimenter_test.py | 3 ++- .../experimenters/hpob_experimenter_test.py | 21 ++++++++++++------- .../l1_categorical_experimenter_test.py | 8 +++++-- .../multiobjective_experimenter_test.py | 4 ++-- .../nasbench101_experimenter_test.py | 4 +++- .../nasbench201_experimenter_test.py | 2 +- .../experimenters/noisy_experimenter_test.py | 17 ++++++++------- .../normalizing_experimenter_test.py | 2 +- .../experimenters/numpy_experimenter_test.py | 5 +++-- .../shifting_experimenter_test.py | 10 +++++---- .../sign_flip_experimenter_test.py | 16 ++++++++------ .../experimenters/sparse_experimenter_test.py | 8 +++++-- .../surrogate_experimenter_test.py | 2 +- 20 files changed, 90 insertions(+), 52 deletions(-) diff --git a/vizier/_src/algorithms/designers/eagle_strategy/eagle_strategy_utils_test.py b/vizier/_src/algorithms/designers/eagle_strategy/eagle_strategy_utils_test.py index 75d3e5d06..79d503c98 100644 --- a/vizier/_src/algorithms/designers/eagle_strategy/eagle_strategy_utils_test.py +++ b/vizier/_src/algorithms/designers/eagle_strategy/eagle_strategy_utils_test.py @@ -281,7 +281,7 @@ def test_replace_trial_metric_name(self): trial.complete(measurement=vz.Measurement(metrics={'obj123': 1123.3})) new_trial = utils.standardize_trial_metric_name(trial) self.assertEqual( - new_trial.final_measurement.metrics['objective'].value, 1123.3 + new_trial.final_measurement_or_die.metrics['objective'].value, 1123.3 ) self.assertEqual(new_trial.parameters['f1'].value, 0.0) self.assertEqual(new_trial.metadata.ns('eagle')['parent_fly_id'], '123') diff --git a/vizier/_src/algorithms/designers/eagle_strategy/serialization_test.py b/vizier/_src/algorithms/designers/eagle_strategy/serialization_test.py index 21fa146b2..821b8cd3b 100644 --- a/vizier/_src/algorithms/designers/eagle_strategy/serialization_test.py +++ b/vizier/_src/algorithms/designers/eagle_strategy/serialization_test.py @@ -60,8 +60,10 @@ def test_restore_pool(self, x_values, obj_values): firefly.trial._infeasibility_reason, ) self.assertEqual( - restored_firefly.trial.final_measurement.metrics['objective'].value, - firefly.trial.final_measurement.metrics['objective'].value, + restored_firefly.trial.final_measurement_or_die.metrics[ + 'objective' + ].value, + firefly.trial.final_measurement_or_die.metrics['objective'].value, ) def test_restore_rng(self): diff --git a/vizier/_src/algorithms/designers/scalarizing_designer.py b/vizier/_src/algorithms/designers/scalarizing_designer.py index faf5795e7..30cd35e8c 100644 --- a/vizier/_src/algorithms/designers/scalarizing_designer.py +++ b/vizier/_src/algorithms/designers/scalarizing_designer.py @@ -77,13 +77,13 @@ def update( ) -> None: for trial in completed.trials: objectives = [ - trial.final_measurement.metrics.get_value( + trial.final_measurement_or_die.metrics.get_value( config.name, default=jnp.nan ) for config in self._objectives ] # Simply append the scalarized value. 
- trial.final_measurement.metrics[self._scalarized_metric_name] = ( + trial.final_measurement_or_die.metrics[self._scalarized_metric_name] = ( self._scalarizer(jnp.array(objectives)) ) diff --git a/vizier/_src/algorithms/designers/scalarizing_designer_test.py b/vizier/_src/algorithms/designers/scalarizing_designer_test.py index abb6072a2..a2652d741 100644 --- a/vizier/_src/algorithms/designers/scalarizing_designer_test.py +++ b/vizier/_src/algorithms/designers/scalarizing_designer_test.py @@ -135,7 +135,7 @@ def eagle_designer_factory(ps, seed): all_active=vza.ActiveTrials(trials=[]), ) self.assertTrue( - jnp.isnan(trial.final_measurement.metrics['scalarized'].value) + jnp.isnan(trial.final_measurement_or_die.metrics['scalarized'].value) ) diff --git a/vizier/_src/algorithms/optimizers/vectorized_base_test.py b/vizier/_src/algorithms/optimizers/vectorized_base_test.py index fdf4ec690..13d750dc9 100644 --- a/vizier/_src/algorithms/optimizers/vectorized_base_test.py +++ b/vizier/_src/algorithms/optimizers/vectorized_base_test.py @@ -224,7 +224,9 @@ def test_best_candidates_count_is_1(self, use_fori): self.assertEqual(best_candidates[0].parameters['f1'].value, 0.5) self.assertEqual(best_candidates[0].parameters['f2'].value, 0.5) self.assertAlmostEqual( - best_candidates[0].final_measurement.metrics['acquisition'].value, + best_candidates[0] + .final_measurement_or_die.metrics['acquisition'] + .value, -((0.5 - 0.52) ** 2), ) @@ -251,21 +253,27 @@ def test_best_candidates_count_is_3(self, use_fori): self.assertAlmostEqual(best_candidates[0].parameters['f1'].value, 0.5) self.assertAlmostEqual(best_candidates[0].parameters['f2'].value, 0.5) self.assertAlmostEqual( - best_candidates[0].final_measurement.metrics['acquisition'].value, + best_candidates[0] + .final_measurement_or_die.metrics['acquisition'] + .value, -((0.5 - 0.52) ** 2), ) # check 2nd best candidate self.assertAlmostEqual(best_candidates[1].parameters['f1'].value, 0.6) self.assertAlmostEqual(best_candidates[1].parameters['f2'].value, 0.6) self.assertAlmostEqual( - best_candidates[1].final_measurement.metrics['acquisition'].value, + best_candidates[1] + .final_measurement_or_die.metrics['acquisition'] + .value, -((0.6 - 0.52) ** 2), ) # check 3rd best candidate self.assertAlmostEqual(best_candidates[2].parameters['f1'].value, 0.4) self.assertAlmostEqual(best_candidates[2].parameters['f2'].value, 0.4) self.assertAlmostEqual( - best_candidates[2].final_measurement.metrics['acquisition'].value, + best_candidates[2] + .final_measurement_or_die.metrics['acquisition'] + .value, -((0.4 - 0.52) ** 2), ) diff --git a/vizier/_src/algorithms/testing/comparator_runner.py b/vizier/_src/algorithms/testing/comparator_runner.py index 614521fe2..af65dd271 100644 --- a/vizier/_src/algorithms/testing/comparator_runner.py +++ b/vizier/_src/algorithms/testing/comparator_runner.py @@ -170,13 +170,15 @@ def assert_optimizer_better_simple_regret( res = baseline_optimizer(score_fn, count=1, seed=random.PRNGKey(i)) # pytype: disable=wrong-arg-types trial = vb.best_candidates_to_trials(res, converter) baseline_obj_values.append( - trial[0].final_measurement.metrics['acquisition'].value) + trial[0].final_measurement_or_die.metrics['acquisition'].value + ) for i in range(self.candidate_num_repeats): res = candidate_optimizer(score_fn, count=1, seed=random.PRNGKey(i)) # pytype: disable=wrong-arg-types trial = vb.best_candidates_to_trials(res, converter) candidate_obj_values.append( - trial[0].final_measurement.metrics['acquisition'].value) + 
trial[0].final_measurement_or_die.metrics['acquisition'].value + ) self._conclude_test(baseline_obj_values, candidate_obj_values) @@ -199,7 +201,7 @@ def _run_one(benchmark_state_factory: benchmarks.BenchmarkStateFactory, best_trial = benchmark_state.algorithm.supporter.GetBestTrials(count=1)[0] metric_name = benchmark_state.experimenter.problem_statement( ).single_objective_metric_name - return best_trial.final_measurement.metrics[metric_name].value + return best_trial.final_measurement_or_die.metrics[metric_name].value baseline_obj_values = [] candidate_obj_values = [] diff --git a/vizier/_src/benchmarks/experimenters/combo_experimenter_test.py b/vizier/_src/benchmarks/experimenters/combo_experimenter_test.py index 0817c1a13..0ecb6869d 100644 --- a/vizier/_src/benchmarks/experimenters/combo_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/combo_experimenter_test.py @@ -58,7 +58,7 @@ def test_experimenters(self, experimenter_class, objective_min, logging.info('Evaluated Trial: %s', trial) self.assertEqual(trial.status, pyvizier.TrialStatus.COMPLETED) metric_name = problem_statement.metric_information.item().name - eval_objective = trial.final_measurement.metrics[metric_name].value + eval_objective = trial.final_measurement_or_die.metrics[metric_name].value self.assertLessEqual(eval_objective, objective_max) self.assertGreaterEqual(eval_objective, objective_min) diff --git a/vizier/_src/benchmarks/experimenters/discretizing_experimenter_test.py b/vizier/_src/benchmarks/experimenters/discretizing_experimenter_test.py index 3af69452a..d6cdfab4d 100644 --- a/vizier/_src/benchmarks/experimenters/discretizing_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/discretizing_experimenter_test.py @@ -76,7 +76,8 @@ def testNumpyExperimenter(self, func): metric_name = exptr.problem_statement().metric_information.item().name self.assertAlmostEqual( func(np.array([0.0, 1.0, 1.5])), - t.final_measurement.metrics[metric_name].value) + t.final_measurement_or_die.metrics[metric_name].value, + ) self.assertEqual(t.status, pyvizier.TrialStatus.COMPLETED) self.assertDictEqual(t.parameters.as_dict(), parameters) diff --git a/vizier/_src/benchmarks/experimenters/hpob_experimenter_test.py b/vizier/_src/benchmarks/experimenters/hpob_experimenter_test.py index c784f5d24..b910bc2e5 100644 --- a/vizier/_src/benchmarks/experimenters/hpob_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/hpob_experimenter_test.py @@ -61,18 +61,25 @@ def test_all(self, **kwargs): experimenter.evaluate([problem_and_trials.trials[0]]) - logging.info('First five trial metrics: %s', [ - t.final_measurement.metrics[hpob_experimenter.METRIC_NAME].value - for t in problem_and_trials.trials[:5] - ]) + logging.info( + 'First five trial metrics: %s', + [ + t.final_measurement_or_die.metrics[ + hpob_experimenter.METRIC_NAME + ].value + for t in problem_and_trials.trials[:5] + ], + ) logging.info('objective: %f', objective) logging.info('evaluated: %s', problem_and_trials.trials[0]) self.assertAlmostEqual( - problem_and_trials.trials[0].final_measurement.metrics[ - hpob_experimenter.METRIC_NAME].value, + problem_and_trials.trials[0] + .final_measurement_or_die.metrics[hpob_experimenter.METRIC_NAME] + .value, objective, - places=1) + places=1, + ) break diff --git a/vizier/_src/benchmarks/experimenters/l1_categorical_experimenter_test.py b/vizier/_src/benchmarks/experimenters/l1_categorical_experimenter_test.py index 9f1ce76dd..b342f6a57 100644 --- 
a/vizier/_src/benchmarks/experimenters/l1_categorical_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/l1_categorical_experimenter_test.py @@ -32,7 +32,9 @@ def test_evaluate_optimum(self): num_categories=[2, 2], optimum=optimum) suggestion = vz.Trial(parameters={'c0': '0', 'c1': '1'}) exptr.evaluate([suggestion]) - self.assertEqual(suggestion.final_measurement.metrics['objective'].value, 0) + self.assertEqual( + suggestion.final_measurement_or_die.metrics['objective'].value, 0 + ) def test_evaluate_non_optimum(self): optimum = [0, 1] @@ -40,7 +42,9 @@ def test_evaluate_non_optimum(self): num_categories=[2, 2], optimum=optimum) suggestion = vz.Trial(parameters={'c0': '1', 'c1': '0'}) exptr.evaluate([suggestion]) - self.assertEqual(suggestion.final_measurement.metrics['objective'].value, 2) + self.assertEqual( + suggestion.final_measurement_or_die.metrics['objective'].value, 2 + ) @parameterized.parameters({'num_categories': [10, 3, 2]}, {'num_categories': [10, 2, 10]}, diff --git a/vizier/_src/benchmarks/experimenters/multiobjective_experimenter_test.py b/vizier/_src/benchmarks/experimenters/multiobjective_experimenter_test.py index 3ac1cbbf1..c5f08a82b 100644 --- a/vizier/_src/benchmarks/experimenters/multiobjective_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/multiobjective_experimenter_test.py @@ -50,11 +50,11 @@ def test_mulitobjective_numpy(self): exptr.evaluate([t]) self.assertAlmostEqual( func1(np.array([0.0, 1.0])), - t.final_measurement.metrics['m1'].value, + t.final_measurement_or_die.metrics['m1'].value, ) self.assertAlmostEqual( func2(np.array([0.0, 1.0])), - t.final_measurement.metrics['m2'].value, + t.final_measurement_or_die.metrics['m2'].value, ) self.assertEqual(t.status, pyvizier.TrialStatus.COMPLETED) diff --git a/vizier/_src/benchmarks/experimenters/nasbench101_experimenter_test.py b/vizier/_src/benchmarks/experimenters/nasbench101_experimenter_test.py index 69d412686..e12178021 100644 --- a/vizier/_src/benchmarks/experimenters/nasbench101_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/nasbench101_experimenter_test.py @@ -42,7 +42,9 @@ def test_experimenter(self): self.assertEqual(trial.status, pyvizier.TrialStatus.COMPLETED) if not trial.infeasible: metric_name = problem_statement.metric_information.item().name - eval_objective = trial.final_measurement.metrics[metric_name].value + eval_objective = trial.final_measurement_or_die.metrics[ + metric_name + ].value self.assertGreaterEqual(eval_objective, 0.0) self.assertLessEqual(eval_objective, 100.0) diff --git a/vizier/_src/benchmarks/experimenters/nasbench201_experimenter_test.py b/vizier/_src/benchmarks/experimenters/nasbench201_experimenter_test.py index f3072489e..ab011a7b5 100644 --- a/vizier/_src/benchmarks/experimenters/nasbench201_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/nasbench201_experimenter_test.py @@ -41,7 +41,7 @@ def test_experimenter(self): logging.info('Evaluated Trial: %s', trial) self.assertEqual(trial.status, pyvizier.TrialStatus.COMPLETED) metric_name = problem_statement.metric_information.item().name - eval_objective = trial.final_measurement.metrics[metric_name].value + eval_objective = trial.final_measurement_or_die.metrics[metric_name].value self.assertGreaterEqual(eval_objective, 0.0) self.assertLessEqual(eval_objective, 100.0) diff --git a/vizier/_src/benchmarks/experimenters/noisy_experimenter_test.py b/vizier/_src/benchmarks/experimenters/noisy_experimenter_test.py index 054a196f9..0af762052 100644 --- 
a/vizier/_src/benchmarks/experimenters/noisy_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/noisy_experimenter_test.py @@ -59,14 +59,15 @@ def testDeterministicNoiseApply(self, func): exptr.evaluate([t]) metric_name = exptr.problem_statement().metric_information.item().name - unnoised_value = t.final_measurement.metrics[metric_name].value + unnoised_value = t.final_measurement_or_die.metrics[metric_name].value noisy_exptr.evaluate([t]) - noised_value = t.final_measurement.metrics[metric_name].value + noised_value = t.final_measurement_or_die.metrics[metric_name].value self.assertEqual(unnoised_value - 1, noised_value) self.assertEqual( unnoised_value, - t.final_measurement.metrics[metric_name + '_before_noise'].value) + t.final_measurement_or_die.metrics[metric_name + '_before_noise'].value, + ) @parameterized.named_parameters( ('NO_NOISE', 'NO_NOISE', 1e-5), @@ -95,13 +96,13 @@ def testGaussianNoiseApply(self, noise: str, delta: float): exptr.evaluate([t]) metric_name = exptr.problem_statement().metric_information.item().name - unnoised_value = t.final_measurement.metrics[metric_name].value + unnoised_value = t.final_measurement_or_die.metrics[metric_name].value noisy_exptr.evaluate([t]) - noised_value1 = t.final_measurement.metrics[metric_name].value + noised_value1 = t.final_measurement_or_die.metrics[metric_name].value noisy_exptr.evaluate([t]) - noised_value2 = t.final_measurement.metrics[metric_name].value + noised_value2 = t.final_measurement_or_die.metrics[metric_name].value # Seldom noise is only injected sporadically. if 'SELDOM' not in noise and noise != 'NO_NOISE': @@ -131,7 +132,7 @@ def testSeedDeterminism(self): for _ in range(10): noisy_exptr.evaluate([t]) noise_value_sequence.append( - t.final_measurement.metrics[metric_name].value + t.final_measurement_or_die.metrics[metric_name].value ) # Global NP seed should not affect randomness. 
@@ -144,7 +145,7 @@ def testSeedDeterminism(self): for _ in range(10): noisy_exptr.evaluate([t]) noise_value_sequence_after.append( - t.final_measurement.metrics[metric_name].value + t.final_measurement_or_die.metrics[metric_name].value ) self.assertSequenceAlmostEqual( noise_value_sequence, noise_value_sequence_after diff --git a/vizier/_src/benchmarks/experimenters/normalizing_experimenter_test.py b/vizier/_src/benchmarks/experimenters/normalizing_experimenter_test.py index dc276bc6e..5b6c02aa6 100644 --- a/vizier/_src/benchmarks/experimenters/normalizing_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/normalizing_experimenter_test.py @@ -68,7 +68,7 @@ def testNormalizationApply(self, func): metric_name = exptr.problem_statement().metric_information.item().name normalizing_exptr.evaluate([t]) - normalized_value = t.final_measurement.metrics[metric_name].value + normalized_value = t.final_measurement_or_die.metrics[metric_name].value self.assertBetween(normalized_value, -10, 10) diff --git a/vizier/_src/benchmarks/experimenters/numpy_experimenter_test.py b/vizier/_src/benchmarks/experimenters/numpy_experimenter_test.py index 0d777fa33..6b032fbfc 100644 --- a/vizier/_src/benchmarks/experimenters/numpy_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/numpy_experimenter_test.py @@ -57,7 +57,8 @@ def testNumpyExperimenter(self, func): metric_name = exptr.problem_statement().metric_information.item().name self.assertAlmostEqual( func(np.array([0.0, 1.0])), - t.final_measurement.metrics[metric_name].value) + t.final_measurement_or_die.metrics[metric_name].value, + ) self.assertEqual(t.status, pyvizier.TrialStatus.COMPLETED) def testNonFinite(self): @@ -79,7 +80,7 @@ def testNonFinite(self): trials = [t1, t2] exptr.evaluate(trials) for trial in trials: - self.assertEmpty(trial.final_measurement.metrics) + self.assertEmpty(trial.final_measurement_or_die.metrics) self.assertTrue(trial.infeasible) def testNotInSearchSpace(self): diff --git a/vizier/_src/benchmarks/experimenters/shifting_experimenter_test.py b/vizier/_src/benchmarks/experimenters/shifting_experimenter_test.py index daa3670cd..9b7234ae9 100644 --- a/vizier/_src/benchmarks/experimenters/shifting_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/shifting_experimenter_test.py @@ -68,8 +68,9 @@ def test_numpy_experimenter(self, func): metric_name = exptr.problem_statement().metric_information.item().name self.assertAlmostEqual( - t_shifted.final_measurement.metrics[metric_name].value, - t.final_measurement.metrics[metric_name].value) + t_shifted.final_measurement_or_die.metrics[metric_name].value, + t.final_measurement_or_die.metrics[metric_name].value, + ) self.assertEqual(t.status, t_shifted.status) # Check parameter bounds are shifted. 
@@ -106,8 +107,9 @@ def test_evaluate_shift(self): metric_name = exptr.problem_statement().metric_information.item().name self.assertAlmostEqual( - t_shifted.final_measurement.metrics[metric_name].value, - t.final_measurement.metrics[metric_name].value) + t_shifted.final_measurement_or_die.metrics[metric_name].value, + t.final_measurement_or_die.metrics[metric_name].value, + ) self.assertEqual(t.status, t_shifted.status) self.assertNotEqual(t.parameters, t_shifted.parameters) diff --git a/vizier/_src/benchmarks/experimenters/sign_flip_experimenter_test.py b/vizier/_src/benchmarks/experimenters/sign_flip_experimenter_test.py index 226ea3b8e..7bb57c25f 100644 --- a/vizier/_src/benchmarks/experimenters/sign_flip_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/sign_flip_experimenter_test.py @@ -55,28 +55,32 @@ def test_flipped_bbob(self, flip_objectives_only: bool): flipped_exptr.evaluate([suggestion_for_flipped]) self.assertEqual( - suggestion_for_original.final_measurement.metrics[metric_name].value, + suggestion_for_original.final_measurement_or_die.metrics[ + metric_name + ].value, -1.0 - * suggestion_for_flipped.final_measurement.metrics[metric_name].value, + * suggestion_for_flipped.final_measurement_or_die.metrics[ + metric_name + ].value, ) aux_metric_name = metric_name + '_before_noise' if flip_objectives_only: self.assertEqual( - suggestion_for_original.final_measurement.metrics[ + suggestion_for_original.final_measurement_or_die.metrics[ aux_metric_name ].value, - suggestion_for_flipped.final_measurement.metrics[ + suggestion_for_flipped.final_measurement_or_die.metrics[ aux_metric_name ].value, ) else: self.assertEqual( - suggestion_for_original.final_measurement.metrics[ + suggestion_for_original.final_measurement_or_die.metrics[ aux_metric_name ].value, -1.0 - * suggestion_for_flipped.final_measurement.metrics[ + * suggestion_for_flipped.final_measurement_or_die.metrics[ aux_metric_name ].value, ) diff --git a/vizier/_src/benchmarks/experimenters/sparse_experimenter_test.py b/vizier/_src/benchmarks/experimenters/sparse_experimenter_test.py index c5dec55b9..98205f069 100644 --- a/vizier/_src/benchmarks/experimenters/sparse_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/sparse_experimenter_test.py @@ -53,7 +53,9 @@ def test_sparse_problem_statement_and_evaluate(self, sparse_param_config): trial.parameters[pc.name] = 2.0 # Test that the evaluation uses only the non-sparse parameters. experimenter.evaluate([trial]) - self.assertEqual(trial.final_measurement.metrics['bbob_eval'].value, 8.0) + self.assertEqual( + trial.final_measurement_or_die.metrics['bbob_eval'].value, 8.0 + ) # Test that the evaluated trial parameters remained the same. self.assertLen(trial.parameters, 10) for trial_param_name, exptr_param_config in zip( @@ -106,7 +108,9 @@ def _count_param_by_type(problem): trial.parameters[pc.name] = 2.0 # Test that the evaluation uses only the non-sparse parameters. experimenter.evaluate([trial]) - self.assertEqual(trial.final_measurement.metrics['bbob_eval'].value, 8.0) + self.assertEqual( + trial.final_measurement_or_die.metrics['bbob_eval'].value, 8.0 + ) # Test that the evaluated trial parameters remained the same. 
total_param_count = 2 + int_count + cat_count + discrete_count + float_count self.assertLen(trial.parameters, total_param_count) diff --git a/vizier/_src/benchmarks/experimenters/surrogate_experimenter_test.py b/vizier/_src/benchmarks/experimenters/surrogate_experimenter_test.py index c2511b84d..94529c923 100644 --- a/vizier/_src/benchmarks/experimenters/surrogate_experimenter_test.py +++ b/vizier/_src/benchmarks/experimenters/surrogate_experimenter_test.py @@ -65,7 +65,7 @@ def test_e2e(self): for trial in trials: self.assertEqual(trial.status, vz.TrialStatus.COMPLETED) self.assertContainsSubset( - trial.final_measurement.metrics.keys(), ['metric'] + trial.final_measurement_or_die.metrics.keys(), ['metric'] )
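
For reference, a minimal sketch of the access pattern this patch standardizes on. It is an assumption based on the usage above (not something the patch itself states) that `Trial.final_measurement` is Optional while `final_measurement_or_die` raises when the trial has no final measurement; the trial construction mirrors the test code in the hunks above.

    from vizier import pyvizier as vz

    trial = vz.Trial(parameters={'f1': 0.0})
    trial.complete(measurement=vz.Measurement(metrics={'objective': 1123.3}))

    # Old access: `final_measurement` is Optional, so callers (and type
    # checkers) have to guard against None before reading `.metrics`.
    measurement = trial.final_measurement
    if measurement is not None:
      old_value = measurement.metrics['objective'].value

    # New access used throughout this patch: assumed to raise if the trial
    # has no final measurement, so the None check can be dropped.
    value = trial.final_measurement_or_die.metrics['objective'].value
    assert value == 1123.3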