diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py
index 01fa2b1be5..9ff6afd034 100644
--- a/reframe/core/pipeline.py
+++ b/reframe/core/pipeline.py
@@ -1027,6 +1027,10 @@ def check_performance(self):
             return
 
         with os_ext.change_dir(self._stagedir):
+            # We first evaluate and log all performance values and then we
+            # check them against the reference. This way we always log them,
+            # even if they don't meet the reference.
+            perf_values = []
             for tag, expr in self.perf_patterns.items():
                 value = evaluate(expr)
                 key = '%s:%s' % (self._current_partition.fullname, tag)
@@ -1037,9 +1041,13 @@
                         "tag `%s' not resolved in references for `%s'" %
                         (tag, self._current_partition.fullname))
 
+                perf_values.append((value, self.reference[key]))
                 self._perf_logger.log_performance(logging.INFO, tag, value,
                                                   ref, low_thres, high_thres)
-                evaluate(assert_reference(value, ref, low_thres, high_thres))
+
+            for val, reference in perf_values:
+                refval, low_thres, high_thres = reference
+                evaluate(assert_reference(val, refval, low_thres, high_thres))
 
     def _copy_to_outputdir(self):
         """Copy checks interesting files to the output directory."""
diff --git a/unittests/test_pipeline.py b/unittests/test_pipeline.py
index 55ccfe5f2d..63aae7f073 100644
--- a/unittests/test_pipeline.py
+++ b/unittests/test_pipeline.py
@@ -605,3 +605,39 @@ def test_tag_resolution(self):
             }
         }
         self.test.check_performance()
+
+    def test_perf_var_evaluation(self):
+        # All performance values must be evaluated, even if the first one
+        # fails. To test this, we need an extraction function with a side
+        # effect when evaluated, whose result we check after calling
+        # `check_performance()`.
+        logfile = 'perf.log'
+
+        @sn.sanity_function
+        def extract_perf(patt, tag):
+            val = sn.evaluate(
+                sn.extractsingle(patt, self.perf_file.name, tag, float))
+
+            with open('perf.log', 'a') as fp:
+                fp.write('%s=%s' % (tag, val))
+
+            return val
+
+        self.test.perf_patterns = {
+            'value1': extract_perf(r'performance1 = (?P<v1>\S+)', 'v1'),
+            'value2': extract_perf(r'performance2 = (?P<v2>\S+)', 'v2'),
+            'value3': extract_perf(r'performance3 = (?P<v3>\S+)', 'v3')
+        }
+        self.write_performance_output(performance1=1.0,
+                                      performance2=1.8,
+                                      performance3=3.3)
+        with self.assertRaises(SanityError) as cm:
+            self.test.check_performance()
+
+        logfile = os.path.join(self.test.stagedir, logfile)
+        with open(logfile) as fp:
+            log_output = fp.read()
+
+        self.assertIn('v1', log_output)
+        self.assertIn('v2', log_output)
+        self.assertIn('v3', log_output)
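
Note on the pattern this patch introduces: the single evaluate-and-assert loop is split into two phases, so a value that misses its reference can no longer short-circuit the logging of the values that follow it. The sketch below is a minimal standalone illustration of that two-phase shape; the names (`log_value`, `check_all`) and the fractional-threshold arithmetic are assumptions made for the example, not ReFrame's actual API.

    # Minimal sketch of the two-phase pattern, assuming fractional
    # thresholds (low <= 0 <= high) relative to the reference value.
    # `log_value` and `check_all` are hypothetical names for illustration.

    def log_value(tag, value, ref):
        # Stand-in for self._perf_logger.log_performance(...)
        print('%s = %s (reference: %s)' % (tag, value, ref))

    def check_all(values, references):
        # Phase 1: evaluate and log every performance value unconditionally.
        checks = []
        for tag, value in values.items():
            ref, low, high = references[tag]
            log_value(tag, value, ref)
            checks.append((tag, value, ref, low, high))

        # Phase 2: only now assert against the references. The first failing
        # value raises, but everything has already been logged by this point.
        for tag, value, ref, low, high in checks:
            if not ref * (1 + low) <= value <= ref * (1 + high):
                raise AssertionError('%s: %s outside reference %s' %
                                     (tag, value, ref))

    # With the values from the unit test, 'value2' (1.8 against a
    # reference of 2.0 +/- 5%) fails, yet all three values are logged.
    check_all({'value1': 1.0, 'value2': 1.8, 'value3': 3.3},
              {'value1': (1.0, -0.1, 0.1),
               'value2': (2.0, -0.05, 0.05),
               'value3': (3.3, -0.1, 0.1)})

With the pre-patch single-loop shape, the assertion for the failing value would have fired before the later values were ever evaluated or logged. That is exactly what `test_perf_var_evaluation` verifies: the file write in `extract_perf` runs only when the deferred expression built by `@sn.sanity_function` is evaluated inside `check_performance()`, so the presence of all three tags in the log proves all three expressions were evaluated.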