Merge pull request #359 from vkarak/feature/eval-always-all-perf-data
[feat] Evaluate and log all performance values before asserting them
vkarak authored Jul 19, 2018
2 parents d91ab74 + 0885181 commit 571a258
Showing 2 changed files with 45 additions and 1 deletion.
10 changes: 9 additions & 1 deletion reframe/core/pipeline.py
@@ -1027,6 +1027,10 @@ def check_performance(self):
            return

        with os_ext.change_dir(self._stagedir):
            # We first evaluate and log all performance values and then we
            # check them against the reference. This way we always log them
            # even if they don't meet the reference.
            perf_values = []
            for tag, expr in self.perf_patterns.items():
                value = evaluate(expr)
                key = '%s:%s' % (self._current_partition.fullname, tag)
@@ -1037,9 +1041,13 @@ def check_performance(self):
"tag `%s' not resolved in references for `%s'" %
(tag, self._current_partition.fullname))

perf_values.append((value, self.reference[key]))
self._perf_logger.log_performance(logging.INFO, tag, value,
ref, low_thres, high_thres)
evaluate(assert_reference(value, ref, low_thres, high_thres))

for val, reference in perf_values:
refval, low_thres, high_thres = reference
evaluate(assert_reference(val, refval, low_thres, high_thres))

def _copy_to_outputdir(self):
"""Copy checks interesting files to the output directory."""
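The hunk above boils down to a two-phase pattern: evaluate and log every performance value first, and only then run the reference checks, so that one failing value cannot prevent the remaining ones from being logged. Below is a minimal, self-contained sketch of that pattern; the names `check_all`, `measurements`, `references`, and `log` are illustrative only and are not part of ReFrame's API.

def check_all(measurements, references, log):
    """Log every measured value first, then assert all of them."""
    pending = []
    for tag, value in measurements.items():
        ref, low, high = references[tag]
        log(tag, value, ref, low, high)              # logging always happens
        pending.append((tag, value, ref, low, high))

    # Assertions run only after everything has been logged; the first
    # failure still raises, but no value goes unlogged.
    for tag, value, ref, low, high in pending:
        if not ref * (1 + low) <= value <= ref * (1 + high):
            raise AssertionError('%s: %s not within reference %s' %
                                 (tag, value, ref))

# Example: both values are logged, then the check of 'v1' raises.
check_all({'v1': 1.0, 'v2': 1.8},
          {'v1': (1.4, -0.1, 0.1), 'v2': (1.7, -0.1, 0.1)},
          log=lambda *args: print(*args))

The first failing assertion still aborts `check_performance()`, but only after all values have reached the performance log, which is exactly what the new unit test below verifies.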
36 changes: 36 additions & 0 deletions unittests/test_pipeline.py
@@ -605,3 +605,39 @@ def test_tag_resolution(self):
            }
        }
        self.test.check_performance()

    def test_perf_var_evaluation(self):
        # All performance values must be evaluated, despite the first one
        # failing. To test this, we need an extract function that will have a
        # side effect when evaluated, whose result we will check after calling
        # `check_performance()`.
        logfile = 'perf.log'

        @sn.sanity_function
        def extract_perf(patt, tag):
            val = sn.evaluate(
                sn.extractsingle(patt, self.perf_file.name, tag, float))

            with open('perf.log', 'a') as fp:
                fp.write('%s=%s' % (tag, val))

            return val

        self.test.perf_patterns = {
            'value1': extract_perf(r'performance1 = (?P<v1>\S+)', 'v1'),
            'value2': extract_perf(r'performance2 = (?P<v2>\S+)', 'v2'),
            'value3': extract_perf(r'performance3 = (?P<v3>\S+)', 'v3')
        }
        self.write_performance_output(performance1=1.0,
                                      performance2=1.8,
                                      performance3=3.3)
        with self.assertRaises(SanityError) as cm:
            self.test.check_performance()

        logfile = os.path.join(self.test.stagedir, logfile)
        with open(logfile) as fp:
            log_output = fp.read()

        self.assertIn('v1', log_output)
        self.assertIn('v2', log_output)
        self.assertIn('v3', log_output)
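The test relies on ReFrame's deferred evaluation: because `extract_perf` is decorated with `@sn.sanity_function`, calling it only builds a deferred expression, and the file write (the side effect) happens later, when `check_performance()` evaluates each entry of `perf_patterns`. A rough standalone illustration of that behaviour follows, assuming the `reframe.utility.sanity` interface of that era; `record` and `calls` are made-up names used only for the demonstration.

import reframe.utility.sanity as sn

calls = []

@sn.sanity_function
def record(tag):
    calls.append(tag)   # side effect runs only when the expression is evaluated
    return tag

expr = record('v1')     # nothing recorded yet: `expr` is a deferred expression
assert calls == []

sn.evaluate(expr)       # the side effect fires here
assert calls == ['v1']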
