diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 9fd2321..0ea00ad 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -25,4 +25,4 @@ jobs:
         python -m pip install numba==0.54
     - name: Test
       run: |
-        python run_framework.py -f numba -r 1 --ignore-errors=0
+        python run_framework.py -f numba -r 1 --ignore-errors=0 --expected-fail=azimint_hist,contour_integral,correlation,covariance,durbin,mlp
diff --git a/run_framework.py b/run_framework.py
index 411b739..8ff2a4b 100644
--- a/run_framework.py
+++ b/run_framework.py
@@ -57,6 +57,10 @@ def run_benchmark(benchname, fname, preset, validate, repeat, timeout,
                         type=util.str2bool,
                         nargs="?",
                         default=False)
+    parser.add_argument("--expected-fail",
+                        type=str,
+                        nargs="?",
+                        default="")
     args = vars(parser.parse_args())

     parent_folder = pathlib.Path(__file__).parent.absolute()
@@ -64,7 +68,10 @@ def run_benchmark(benchname, fname, preset, validate, repeat, timeout,
     pathlist = pathlib.Path(bench_dir).rglob('*.json')
     benchnames = [os.path.basename(path)[:-5] for path in pathlist]
     benchnames.sort()
+
+    xfail = set(filter(None, args["expected_fail"].split(',')))
     failed = []
+    xpassed = []
     for benchname in benchnames:
         p = Process(target=run_benchmark,
                     args=(benchname, args["framework"], args["preset"],
@@ -74,10 +81,22 @@ def run_benchmark(benchname, fname, preset, validate, repeat, timeout,
         p.start()
         p.join()
         exit_code = p.exitcode
-        if exit_code != 0:
-            failed.append(benchname)
+        if benchname in xfail:
+            if exit_code == 0:
+                xpassed.append(benchname)
+        else:
+            if exit_code != 0:
+                failed.append(benchname)

     if len(failed) != 0:
         print(f"Failed: {len(failed)} out of {len(benchnames)}")
         for bench in failed:
             print(bench)
+
+    if len(xpassed) != 0:
+        print(f"Unexpectedly passed: {len(xpassed)} out of {len(benchnames)}")
+        for bench in xpassed:
+            print(bench)
+
+    if len(failed) != 0 or len(xpassed) != 0:
+        sys.exit(1)
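Note: the last hunk calls sys.exit(1), which assumes run_framework.py already imports sys at the top of the file; the import block is outside the hunks shown. For reference, below is a minimal standalone sketch of the expected-fail bookkeeping this patch adds. The benchmark names and exit codes are hypothetical stand-ins for what run_framework.py obtains from --expected-fail and the benchmark subprocesses.

import sys

# Hypothetical inputs standing in for the --expected-fail value and the
# per-benchmark subprocess exit codes collected by run_framework.py.
expected_fail = "azimint_hist,durbin"
results = {"adi": 0, "azimint_hist": 1, "durbin": 0}  # benchmark name -> exit code

# filter(None, ...) drops the empty string that split(',') yields when the
# flag is unset, so an empty --expected-fail means "no expected failures".
xfail = set(filter(None, expected_fail.split(',')))

failed, xpassed = [], []
for benchname, exit_code in results.items():
    if benchname in xfail:
        # An expected failure that now succeeds is flagged, not silently ignored.
        if exit_code == 0:
            xpassed.append(benchname)
    elif exit_code != 0:
        failed.append(benchname)

print("Failed:", failed)                # -> []
print("Unexpectedly passed:", xpassed)  # -> ['durbin']

# CI goes red both for real failures and for stale expected-fail entries.
if failed or xpassed:
    sys.exit(1)

Exiting nonzero on unexpected passes as well as real failures keeps the --expected-fail list in tests.yml from going stale: once a listed benchmark is fixed, CI stays red until its name is removed from the list.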