Add zfs-test facility to automatically rerun failing tests
This was a project proposed as part of the Quality theme for the
hackathon at the 2021 OpenZFS Developer Summit. The idea is to improve
the usability of the automated tests that run when a PR is created by
automatically rerunning failing tests, making flaky tests less
impactful.

Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes openzfs#12740
pcd1193182 authored and nicman23 committed Aug 22, 2022
1 parent f8e54bd commit 8a761d2
Showing 5 changed files with 144 additions and 29 deletions.
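For orientation, a hedged usage sketch of the new facility (the commands mirror the workflow changes below; the -s size and -r runfile are simply the values CI uses):

    # Run the functional suite, automatically rerunning failing tests
    # that zts-report classifies as flaky ("maybe") failures.
    /usr/share/zfs/zfs-tests.sh -vR -s 3G

    # The sanity runfile is driven the same way.
    /usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity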
2 changes: 1 addition & 1 deletion .github/workflows/zfs-tests-functional.yml
@@ -64,7 +64,7 @@ jobs:
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Tests
run: |
/usr/share/zfs/zfs-tests.sh -v -s 3G
/usr/share/zfs/zfs-tests.sh -vR -s 3G
- name: Prepare artifacts
if: failure()
run: |
2 changes: 1 addition & 1 deletion .github/workflows/zfs-tests-sanity.yml
@@ -60,7 +60,7 @@ jobs:
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Tests
run: |
/usr/share/zfs/zfs-tests.sh -v -s 3G -r sanity
/usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity
- name: Prepare artifacts
if: failure()
run: |
38 changes: 35 additions & 3 deletions scripts/zfs-tests.sh
@@ -21,6 +21,10 @@
# CDDL HEADER END
#

#
# Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
#

BASE_DIR=$(dirname "$0")
SCRIPT_COMMON=common.sh
if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
@@ -48,6 +52,7 @@ ITERATIONS=1
ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
UNAME=$(uname -s)
RERUN=""

# Override some defaults if on FreeBSD
if [ "$UNAME" = "FreeBSD" ] ; then
@@ -322,6 +327,7 @@ OPTIONS:
-f Use files only, disables block device tests
-S Enable stack tracer (negative performance impact)
-c Only create and populate constrained path
-R Automatically rerun failing tests
-n NFSFILE Use the nfsfile to determine the NFS configuration
-I NUM Number of iterations
-d DIR Use DIR for files and loopback devices
@@ -348,7 +354,7 @@ $0 -x
EOF
}

while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
while getopts 'hvqxkfScRn:d:s:r:?t:T:u:I:' OPTION; do
case $OPTION in
h)
usage
@@ -376,6 +382,9 @@ while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
constrain_path
exit
;;
R)
RERUN="yes"
;;
n)
nfsfile=$OPTARG
[ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile"
@@ -694,12 +703,35 @@ ${TEST_RUNNER} ${QUIET:+-q} \
-i "${STF_SUITE}" \
-I "${ITERATIONS}" \
2>&1 | tee "$RESULTS_FILE"

#
# Analyze the results.
#
${ZTS_REPORT} "$RESULTS_FILE" >"$REPORT_FILE"
${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE"
RESULT=$?

if [ "$RESULT" -eq "2" ] && [ -n "$RERUN" ]; then
MAYBES="$($ZTS_REPORT --list-maybes)"
TEMP_RESULTS_FILE=$(mktemp -u -t zts-results-tmp.XXXXX -p "$FILEDIR")
TEST_LIST=$(mktemp -u -t test-list.XXXXX -p "$FILEDIR")
grep "^Test:.*\[FAIL\]" "$RESULTS_FILE" >"$TEMP_RESULTS_FILE"
for test_name in $MAYBES; do
grep "$test_name " "$TEMP_RESULTS_FILE" >>"$TEST_LIST"
done
${TEST_RUNNER} ${QUIET:+-q} \
-c "${RUNFILES}" \
-T "${TAGS}" \
-i "${STF_SUITE}" \
-I "${ITERATIONS}" \
-l "${TEST_LIST}" \
2>&1 | tee "$RESULTS_FILE"
#
# Analyze the results.
#
${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE"
RESULT=$?
fi


cat "$REPORT_FILE"

RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
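For reference, both the grep in the rerun block above and test-runner's new log filter key off the per-test status lines that test-runner writes to the results file; a hypothetical example of the shape they match (the test path, user, and timing fields are illustrative):

    Test: /usr/share/zfs/zfs-tests/tests/functional/somedir/sometest (run as root) [00:03] [FAIL]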
85 changes: 68 additions & 17 deletions tests/test-runner/bin/test-runner.py.in
@@ -27,6 +27,7 @@ except ImportError:
import os
import sys
import ctypes
import re

from datetime import datetime
from optparse import OptionParser
@@ -495,6 +496,9 @@ Tags: %s
self.timeout, self.user, self.pre, pre_user, self.post, post_user,
self.failsafe, failsafe_user, self.tags)

def filter(self, keeplist):
self.tests = [x for x in self.tests if x in keeplist]

def verify(self):
"""
Check the pre/post/failsafe scripts, user and tests in this TestGroup.
@@ -656,6 +660,24 @@ class TestRun(object):

testgroup.verify()

def filter(self, keeplist):
for group in list(self.testgroups.keys()):
if group not in keeplist:
del self.testgroups[group]
continue

g = self.testgroups[group]

if g.pre and os.path.basename(g.pre) in keeplist[group]:
continue

g.filter(keeplist[group])

for test in list(self.tests.keys()):
directory, base = os.path.split(test)
if directory not in keeplist or base not in keeplist[directory]:
del self.tests[test]

def read(self, options):
"""
Read in the specified runfiles, and apply the TestRun properties
@@ -743,10 +765,18 @@

for test in sorted(self.tests.keys()):
config.add_section(test)
for prop in Test.props:
if prop not in self.props:
config.set(test, prop,
getattr(self.tests[test], prop))

for testgroup in sorted(self.testgroups.keys()):
config.add_section(testgroup)
config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
for prop in TestGroup.props:
if prop not in self.props:
config.set(testgroup, prop,
getattr(self.testgroups[testgroup], prop))

try:
with open(options.template, 'w') as f:
@@ -796,7 +826,7 @@ class TestRun(object):
return

global LOG_FILE_OBJ
if options.cmd != 'wrconfig':
if not options.template:
try:
old = os.umask(0)
os.makedirs(self.outputdir, mode=0o777)
@@ -939,26 +969,44 @@ def find_tests(testrun, options):
testrun.addtest(p, options)


def filter_tests(testrun, options):
try:
fh = open(options.logfile, "r")
except Exception as e:
fail('%s' % e)

failed = {}
while True:
line = fh.readline()
if not line:
break
m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
if not m:
continue
group, test = m.group(1, 2)
try:
failed[group].append(test)
except KeyError:
failed[group] = [test]
fh.close()

testrun.filter(failed)


def fail(retstr, ret=1):
print('%s: %s' % (sys.argv[0], retstr))
exit(ret)


def options_cb(option, opt_str, value, parser):
path_options = ['outputdir', 'template', 'testdir']

if option.dest == 'runfiles' and '-w' in parser.rargs or \
option.dest == 'template' and '-c' in parser.rargs:
fail('-c and -w are mutually exclusive.')
path_options = ['outputdir', 'template', 'testdir', 'logfile']

if opt_str in parser.rargs:
fail('%s may only be specified once.' % opt_str)

if option.dest == 'runfiles':
parser.values.cmd = 'rdconfig'
value = set(os.path.abspath(p) for p in value.split(','))
if option.dest == 'template':
parser.values.cmd = 'wrconfig'
if option.dest == 'tags':
value = [x.strip() for x in value.split(',')]

@@ -975,6 +1023,10 @@ def parse_args():
help='Specify tests to run via config files.')
parser.add_option('-d', action='store_true', default=False, dest='dryrun',
help='Dry run. Print tests, but take no other action.')
parser.add_option('-l', action='callback', callback=options_cb,
default=None, dest='logfile', metavar='logfile',
type='string',
help='Read logfile and re-run tests which failed.')
parser.add_option('-g', action='store_true', default=False,
dest='do_groups', help='Make directories TestGroups.')
parser.add_option('-o', action='callback', callback=options_cb,
@@ -1021,9 +1073,6 @@ def parse_args():
help='Number of times to run the test run.')
(options, pathnames) = parser.parse_args()

if not options.runfiles and not options.template:
options.cmd = 'runtests'

if options.runfiles and len(pathnames):
fail('Extraneous arguments.')

@@ -1034,18 +1083,20 @@

def main():
options = parse_args()

testrun = TestRun(options)

if options.cmd == 'runtests':
find_tests(testrun, options)
elif options.cmd == 'rdconfig':
if options.runfiles:
testrun.read(options)
elif options.cmd == 'wrconfig':
else:
find_tests(testrun, options)

if options.logfile:
filter_tests(testrun, options)

if options.template:
testrun.write(options)
exit(0)
else:
fail('Unknown command specified')

testrun.complete_outputdirs()
testrun.run(options)
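A hedged invocation sketch for the new -l option (the runfile name, suite path, and log location below are illustrative, not taken from this change):

    # Re-run only the tests that a previous run recorded as [FAIL].
    test-runner.py -c common.run -i /usr/share/zfs/zfs-tests \
        -l /var/tmp/test_results/previous-run/log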
46 changes: 39 additions & 7 deletions tests/test-runner/bin/zts-report.py.in
@@ -21,6 +21,7 @@
import os
import re
import sys
import argparse

#
# This script parses the stdout of zfstest, which has this format:
@@ -381,10 +382,33 @@ def process_results(pathname):
return d


class ListMaybesAction(argparse.Action):
def __init__(self,
option_strings,
dest="SUPPRESS",
default="SUPPRESS",
help="list flaky tests and exit"):
super(ListMaybesAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)

def __call__(self, parser, namespace, values, option_string=None):
for test in maybe:
print(test)
sys.exit(0)


if __name__ == "__main__":
if len(sys.argv) != 2:
usage('usage: %s <pathname>' % sys.argv[0])
results = process_results(sys.argv[1])
parser = argparse.ArgumentParser(description='Analyze ZTS logs')
parser.add_argument('logfile')
parser.add_argument('--list-maybes', action=ListMaybesAction)
parser.add_argument('--no-maybes', action='store_false', dest='maybes')
args = parser.parse_args()

results = process_results(args.logfile)

if summary['total'] == 0:
print("\n\nNo test results were found.")
@@ -393,6 +417,7 @@

expected = []
unexpected = []
all_maybes = True

for test in list(results.keys()):
if results[test] == "PASS":
@@ -405,11 +430,16 @@
if setup in maybe and maybe[setup][0] == "SKIP":
continue

if ((test not in known or results[test] not in known[test][0]) and
(test not in maybe or results[test] not in maybe[test][0])):
unexpected.append(test)
else:
if (test in known and results[test] in known[test][0]):
expected.append(test)
elif test in maybe and results[test] in maybe[test][0]:
if results[test] == 'SKIP' or args.maybes:
expected.append(test)
elif not args.maybes:
unexpected.append(test)
else:
unexpected.append(test)
all_maybes = False

print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
@@ -455,5 +485,7 @@ if __name__ == "__main__":

if len(unexpected) == 0:
sys.exit(0)
elif not args.maybes and all_maybes:
sys.exit(2)
else:
sys.exit(1)
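A hedged sketch of how the new zts-report options and exit codes compose (the log path is illustrative):

    # Print the list of tests classified as flaky ("maybe") and exit.
    zts-report.py --list-maybes

    # With --no-maybes, flaky failures are reported as unexpected; per the
    # change above, exit status 2 means every unexpected result came from
    # a flaky test, 1 means at least one real failure, and 0 means none.
    zts-report.py --no-maybes /var/tmp/test_results/current/log
    if [ $? -eq 2 ]; then
        echo "only known-flaky tests failed; a rerun may clear them"
    fi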
