From d80f008584e596dfa304f52f158f33c81171f73d Mon Sep 17 00:00:00 2001
From: sobolevn <mail@sobolevn.me>
Date: Tue, 25 Jan 2022 23:44:30 +0300
Subject: [PATCH 1/3] bpo-46523: fix tests rerun when `setUp[Class|Module]`
 fails

---
 Lib/test/libregrtest/main.py | 26 ++++++++++++++++++++++++--
 Lib/test/support/__init__.py |  2 +-
 Lib/test/test_typing.py      |  1 +
 3 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index fc3c2b96920556..6e7f8c90b769c9 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -27,6 +27,14 @@
 # Must be smaller than buildbot "1200 seconds without output" limit.
 EXIT_TIMEOUT = 120.0
 
+# bpo-46523: When rerunning tests, we might need to rerun the whole
+# class or module suite if some of its life-cycle hooks fail.
+_TEST_LIFECYCLE_HOOKS = frozenset((
+    'setUp', 'tearDown',
+    'setUpClass', 'tearDownClass',
+    'setUpModule', 'tearDownModule',
+))
+
 
 class Regrtest:
     """Execute a test suite.
@@ -321,8 +329,12 @@ def rerun_failed_tests(self):
 
             errors = result.errors or []
             failures = result.failures or []
-            error_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in errors]
-            failure_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in failures]
+            error_names = [
+                self.normalize_test_name(test_full_name, is_error=True)
+                for (test_full_name, *_) in errors]
+            failure_names = [
+                self.normalize_test_name(test_full_name)
+                for (test_full_name, *_) in failures]
             self.ns.verbose = True
             orig_match_tests = self.ns.match_tests
             if errors or failures:
@@ -348,6 +360,16 @@ def rerun_failed_tests(self):
 
         self.display_result()
 
+    def normalize_test_name(self, test_full_name, is_error=False):
+        short_name = test_full_name.split(" ")[0]
+        if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
+            # This means that we have a failure in a life-cycle hook,
+            # we need to rerun the whole module or class suite.
+            lpar = test_full_name.index('(')
+            rpar = test_full_name.index(')')
+            return test_full_name[lpar + 1: rpar].split('.')[-1]
+        return short_name
+
     def display_result(self):
         # If running the test suite for PGO then no one cares about results.
         if self.ns.pgo:
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 1e4935fc3e6174..bf173d46864053 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -1030,7 +1030,7 @@ def _run_suite(suite):
     if junit_xml_list is not None:
         junit_xml_list.append(result.get_xml_element())
 
-    if not result.testsRun and not result.skipped:
+    if not result.testsRun and not result.skipped and not result.errors:
         raise TestDidNotRun
     if not result.wasSuccessful():
         if len(result.errors) == 1 and not result.failures:
diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py
index 5777656552d797..73141c6669a0d4 100644
--- a/Lib/test/test_typing.py
+++ b/Lib/test/test_typing.py
@@ -4001,6 +4001,7 @@ def cleanup(self):
 
     @classmethod
     def setUpClass(cls):
+        raise ValueError
         sys.modules['typing'] = cls.module
         global UserId
         UserId = cls.module.NewType('UserId', int)

From 99b4887337120fd8f7c0756a1db2cfd6f51ea0c9 Mon Sep 17 00:00:00 2001
From: sobolevn <mail@sobolevn.me>
Date: Wed, 26 Jan 2022 00:04:02 +0300
Subject: [PATCH 2/3] Fix CI failure

---
 Lib/test/test_typing.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py
index 73141c6669a0d4..5777656552d797 100644
--- a/Lib/test/test_typing.py
+++ b/Lib/test/test_typing.py
@@ -4001,7 +4001,6 @@ def cleanup(self):
 
     @classmethod
     def setUpClass(cls):
-        raise ValueError
         sys.modules['typing'] = cls.module
         global UserId
         UserId = cls.module.NewType('UserId', int)

From 9f2596193d64545ef0c326f4ad1f7a120f34c3a0 Mon Sep 17 00:00:00 2001
From: sobolevn <mail@sobolevn.me>
Date: Fri, 7 Apr 2023 12:48:44 +0300
Subject: [PATCH 3/3] Add test cases

---
 Lib/test/libregrtest/main.py |  11 ++-
 Lib/test/test_regrtest.py    | 154 +++++++++++++++++++++++++++++++++++
 2 files changed, 162 insertions(+), 3 deletions(-)

diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 87d3514281e82f..3c3509d0303371 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -29,10 +29,10 @@
 # Must be smaller than buildbot "1200 seconds without output" limit.
 EXIT_TIMEOUT = 120.0
 
-# bpo-46523: When rerunning tests, we might need to rerun the whole
+# gh-90681: When rerunning tests, we might need to rerun the whole
 # class or module suite if some of its life-cycle hooks fail.
+# Test level hooks are not affected.
 _TEST_LIFECYCLE_HOOKS = frozenset((
-    'setUp', 'tearDown',
     'setUpClass', 'tearDownClass',
     'setUpModule', 'tearDownModule',
 ))
@@ -376,11 +376,16 @@ def rerun_failed_tests(self):
 
         self.display_result()
 
-    def normalize_test_name(self, test_full_name, is_error=False):
+    def normalize_test_name(self, test_full_name, *, is_error=False):
         short_name = test_full_name.split(" ")[0]
         if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
             # This means that we have a failure in a life-cycle hook,
             # we need to rerun the whole module or class suite.
+            # Basically the error looks like this:
+            # ERROR: setUpClass (test.test_reg_ex.RegTest)
+            # or
+            # ERROR: setUpModule (test.test_reg_ex)
+            # So, we need to parse the class / module name.
             lpar = test_full_name.index('(')
             rpar = test_full_name.index(')')
             return test_full_name[lpar + 1: rpar].split('.')[-1]
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index baae4efc2ad789..ac49fbae847726 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -1120,6 +1120,160 @@ def test_fail_once(self):
         self.check_executed_tests(output, [testname],
                                   rerun={testname: "test_fail_once"})
 
+    def test_rerun_setup_class_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                @classmethod
+                def setUpClass(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: "ExampleTests"})
+
+    def test_rerun_teardown_class_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                @classmethod
+                def tearDownClass(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: "ExampleTests"})
+
+    def test_rerun_setup_module_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            def setUpModule():
+                raise RuntimeError('Fail')
+
+            class ExampleTests(unittest.TestCase):
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: testname})
+
+    def test_rerun_teardown_module_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            def tearDownModule():
+                raise RuntimeError('Fail')
+
+            class ExampleTests(unittest.TestCase):
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: testname})
+
+    def test_rerun_setup_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                def setUp(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: "test_success"})
+
+    def test_rerun_teardown_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.TestCase):
+                def tearDown(self):
+                    raise RuntimeError('Fail')
+
+                def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: "test_success"})
+
+    def test_rerun_async_setup_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.IsolatedAsyncioTestCase):
+                async def asyncSetUp(self):
+                    raise RuntimeError('Fail')
+
+                async def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: "test_success"})
+
+    def test_rerun_async_teardown_hook_failure(self):
+        # FAILURE then FAILURE
+        code = textwrap.dedent("""
+            import unittest
+
+            class ExampleTests(unittest.IsolatedAsyncioTestCase):
+                async def asyncTearDown(self):
+                    raise RuntimeError('Fail')
+
+                async def test_success(self):
+                    return
+        """)
+        testname = self.create_test(code=code)
+
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+        self.check_executed_tests(output, testname,
+                                  failed=[testname],
+                                  rerun={testname: "test_success"})
+
     def test_no_tests_ran(self):
         code = textwrap.dedent("""
             import unittest