diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ffa500d..67ca4c08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com).
 * (flags) `absl.flags.argparse_flags.ArgumentParser` now correctly inherits
   an empty instance of `FlagValues` to ensure that absl flags, such as
   `--flagfile`, `--undefok` are supported.
+* (testing) Do not exit 5 if tests were skipped on Python 3.12. This follows
+  the CPython change in https://github.com/python/cpython/pull/113856.
 
 ### Fixed
 
diff --git a/absl/testing/BUILD b/absl/testing/BUILD
index 154d9ce1..b9e47909 100644
--- a/absl/testing/BUILD
+++ b/absl/testing/BUILD
@@ -197,7 +197,10 @@ py_test(
     name = "tests/absltest_test",
     size = "small",
     srcs = ["tests/absltest_test.py"],
-    data = [":tests/absltest_test_helper"],
+    data = [
+        ":tests/absltest_test_helper",
+        ":tests/absltest_test_helper_skipped",
+    ],
     python_version = "PY3",
     srcs_version = "PY3",
     deps = [
@@ -221,6 +224,13 @@ py_binary(
     ],
 )
 
+py_binary(
+    name = "tests/absltest_test_helper_skipped",
+    testonly = 1,
+    srcs = ["tests/absltest_test_helper_skipped.py"],
+    deps = [":absltest"],
+)
+
 py_test(
     name = "tests/flagsaver_test",
     srcs = ["tests/flagsaver_test.py"],
diff --git a/absl/testing/absltest.py b/absl/testing/absltest.py
index a3b4e348..e43cb820 100644
--- a/absl/testing/absltest.py
+++ b/absl/testing/absltest.py
@@ -2689,9 +2689,9 @@ def run_tests(
   result, fail_when_no_tests_ran = _run_and_get_tests_result(
       argv, args, kwargs, xml_reporter.TextAndXMLTestRunner
   )
-  if fail_when_no_tests_ran and result.testsRun == 0:
-    # Python 3.12 unittest exits with 5 when no tests ran. The code comes from
-    # pytest which does the same thing.
+  if fail_when_no_tests_ran and result.testsRun == 0 and not result.skipped:
+    # Python 3.12 unittest exits with 5 when no tests ran. The exit code 5 comes
+    # from pytest which does the same thing.
     sys.exit(5)
   sys.exit(not result.wasSuccessful())
 
diff --git a/absl/testing/tests/absltest_test.py b/absl/testing/tests/absltest_test.py
index baeccaf5..034cfca5 100644
--- a/absl/testing/tests/absltest_test.py
+++ b/absl/testing/tests/absltest_test.py
@@ -24,6 +24,7 @@
 import stat
 import string
 import subprocess
+import sys
 import tempfile
 import textwrap
 from typing import Optional
@@ -37,11 +38,18 @@ class BaseTestCase(absltest.TestCase):
 
-  def _get_helper_exec_path(self):
-    helper = 'absl/testing/tests/absltest_test_helper'
+  def _get_helper_exec_path(self, helper_name):
+    helper = 'absl/testing/tests/' + helper_name
     return _bazelize_command.get_executable_path(helper)
 
-  def run_helper(self, test_id, args, env_overrides, expect_success):
+  def run_helper(
+      self,
+      test_id,
+      args,
+      env_overrides,
+      expect_success,
+      helper_name=None,
+  ):
     env = absltest_env.inherited_env()
     for key, value in env_overrides.items():
       if value is None:
@@ -50,31 +58,48 @@ def run_helper(self, test_id, args, env_overrides, expect_success):
       else:
         env[key] = value
 
-    command = [self._get_helper_exec_path(),
-               '--test_id={}'.format(test_id)] + args
+    if helper_name is None:
+      helper_name = 'absltest_test_helper'
+    command = [self._get_helper_exec_path(helper_name)]
+    if test_id is not None:
+      command.append('--test_id={}'.format(test_id))
+    command.extend(args)
     process = subprocess.Popen(
         command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
         universal_newlines=True)
     stdout, stderr = process.communicate()
 
     if expect_success:
       self.assertEqual(
-          0, process.returncode,
-          'Expected success, but failed with '
-          'stdout:\n{}\nstderr:\n{}\n'.format(stdout, stderr))
+          0,
+          process.returncode,
+          'Expected success, but failed with exit code {},'
+          ' stdout:\n{}\nstderr:\n{}\n'.format(
+              process.returncode, stdout, stderr
+          ),
+      )
     else:
-      self.assertEqual(
-          1, process.returncode,
+      self.assertGreater(
+          process.returncode,
+          0,
           'Expected failure, but succeeded with '
-          'stdout:\n{}\nstderr:\n{}\n'.format(stdout, stderr))
-    return stdout, stderr
+          'stdout:\n{}\nstderr:\n{}\n'.format(stdout, stderr),
+      )
+    return stdout, stderr, process.returncode
 
 
 class TestCaseTest(BaseTestCase):
 
   longMessage = True
 
-  def run_helper(self, test_id, args, env_overrides, expect_success):
-    return super(TestCaseTest, self).run_helper(test_id, args + ['HelperTest'],
-                                                env_overrides, expect_success)
+  def run_helper(
+      self, test_id, args, env_overrides, expect_success, helper_name=None
+  ):
+    return super(TestCaseTest, self).run_helper(
+        test_id,
+        args + ['HelperTest'],
+        env_overrides,
+        expect_success,
+        helper_name,
+    )
 
   def test_flags_no_env_var_no_flags(self):
     self.run_helper(
@@ -190,11 +215,12 @@ def test_xml_output_file_from_flag(self):
         expect_success=True)
 
   def test_app_run(self):
-    stdout, _ = self.run_helper(
+    stdout, _, _ = self.run_helper(
         7,
         ['--name=cat', '--name=dog'],
         {'ABSLTEST_TEST_HELPER_USE_APP_RUN': '1'},
-        expect_success=True)
+        expect_success=True,
+    )
     self.assertIn('Names in main() are: cat dog', stdout)
     self.assertIn('Names in test_name_flag() are: cat dog', stdout)
 
@@ -226,7 +252,7 @@ def test_expected_failure_if(self):
     self.assertEqual(1, 2)  # the expected failure
 
   def test_expected_failure_success(self):
-    _, stderr = self.run_helper(5, ['--', '-v'], {}, expect_success=False)
+    _, stderr, _ = self.run_helper(5, ['--', '-v'], {}, expect_success=False)
     self.assertRegex(stderr, r'FAILED \(.*unexpected successes=1\)')
 
   def test_assert_equal(self):
@@ -2099,8 +2125,9 @@ def run_tempfile_helper(self, cleanup, expected_paths):
         'ABSLTEST_TEST_HELPER_TEMPFILE_CLEANUP': cleanup,
         'TEST_TMPDIR': tmpdir.full_path,
     }
-    stdout, stderr = self.run_helper(0, ['TempFileHelperTest'], env,
-                                     expect_success=False)
+    stdout, stderr, _ = self.run_helper(
+        0, ['TempFileHelperTest'], env, expect_success=False
+    )
     output = ('\n=== Helper output ===\n'
               '----- stdout -----\n{}\n'
              '----- end stdout -----\n'
@@ -2473,6 +2500,30 @@ def test_foo(self):
     self.assertEmpty(res.errors)
 
 
+class ExitCodeTest(BaseTestCase):
+
+  def test_exits_5_when_no_tests(self):
+    expect_success = sys.version_info < (3, 12)
+    _, _, exit_code = self.run_helper(
+        None,
+        [],
+        {},
+        expect_success=expect_success,
+        helper_name='absltest_test_helper_skipped',
+    )
+    if not expect_success:
+      self.assertEqual(exit_code, 5)
+
+  def test_does_not_exit_5_when_all_skipped(self):
+    self.run_helper(
+        None,
+        [],
+        {'ABSLTEST_TEST_HELPER_DEFINE_CLASS': '1'},
+        expect_success=True,
+        helper_name='absltest_test_helper_skipped',
+    )
+
+
 def _listdir_recursive(path):
   for dirname, _, filenames in os.walk(path):
     yield dirname
diff --git a/absl/testing/tests/absltest_test_helper_skipped.py b/absl/testing/tests/absltest_test_helper_skipped.py
new file mode 100644
index 00000000..506fa4a7
--- /dev/null
+++ b/absl/testing/tests/absltest_test_helper_skipped.py
@@ -0,0 +1,17 @@
+"""Test helper for ExitCodeTest in absltest_test.py."""
+
+import os
+from absl.testing import absltest
+
+
+if os.environ.get("ABSLTEST_TEST_HELPER_DEFINE_CLASS") == "1":
+
+  class MyTest(absltest.TestCase):
+
+    @absltest.skip("Skipped for testing the exit code behavior")
+    def test_foo(self):
+      pass
+
+
+if __name__ == "__main__":
+  absltest.main()
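
Taken together, the patch makes `absltest.main()` distinguish "no tests ran because none were collected" (exit 5 on Python 3.12+, matching unittest and pytest) from "no tests ran because every test was skipped" (exit 0, since `result.skipped` is non-empty). The sketch below is a hypothetical way to observe both exit codes locally; it is not part of the patch, it assumes an absl-py build containing this change is installed, and the file names and the `SkippedTest` class are invented for the demo.

```python
# demo_exit_codes.py -- hypothetical sketch, not part of the patch.
# Generates two absltest modules, runs each in a subprocess, and prints
# the exit codes: a module with no tests should exit 5 on Python 3.12+
# (0 on older versions), while a module whose only test is skipped should
# exit 0, because result.skipped is non-empty.
import pathlib
import subprocess
import sys
import tempfile

NO_TESTS = "from absl.testing import absltest\n\nabsltest.main()\n"

ALL_SKIPPED = """\
from absl.testing import absltest


class SkippedTest(absltest.TestCase):  # invented name, mirrors the helper

  @absltest.skip("demo")
  def test_foo(self):
    pass


absltest.main()
"""

with tempfile.TemporaryDirectory() as tmp:
  cases = (
      # Exit code 5 only applies on Python 3.12+; older versions exit 0.
      ("no_tests.py", NO_TESTS, 5 if sys.version_info >= (3, 12) else 0),
      ("all_skipped.py", ALL_SKIPPED, 0),
  )
  for name, source, expected in cases:
    path = pathlib.Path(tmp, name)
    path.write_text(source)
    proc = subprocess.run([sys.executable, str(path)], capture_output=True)
    print(f"{name}: exit {proc.returncode} (expected {expected})")
```

The all-skipped case is the one the `not result.skipped` guard exists for: since CPython 3.12.1, decorator-skipped tests no longer count toward `testsRun`, so without the guard a fully skipped suite would be indistinguishable from an empty one and would exit 5.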