diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 92670a7..af7ffaa 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,9 +4,12 @@ CHANGELOG UNRELEASED ---------- -* Fix `pytest` requirement to `>=7.3` (`#159`_). +* Fix output when using ``TestCase.skipTest`` (`#169`_). + +* Fix ``pytest`` requirement to ``>=7.3`` (`#159`_). .. _#159: https://github.com/pytest-dev/pytest-subtests/issues/159 +.. _#169: https://github.com/pytest-dev/pytest-subtests/pull/169 0.13.1 (2024-07-16) ------------------- diff --git a/src/pytest_subtests/plugin.py b/src/pytest_subtests/plugin.py index 9914800..e87f717 100644 --- a/src/pytest_subtests/plugin.py +++ b/src/pytest_subtests/plugin.py @@ -98,6 +98,29 @@ def _from_test_report(cls, test_report: TestReport) -> SubTestReport: return super()._from_json(test_report._to_json()) +def _addSkip(self: TestCaseFunction, testcase: TestCase, reason: str) -> None: + from unittest.case import _SubTest # type: ignore[attr-defined] + + if isinstance(testcase, _SubTest): + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + if self._excinfo is not None: + exc_info = self._excinfo[-1] + self.addSubTest(testcase.test_case, testcase, exc_info) # type: ignore[attr-defined] + else: + # For python < 3.11: the non-subtest skips have to be added by `_originaladdSkip` only after all subtest + # failures are processed by `_addSubTest`. + if sys.version_info < (3, 11): + subtest_errors = [ + x + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + if len(subtest_errors) == 0: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + else: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + + def _addSubTest( self: TestCaseFunction, test_case: Any, @@ -122,10 +145,41 @@ def _addSubTest( node=self, call=call_info, report=sub_report ) + # For python < 3.11: add non-subtest skips once all subtest failures are processed by # `_addSubTest`. 
+ if sys.version_info < (3, 11): + from unittest.case import _SubTest # type: ignore[attr-defined] + + non_subtest_skip = [ + (x, y) + for x, y in self.instance._outcome.skipped + if not isinstance(x, _SubTest) + ] + subtest_errors = [ + (x, y) + for x, y in self.instance._outcome.errors + if isinstance(x, _SubTest) and y is not None + ] + # Check if we have non-subtest skips: if there are also subtest failures, non-subtest skips are not treated in + # `_addSubTest` and have to be added using `_originaladdSkip` after all subtest failures are processed. + if len(non_subtest_skip) > 0 and len(subtest_errors) > 0: + # Make sure we have processed the last subtest failure + last_subtest_error = subtest_errors[-1] + if exc_info is last_subtest_error[-1]: + # Add non-subtest skips (as they could not be treated in `_addSkip`) + for testcase, reason in non_subtest_skip: + self._originaladdSkip(testcase, reason) # type: ignore[attr-defined] + def pytest_configure(config: pytest.Config) -> None: TestCaseFunction.addSubTest = _addSubTest # type: ignore[attr-defined] TestCaseFunction.failfast = False # type: ignore[attr-defined] + # This condition is to prevent `TestCaseFunction._originaladdSkip` being assigned again in a subprocess from a + # parent python process where `addSkip` is already `_addSkip`. Such a case is when running tests in + # `test_subtests.py` where `pytester.runpytest` is used. Without this guard condition, `_originaladdSkip` is + # assigned to `_addSkip` which is wrong as well as causing an infinite recursion in some cases. + if not hasattr(TestCaseFunction, "_originaladdSkip"): + TestCaseFunction._originaladdSkip = TestCaseFunction.addSkip # type: ignore[attr-defined] + TestCaseFunction.addSkip = _addSkip # type: ignore[method-assign] # Hack (#86): the terminal does not know about the "subtests" # status, so it will by default turn the output to yellow. 
@@ -154,6 +208,9 @@ def pytest_unconfigure() -> None: del TestCaseFunction.addSubTest if hasattr(TestCaseFunction, "failfast"): del TestCaseFunction.failfast + if hasattr(TestCaseFunction, "_originaladdSkip"): + TestCaseFunction.addSkip = TestCaseFunction._originaladdSkip # type: ignore[method-assign] + del TestCaseFunction._originaladdSkip @pytest.fixture diff --git a/tests/test_subtests.py b/tests/test_subtests.py index e0607e5..6de4b54 100644 --- a/tests/test_subtests.py +++ b/tests/test_subtests.py @@ -340,6 +340,153 @@ def test_foo(self): ["collected 1 item", "* 3 xfailed, 1 passed in *"] ) + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + "FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + "FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + "Ran 1 test in .*", + "FAILED \(failures=6, skipped=4\)", + ] + ) + else: + result.stderr.re_match_lines( + [ + "FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + "FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + "Ran 1 test in .*", + "FAILED \(failures=6, skipped=4\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure.py::T::test_foo \[custom 
message\] \(i=0\) SUBSKIP \(skip subtest i=0\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=3\) SUBSKIP \(skip subtest i=3\) .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure.py::T::test_foo \[custom message\] \(i=9\) SUBFAIL .*", + "test_skip_with_failure.py::T::test_foo PASSED .*", + "[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=0", + "[custom message] (i=0) SUBSKIP [1] test_skip_with_failure.py:5: skip subtest i=3", + "[custom message] (i=4) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 4 < 4", + "[custom message] (i=9) SUBFAIL test_skip_with_failure.py::T::test_foo - AssertionError: assert 9 < 4", + ".* 6 failed, 1 passed, 4 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 item", "* 3 skipped, 1 passed in *"] + ) + + @pytest.mark.parametrize("runner", ["unittest", "pytest-normal", "pytest-xdist"]) + def test_skip_with_failure_and_non_subskip( + self, + pytester: pytest.Pytester, + monkeypatch: pytest.MonkeyPatch, + runner: Literal["unittest", "pytest-normal", "pytest-xdist"], + ) -> None: + monkeypatch.setenv("COLUMNS", "200") + p = pytester.makepyfile( + """ + import pytest + from unittest import expectedFailure, TestCase, main + + class T(TestCase): + def test_foo(self): + for i in range(10): + with self.subTest("custom message", i=i): + if i < 4: + self.skipTest(f"skip subtest i={i}") + assert i < 4 + self.skipTest(f"skip the test") + + if __name__ == '__main__': + main() + """ + ) + if runner == "unittest": + result = pytester.runpython(p) + if sys.version_info < (3, 11): + result.stderr.re_match_lines( + [ + "FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=4\).*", + "FAIL: test_foo \(__main__\.T\) \[custom message\] \(i=9\).*", + "Ran 1 test in .*", + 
"FAILED \(failures=6, skipped=5\)", + ] + ) + else: + result.stderr.re_match_lines( + [ + "FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=4\).*", + "FAIL: test_foo \(__main__\.T\.test_foo\) \[custom message\] \(i=9\).*", + "Ran 1 test in .*", + "FAILED \(failures=6, skipped=5\)", + ] + ) + elif runner == "pytest-normal": + result = pytester.runpytest(p, "-v", "-rsf") + # The `(i=0)` is not correct but it's given by pytest `TerminalReporter` without `--no-fold-skipped` + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\)", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip subtest i=3", + r"\[custom message\] \(i=0\) SUBSKIP \[1\] test_skip_with_failure_and_non_subskip.py:5: skip the test", + r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + # check with `--no-fold-skipped` (which gives the correct information) + if sys.version_info >= (3, 10): + result = pytester.runpytest(p, "-v", "--no-fold-skipped", "-rsf") + result.stdout.re_match_lines( + [ + r"test_skip_with_failure_and_non_subskip.py::T::test_foo \[custom message\] \(i=4\) SUBFAIL .*", + r"test_skip_with_failure_and_non_subskip.py::T::test_foo SKIPPED \(skip the test\).*", + r"\[custom message\] \(i=3\) SUBSKIP test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip subtest i=3", + r"SKIPPED test_skip_with_failure_and_non_subskip.py::T::test_foo - Skipped: skip the test", + r"\[custom message\] \(i=4\) SUBFAIL test_skip_with_failure_and_non_subskip.py::T::test_foo", + r".* 6 failed, 5 skipped in .*", + ] + ) + else: + pytest.xfail("Not producing the expected results (#5)") + result = pytester.runpytest(p) # type:ignore[unreachable] + result.stdout.fnmatch_lines( + ["collected 1 
item", "* 3 skipped, 1 passed in *"] + ) + class TestCapture: def create_file(self, pytester: pytest.Pytester) -> None: diff --git a/tox.ini b/tox.ini index a9a6ff8..f62010b 100644 --- a/tox.ini +++ b/tox.ini @@ -2,11 +2,6 @@ envlist = py38,py39,py310,py311,py312 [testenv] -passenv = - USER - USERNAME - TRAVIS - PYTEST_ADDOPTS deps = pytest-xdist>=3.3.0