Merge pull request #65 from DavideCanton/fix/64
Report correct outcome in the replay file (#64)
prusse-martin authored Aug 28, 2024
2 parents 5761e53 + 9ed915a commit 0ae6bc8
Showing 2 changed files with 151 additions and 11 deletions.
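For context: pytest-replay records each test run as JSON lines in a replay script; the record appended at teardown, which the diff below builds, is the one that carries the test's outcome, while records without an "outcome" key also appear in the file (the tests further down filter on that key). An illustrative record, with made-up timing values:

{"nodeid": "test_module.py::test_failure", "start": 0.0153, "finish": 0.0461, "outcome": "failed"}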
29 changes: 18 additions & 11 deletions src/pytest_replay/__init__.py
@@ -53,7 +53,8 @@ def __init__(self, config):
         skip_cleanup = config.getoption("skip_cleanup", False)
         if not skip_cleanup:
             self.cleanup_scripts()
-        self.node_start_time = dict()
+        self.node_start_time = {}
+        self.node_outcome = {}
         self.session_start_time = config.replay_start_time

     def cleanup_scripts(self):
@@ -88,16 +89,22 @@ def pytest_runtest_logstart(self, nodeid):
     def pytest_runtest_makereport(self, item):
         report = yield
         result = report.get_result()
-        if self.dir and result.when == "teardown":
-            json_content = json.dumps(
-                {
-                    "nodeid": item.nodeid,
-                    "start": self.node_start_time[item.nodeid],
-                    "finish": time.perf_counter() - self.session_start_time,
-                    "outcome": result.outcome,
-                }
-            )
-            self.append_test_to_script(item.nodeid, json_content)
+        if self.dir:
+            current = self.node_outcome.setdefault(item.nodeid, result.outcome)
+            if not result.passed and current != "failed":
+                # do not overwrite a failed outcome with a skipped one
+                self.node_outcome[item.nodeid] = result.outcome
+
+            if result.when == "teardown":
+                json_content = json.dumps(
+                    {
+                        "nodeid": item.nodeid,
+                        "start": self.node_start_time[item.nodeid],
+                        "finish": time.perf_counter() - self.session_start_time,
+                        "outcome": self.node_outcome.pop(item.nodeid),
+                    }
+                )
+                self.append_test_to_script(item.nodeid, json_content)

     def pytest_collection_modifyitems(self, items, config):
         replay_file = config.getoption("replay_file")
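The core of the fix is how the per-phase results of a single test (setup, call, teardown) are folded into one outcome. A minimal standalone sketch of that merge rule, outside of pytest and using a hypothetical merge_outcomes helper, behaves like the setdefault/overwrite logic in the hunk above:

# Sketch only: keep the first outcome seen, let a non-passed phase override it,
# but never overwrite an already-failed outcome.
def merge_outcomes(phase_outcomes):
    merged = None
    for outcome in phase_outcomes:  # outcomes of "setup", "call", "teardown"
        if merged is None:
            merged = outcome  # first phase seen (the setdefault)
        elif outcome != "passed" and merged != "failed":
            # do not overwrite a failed outcome with a skipped one
            merged = outcome
    return merged

assert merge_outcomes(["passed", "passed", "passed"]) == "passed"
assert merge_outcomes(["passed", "passed", "failed"]) == "failed"   # failing teardown
assert merge_outcomes(["passed", "failed", "skipped"]) == "failed"  # a skip must not mask the failure
assert merge_outcomes(["skipped"]) == "skipped"                     # skipped during setup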
133 changes: 133 additions & 0 deletions tests/test_replay.py
@@ -1,5 +1,6 @@
 import itertools as it
 import json
+import re

 import pytest

@@ -270,3 +271,135 @@ def test_filter_out_tests_not_in_file(testdir):
         ],
         consecutive=True,
     )


+def test_replay_file_outcome_is_correct(testdir):
+    """Tests that the outcomes in the replay file are correct."""
+    testdir.makepyfile(
+        test_module="""
+        import pytest
+        def test_success():
+            pass
+        def test_failure():
+            assert False
+        @pytest.fixture
+        def failing_teardown_fixture():
+            yield
+            assert False
+        def test_failure_fixture_teardown(failing_teardown_fixture):
+            assert True
+        @pytest.fixture
+        def failing_setup_fixture():
+            assert False
+        def test_failure_fixture_setup(failing_setup_fixture):
+            assert True
+        """
+    )
+    dir = testdir.tmpdir / "replay"
+    result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
+    assert result.ret != 0
+
+    contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
+    outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
+    assert outcomes == {
+        "test_module.py::test_success": "passed",
+        "test_module.py::test_failure": "failed",
+        "test_module.py::test_failure_fixture_teardown": "failed",
+        "test_module.py::test_failure_fixture_setup": "failed",
+    }


+def test_replay_file_outcome_is_correct_xdist(testdir):
+    """Tests that the outcomes in the replay file are correct when running in parallel."""
+    testdir.makepyfile(
+        test_module="""
+        import pytest
+        @pytest.mark.parametrize('i', range(10))
+        def test_val(i):
+            assert i < 5
+        """
+    )
+    dir = testdir.tmpdir / "replay"
+    procs = 2
+    result = testdir.runpytest_subprocess(f"--replay-record-dir={dir}", f"-n {procs}")
+    assert result.ret != 0
+
+    contents = [
+        s
+        for n in range(procs)
+        for s in (dir / f".pytest-replay-gw{n}.txt").read().splitlines()
+    ]
+    pattern = re.compile(r"test_val\[(\d+)\]")
+    for content in contents:
+        parsed = json.loads(content)
+        if "outcome" not in parsed:
+            continue
+
+        i = int(pattern.search(parsed["nodeid"]).group(1))
+        if i < 5:
+            assert parsed["outcome"] == "passed", i
+        else:
+            assert parsed["outcome"] == "failed", i


+def test_outcomes_in_replay_file(testdir):
+    """Tests that checks how the outcomes are handled in the report hook when the various
+    phases yield failure or skipped."""
+    testdir.makepyfile(
+        test_module="""
+        import pytest
+        @pytest.fixture()
+        def skip_setup():
+            pytest.skip("skipping")
+            yield
+        @pytest.fixture()
+        def skip_teardown():
+            yield
+            pytest.skip("skipping")
+        @pytest.fixture()
+        def fail_setup():
+            assert False
+        @pytest.fixture()
+        def fail_teardown():
+            yield
+            assert False
+        def test_skip_fail(skip_setup, fail_teardown):
+            pass
+        def test_fail_skip(fail_setup, skip_teardown):
+            pass
+        def test_skip_setup(skip_setup):
+            pass
+        def test_skip_teardown(skip_teardown):
+            pass
+        def test_test_fail_skip_teardown(skip_teardown):
+            assert False
+        """
+    )
+    dir = testdir.tmpdir / "replay"
+    testdir.runpytest_subprocess(f"--replay-record-dir={dir}")
+
+    contents = [json.loads(s) for s in (dir / ".pytest-replay.txt").read().splitlines()]
+    outcomes = {r["nodeid"]: r["outcome"] for r in contents if "outcome" in r}
+    assert outcomes == {
+        "test_module.py::test_skip_fail": "skipped",
+        "test_module.py::test_fail_skip": "failed",
+        "test_module.py::test_skip_setup": "skipped",
+        "test_module.py::test_skip_teardown": "skipped",
+        "test_module.py::test_test_fail_skip_teardown": "failed",
+    }
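To inspect recorded outcomes outside the test suite, a small reader along the lines of the assertions above works. This is a hypothetical helper, not part of the plugin or this PR; it assumes the line-per-record JSON format exercised by these tests:

import json

def outcomes_from_replay(path):
    """Map nodeid -> outcome from a pytest-replay script (e.g. .pytest-replay.txt)."""
    outcomes = {}
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if not line:
                continue
            record = json.loads(line)
            # start records carry no "outcome"; only the teardown record does
            if "outcome" in record:
                outcomes[record["nodeid"]] = record["outcome"]
    return outcomes

# e.g. outcomes_from_replay("replay/.pytest-replay.txt")
# -> {"test_module.py::test_fail_skip": "failed", ...}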
