From e3d4efa0b649b641440eab9a17b3c036f99c60a5 Mon Sep 17 00:00:00 2001
From: Israel Fruchter
Date: Thu, 18 Jul 2024 11:55:40 +0300
Subject: [PATCH] Add support for reporting subtests

When the `pytest-subtests` plugin is used, this plugin could only report
the last report of a test, which always passes (assuming there are no
setup/teardown issues and no failures outside the subtest contexts), so
subtest failures were lost and neither counted nor reported.

This change fixes that by collecting all of the reports produced during a
test and reporting each one on its own.

It also introduces a new field in the test report output, `subtest`: when a
report comes from a subtest, it holds that subtest's message.

Fixes: #28
---
 pytest_elk_reporter.py     | 30 +++++++++++---------
 requirements-dev.txt       |  1 +
 tests/test_elk_reporter.py | 56 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 71 insertions(+), 16 deletions(-)

diff --git a/pytest_elk_reporter.py b/pytest_elk_reporter.py
index f1914ba..9c9f705 100644
--- a/pytest_elk_reporter.py
+++ b/pytest_elk_reporter.py
@@ -200,7 +200,7 @@ def __init__(self, config):
         self.session_data["username"] = get_username()
         self.session_data["hostname"] = socket.gethostname()
         self.test_data = defaultdict(dict)
-        self.reports = {}
+        self.reports = defaultdict(list)
         self.config = config
         self.is_slave = False
@@ -225,13 +225,13 @@ def cache_report(self, report_item, outcome):
         nodeid = getattr(report_item, "nodeid", report_item)
         # local hack to handle xdist report order
         slavenode = getattr(report_item, "node", None)
-        self.reports[nodeid, slavenode] = (report_item, outcome)
+        self.reports[nodeid, slavenode].append((report_item, outcome))
 
-    def get_report(self, report_item):
+    def get_reports(self, report_item):
         nodeid = getattr(report_item, "nodeid", report_item)
         # local hack to handle xdist report order
         slavenode = getattr(report_item, "node", None)
-        return self.reports.get((nodeid, slavenode), None)
+        return self.reports.get((nodeid, slavenode), [])
 
     @staticmethod
     def get_failure_messge(item_report):
@@ -281,15 +281,16 @@ def pytest_runtest_logreport(self, report):
         if report.when == "teardown":
             # in xdist, report only on worker nodes
             if self.get_worker_id() != "master":
-                old_report = self.get_report(report)
-                if report.passed and old_report:
-                    self.report_test(old_report[0], old_report[1])
-                if report.failed and old_report:
-                    self.report_test(
-                        report, old_report[1] + " & error", old_report=old_report[0]
-                    )
-                if report.skipped:
-                    self.report_test(report, "skipped")
+                old_reports = self.get_reports(report)
+                for old_report in old_reports:
+                    if report.passed and old_report:
+                        self.report_test(old_report[0], old_report[1])
+                    if report.failed and old_report:
+                        self.report_test(
+                            report, old_report[1] + " & error", old_report=old_report[0]
+                        )
+                    if report.skipped:
+                        self.report_test(report, "skipped")
 
     def report_test(self, item_report, outcome, old_report=None):
         self.stats[outcome] += 1
@@ -302,6 +303,9 @@ def report_test(self, item_report, outcome, old_report=None):
             markers=item_report.keywords,
             **self.session_data,
         )
+        context = getattr(item_report, "context", None)
+        if context:
+            test_data.update(subtest=context.msg)
         test_data.update(self.test_data[item_report.nodeid])
         del self.test_data[item_report.nodeid]
 
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 319eacf..4704219 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -7,4 +7,5 @@ pytest-cov
 tox
 wheel>=0.33.0
 pytest-xdist
+pytest-subtests
 six
diff --git a/tests/test_elk_reporter.py b/tests/test_elk_reporter.py
index 46f6386..496d575 100644
--- a/tests/test_elk_reporter.py
+++ b/tests/test_elk_reporter.py
@@ -67,6 +67,12 @@ def fin():
         def test_skip_in_teardown(skip_in_teardown):
             pass
 
+        def test_failing_subtests(subtests):
+            with subtests.test("failed subtest"):
+                raise Exception("should fail")
+            with subtests.test("success subtest"):
+                pass
+
         """
     )
 
@@ -93,12 +99,12 @@ def test_skip_in_teardown(skip_in_teardown):
     last_report = json.loads(requests_mock.request_history[-1].text)
     assert last_report["stats"] == {
         "error": 1,
-        "failure": 2,
+        "failure": 3,
         "failure & error": 1,
-        "passed": 0,
+        "passed": 2,
         "skipped & error": 1,
         "passed & error": 1,
-        "skipped": 3,
+        "skipped": 4,
         "xfailed": 1,
         "xpass": 1,
         "error & error": 0,
@@ -531,3 +537,47 @@ def test_1(elk_reporter):
     assert (
         not requests_mock.called
     ), "Requests are not made to Elasticsearch when es_post_reports is False"
+
+
+def test_subtests(testdir, requests_mock):  # pylint: disable=redefined-outer-name
+    """Make sure subtests are identified and reported."""
+
+    # create a temporary pytest test module
+    testdir.makepyfile(
+        """
+        import pytest
+
+        def test_failing_subtests(subtests):
+            with subtests.test("failed subtest"):
+                raise Exception("should fail")
+            with subtests.test("success subtest"):
+                pass
+
+        """
+    )
+    # run pytest with the following cmd args
+    result = testdir.runpytest("--es-address=127.0.0.1:9200", "-v")
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines(["*::test_failing_subtests * SUBFAIL*"])
+    result.stdout.fnmatch_lines(["*::test_failing_subtests * SUBPASS*"])
+    result.stdout.fnmatch_lines(["*::test_failing_subtests PASSED*"])
+
+    # make sure that we get a '1' exit code for the test suite
+    assert result.ret == 1
+
+    # validate each subtest is being reported on its own
+    report = json.loads(requests_mock.request_history[-2].text)
+    assert report["name"] == "test_subtests.py::test_failing_subtests"
+    assert "subtest" not in report
+    assert report["outcome"] == "passed"
+
+    report = json.loads(requests_mock.request_history[-3].text)
+    assert report["name"] == "test_subtests.py::test_failing_subtests"
+    assert report["subtest"] == "success subtest"
+    assert report["outcome"] == "passed"
+
+    report = json.loads(requests_mock.request_history[-4].text)
+    assert report["name"] == "test_subtests.py::test_failing_subtests"
+    assert report["subtest"] == "failed subtest"
+    assert report["outcome"] == "failure"
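
Note (editor's addition, not part of the patch): the change relies on `pytest-subtests`
attaching a `context` object, whose `msg` is the subtest message, to each subtest's
report, while the test's own final report carries no such attribute (which is why the
last request asserted in `test_subtests` has no `subtest` field). The standalone hook
below is a minimal, illustrative sketch of that mechanism for anyone who wants to
observe it locally; the hook body and its print format are assumptions for
demonstration only, not code from this change.

    # conftest.py -- illustrative sketch only, assumes pytest-subtests is installed
    def pytest_runtest_logreport(report):
        # Subtest reports carry a `context` attribute holding the subtest message;
        # the final report of the test itself does not, so it is skipped here.
        context = getattr(report, "context", None)
        if context is not None:
            print("subtest %r of %s: %s" % (context.msg, report.nodeid, report.outcome))

With the patch applied, that same `context.msg` ends up in the Elasticsearch document
as the `subtest` field, next to the usual `name` and `outcome` fields.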