diff --git a/database/models/reports.py b/database/models/reports.py
index b3a96a28a..4eb7a97f4 100644
--- a/database/models/reports.py
+++ b/database/models/reports.py
@@ -289,6 +289,11 @@ def id(self):
     # for example: the same test being run on windows vs. mac
     flags_hash = Column(types.String(256), nullable=False)
 
+    framework = Column(types.String(100), nullable=True)
+
+    computed_name = Column(types.Text, nullable=True)
+    filename = Column(types.Text, nullable=True)
+
     __table_args__ = (
         UniqueConstraint(
             "repoid",
diff --git a/requirements.in b/requirements.in
index 361fc8715..f0e25edf2 100644
--- a/requirements.in
+++ b/requirements.in
@@ -1,6 +1,6 @@
 https://github.com/codecov/opentelem-python/archive/refs/tags/v0.0.4a1.tar.gz#egg=codecovopentelem
 https://github.com/codecov/shared/archive/106b0ae2b9a2870899fa3903fc6da0a9ba67eef2.tar.gz#egg=shared
-https://github.com/codecov/test-results-parser/archive/1507de2241601d678e514c08b38426e48bb6d47d.tar.gz#egg=test-results-parser
+https://github.com/codecov/test-results-parser/archive/ef39a0888acd62d02a316a852a15d755c74e78c6.tar.gz#egg=test-results-parser
 https://github.com/codecov/timestring/archive/d37ceacc5954dff3b5bd2f887936a98a668dda42.tar.gz#egg=timestring
 asgiref>=3.7.2
 analytics-python==1.3.0b1
diff --git a/requirements.txt b/requirements.txt
index b7dd741ea..cc8873a2f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
 # This file is autogenerated by pip-compile with Python 3.12
 # by the following command:
 #
-#   pip-compile
+#   pip-compile requirements.in
 #
 amqp==5.2.0
     # via kombu
@@ -391,7 +391,7 @@ statsd==3.3.0
     #   shared
 stripe==9.6.0
     # via -r requirements.in
-test-results-parser @ https://github.com/codecov/test-results-parser/archive/1507de2241601d678e514c08b38426e48bb6d47d.tar.gz
+test-results-parser @ https://github.com/codecov/test-results-parser/archive/ef39a0888acd62d02a316a852a15d755c74e78c6.tar.gz
     # via -r requirements.in
 text-unidecode==1.3
     # via faker
diff --git a/services/test_results.py b/services/test_results.py
index 6ea81a69f..73debe544 100644
--- a/services/test_results.py
+++ b/services/test_results.py
@@ -112,8 +112,7 @@ def generate_test_id(repoid, testsuite, name, flags_hash):
 @dataclass
 class TestResultsNotificationFailure:
     failure_message: str
-    testsuite: str
-    testname: str
+    display_name: str
     envs: List[str]
     test_id: str
     duration_seconds: float
@@ -205,7 +204,7 @@ def generate_view_test_analytics_line(commit: Commit) -> str:
 def messagify_failure(
     failure: TestResultsNotificationFailure,
 ) -> str:
-    test_name = wrap_in_code(failure.testname.replace("\x1f", " "))
+    test_name = wrap_in_code(failure.display_name.replace("\x1f", " "))
     formatted_duration = display_duration(failure.duration_seconds)
     stack_trace_summary = f"Stack Traces | {formatted_duration}s run time"
     stack_trace = wrap_in_details(
@@ -219,7 +218,7 @@ def messagify_flake(
     flaky_failure: TestResultsNotificationFailure,
     flake_info: FlakeInfo,
 ) -> str:
-    test_name = wrap_in_code(flaky_failure.testname.replace("\x1f", " "))
+    test_name = wrap_in_code(flaky_failure.display_name.replace("\x1f", " "))
     formatted_duration = display_duration(flaky_failure.duration_seconds)
     flake_rate = flake_info.failed / flake_info.count * 100
     flake_rate_section = f"**Flake rate in main:** {flake_rate:.2f}% (Passed {flake_info.count - flake_info.failed} times, Failed {flake_info.failed} times)"
@@ -256,7 +255,7 @@ def build_message(self) -> str:
                     lambda x: x.test_id not in self.payload.flaky_tests,
                     self.payload.failures,
                 ),
-                key=lambda x: (x.duration_seconds, x.testname),
+                key=lambda x: (x.duration_seconds, x.display_name),
             )
 
         if failures:
diff --git a/services/tests/test_test_results.py b/services/tests/test_test_results.py
index 6ae9be28a..93ef6ebb1 100644
--- a/services/tests/test_test_results.py
+++ b/services/tests/test_test_results.py
@@ -74,7 +74,6 @@ def test_generate_failure_info():
     test_id = generate_test_id(1, "testsuite", "testname", flags_hash)
     fail = TestResultsNotificationFailure(
         "hello world",
-        "testsuite",
         "testname",
         [],
         test_id,
@@ -100,7 +99,6 @@ def test_build_message():
     test_id = generate_test_id(1, "testsuite", "testname", flags_hash)
     fail = TestResultsNotificationFailure(
         "hello world",
-        "testsuite",
         "testname",
         [],
         test_id,
@@ -147,7 +145,6 @@ def test_build_message_with_flake():
     test_id = generate_test_id(1, "testsuite", "testname", flags_hash)
     fail = TestResultsNotificationFailure(
         "hello world",
-        "testsuite",
         "testname",
         [],
         test_id,
diff --git a/tasks/test_results_finisher.py b/tasks/test_results_finisher.py
index 29705e4b7..c4dd3f38d 100644
--- a/tasks/test_results_finisher.py
+++ b/tasks/test_results_finisher.py
@@ -232,8 +232,9 @@ def process_impl_within_lock(
 
                     failures.append(
                         TestResultsNotificationFailure(
-                            testsuite=test_instance.test.testsuite,
-                            testname=test_instance.test.name,
+                            display_name=test_instance.test.computed_name
+                            if test_instance.test.computed_name is not None
+                            else test_instance.test.name,
                             failure_message=failure_message,
                             test_id=test_instance.test_id,
                             envs=flag_names,
diff --git a/tasks/test_results_processor.py b/tasks/test_results_processor.py
index 6eff167a2..62fa572ee 100644
--- a/tasks/test_results_processor.py
+++ b/tasks/test_results_processor.py
@@ -2,6 +2,7 @@
 import json
 import logging
 import zlib
+from dataclasses import dataclass
 from datetime import date, datetime
 from io import BytesIO
 from typing import List
@@ -12,12 +13,12 @@
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.orm import Session
 from test_results_parser import (
+    Framework,
     Outcome,
     ParserError,
+    ParsingInfo,
     Testrun,
     parse_junit_xml,
-    parse_pytest_reportlog,
-    parse_vitest_json,
 )
 
 from app import celery_app
@@ -76,9 +77,49 @@ def get_repo_flags(
     return repo_flag_mapping
 
 
+@dataclass
+class PytestName:
+    actual_class_name: str
+    test_file_path: str
+
+
 class TestResultsProcessorTask(BaseCodecovTask, name=test_results_processor_task_name):
     __test__ = False
 
+    def compute_name(
+        self,
+        framework: Framework,
+        raw_classname: str,
+        raw_name: str,
+        filename: str | None,
+    ) -> str:
+        match framework:
+            case Framework.Jest:
+                name = raw_name
+                return name
+            case Framework.Pytest:
+                split_name = raw_classname.split(".")
+                name_candidates: list[PytestName] = []
+                for i in range(len(split_name)):
+                    test_file_path = "/".join(split_name[: len(split_name) - i]) + ".py"
+                    actual_class_name = "::".join(split_name[len(split_name) - i :])
+
+                    name_candidates.append(
+                        PytestName(actual_class_name, test_file_path)
+                    )
+
+                for candidate in name_candidates:
+                    if candidate.test_file_path == filename or (
+                        self.network is not None
+                        and candidate.test_file_path in self.network
+                    ):
+                        return f"{candidate.test_file_path}::{candidate.actual_class_name}::{raw_name}"
+            case Framework.Vitest:
+                return f"{raw_classname} > {raw_name}"
+            case Framework.PHPUnit:
+                return f"{raw_classname}::{raw_name}"
+        return f"{raw_classname}\x1f{raw_name}"
+
     def run_impl(
         self,
         db_session,
@@ -149,11 +190,11 @@ def _bulk_write_tests_to_db(
         commitid: str,
         upload_id: int,
         branch: str,
-        parsed_testruns: List[Testrun],
+        parsing_results: List[ParsingInfo],
         flaky_test_set: set[str],
         flags: list[str],
     ):
-        test_data = []
+        test_data = {}
         test_instance_data = []
         test_flag_bridge_data = []
         daily_totals = dict()
@@ -163,24 +204,34 @@
 
         existing_tests: dict[str, Test] = get_existing_tests(db_session, repoid)
 
-        for testrun in parsed_testruns:
-            # Build up the data for bulk insert
-            name = testrun.name
-            testsuite = testrun.testsuite
-            outcome = str(testrun.outcome)
-            duration_seconds = testrun.duration
-            failure_message = testrun.failure_message
-            test_id = generate_test_id(repoid, testsuite, name, flags_hash)
-
-            test_data.append(
-                dict(
+        for p in parsing_results:
+            framework = str(p.framework)
+
+            for testrun in p.testruns:
+                # Build up the data for bulk insert
+                name: str = f"{testrun.classname}\x1f{testrun.name}"
+                testsuite: str = testrun.testsuite
+                outcome: str = str(testrun.outcome)
+                duration_seconds: float = testrun.duration
+                failure_message: str | None = testrun.failure_message
+                test_id: str = generate_test_id(repoid, testsuite, name, flags_hash)
+
+                filename: str | None = testrun.filename
+
+                computed_name: str = self.compute_name(
+                    p.framework, testrun.classname, testrun.name, filename
+                )
+
+                test_data[(repoid, name, testsuite, flags_hash)] = dict(
                     id=test_id,
                     repoid=repoid,
                     name=name,
                     testsuite=testsuite,
                     flags_hash=flags_hash,
+                    framework=framework,
+                    filename=filename,
+                    computed_name=computed_name,
                 )
-            )
 
                 if test_id not in existing_tests:
                     test_flag_bridge_data += [
@@ -188,87 +239,99 @@
                        for flag in flags
                    ]
 
-            test_instance_data.append(
-                dict(
-                    test_id=test_id,
-                    upload_id=upload_id,
-                    duration_seconds=duration_seconds,
-                    outcome=outcome,
-                    failure_message=failure_message,
-                    commitid=commitid,
-                    branch=branch,
-                    reduced_error_id=None,
-                    repoid=repoid,
+                test_instance_data.append(
+                    dict(
+                        test_id=test_id,
+                        upload_id=upload_id,
+                        duration_seconds=duration_seconds,
+                        outcome=outcome,
+                        failure_message=failure_message,
+                        commitid=commitid,
+                        branch=branch,
+                        reduced_error_id=None,
+                        repoid=repoid,
+                    )
                 )
-            )
-
-            def update_daily_total():
-                daily_totals[test_id]["last_duration_seconds"] = duration_seconds
-
-                # logic below is a little complicated but we're basically doing:
-                # (old_avg * num of values used to compute old avg) + new value
-                # -------------------------------------------------------------
-                #            num of values used to compute old avg + 1
-                daily_totals[test_id]["avg_duration_seconds"] = (
-                    daily_totals[test_id]["avg_duration_seconds"]
-                    * (
+                def update_daily_total():
+                    daily_totals[test_id]["last_duration_seconds"] = duration_seconds
+
+                    # logic below is a little complicated but we're basically doing:
+
+                    # (old_avg * num of values used to compute old avg) + new value
+                    # -------------------------------------------------------------
+                    #            num of values used to compute old avg + 1
+                    daily_totals[test_id]["avg_duration_seconds"] = (
+                        daily_totals[test_id]["avg_duration_seconds"]
+                        * (
+                            daily_totals[test_id]["pass_count"]
+                            + daily_totals[test_id]["fail_count"]
+                        )
+                        + duration_seconds
+                    ) / (
                         daily_totals[test_id]["pass_count"]
                         + daily_totals[test_id]["fail_count"]
+                        + 1
                     )
-                    + duration_seconds
-                ) / (
-                    daily_totals[test_id]["pass_count"]
-                    + daily_totals[test_id]["fail_count"]
-                    + 1
-                )
-                if outcome == str(Outcome.Pass):
-                    daily_totals[test_id]["pass_count"] += 1
-                elif outcome == str(Outcome.Failure) or outcome == str(Outcome.Error):
-                    daily_totals[test_id]["fail_count"] += 1
-                elif outcome == str(Outcome.Skip):
-                    daily_totals[test_id]["skip_count"] += 1
-
-            def create_daily_total():
-                daily_totals[test_id] = {
-                    "test_id": test_id,
-                    "repoid": repoid,
-                    "last_duration_seconds": duration_seconds,
-                    "avg_duration_seconds": duration_seconds,
-                    "pass_count": 1 if outcome == str(Outcome.Pass) else 0,
-                    "fail_count": 1
-                    if outcome == str(Outcome.Failure) or outcome == str(Outcome.Error)
-                    else 0,
-                    "skip_count": 1 if outcome == str(Outcome.Skip) else 0,
-                    "flaky_fail_count": 1
-                    if test_id in flaky_test_set
-                    and (
-                        outcome == str(Outcome.Failure) or outcome == str(Outcome.Error)
-                    )
-                    else 0,
-                    "branch": branch,
-                    "date": date.today(),
-                    "latest_run": datetime.now(),
-                    "commits_where_fail": [commitid]
-                    if (
-                        outcome == str(Outcome.Failure) or outcome == str(Outcome.Error)
-                    )
-                    else [],
-                }
-
-            if outcome != str(Outcome.Skip):
-                if test_id in daily_totals:
-                    update_daily_total()
-                else:
-                    create_daily_total()
-
-        # Save Tests
-        insert_on_conflict_do_nothing = (
-            insert(Test.__table__).values(test_data).on_conflict_do_nothing()
-        )
-        db_session.execute(insert_on_conflict_do_nothing)
-        db_session.flush()
+                    if outcome == str(Outcome.Pass):
+                        daily_totals[test_id]["pass_count"] += 1
+                    elif outcome == str(Outcome.Failure) or outcome == str(
+                        Outcome.Error
+                    ):
+                        daily_totals[test_id]["fail_count"] += 1
+                    elif outcome == str(Outcome.Skip):
+                        daily_totals[test_id]["skip_count"] += 1
+
+                def create_daily_total():
+                    daily_totals[test_id] = {
+                        "test_id": test_id,
+                        "repoid": repoid,
+                        "last_duration_seconds": duration_seconds,
+                        "avg_duration_seconds": duration_seconds,
+                        "pass_count": 1 if outcome == str(Outcome.Pass) else 0,
+                        "fail_count": 1
+                        if outcome == str(Outcome.Failure)
+                        or outcome == str(Outcome.Error)
+                        else 0,
+                        "skip_count": 1 if outcome == str(Outcome.Skip) else 0,
+                        "flaky_fail_count": 1
+                        if test_id in flaky_test_set
+                        and (
+                            outcome == str(Outcome.Failure)
+                            or outcome == str(Outcome.Error)
+                        )
+                        else 0,
+                        "branch": branch,
+                        "date": date.today(),
+                        "latest_run": datetime.now(),
+                        "commits_where_fail": [commitid]
+                        if (
+                            outcome == str(Outcome.Failure)
+                            or outcome == str(Outcome.Error)
+                        )
+                        else [],
+                    }
+
+                if outcome != str(Outcome.Skip):
+                    if test_id in daily_totals:
+                        update_daily_total()
+                    else:
+                        create_daily_total()
+
+        # Upsert Tests
+        if len(test_data) > 0:
+            test_insert = insert(Test.__table__).values(list(test_data.values()))
+            insert_on_conflict_do_update = test_insert.on_conflict_do_update(
+                index_elements=["repoid", "name", "testsuite", "flags_hash"],
+                set_={
+                    "framework": test_insert.excluded.framework,
+                    "computed_name": test_insert.excluded.computed_name,
+                    "filename": test_insert.excluded.filename,
+                },
+            )
+            db_session.execute(insert_on_conflict_do_update)
+            db_session.flush()
 
         if len(test_flag_bridge_data):
             insert_on_conflict_do_nothing_flags = (
@@ -280,53 +343,55 @@ def create_daily_total():
             db_session.flush()
 
         # Upsert Daily Test Totals
-        rollup_table = DailyTestRollup.__table__
-        stmt = insert(rollup_table).values(list(daily_totals.values()))
-        stmt = stmt.on_conflict_do_update(
-            index_elements=[
-                "repoid",
-                "branch",
-                "test_id",
-                "date",
-            ],
-            set_={
-                "last_duration_seconds": stmt.excluded.last_duration_seconds,
-                "avg_duration_seconds": (
-                    rollup_table.c.avg_duration_seconds
-                    * (rollup_table.c.pass_count + rollup_table.c.fail_count)
-                    + stmt.excluded.avg_duration_seconds
-                )
-                / (rollup_table.c.pass_count + rollup_table.c.fail_count + 1),
-                "latest_run": stmt.excluded.latest_run,
-                "pass_count": rollup_table.c.pass_count + stmt.excluded.pass_count,
-                "skip_count": rollup_table.c.skip_count + stmt.excluded.skip_count,
-                "fail_count": rollup_table.c.fail_count + stmt.excluded.fail_count,
-                "flaky_fail_count": rollup_table.c.flaky_fail_count
-                + stmt.excluded.flaky_fail_count,
-                "commits_where_fail": rollup_table.c.commits_where_fail
-                + stmt.excluded.commits_where_fail,
-            },
-        )
+        if len(daily_totals) > 0:
+            rollup_table = DailyTestRollup.__table__
+            stmt = insert(rollup_table).values(list(daily_totals.values()))
+            stmt = stmt.on_conflict_do_update(
+                index_elements=[
+                    "repoid",
+                    "branch",
+                    "test_id",
+                    "date",
+                ],
+                set_={
+                    "last_duration_seconds": stmt.excluded.last_duration_seconds,
+                    "avg_duration_seconds": (
+                        rollup_table.c.avg_duration_seconds
+                        * (rollup_table.c.pass_count + rollup_table.c.fail_count)
+                        + stmt.excluded.avg_duration_seconds
+                    )
+                    / (rollup_table.c.pass_count + rollup_table.c.fail_count + 1),
+                    "latest_run": stmt.excluded.latest_run,
+                    "pass_count": rollup_table.c.pass_count + stmt.excluded.pass_count,
+                    "skip_count": rollup_table.c.skip_count + stmt.excluded.skip_count,
+                    "fail_count": rollup_table.c.fail_count + stmt.excluded.fail_count,
+                    "flaky_fail_count": rollup_table.c.flaky_fail_count
+                    + stmt.excluded.flaky_fail_count,
+                    "commits_where_fail": rollup_table.c.commits_where_fail
+                    + stmt.excluded.commits_where_fail,
+                },
+            )
 
-        db_session.execute(stmt)
-        db_session.flush()
+            db_session.execute(stmt)
+            db_session.flush()
 
         # Save TestInstances
-        insert_test_instances = insert(TestInstance.__table__).values(
-            test_instance_data
-        )
-        db_session.execute(insert_test_instances)
-        db_session.flush()
+        if len(test_instance_data) > 0:
+            insert_test_instances = insert(TestInstance.__table__).values(
+                test_instance_data
+            )
+            db_session.execute(insert_test_instances)
+            db_session.flush()
 
     def process_individual_upload(
         self, db_session, repoid, commitid, upload_obj: Upload, flaky_test_set: set[str]
     ):
         upload_id = upload_obj.id
         with metrics.timer("test_results.processor.process_individual_arg"):
-            parsed_testruns: List[Testrun] = self.process_individual_arg(
+            parsing_results: list[ParsingInfo] = self.process_individual_arg(
                 upload_obj, upload_obj.report.commit.repository
             )
-        if not parsed_testruns:
+        if all([len(result.testruns) == 0 for result in parsing_results]):
             log.error(
                 "No test result files were successfully parsed for this upload",
                 extra=dict(
@@ -346,7 +411,7 @@ def process_individual_upload(
             commitid,
             upload_id,
             branch,
-            parsed_testruns,
+            parsing_results,
             flaky_test_set,
             upload_obj.flag_names,
         )
@@ -361,14 +426,16 @@ def process_individual_arg(self, upload: Upload, repository) -> List[Testrun]:
         payload_bytes = archive_service.read_file(upload.storage_path)
         data = json.loads(payload_bytes)
 
-        testrun_list = []
+        parsing_results: list[ParsingInfo] = []
+
+        # TODO: this is bad
+        self.network = data.get("network_files")
 
         for file_dict in data["test_results_files"]:
-            filename = file_dict["filename"]
             file = file_dict["data"]
             file_bytes = BytesIO(zlib.decompress(base64.b64decode(file)))
             try:
-                testrun_list += self.parse_single_file(filename, file_bytes)
+                parsing_results.append(self.parse_single_file(file_bytes))
             except ParserFailureError as exc:
                 log.error(
                     exc.err_msg,
@@ -377,35 +444,23 @@ def process_individual_arg(self, upload: Upload, repository) -> List[Testrun]:
                         commitid=upload.report.commit_id,
                         uploadid=upload.id,
                         file_content=exc.file_content,
-                        parser=exc.parser,
                        parser_err_msg=exc.parser_err_msg,
                    ),
                )
-        return testrun_list
+        return parsing_results
 
     def parse_single_file(
         self,
-        filename: str,
         file_bytes: BytesIO,
     ):
-        try:
-            with metrics.timer("test_results.processor.parser_matching"):
-                parser, parsing_function = self.match_report(filename, file_bytes)
-        except ParserNotSupportedError as e:
-            metrics.incr(
-                "test_results.processor.parsing.failure.match_report_failure",
-            )
-            raise ParserFailureError(
-                err_msg="File did not match any parser format",
-                file_content=file_bytes.read().decode()[:300],
-            ) from e
-
         try:
             file_content = file_bytes.read()
             with metrics.timer("test_results.processor.file_parsing"):
-                res = parsing_function(file_content)
+                res = parse_junit_xml(file_content)
         except ParserError as e:
             # aware of cardinality issues with using a variable here in the reason field but
             # parser is defined by us and limited to the amount of different parsers we will
             # write, so I don't expect this to be a problem for us
@@ -415,7 +470,6 @@ def parse_single_file(
             raise ParserFailureError(
                 err_msg="Error parsing file",
                 file_content=file_content.decode()[:300],
-                parser=parser,
                 parser_err_msg=str(e),
             ) from e
         metrics.incr(
@@ -424,29 +478,6 @@
 
         return res
 
-    def match_report(self, filename: str, file_bytes: BytesIO):
-        first_line = file_bytes.readline()
-        second_line = file_bytes.readline()
-        file_bytes.seek(0)
-        first_line = self.remove_space_from_line(first_line)
-        second_line = self.remove_space_from_line(second_line)
-        first_two_lines = first_line + second_line
-
-        parser = "no parser"
-        if filename.endswith(".xml") or first_two_lines.startswith(b"<?xml"):
+<details><summary>View the top 3 failed tests by shortest run time</summary>
+
+> 
+> ```
+> hello_test_name1
+> ```
+> 
+> <details><summary>Stack Traces | 2s run time</summary>
+> 
+> > `````````
+> > Shared
+> > 
+> > 
+> > 
+> >  ````````
+> >  
+> > 
+> >  | test | test | test <pre>failure message</pre>
+> > `````````
+> > [View](https://example.com/build_url_1) the CI Build
+> 
+> </details>
+
+
+> 
+> ```
+> hello_Other Class Name test_name2
+> ```
+> 
+> <details><summary>Stack Traces | 3s run time</summary>
+> 
+> > `````````
+> > Shared
+> > 
+> > 
+> > 
+> >  
+> >   ````````  
+> >  
+> > 
+> >  | test | test | test <pre>failure message</pre>
+> > `````````
+> > [View](https://example.com/build_url_2) the CI Build
+> 
+> </details>
+
+
+> 
+> ```
+> hello_Class Name test_name0
+> ```
+> 
+> <details><summary>Stack Traces | 4s run time</summary>
+> 
+> > 
+> > ```
+> > Fourth 
+> > 
+> > 
+> > | test | instance |
+> > ```
+> > 
+> > [View](https://example.com/build_url_3) the CI Build
+> 
+> </details>
+
+</details>
+
+To view individual test run time comparison to the main branch, go to the [Test Analytics Dashboard](https://app.codecov.io/gh/test-username/test-repo-name/tests/main)""",
+        )
diff --git a/tasks/tests/unit/test_test_results_processor_task.py b/tasks/tests/unit/test_test_results_processor_task.py
index 57990f048..a87886dda 100644
--- a/tasks/tests/unit/test_test_results_processor_task.py
+++ b/tasks/tests/unit/test_test_results_processor_task.py
@@ -13,7 +13,6 @@ from services.test_results import generate_test_id
 from tasks.test_results_processor import (
     ParserError,
-    ParserNotSupportedError,
     TestResultsProcessorTask,
 )
 
@@ -93,192 +92,6 @@ def test_upload_processor_task_call(
         assert expected_result == result
         assert commit.message == "hello world"
 
-    @pytest.mark.integration
-    def test_upload_processor_task_call_pytest_reportlog(
-        self,
-        mocker,
-        mock_configuration,
-        dbsession,
-        codecov_vcr,
-        mock_storage,
-        mock_redis,
-        celery_app,
-    ):
-        url = "v4/raw/2019-05-22/C3C4715CA57C910D11D5EB899FC86A7E/4c4e4654ac25037ae869caeb3619d485970b6304/a84d445c-9c1e-434f-8275-f18f1f320f81.txt"
-        with open(here.parent.parent / "samples" / "sample_pytest_reportlog.txt") as f:
-            content = f.read()
-        mock_storage.write_file("archive", url, content)
-        upload = UploadFactory.create(storage_path=url)
-        dbsession.add(upload)
-        dbsession.flush()
-        redis_queue = [{"url": url, "upload_pk": upload.id_}]
-        mocker.patch.object(TestResultsProcessorTask, "app", celery_app)
-
-        commit = CommitFactory.create(
-            message="hello world",
-            commitid="cd76b0821854a780b60012aed85af0a8263004ad",
-            repository__owner__unencrypted_oauth_token="test7lk5ndmtqzxlx06rip65nac9c7epqopclnoy",
-            repository__owner__username="joseph-sentry",
-            repository__owner__service="github",
-            repository__name="codecov-demo",
-        )
-        dbsession.add(commit)
-        dbsession.flush()
-        current_report_row = CommitReport(commit_id=commit.id_)
-        dbsession.add(current_report_row)
-        dbsession.flush()
-        result = TestResultsProcessorTask().run_impl(
-            dbsession,
-            repoid=commit.repoid,
-            commitid=commit.commitid,
-            commit_yaml={"codecov": {"max_report_age": False}},
-            arguments_list=redis_queue,
-        )
-        expected_result = [
-            {
-                "successful": True,
-            }
-        ]
-
-        tests = dbsession.query(Test).all()
-        test_instances = dbsession.query(TestInstance).all()
-        failures = (
-            dbsession.query(TestInstance).filter_by(outcome=str(Outcome.Failure)).all()
-        )
-
-        assert len(tests) == 2
-        assert len(test_instances) == 2
-        assert len(failures) == 0
-
-        assert (
-            tests[0].flags_hash
-            == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-        )
-        assert test_instances[0].test.id == tests[0].id
-        assert test_instances[0].commitid == commit.commitid
-        assert test_instances[0].branch == commit.branch
-        assert test_instances[0].repoid == commit.repoid
-        assert expected_result == result
-        assert commit.message == "hello world"
-
-    @pytest.mark.integration
-    def test_upload_processor_task_call_vitest(
-        self,
-        mocker,
-        mock_configuration,
-        dbsession,
-        codecov_vcr,
-        mock_storage,
-        mock_redis,
-        celery_app,
-    ):
-        url = "v4/raw/2019-05-22/C3C4715CA57C910D11D5EB899FC86A7E/4c4e4654ac25037ae869caeb3619d485970b6304/a84d445c-9c1e-434f-8275-f18f1f320f81.txt"
-        with open(here.parent.parent / "samples" / "sample_vitest.txt") as f:
-            content = f.read()
-        mock_storage.write_file("archive", url, content)
-        upload = UploadFactory.create(storage_path=url)
-        dbsession.add(upload)
-        dbsession.flush()
-        redis_queue = [{"url": url, "upload_pk": upload.id_}]
-        mocker.patch.object(TestResultsProcessorTask, "app", celery_app)
-
-        commit = CommitFactory.create(
-            message="hello world",
-            commitid="cd76b0821854a780b60012aed85af0a8263004ad",
-            repository__owner__unencrypted_oauth_token="test7lk5ndmtqzxlx06rip65nac9c7epqopclnoy",
-            repository__owner__username="joseph-sentry",
-            repository__owner__service="github",
-            repository__name="codecov-demo",
-        )
-        dbsession.add(commit)
-        dbsession.flush()
-        current_report_row = CommitReport(commit_id=commit.id_)
-        dbsession.add(current_report_row)
-        dbsession.flush()
-        result = TestResultsProcessorTask().run_impl(
-            dbsession,
-            repoid=commit.repoid,
-            commitid=commit.commitid,
-            commit_yaml={"codecov": {"max_report_age": False}},
-            arguments_list=redis_queue,
-        )
-        expected_result = [
-            {
-                "successful": True,
-            }
-        ]
-
-        tests = dbsession.query(Test).all()
-        test_instances = dbsession.query(TestInstance).all()
-        failures = (
-            dbsession.query(TestInstance).filter_by(outcome=str(Outcome.Failure)).all()
-        )
-
-        assert len(tests) == 1
-        assert len(test_instances) == 4
-        assert len(failures) == 4
-
-        assert (
-            tests[0].flags_hash
-            == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-        )
-        assert test_instances[0].test.id == tests[0].id
-
-        assert expected_result == result
-        assert commit.message == "hello world"
-
-    @pytest.mark.integration
-    def test_test_result_processor_task_error_report_matching(
-        self,
-        caplog,
-        mocker,
-        mock_configuration,
-        dbsession,
-        codecov_vcr,
-        mock_storage,
-        mock_redis,
-        celery_app,
-    ):
-        url = "v4/raw/2019-05-22/C3C4715CA57C910D11D5EB899FC86A7E/4c4e4654ac25037ae869caeb3619d485970b6304/a84d445c-9c1e-434f-8275-f18f1f320f81.txt"
-        with open(here.parent.parent / "samples" / "sample_vitest.txt") as f:
-            content = f.read()
-        mock_storage.write_file("archive", url, content)
-        upload = UploadFactory.create(storage_path=url)
-        dbsession.add(upload)
-        dbsession.flush()
-        redis_queue = [{"url": url, "upload_pk": upload.id_}]
-        mocker.patch.object(TestResultsProcessorTask, "app", celery_app)
-        mocker.patch.object(
-            TestResultsProcessorTask,
-            "match_report",
-            side_effect=ParserNotSupportedError(),
-        )
-
-        commit = CommitFactory.create(
-            message="hello world",
-            commitid="cd76b0821854a780b60012aed85af0a8263004ad",
-            repository__owner__unencrypted_oauth_token="test7lk5ndmtqzxlx06rip65nac9c7epqopclnoy",
-            repository__owner__username="joseph-sentry",
-            repository__owner__service="github",
-            repository__name="codecov-demo",
-        )
-
-        dbsession.add(commit)
-        dbsession.flush()
-        current_report_row = CommitReport(commit_id=commit.id_)
-        dbsession.add(current_report_row)
-        dbsession.flush()
-
-        result = TestResultsProcessorTask().run_impl(
-            dbsession,
-            repoid=commit.repoid,
-            commitid=commit.commitid,
-            commit_yaml={"codecov": {"max_report_age": False}},
-            arguments_list=redis_queue,
-        )
-        print(caplog.text)
-        assert "File did not match any parser format" in caplog.text
-
     @pytest.mark.integration
     def test_test_result_processor_task_error_parsing_file(
         self,
@@ -292,7 +105,7 @@ def test_test_result_processor_task_error_parsing_file(
         celery_app,
     ):
         url = "v4/raw/2019-05-22/C3C4715CA57C910D11D5EB899FC86A7E/4c4e4654ac25037ae869caeb3619d485970b6304/a84d445c-9c1e-434f-8275-f18f1f320f81.txt"
-        with open(here.parent.parent / "samples" / "sample_vitest.txt") as f:
+        with open(here.parent.parent / "samples" / "sample_test.txt") as f:
             content = f.read()
         mock_storage.write_file("archive", url, content)
         upload = UploadFactory.create(storage_path=url)
@@ -300,10 +113,9 @@ def test_test_result_processor_task_error_parsing_file(
         dbsession.flush()
         redis_queue = [{"url": url, "upload_pk": upload.id_}]
         mocker.patch.object(TestResultsProcessorTask, "app", celery_app)
-        mocker.patch.object(
-            TestResultsProcessorTask,
-            "match_report",
-            return_value=("test_parser", mocker.MagicMock(side_effect=ParserError)),
+        mocker.patch(
+            "tasks.test_results_processor.parse_junit_xml",
+            side_effect=ParserError,
         )
 
         commit = CommitFactory.create(
@@ -344,7 +156,7 @@ def test_test_result_processor_task_delete_archive(
         celery_app,
     ):
         url = "v4/raw/2019-05-22/C3C4715CA57C910D11D5EB899FC86A7E/4c4e4654ac25037ae869caeb3619d485970b6304/a84d445c-9c1e-434f-8275-f18f1f320f81.txt"
-        with open(here.parent.parent / "samples" / "sample_vitest.txt") as f:
+        with open(here.parent.parent / "samples" / "sample_test.txt") as f:
             content = f.read()
         mock_storage.write_file("archive", url, content)
         upload = UploadFactory.create(storage_path=url)
@@ -391,9 +203,9 @@ def test_test_result_processor_task_delete_archive(
 
         assert result == expected_result
 
-        assert len(tests) == 1
+        assert len(tests) == 4
         assert len(test_instances) == 4
-        assert len(failures) == 4
+        assert len(failures) == 1
 
         assert (
             tests[0].flags_hash
@@ -452,7 +264,10 @@ def test_test_result_processor_task_bad_file(
         expected_result = [{"successful": False}]
         assert expected_result == result
 
-        assert "File did not match any parser format" in caplog.text
+        assert (
+            "No test result files were successfully parsed for this upload"
+            in caplog.text
+        )
 
     @pytest.mark.integration
     def test_upload_processor_task_call_existing_test(
@@ -801,3 +616,87 @@ def test_upload_processor_task_call_daily_test_totals(
         assert [r.avg_duration_seconds for r in rollups] == [0.001, 7.2, 0.002, 3.6]
         assert [r.last_duration_seconds for r in rollups] == [0.001, 7.2, 0.002, 3.6]
         traveller.stop()
+
+    @pytest.mark.integration
+    @pytest.mark.parametrize(
+        "source_file_name",
+        ["sample_test_network.txt", "sample_test_missing_network.txt"],
+    )
+    def test_upload_processor_task_call_network(
+        self,
+        mocker,
+        mock_configuration,
+        dbsession,
+        codecov_vcr,
+        mock_storage,
+        mock_redis,
+        celery_app,
+        source_file_name,
+    ):
+        tests = dbsession.query(Test).all()
+        test_instances = dbsession.query(TestInstance).all()
+        assert len(tests) == 0
+        assert len(test_instances) == 0
+
+        url = "v4/raw/2019-05-22/C3C4715CA57C910D11D5EB899FC86A7E/4c4e4654ac25037ae869caeb3619d485970b6304/a84d445c-9c1e-434f-8275-f18f1f320f81.txt"
+        with open(here.parent.parent / "samples" / source_file_name) as f:
+            content = f.read()
+        mock_storage.write_file("archive", url, content)
+        upload = UploadFactory.create(storage_path=url)
+        dbsession.add(upload)
+        dbsession.flush()
+        redis_queue = [{"url": url, "upload_pk": upload.id_}]
+        mocker.patch.object(TestResultsProcessorTask, "app", celery_app)
+
+        commit = CommitFactory.create(
+            message="hello world",
+            commitid="cd76b0821854a780b60012aed85af0a8263004ad",
+            repository__owner__unencrypted_oauth_token="test7lk5ndmtqzxlx06rip65nac9c7epqopclnoy",
+            repository__owner__username="joseph-sentry",
+            repository__owner__service="github",
+            repository__name="codecov-demo",
+        )
+        dbsession.add(commit)
+        dbsession.flush()
+        current_report_row = CommitReport(commit_id=commit.id_)
+        dbsession.add(current_report_row)
+        dbsession.flush()
+        result = TestResultsProcessorTask().run_impl(
+            dbsession,
+            repoid=upload.report.commit.repoid,
+            commitid=commit.commitid,
+            commit_yaml={"codecov": {"max_report_age": False}},
+            arguments_list=redis_queue,
+        )
+        expected_result = [
+            {
+                "successful": True,
+            }
+        ]
+        tests = dbsession.query(Test).all()
+        test_instances = dbsession.query(TestInstance).all()
+        failures = (
+            dbsession.query(TestInstance).filter_by(outcome=str(Outcome.Failure)).all()
+        )
+
+        assert len(tests) == 4
+        assert len(test_instances) == 4
+        assert len(failures) == 1
+
+        for test in tests:
+            assert test.framework == "Pytest"
+            assert test.computed_name.startswith(
+                "api/temp/calculator/test_calculator.py::"
+            )
+
+        assert (
+            failures[0].failure_message.replace(" ", "").replace("\n", "")
+            == """deftest_divide():>assertCalculator.divide(1,2)==0.5Eassert1.0==0.5E+where1.0=<functionCalculator.divideat0x104c9eb90>(1,2)E+where<functionCalculator.divideat0x104c9eb90>=Calculator.divideapi/temp/calculator/test_calculator.py:30:AssertionError"""
+        )
+        assert (
+            failures[0].test.name
+            == "api.temp.calculator.test_calculator\x1ftest_divide"
+        )
+        assert expected_result == result
+        assert commit.message == "hello world"