diff --git a/services/report/__init__.py b/services/report/__init__.py
index 1224cb567..1f068b616 100644
--- a/services/report/__init__.py
+++ b/services/report/__init__.py
@@ -967,10 +967,9 @@ def build_report_from_raw_content(
         build = upload.build_code
         job = upload.job_code
         name = upload.name
-        url = upload.storage_path
+        archive_url = upload.storage_path
         reportid = upload.external_id
 
-        archive_url = url
         session = Session(
             provider=service,
             build=build,
@@ -978,7 +977,7 @@ def build_report_from_raw_content(
             name=name,
             time=int(time()),
             flags=flags,
-            archive=archive_url or url,
+            archive=archive_url,
             url=build_url,
         )
         try:
diff --git a/tasks/tests/unit/test_upload_processing_task.py b/tasks/tests/unit/test_upload_processing_task.py
index 7c11fcfa6..0caa946bf 100644
--- a/tasks/tests/unit/test_upload_processing_task.py
+++ b/tasks/tests/unit/test_upload_processing_task.py
@@ -7,6 +7,7 @@
 from shared.config import get_config
 from shared.reports.enums import UploadState
 from shared.reports.resources import Report, ReportFile, ReportLine, ReportTotals
+from shared.storage.exceptions import FileNotInStorageError
 from shared.torngit.exceptions import TorngitObjectNotFoundError
 
 from database.models import CommitReport, ReportDetails
@@ -36,16 +37,6 @@ def test_default_acks_late() -> None:
 
 
 class TestUploadProcessorTask(object):
-    def test_schedule_for_later_try(self, mocker):
-        mock_retry = mocker.patch.object(
-            UploadProcessorTask, "retry", side_effect=Retry()
-        )
-        task = UploadProcessorTask()
-        task.request.retries = 2
-        with pytest.raises(Retry):
-            task.schedule_for_later_try()
-        mock_retry.assert_called_with(countdown=180, max_retries=5)
-
     @pytest.mark.integration
     @pytest.mark.django_db(databases={"default"})
     def test_upload_processor_task_call(
@@ -740,44 +731,22 @@ def test_upload_task_process_individual_report_with_notfound_report(
         assert upload.state == "error"
 
     def test_upload_task_process_individual_report_with_notfound_report_no_retries_yet(
-        self,
-        mocker,
-        mock_configuration,
-        dbsession,
-        mock_repo_provider,
-        mock_storage,
-        mock_redis,
+        self, mocker
     ):
-        mocked_1 = mocker.patch.object(ArchiveService, "read_chunks")
-        mocked_1.return_value = None
-        mock_schedule_for_later_try = mocker.patch.object(
-            UploadProcessorTask,
-            "schedule_for_later_try",
-            side_effect=celery.exceptions.Retry,
-        )
-        false_report = mocker.MagicMock(
-            to_database=mocker.MagicMock(return_value=({}, "{}")), totals=ReportTotals()
-        )
-        # Mocking retry to also raise the exception so we can see how it is called
-        mocked_4 = mocker.patch.object(UploadProcessorTask, "app")
-        mocked_4.send_task.return_value = True
-        commit = CommitFactory.create(
-            message="", repository__yaml={"codecov": {"max_report_age": False}}
+        # throw an error that's retryable:
+        mocker.patch.object(
+            ReportService,
+            "parse_raw_report_from_storage",
+            side_effect=FileNotInStorageError(),
         )
-        dbsession.add(commit)
-        dbsession.flush()
-        upload = UploadFactory.create(report__commit=commit)
-        dbsession.add(upload)
         task = UploadProcessorTask()
-        task.request.retries = 0
-        with pytest.raises(celery.exceptions.Retry):
+        with pytest.raises(Retry):
             task.process_individual_report(
-                report_service=ReportService({"codecov": {"max_report_age": False}}),
-                commit=commit,
-                report=false_report,
-                upload=upload,
+                ReportService({}),
+                CommitFactory.create(),
+                Report(),
+                UploadFactory.create(),
             )
-        mock_schedule_for_later_try.assert_called_with()
 
     @pytest.mark.django_db(databases={"default"})
     def test_upload_task_call_with_empty_report(
diff --git a/tasks/upload_processor.py b/tasks/upload_processor.py
index 1c6a958cd..65b87aac3 100644
--- a/tasks/upload_processor.py
+++ b/tasks/upload_processor.py
@@ -34,6 +34,7 @@
 
 log = logging.getLogger(__name__)
 
+MAX_RETRIES = 5
 FIRST_RETRY_DELAY = 20
 
 
@@ -69,10 +70,6 @@ class UploadProcessorTask(BaseCodecovTask, name=upload_processor_task_name):
 
     acks_late = get_config("setup", "tasks", "upload", "acks_late", default=False)
 
-    def schedule_for_later_try(self, max_retries=5):
-        retry_in = FIRST_RETRY_DELAY * 3**self.request.retries
-        self.retry(max_retries=max_retries, countdown=retry_in)
-
     def run_impl(
         self,
         db_session,
@@ -167,7 +164,7 @@ def run_impl(
                     number_retries=self.request.retries,
                 ),
             )
-            self.retry(max_retries=5, countdown=retry_in)
+            self.retry(max_retries=MAX_RETRIES, countdown=retry_in)
 
     @sentry_sdk.trace
     def process_impl_within_lock(
@@ -409,11 +406,10 @@ def process_individual_report(
         if (
             processing_result.error is not None
             and processing_result.error.is_retryable
-            and self.request.retries == 0
+            and self.request.retries == 0  # the error is only retried on the first pass
         ):
             log.info(
-                "Scheduling a retry in %d due to retryable error",  # TODO: check if we have this in the logs
-                FIRST_RETRY_DELAY,
+                f"Scheduling a retry in {FIRST_RETRY_DELAY} due to retryable error",
                 extra=dict(
                     repoid=commit.repoid,
                     commit=commit.commitid,
@@ -423,7 +419,7 @@ def process_individual_report(
                     parent_task=self.request.parent_id,
                 ),
             )
-            self.schedule_for_later_try()
+            self.retry(max_retries=MAX_RETRIES, countdown=FIRST_RETRY_DELAY)
 
         # for the parallel experiment, we don't want to modify anything in the
         # database, so we disable it here
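
For reference, a minimal sketch of the retry behaviour the patch converges on. It is illustrative only and not part of the diff: FakeRequest, FakeTask and error_is_retryable are hypothetical stand-ins, while celery.exceptions.Retry and Task.retry() semantics are standard Celery. A retryable error on the first attempt (self.request.retries == 0) is turned into self.retry(max_retries=MAX_RETRIES, countdown=FIRST_RETRY_DELAY), which raises Retry, matching what the rewritten test asserts with pytest.raises(Retry).

# Illustrative sketch only; FakeRequest/FakeTask are hypothetical stand-ins,
# not the worker's real classes. Retry behaviour follows Celery's public API.
from celery.exceptions import Retry

MAX_RETRIES = 5
FIRST_RETRY_DELAY = 20


class FakeRequest:
    def __init__(self, retries):
        self.retries = retries


class FakeTask:
    def __init__(self, retries=0):
        self.request = FakeRequest(retries)

    def retry(self, max_retries, countdown):
        # Celery's Task.retry() raises Retry so the worker re-queues the task.
        raise Retry()

    def process_individual_report(self, error_is_retryable):
        if error_is_retryable and self.request.retries == 0:
            # only the first pass schedules a retry; later attempts fall through
            self.retry(max_retries=MAX_RETRIES, countdown=FIRST_RETRY_DELAY)
        return "processed despite the error"


# First attempt with a retryable error -> Retry propagates, as the test expects.
try:
    FakeTask(retries=0).process_individual_report(error_is_retryable=True)
except Retry:
    print("retry scheduled with countdown", FIRST_RETRY_DELAY)

# On a later attempt the same error is not retried again.
print(FakeTask(retries=1).process_individual_report(error_is_retryable=True))

The fixed FIRST_RETRY_DELAY countdown is equivalent to what the removed schedule_for_later_try helper produced in practice: its FIRST_RETRY_DELAY * 3**self.request.retries backoff was only ever reached when retries == 0, where it evaluates to FIRST_RETRY_DELAY.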