diff --git a/.github/workflows/self-scheduled-caller.yml b/.github/workflows/self-scheduled-caller.yml
new file mode 100644
index 00000000000000..59b992bcd250e2
--- /dev/null
+++ b/.github/workflows/self-scheduled-caller.yml
@@ -0,0 +1,59 @@
+name: Self-hosted runner (scheduled)
+
+
+on:
+  repository_dispatch:
+  schedule:
+    - cron: "17 2 * * *"
+  push:
+    branches:
+      - run_scheduled_ci*
+
+jobs:
+  model-ci:
+    name: Model CI
+    uses: ./.github/workflows/self-scheduled.yml
+    with:
+      job: run_tests_gpu
+      slack_report_channel: "#transformers-ci-daily-models"
+    secrets: inherit
+
+  torch-pipeline:
+    name: Torch pipeline CI
+    uses: ./.github/workflows/self-scheduled.yml
+    with:
+      job: run_pipelines_torch_gpu
+      slack_report_channel: "#transformers-ci-daily-pipeline-torch"
+    secrets: inherit
+
+  tf-pipeline:
+    name: TF pipeline CI
+    uses: ./.github/workflows/self-scheduled.yml
+    with:
+      job: run_pipelines_tf_gpu
+      slack_report_channel: "#transformers-ci-daily-pipeline-tf"
+    secrets: inherit
+
+  example-ci:
+    name: Example CI
+    uses: ./.github/workflows/self-scheduled.yml
+    with:
+      job: run_examples_gpu
+      slack_report_channel: "#transformers-ci-daily-examples"
+    secrets: inherit
+
+  deepspeed-ci:
+    name: DeepSpeed CI
+    uses: ./.github/workflows/self-scheduled.yml
+    with:
+      job: run_all_tests_torch_cuda_extensions_gpu
+      slack_report_channel: "#transformers-ci-daily-deepspeed"
+    secrets: inherit
+
+  quantization-ci:
+    name: Quantization CI
+    uses: ./.github/workflows/self-scheduled.yml
+    with:
+      job: run_tests_quantization_torch_gpu
+      slack_report_channel: "#transformers-ci-daily-quantization"
+    secrets: inherit
diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml
index 465c00dd13bbcd..3e563e94e15ca0 100644
--- a/.github/workflows/self-scheduled.yml
+++ b/.github/workflows/self-scheduled.yml
@@ -7,12 +7,14 @@ name: Self-hosted runner (scheduled)
 #   `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`
 
 on:
-  repository_dispatch:
-  schedule:
-    - cron: "17 2 * * *"
-  push:
-    branches:
-      - run_scheduled_ci*
+  workflow_call:
+    inputs:
+      job:
+        required: true
+        type: string
+      slack_report_channel:
+        required: true
+        type: string
 
 env:
   HF_HOME: /mnt/cache
@@ -31,6 +33,7 @@ env:
 
 jobs:
   setup:
+    if: ${{ inputs.job == 'run_tests_gpu' }}
     name: Setup
     strategy:
       matrix:
@@ -71,6 +74,7 @@
         nvidia-smi
 
   run_tests_gpu:
+    if: ${{ inputs.job == 'run_tests_gpu' }}
    name: " "
     needs: setup
     strategy:
@@ -85,17 +89,17 @@
       slice_id: ${{ matrix.slice_id }}
     secrets: inherit
 
-  run_examples_gpu:
-    name: Examples directory
+  run_pipelines_torch_gpu:
+    if: ${{ inputs.job == 'run_pipelines_torch_gpu' }}
+    name: PyTorch pipelines
     strategy:
       fail-fast: false
       matrix:
-        machine_type: [single-gpu]
+        machine_type: [single-gpu, multi-gpu]
     runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
-      image: huggingface/transformers-all-latest-gpu
-      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
+      image: huggingface/transformers-pytorch-gpu
+      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       - name: Update clone
         working-directory: /transformers
@@ -118,39 +122,39 @@
         working-directory: /transformers
         run: pip freeze
 
-      - name: Run examples tests on GPU
+      - name: Run all pipeline tests on GPU
         working-directory: /transformers
         run: |
-          pip install -r examples/pytorch/_tests_requirements.txt
-          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_examples_gpu examples/pytorch
+          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines
 
       - name: Failure short reports
         if: ${{ failure() }}
         continue-on-error: true
-        run: cat /transformers/reports/${{ matrix.machine_type }}_examples_gpu/failures_short.txt
+        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu/failures_short.txt
 
-      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_examples_gpu"
+      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu"
         if: ${{ always() }}
         uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.machine_type }}_run_examples_gpu
-          path: /transformers/reports/${{ matrix.machine_type }}_examples_gpu
+          name: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu
+          path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu
 
-  run_pipelines_torch_gpu:
-    name: PyTorch pipelines
+  run_pipelines_tf_gpu:
+    if: ${{ inputs.job == 'run_pipelines_tf_gpu' }}
+    name: TensorFlow pipelines
     strategy:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
     runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
-      image: huggingface/transformers-pytorch-gpu
+      image: huggingface/transformers-tensorflow-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
     steps:
       - name: Update clone
         working-directory: /transformers
-        run: git fetch && git checkout ${{ github.sha }}
+        run: |
+          git fetch && git checkout ${{ github.sha }}
 
       - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
         working-directory: /transformers
@@ -172,36 +176,35 @@
       - name: Run all pipeline tests on GPU
         working-directory: /transformers
         run: |
-          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines
+          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines
 
       - name: Failure short reports
-        if: ${{ failure() }}
-        continue-on-error: true
-        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu/failures_short.txt
+        if: ${{ always() }}
+        run: |
+          cat /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu/failures_short.txt
 
-      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu"
+      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu"
         if: ${{ always() }}
         uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu
-          path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu
+          name: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu
+          path: /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu
 
-  run_pipelines_tf_gpu:
-    name: TensorFlow pipelines
+  run_examples_gpu:
+    if: ${{ inputs.job == 'run_examples_gpu' }}
+    name: Examples directory
     strategy:
       fail-fast: false
       matrix:
-        machine_type: [single-gpu, multi-gpu]
+        machine_type: [single-gpu]
     runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
     container:
-      image: huggingface/transformers-tensorflow-gpu
-      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
+      image: huggingface/transformers-all-latest-gpu
+      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       - name: Update clone
         working-directory: /transformers
-        run: |
-          git fetch && git checkout ${{ github.sha }}
+        run: git fetch && git checkout ${{ github.sha }}
 
       - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
         working-directory: /transformers
@@ -220,31 +223,32 @@
         working-directory: /transformers
         run: pip freeze
 
-      - name: Run all pipeline tests on GPU
+      - name: Run examples tests on GPU
         working-directory: /transformers
         run: |
-          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines
+          pip install -r examples/pytorch/_tests_requirements.txt
+          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_examples_gpu examples/pytorch
 
       - name: Failure short reports
-        if: ${{ always() }}
-        run: |
-          cat /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu/failures_short.txt
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: cat /transformers/reports/${{ matrix.machine_type }}_examples_gpu/failures_short.txt
 
-      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu"
+      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_examples_gpu"
         if: ${{ always() }}
         uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.machine_type }}_run_tests_tf_pipeline_gpu
-          path: /transformers/reports/${{ matrix.machine_type }}_tests_tf_pipeline_gpu
+          name: ${{ matrix.machine_type }}_run_examples_gpu
+          path: /transformers/reports/${{ matrix.machine_type }}_examples_gpu
 
   run_all_tests_torch_cuda_extensions_gpu:
+    if: ${{ inputs.job == 'run_all_tests_torch_cuda_extensions_gpu' }}
     name: Torch CUDA extension tests
     strategy:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
     runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
-    needs: setup
     container:
       image: huggingface/transformers-pytorch-deepspeed-latest-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -298,6 +302,7 @@
           path: /workspace/transformers/reports/${{ matrix.machine_type }}_tests_torch_cuda_extensions_gpu
 
   run_tests_quantization_torch_gpu:
+    if: ${{ inputs.job == 'run_tests_quantization_torch_gpu' }}
     name: Quantization tests
     strategy:
       fail-fast: false
@@ -307,7 +312,6 @@
     container:
       image: huggingface/transformers-quantization-latest-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
-    needs: setup
     steps:
       - name: Update clone
         working-directory: /transformers
@@ -348,18 +352,11 @@
           path: /transformers/reports/${{ matrix.machine_type }}_tests_quantization_torch_gpu
 
   run_extract_warnings:
+    # Let's only do this for the job `run_tests_gpu` to simplify the (already complex) logic.
+    if: ${{ always() && inputs.job == 'run_tests_gpu' }}
     name: Extract warnings in CI artifacts
     runs-on: ubuntu-22.04
-    if: always()
-    needs: [
-      setup,
-      run_tests_gpu,
-      run_examples_gpu,
-      run_pipelines_tf_gpu,
-      run_pipelines_torch_gpu,
-      run_all_tests_torch_cuda_extensions_gpu,
-      run_tests_quantization_torch_gpu,
-    ]
+    needs: [setup, run_tests_gpu]
     steps:
       - name: Checkout transformers
         uses: actions/checkout@v3
@@ -396,52 +393,24 @@
           path: warnings_in_ci/selected_warnings.json
 
   send_results:
-    name: Send results to webhook
-    runs-on: ubuntu-22.04
-    if: always()
+    name: Slack Report
     needs: [
       setup,
       run_tests_gpu,
-      run_examples_gpu,
-      run_pipelines_tf_gpu,
       run_pipelines_torch_gpu,
+      run_pipelines_tf_gpu,
+      run_examples_gpu,
       run_all_tests_torch_cuda_extensions_gpu,
       run_tests_quantization_torch_gpu,
       run_extract_warnings
     ]
-    steps:
-      - name: Preliminary job status
-        shell: bash
-        # For the meaning of these environment variables, see the job `Setup`
-        run: |
-          echo "Setup status: ${{ needs.setup.result }}"
-
-      - uses: actions/checkout@v3
-      - uses: actions/download-artifact@v3
-      - name: Send message to Slack
-        env:
-          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
-          CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
-          CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
-          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
-          CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
-          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-          CI_EVENT: scheduled
-          CI_SHA: ${{ github.sha }}
-          CI_WORKFLOW_REF: ${{ github.workflow_ref }}
-          SETUP_STATUS: ${{ needs.setup.result }}
-        # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
-        # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
-        run: |
-          sudo apt-get install -y curl
-          pip install slack_sdk
-          pip show slack_sdk
-          python utils/notification_service.py "${{ needs.setup.outputs.folder_slices }}"
-
-      # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
-      - name: Failure table artifacts
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v3
-        with:
-          name: prev_ci_results
-          path: prev_ci_results
+    if: ${{ always() }}
+    uses: ./.github/workflows/slack-report.yml
+    with:
+      job: ${{ inputs.job }}
+      # This would be `skipped` if `setup` is skipped.
+      setup_status: ${{ needs.setup.result }}
+      slack_report_channel: ${{ inputs.slack_report_channel }}
+      # This would be an empty string if `setup` is skipped.
+      folder_slices: ${{ needs.setup.outputs.folder_slices }}
+    secrets: inherit
\ No newline at end of file
diff --git a/.github/workflows/slack-report.yml b/.github/workflows/slack-report.yml
new file mode 100644
index 00000000000000..0e964e8596a0f5
--- /dev/null
+++ b/.github/workflows/slack-report.yml
@@ -0,0 +1,64 @@
+name: CI slack report
+
+on:
+  workflow_call:
+    inputs:
+      job:
+        required: true
+        type: string
+      slack_report_channel:
+        required: true
+        type: string
+      setup_status:
+        required: true
+        type: string
+      folder_slices:
+        required: true
+        type: string
+
+
+jobs:
+  send_results:
+    name: Send results to webhook
+    runs-on: ubuntu-22.04
+    if: always()
+    steps:
+      - name: Preliminary job status
+        shell: bash
+        # For the meaning of these environment variables, see the job `Setup`
+        run: |
+          echo "Setup status: ${{ inputs.setup_status }}"
+
+      - uses: actions/checkout@v3
+      - uses: actions/download-artifact@v3
+      - name: Send message to Slack
+        env:
+          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
+          CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
+          CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
+          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
+          SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }}
+          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
+          CI_EVENT: scheduled
+          CI_SHA: ${{ github.sha }}
+          CI_WORKFLOW_REF: ${{ github.workflow_ref }}
+          CI_TEST_JOB: ${{ inputs.job }}
+          SETUP_STATUS: ${{ inputs.setup_status }}
+        # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
+        # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
+        # For a job that doesn't depend on (i.e. `needs`) `setup`, the value for `inputs.folder_slices` would be an
+        # empty string, and the called script still gets one argument (which is the empty string).
+        run: |
+          sudo apt-get install -y curl
+          pip install slack_sdk
+          pip show slack_sdk
+          python utils/notification_service.py "${{ inputs.folder_slices }}"
+
+      # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
+      - name: Failure table artifacts
+        # Only the model testing job is concerned for this step
+        if: ${{ inputs.job == 'run_tests_gpu' }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: prev_ci_results
+          path: prev_ci_results
diff --git a/utils/notification_service.py b/utils/notification_service.py
index d29e6994a232b2..5378348ee9cc9e 100644
--- a/utils/notification_service.py
+++ b/utils/notification_service.py
@@ -227,10 +227,13 @@ def warnings(self) -> Dict:
         button_text = "Check warnings (Link not found)"
         # Use the workflow run link
         job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"
-        if "Extract warnings in CI artifacts" in github_actions_job_links:
-            button_text = "Check warnings"
-            # Use the actual job link
-            job_link = f"{github_actions_job_links['Extract warnings in CI artifacts']}"
+
+        for job in github_actions_jobs:
+            if "Extract warnings in CI artifacts" in job["name"] and job["conclusion"] == "success":
+                button_text = "Check warnings"
+                # Use the actual job link
+                job_link = job["html_url"]
+                break
 
         huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x]
         text = f"There are {len(self.selected_warnings)} warnings being selected."
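Note on the hunk above (an aside, not part of the patch): the old code looked the warnings job up in `github_actions_job_links`, a name-to-URL dict, while the new code scans the raw job objects returned by `get_jobs` (called further down in this file) so it can also check the job's conclusion before linking to it. A minimal sketch of the shape the loop assumes, with illustrative sample values that are not taken from the patch:

    # Hypothetical job objects, shaped like the GitHub "list jobs for a workflow run" API response.
    github_actions_jobs = [
        {
            "name": "Extract warnings in CI artifacts",
            "conclusion": "success",
            "html_url": "https://github.com/huggingface/transformers/actions/runs/1/job/2",  # illustrative
        },
    ]

    # Fallback: the workflow-run link, used when the job is missing or did not succeed.
    button_text = "Check warnings (Link not found)"
    job_link = "https://github.com/huggingface/transformers/actions/runs/1"  # illustrative
    for job in github_actions_jobs:
        if "Extract warnings in CI artifacts" in job["name"] and job["conclusion"] == "success":
            button_text = "Check warnings"
            job_link = job["html_url"]  # link directly to the successful job
            break
    print(button_text, job_link)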
@@ -573,7 +576,7 @@ def error_out(title, ci_title="", runner_not_available=False, runner_failed=Fals
         print(json.dumps({"blocks": blocks}))
 
         client.chat_postMessage(
-            channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
+            channel=SLACK_REPORT_CHANNEL_ID,
             text=text,
             blocks=payload,
         )
@@ -586,7 +589,7 @@ def post(self):
         text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
 
         self.thread_ts = client.chat_postMessage(
-            channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
+            channel=SLACK_REPORT_CHANNEL_ID,
             blocks=payload,
             text=text,
         )
@@ -712,7 +715,7 @@ def post_reply(self):
                 print(json.dumps({"blocks": blocks}))
 
                 client.chat_postMessage(
-                    channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
+                    channel=SLACK_REPORT_CHANNEL_ID,
                     text=f"Results for {job}",
                     blocks=blocks,
                     thread_ts=self.thread_ts["ts"],
@@ -735,7 +738,7 @@ def post_reply(self):
                 print(json.dumps({"blocks": blocks}))
 
                 client.chat_postMessage(
-                    channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
+                    channel=SLACK_REPORT_CHANNEL_ID,
                     text=f"Results for {job}",
                     blocks=blocks,
                     thread_ts=self.thread_ts["ts"],
@@ -749,7 +752,7 @@ def post_reply(self):
             print(json.dumps({"blocks": blocks}))
 
             client.chat_postMessage(
-                channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
+                channel=SLACK_REPORT_CHANNEL_ID,
                 text="Results for new failures",
                 blocks=blocks,
                 thread_ts=self.thread_ts["ts"],
@@ -852,6 +855,8 @@ def prepare_reports(title, header, reports, to_truncate=True):
 
 
 if __name__ == "__main__":
+    SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"]
+
     # runner_status = os.environ.get("RUNNER_STATUS")
     # runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
     setup_status = os.environ.get("SETUP_STATUS")
@@ -861,7 +866,8 @@ def prepare_reports(title, header, reports, to_truncate=True):
     # Let's keep the lines regardig runners' status (we might be able to use them again in the future)
     runner_not_available = False
     runner_failed = False
-    setup_failed = True if setup_status is not None and setup_status != "success" else False
+    # Some jobs don't depend (`needs`) on the job `setup`: in this case, the status of the job `setup` is `skipped`.
+    setup_failed = False if setup_status in ["skipped", "success"] else True
 
     org = "huggingface"
     repo = "transformers"
@@ -929,14 +935,21 @@ def prepare_reports(title, header, reports, to_truncate=True):
         Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
         exit(0)
 
-    arguments = sys.argv[1:][0]
-    try:
-        folder_slices = ast.literal_eval(arguments)
-        # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
-        models = [x.replace("models/", "models_") for folders in folder_slices for x in folders]
-    except SyntaxError:
-        Message.error_out(title, ci_title)
-        raise ValueError("Errored out.")
+    # sys.argv[0] is always `utils/notification_service.py`.
+    arguments = sys.argv[1:]
+    # In our usage in `.github/workflows/slack-report.yml`, we always pass an argument when calling this script.
+    # The argument could be an empty string `""` if a job doesn't depend on the job `setup`.
+    if arguments[0] == "":
+        models = []
+    else:
+        model_list_as_str = arguments[0]
+        try:
+            folder_slices = ast.literal_eval(model_list_as_str)
+            # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
+            models = [x.replace("models/", "models_") for folders in folder_slices for x in folders]
+        except Exception:
+            Message.error_out(title, ci_title)
+            raise ValueError("Errored out.")
 
     github_actions_jobs = get_jobs(
         workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"]
@@ -1039,9 +1052,9 @@ def prepare_reports(title, header, reports, to_truncate=True):
 
     # Additional runs
     additional_files = {
-        "Examples directory": "run_examples_gpu",
         "PyTorch pipelines": "run_tests_torch_pipeline_gpu",
         "TensorFlow pipelines": "run_tests_tf_pipeline_gpu",
+        "Examples directory": "run_examples_gpu",
         "Torch CUDA extension tests": "run_tests_torch_cuda_extensions_gpu_test_reports",
         "Quantization tests": "run_tests_quantization_torch_gpu",
     }
@@ -1056,6 +1069,24 @@ def prepare_reports(title, header, reports, to_truncate=True):
     elif ci_event.startswith("Push CI (AMD)"):
         additional_files = {}
 
+    # A map associating the job names (specified by `inputs.job` in a workflow file) with the keys of
+    # `additional_files`. This is used to remove some entries in `additional_files` that are not concerned by a
+    # specific job. See below.
+    job_to_test_map = {
+        "run_pipelines_torch_gpu": "PyTorch pipelines",
+        "run_pipelines_tf_gpu": "TensorFlow pipelines",
+        "run_examples_gpu": "Examples directory",
+        "run_all_tests_torch_cuda_extensions_gpu": "Torch CUDA extension tests",
+        "run_tests_quantization_torch_gpu": "Quantization tests",
+    }
+
+    # Remove some entries in `additional_files` if they are not concerned.
+    test_name = None
+    job_name = os.getenv("CI_TEST_JOB")
+    if job_name in job_to_test_map:
+        test_name = job_to_test_map[job_name]
+    additional_files = {k: v for k, v in additional_files.items() if k == test_name}
+
     additional_results = {
         key: {
             "failed": {"unclassified": 0, "single": 0, "multi": 0},
@@ -1103,17 +1134,24 @@ def prepare_reports(title, header, reports, to_truncate=True):
                         {"line": line, "trace": stacktraces.pop(0)}
                     )
 
+    # Let's only check the warnings for the model testing job. Currently, the job `run_extract_warnings` is only run
+    # when `inputs.job` (in the workflow file) is `run_tests_gpu`. The reason is: otherwise we need to save several
+    # artifacts with different names, which complicates the logic for an insignificant part of the CI workflow reporting.
     selected_warnings = []
-    if "warnings_in_ci" in available_artifacts:
-        directory = available_artifacts["warnings_in_ci"].paths[0]["path"]
-        with open(os.path.join(directory, "selected_warnings.json")) as fp:
-            selected_warnings = json.load(fp)
+    if job_name == "run_tests_gpu":
+        if "warnings_in_ci" in available_artifacts:
+            directory = available_artifacts["warnings_in_ci"].paths[0]["path"]
+            with open(os.path.join(directory, "selected_warnings.json")) as fp:
+                selected_warnings = json.load(fp)
 
     if not os.path.isdir(os.path.join(os.getcwd(), "prev_ci_results")):
         os.makedirs(os.path.join(os.getcwd(), "prev_ci_results"))
 
-    with open("prev_ci_results/model_results.json", "w", encoding="UTF-8") as fp:
-        json.dump(model_results, fp, indent=4, ensure_ascii=False)
+    # Only the model testing job is concerned: this condition is to avoid other jobs uploading an empty list as
+    # results.
+    if job_name == "run_tests_gpu":
+        with open("prev_ci_results/model_results.json", "w", encoding="UTF-8") as fp:
+            json.dump(model_results, fp, indent=4, ensure_ascii=False)
 
     prev_ci_artifacts = None
    target_workflow = "huggingface/transformers/.github/workflows/self-scheduled.yml@refs/heads/main"
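A note on the reworked argument handling in `notification_service.py`: each caller now passes exactly one argument, which is either the empty string (for jobs that don't `needs` the `setup` job) or the stringified nested list of model folders that `setup` emits as `folder_slices`. A minimal sketch of that parsing path, assuming the list shape implied by the patch; only `models/bert` comes from the patch's own comment, the other entries are illustrative:

    import ast

    # Stringified nested list as the `setup` job would emit it (sample values).
    folder_slices_arg = "[['models/bert', 'models/gpt2'], ['models/vit']]"

    if folder_slices_arg == "":
        # Jobs that don't depend on `setup` pass an empty string: report no models.
        models = []
    else:
        folder_slices = ast.literal_eval(folder_slices_arg)
        # Artifact names use `_` instead of `/`: `models/bert` -> `models_bert`.
        models = [x.replace("models/", "models_") for folders in folder_slices for x in folders]

    print(models)  # ['models_bert', 'models_gpt2', 'models_vit']

Using `ast.literal_eval` keeps the evaluation restricted to Python literals, and the broadened `except Exception` in the patch also catches errors such as `ValueError` that `literal_eval` can raise on malformed input, not just `SyntaxError`.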