From 42021a7e79861ac1fbe7a638c8a10d875b8ccbed Mon Sep 17 00:00:00 2001 From: Alexey Sachkov Date: Mon, 24 Mar 2025 10:13:16 -0700 Subject: [PATCH 1/9] [SYCL][CI] Update workflow files This is an attempt to fix pre-commit on `sycl-rel-6_1_0` branch by updating all workflow files to make then in sync with `sycl` branch. --- .github/CODEOWNERS | 56 +- .github/new-issues-labeler.yml | 9 + .github/new-prs-labeler.yml | 25 + .github/workflows/bandit.yml | 32 ++ .../workflows/build-ci-container-windows.yml | 75 +++ .github/workflows/build-ci-container.yml | 123 ++-- .github/workflows/build-metrics-container.yml | 78 +++ .github/workflows/commit-access-review.py | 151 +++-- .../github-action-ci-windows/Dockerfile | 118 ++++ .../containers/github-action-ci/Dockerfile | 106 ++++ .github/workflows/coverity.yml | 83 +++ .github/workflows/docs.yml | 83 ++- .github/workflows/email-check.yaml | 5 +- .github/workflows/hlsl-matrix.yaml | 30 + .github/workflows/hlsl-test-all.yaml | 87 +++ .github/workflows/libc-fullbuild-tests.yml | 96 ++++ .github/workflows/libc-overlay-tests.yml | 120 ++++ .github/workflows/libclang-abi-tests.yml | 8 +- .github/workflows/libclang-python-tests.yml | 2 +- .github/workflows/libcxx-build-and-test.yaml | 66 +-- .github/workflows/libcxx-build-containers.yml | 71 +++ .../libcxx-restart-preempted-jobs.yaml | 125 ++++- .github/workflows/llvm-project-tests.yml | 12 +- .github/workflows/llvm-tests.yml | 12 +- .github/workflows/new-issues.yml | 2 +- .github/workflows/pr-code-format.yml | 7 +- .github/workflows/premerge.yaml | 134 +++++ .github/workflows/release-binaries-all.yml | 2 +- .github/workflows/release-binaries.yml | 283 +++------- .github/workflows/release-documentation.yml | 4 +- .github/workflows/release-doxygen.yml | 2 +- .github/workflows/release-sources.yml | 2 +- .github/workflows/scorecard.yml | 4 +- .github/workflows/spirv-tests.yml | 4 +- .github/workflows/sycl-aws.yml | 6 +- .../workflows/sycl-benchmark-aggregate.yml | 52 ++ 
.../workflows/sycl-containers-igc-dev.yaml | 12 +- .github/workflows/sycl-containers.yaml | 33 +- .github/workflows/sycl-detect-changes.yml | 5 +- .../workflows/sycl-issues-ping-assignee.yml | 4 +- .github/workflows/sycl-linux-build.yml | 82 ++- .../workflows/sycl-linux-precommit-aws.yml | 9 +- .github/workflows/sycl-linux-precommit.yml | 129 +++-- .github/workflows/sycl-linux-run-tests.yml | 258 ++++----- .../workflows/sycl-macos-build-and-test.yml | 6 +- .github/workflows/sycl-nightly.yml | 271 ++++++--- .github/workflows/sycl-post-commit.yml | 46 +- .github/workflows/sycl-rel-nightly.yml | 207 +++++++ .github/workflows/sycl-stale-issues.yml | 2 +- .github/workflows/sycl-sync-main.yml | 2 +- .github/workflows/sycl-update-gpu-driver.yml | 2 +- .../workflows/sycl-update-igc-dev-driver.yml | 2 +- .github/workflows/sycl-weekly.yml | 67 +++ .github/workflows/sycl-windows-build.yml | 99 +++- .github/workflows/sycl-windows-precommit.yml | 16 +- .github/workflows/sycl-windows-run-tests.yml | 117 +++- .github/workflows/trivy.yml | 37 ++ .github/workflows/ur-benchmarks-reusable.yml | 12 + .github/workflows/ur-benchmarks.yml | 12 + .github/workflows/ur-build-hw.yml | 159 ++++++ .github/workflows/ur-precommit.yml | 114 ++++ .github/workflows/ur-source-checks.yml | 64 +++ devops/.trivyignore.yaml | 5 + .../actions/benchmarking/aggregate/action.yml | 95 ++++ devops/actions/cached_checkout/action.yml | 3 +- devops/actions/run-tests/benchmark/action.yml | 128 +++++ devops/actions/run-tests/cts/action.yml | 176 ++++++ devops/actions/run-tests/e2e/action.yml | 83 +++ .../actions/run-tests/windows/cts/action.yml | 178 ++++++ .../actions/setup_linux_oneapi_env/action.yml | 22 + .../setup_windows_oneapi_env/action.yml | 28 + devops/bandit.config | 402 ++++++++++++++ devops/benchmarking/config.ini | 44 ++ devops/benchmarking/constants.ini | 48 ++ devops/benchmarking/enabled_tests.conf | 8 + devops/containers/nightly.Dockerfile | 18 + devops/containers/ubuntu2204_base.Dockerfile | 19 
+- devops/containers/ubuntu2204_build.Dockerfile | 17 +- .../ubuntu2204_intel_drivers.Dockerfile | 8 +- devops/containers/ubuntu2404_base.Dockerfile | 28 + devops/containers/ubuntu2404_build.Dockerfile | 49 ++ .../ubuntu2404_intel_drivers.Dockerfile | 33 ++ ...buntu2404_intel_drivers_igc_dev.Dockerfile | 28 + devops/dependencies-igc-dev.json | 8 +- devops/dependencies.json | 18 +- devops/scripts/benchmarking/aggregate.py | 205 +++++++ devops/scripts/benchmarking/benchmark.sh | 300 ++++++++++ devops/scripts/benchmarking/common.py | 196 +++++++ devops/scripts/benchmarking/compare.py | 101 ++++ devops/scripts/benchmarking/load_config.py | 30 + devops/scripts/benchmarks/README.md | 74 +++ devops/scripts/benchmarks/benches/base.py | 99 ++++ devops/scripts/benchmarks/benches/compute.py | 473 ++++++++++++++++ devops/scripts/benchmarks/benches/llamacpp.py | 156 ++++++ devops/scripts/benchmarks/benches/oneapi.py | 95 ++++ devops/scripts/benchmarks/benches/result.py | 40 ++ .../scripts/benchmarks/benches/syclbench.py | 524 ++++++++++++++++++ devops/scripts/benchmarks/benches/test.py | 76 +++ devops/scripts/benchmarks/benches/umf.py | 252 +++++++++ devops/scripts/benchmarks/benches/velocity.py | 502 +++++++++++++++++ .../benchmark_results.html.template | 192 +++++++ devops/scripts/benchmarks/history.py | 147 +++++ devops/scripts/benchmarks/main.py | 466 ++++++++++++++++ devops/scripts/benchmarks/options.py | 45 ++ devops/scripts/benchmarks/output_html.py | 340 ++++++++++++ devops/scripts/benchmarks/output_markdown.py | 399 +++++++++++++ devops/scripts/benchmarks/requirements.txt | 4 + .../benchmarks/utils/compute_runtime.py | 224 ++++++++ devops/scripts/benchmarks/utils/utils.py | 146 +++++ devops/scripts/benchmarks/workflow.png | Bin 0 -> 217603 bytes devops/scripts/build_zstd_1_5_6_ub24.sh | 108 ++++ devops/scripts/create-sycl-user.sh | 23 + devops/scripts/docker_entrypoint.sh | 5 - devops/scripts/install_build_tools.sh | 17 +- devops/scripts/install_drivers.sh | 25 +- 
devops/scripts/update_drivers.py | 26 +- devops/scripts/windows_detect_hung_tests.ps1 | 8 + 117 files changed, 9656 insertions(+), 930 deletions(-) create mode 100644 .github/workflows/bandit.yml create mode 100644 .github/workflows/build-ci-container-windows.yml create mode 100644 .github/workflows/build-metrics-container.yml create mode 100644 .github/workflows/containers/github-action-ci-windows/Dockerfile create mode 100644 .github/workflows/containers/github-action-ci/Dockerfile create mode 100644 .github/workflows/coverity.yml create mode 100644 .github/workflows/hlsl-matrix.yaml create mode 100644 .github/workflows/hlsl-test-all.yaml create mode 100644 .github/workflows/libc-fullbuild-tests.yml create mode 100644 .github/workflows/libc-overlay-tests.yml create mode 100644 .github/workflows/libcxx-build-containers.yml create mode 100644 .github/workflows/premerge.yaml create mode 100644 .github/workflows/sycl-benchmark-aggregate.yml create mode 100644 .github/workflows/sycl-rel-nightly.yml create mode 100644 .github/workflows/sycl-weekly.yml create mode 100644 .github/workflows/trivy.yml create mode 100644 .github/workflows/ur-benchmarks-reusable.yml create mode 100644 .github/workflows/ur-benchmarks.yml create mode 100644 .github/workflows/ur-build-hw.yml create mode 100644 .github/workflows/ur-precommit.yml create mode 100644 .github/workflows/ur-source-checks.yml create mode 100644 devops/.trivyignore.yaml create mode 100644 devops/actions/benchmarking/aggregate/action.yml create mode 100644 devops/actions/run-tests/benchmark/action.yml create mode 100644 devops/actions/run-tests/cts/action.yml create mode 100644 devops/actions/run-tests/e2e/action.yml create mode 100644 devops/actions/run-tests/windows/cts/action.yml create mode 100644 devops/actions/setup_linux_oneapi_env/action.yml create mode 100644 devops/actions/setup_windows_oneapi_env/action.yml create mode 100644 devops/bandit.config create mode 100644 devops/benchmarking/config.ini create mode 
100644 devops/benchmarking/constants.ini create mode 100644 devops/benchmarking/enabled_tests.conf create mode 100644 devops/containers/nightly.Dockerfile create mode 100644 devops/containers/ubuntu2404_base.Dockerfile create mode 100644 devops/containers/ubuntu2404_build.Dockerfile create mode 100644 devops/containers/ubuntu2404_intel_drivers.Dockerfile create mode 100644 devops/containers/ubuntu2404_intel_drivers_igc_dev.Dockerfile create mode 100644 devops/scripts/benchmarking/aggregate.py create mode 100755 devops/scripts/benchmarking/benchmark.sh create mode 100644 devops/scripts/benchmarking/common.py create mode 100644 devops/scripts/benchmarking/compare.py create mode 100644 devops/scripts/benchmarking/load_config.py create mode 100644 devops/scripts/benchmarks/README.md create mode 100644 devops/scripts/benchmarks/benches/base.py create mode 100644 devops/scripts/benchmarks/benches/compute.py create mode 100644 devops/scripts/benchmarks/benches/llamacpp.py create mode 100644 devops/scripts/benchmarks/benches/oneapi.py create mode 100644 devops/scripts/benchmarks/benches/result.py create mode 100644 devops/scripts/benchmarks/benches/syclbench.py create mode 100644 devops/scripts/benchmarks/benches/test.py create mode 100644 devops/scripts/benchmarks/benches/umf.py create mode 100644 devops/scripts/benchmarks/benches/velocity.py create mode 100644 devops/scripts/benchmarks/benchmark_results.html.template create mode 100644 devops/scripts/benchmarks/history.py create mode 100755 devops/scripts/benchmarks/main.py create mode 100644 devops/scripts/benchmarks/options.py create mode 100644 devops/scripts/benchmarks/output_html.py create mode 100644 devops/scripts/benchmarks/output_markdown.py create mode 100644 devops/scripts/benchmarks/requirements.txt create mode 100644 devops/scripts/benchmarks/utils/compute_runtime.py create mode 100644 devops/scripts/benchmarks/utils/utils.py create mode 100644 devops/scripts/benchmarks/workflow.png create mode 100755 
devops/scripts/build_zstd_1_5_6_ub24.sh create mode 100755 devops/scripts/create-sycl-user.sh create mode 100644 devops/scripts/windows_detect_hung_tests.ps1 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 196fffa19c569..6079780044c68 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -35,6 +35,22 @@ sycl/doc/design/spirv-extensions/ @intel/dpcpp-spirv-doc-reviewers sycl/doc/extensions/ @intel/dpcpp-specification-reviewers # Unified Runtime +unified-runtime/ @intel/unified-runtime-reviewers +# TODO: Use specific UR Level Zero adapter team +unified-runtime/*/adapters/level_zero/ @intel/unified-runtime-reviewers +# TODO: Use specific UR OpenCL adapter team +unified-runtime/*/adapters/opencl/ @intel/unified-runtime-reviewers +unified-runtime/*/adapters/cuda/ @intel/llvm-reviewers-cuda +unified-runtime/*/adapters/hip/ @intel/llvm-reviewers-cuda +unified-runtime/*/adapters/native_cpu/ @intel/dpcpp-nativecpu-reviewers +unified-runtime/source/adapters/**/command_buffer.* @intel/sycl-graphs-reviewers +unified-runtime/scripts/core/EXP-COMMAND-BUFFER.rst @intel/sycl-graphs-reviewers +unified-runtime/scripts/core/exp-command-buffer.yml @intel/sycl-graphs-reviewers +unified-runtime/test/conformance/exp_command_buffer** @intel/sycl-graphs-reviewers +unified-runtime/source/adapters/**/image.* @intel/bindless-images-reviewers +unified-runtime/scripts/core/EXP-BINDLESS-IMAGES.rst @intel/bindless-images-reviewers +unified-runtime/scripts/core/exp-bindless-images.yml @intel/bindless-images-reviewers +unified-runtime/test/conformance/exp_bindless_images** @intel/bindless-images-reviewers sycl/cmake/modules/FetchUnifiedRuntime.cmake @intel/unified-runtime-reviewers sycl/cmake/modules/UnifiedRuntimeTag.cmake @intel/unified-runtime-reviewers sycl/include/sycl/detail/ur.hpp @intel/unified-runtime-reviewers @@ -47,12 +63,18 @@ sycl/test-e2e/Adapters/ @intel/unified-runtime-reviewers sycl/ur_win_proxy_loader @intel/llvm-reviewers-runtime 
sycl/test-e2e/Adapters/dll-detach-order.cpp @intel/llvm-reviewers-runtime -# CUDA specific runtime implementations +# CUDA and HIP sycl/include/sycl/ext/oneapi/experimental/cuda/ @intel/llvm-reviewers-cuda - -# CUDA and HIP device code tests sycl/test/check_device_code/cuda/ @intel/llvm-reviewers-cuda sycl/test/check_device_code/hip/ @intel/llvm-reviewers-cuda +llvm/include/llvm/SYCLLowerIR/GlobalOffset.h @intel/llvm-reviewers-cuda +llvm/lib/SYCLLowerIR/GlobalOffset.cpp @intel/llvm-reviewers-cuda +llvm/include/llvm/SYCLLowerIR/LocalAccessorToSharedMemory.h @intel/llvm-reviewers-cuda +llvm/lib/SYCLLowerIR/LocalAccessorToSharedMemory.cpp @intel/llvm-reviewers-cuda +llvm/include/llvm/SYCLLowerIR/SYCLCreateNVVMAnnotations.h @intel/llvm-reviewers-cuda +llvm/lib/SYCLLowerIR/SYCLCreateNVVMAnnotations.cpp @intel/llvm-reviewers-cuda +llvm/lib/Target/NVPTX @intel/llvm-reviewers-cuda +llvm/lib/Target/AMDGPU @intel/llvm-reviewers-cuda # XPTI instrumentation utilities xpti/ @intel/llvm-reviewers-runtime @@ -106,6 +128,9 @@ devops/ @intel/dpcpp-devops-reviewers # dev-igc driver update devops/dependencies-igc-dev.json @intel/sycl-matrix-reviewers @intel/dpcpp-esimd-reviewers @intel/dpcpp-devops-reviewers +# Benchmarking scripts +devops/scripts/benchmarks/ @intel/llvm-reviewers-benchmarking + # Kernel fusion JIT compiler sycl-jit/ @intel/dpcpp-kernel-fusion-reviewers sycl/doc/design/KernelFusionJIT.md @intel/dpcpp-kernel-fusion-reviewers @@ -126,14 +151,14 @@ sycl/test/matrix @intel/sycl-matrix-reviewers sycl/test/check_device_code/matrix @intel/sycl-matrix-reviewers # Native CPU -llvm/**/*SYCLNativeCPU* @intel/dpcpp-nativecpu-pi-reviewers -clang/include/clang/Basic/SYCLNativeCPUHelpers.h @intel/dpcpp-nativecpu-pi-reviewers -clang/test/CodeGenSYCL/native_cpu*.cpp @intel/dpcpp-nativecpu-pi-reviewers -clang/test/Driver/sycl-native-cpu*.cpp @intel/dpcpp-nativecpu-pi-reviewers -sycl/**/native_cpu/ @intel/dpcpp-nativecpu-pi-reviewers -sycl/doc/design/SYCLNativeCPU.md 
@intel/dpcpp-nativecpu-pi-reviewers -sycl/include/sycl/detail/native_cpu.hpp @intel/dpcpp-nativecpu-pi-reviewers -libdevice/nativecpu* @intel/dpcpp-nativecpu-pi-reviewers +llvm/**/*SYCLNativeCPU* @intel/dpcpp-nativecpu-reviewers +clang/include/clang/Basic/SYCLNativeCPUHelpers.h @intel/dpcpp-nativecpu-reviewers +clang/test/CodeGenSYCL/native_cpu*.cpp @intel/dpcpp-nativecpu-reviewers +clang/test/Driver/sycl-native-cpu*.cpp @intel/dpcpp-nativecpu-reviewers +sycl/**/native_cpu/ @intel/dpcpp-nativecpu-reviewers +sycl/doc/design/SYCLNativeCPU.md @intel/dpcpp-nativecpu-reviewers +sycl/include/sycl/detail/native_cpu.hpp @intel/dpcpp-nativecpu-reviewers +libdevice/nativecpu* @intel/dpcpp-nativecpu-reviewers # SYCL-Graphs extensions sycl/include/sycl/ext/oneapi/experimental/graph.hpp @intel/sycl-graphs-reviewers @@ -170,11 +195,20 @@ sycl/test-e2e/LLVMIntrinsicLowering/ @intel/dpcpp-spirv-reviewers # Sanitizer clang/lib/Driver/SanitizerArgs.cpp @intel/dpcpp-sanitizers-review libdevice/include/asan_rtl.hpp @intel/dpcpp-sanitizers-review +libdevice/include/msan_rtl.hpp @intel/dpcpp-sanitizers-review libdevice/include/sanitizer_defs.hpp @intel/dpcpp-sanitizers-review libdevice/sanitizer/ @intel/dpcpp-sanitizers-review llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h @intel/dpcpp-sanitizers-review llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h @intel/dpcpp-sanitizers-review llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerOptions.h @intel/dpcpp-sanitizers-review +llvm/include/llvm/Transforms/Instrumentation/MemorySanitizer.h @intel/dpcpp-sanitizers-review +llvm/include/llvm/Transforms/Instrumentation/ThreadSanitizer.h @intel/dpcpp-sanitizers-review llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @intel/dpcpp-sanitizers-review +llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @intel/dpcpp-sanitizers-review +llvm/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @intel/dpcpp-sanitizers-review 
llvm/test/Instrumentation/AddressSanitizer/ @intel/dpcpp-sanitizers-review +llvm/test/Instrumentation/MemorySanitizer/ @intel/dpcpp-sanitizers-review +llvm/test/Instrumentation/ThreadSanitizer/ @intel/dpcpp-sanitizers-review sycl/test-e2e/AddressSanitizer/ @intel/dpcpp-sanitizers-review +sycl/test-e2e/MemorySanitizer/ @intel/dpcpp-sanitizers-review +sycl/test-e2e/ThreadSanitizer/ @intel/dpcpp-sanitizers-review diff --git a/.github/new-issues-labeler.yml b/.github/new-issues-labeler.yml index a5933d7fc9b37..ee7506c1366ef 100644 --- a/.github/new-issues-labeler.yml +++ b/.github/new-issues-labeler.yml @@ -27,3 +27,12 @@ 'bolt': - '/\bbolt(?!\-)\b/i' + +'infra:commit-access-request': + - '/Request Commit Access/' + +'false-positive': + - '\bfalse[- ]positive\b' + +'false-negative': + - '\bfalse[- ]negative\b' diff --git a/.github/new-prs-labeler.yml b/.github/new-prs-labeler.yml index cef4782331510..566308bb3df8d 100644 --- a/.github/new-prs-labeler.yml +++ b/.github/new-prs-labeler.yml @@ -587,6 +587,12 @@ llvm:ir: - llvm/docs/LangRef.rst - llvm/unittests/IR/** +llvm:SandboxIR: + - llvm/lib/SandboxIR/** + - llvm/include/llvm/SandboxIR/** + - llvm/docs/SandboxIR.md + - llvm/unittests/SandboxIR/** + llvm:analysis: - llvm/lib/Analysis/** - llvm/include/llvm/Analysis/** @@ -605,6 +611,14 @@ llvm:transforms: - llvm/test/Transforms/** - llvm/unittests/Transforms/** +llvm:instcombine: + - llvm/lib/Analysis/InstructionSimplify.cpp + - llvm/lib/Transforms/InstCombine/** + - llvm/include/llvm/Transforms/InstCombine/ + - llvm/include/llvm/Analysis/InstructionSimplify.h + - llvm/test/Transforms/InstCombine/** + - llvm/test/Transforms/InstSimplify/** + clangd: - clang-tools-extra/clangd/** @@ -647,6 +661,11 @@ backend:DirectX: backend:SPIR-V: - clang/lib/Driver/ToolChains/SPIRV.* + - clang/lib/Sema/SemaSPIRV.cpp + - clang/include/clang/Sema/SemaSPIRV.h + - clang/include/clang/Basic/BuiltinsSPIRV.td + - clang/test/CodeGenSPIRV/** + - clang/test/SemaSPIRV/** - 
llvm/lib/Target/SPIRV/** - llvm/test/CodeGen/SPIRV/** - llvm/test/Frontend/HLSL/** @@ -733,6 +752,12 @@ backend:RISC-V: - llvm/**/*riscv* - llvm/**/*RISCV* +backend:Xtensa: + - clang/**/*xtensa* + - clang/**/*Xtensa* + - llvm/**/*xtensa* + - llvm/**/*Xtensa* + lld:coff: - lld/**/COFF/** - lld/Common/** diff --git a/.github/workflows/bandit.yml b/.github/workflows/bandit.yml new file mode 100644 index 0000000000000..5686ee37b3fa5 --- /dev/null +++ b/.github/workflows/bandit.yml @@ -0,0 +1,32 @@ +name: Scan Python code with Bandit + +on: + workflow_dispatch: + schedule: + - cron: '0 3 * * *' + + +permissions: + contents: read + +jobs: + bandit: + name: Bandit + runs-on: ubuntu-latest + permissions: + security-events: write + + steps: + - name: Clone the git repo + uses: actions/checkout@v4 + + - name: Install Bandit + run: pip install bandit bandit-sarif-formatter + + - name: Run Bandit + run: | + bandit -c devops/bandit.config -r . --exit-zero -f sarif -o bandit_results.sarif + + - uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: bandit_results.sarif diff --git a/.github/workflows/build-ci-container-windows.yml b/.github/workflows/build-ci-container-windows.yml new file mode 100644 index 0000000000000..bba34066a97cd --- /dev/null +++ b/.github/workflows/build-ci-container-windows.yml @@ -0,0 +1,75 @@ +name: Build Windows CI Container + +permissions: + contents: read + +on: + push: + branches: + - main + paths: + - .github/workflows/build-ci-container-windows.yml + - '.github/workflows/containers/github-action-ci-windows/**' + pull_request: + branches: + - main + paths: + - .github/workflows/build-ci-container-windows.yml + - '.github/workflows/containers/github-action-ci-windows/**' + +jobs: + build-ci-container-windows: + if: github.repository_owner == 'llvm' + runs-on: windows-2019 + outputs: + container-name: ${{ steps.vars.outputs.container-name }} + container-name-tag: ${{ steps.vars.outputs.container-name-tag }} + container-filename: ${{ 
steps.vars.outputs.container-filename }} + steps: + - name: Checkout LLVM + uses: actions/checkout@v4 + with: + sparse-checkout: .github/workflows/containers/github-action-ci-windows + - name: Write Variables + id: vars + run: | + $tag = [int64](Get-Date -UFormat %s) + $container_name="ghcr.io/$env:GITHUB_REPOSITORY_OWNER/ci-windows-2019" + echo "container-name=${container_name}" >> $env:GITHUB_OUTPUT + echo "container-name-tag=${container_name}:${tag}" >> $env:GITHUB_OUTPUT + echo "container-filename=ci-windows-${tag}.tar" >> $env:GITHUB_OUTPUT + - name: Build Container + working-directory: .github/workflows/containers/github-action-ci-windows + run: | + docker build -t ${{ steps.vars.outputs.container-name-tag }} . + - name: Save container image + run: | + docker save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} + - name: Upload container image + uses: actions/upload-artifact@v4 + with: + name: container + path: ${{ steps.vars.outputs.container-filename }} + retention-days: 14 + + push-ci-container: + if: github.event_name == 'push' + needs: + - build-ci-container-windows + permissions: + packages: write + runs-on: windows-2019 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Download container + uses: actions/download-artifact@v4 + with: + name: container + - name: Push Container + run: | + docker load -i ${{ needs.build-ci-container-windows.outputs.container-filename }} + docker tag ${{ needs.build-ci-container-windows.outputs.container-name-tag }} ${{ needs.build-ci-container-windows.outputs.container-name }}:latest + docker login -u ${{ github.actor }} -p $env:GITHUB_TOKEN ghcr.io + docker push ${{ needs.build-ci-container-windows.outputs.container-name-tag }} + docker push ${{ needs.build-ci-container-windows.outputs.container-name }}:latest diff --git a/.github/workflows/build-ci-container.yml b/.github/workflows/build-ci-container.yml index 28fc7de2ee065..8a81d47186469 100644 --- 
a/.github/workflows/build-ci-container.yml +++ b/.github/workflows/build-ci-container.yml @@ -18,95 +18,84 @@ on: - '.github/workflows/containers/github-action-ci/**' jobs: - # TODO(boomanaiden154): Switch this back to a single stage build when we can - # run this on the self-hosted runners and don't have to do it this way to - # avoid timeouts. - build-ci-container-stage1: + build-ci-container: if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest + runs-on: depot-ubuntu-22.04-16 + outputs: + container-name: ${{ steps.vars.outputs.container-name }} + container-name-agent: ${{ steps.vars.outputs.container-name-agent }} + container-name-tag: ${{ steps.vars.outputs.container-name-tag }} + container-name-agent-tag: ${{ steps.vars.outputs.container-name-agent-tag }} + container-filename: ${{ steps.vars.outputs.container-filename }} + container-agent-filename: ${{ steps.vars.outputs.container-agent-filename }} steps: - name: Checkout LLVM uses: actions/checkout@v4 with: sparse-checkout: .github/workflows/containers/github-action-ci/ - - name: Change podman Root Direcotry - run: | - mkdir -p ~/.config/containers - sudo mkdir -p /mnt/podman - sudo chown `whoami`:`whoami` /mnt/podman - cp ./.github/workflows/containers/github-action-ci/storage.conf ~/.config/containers/storage.conf - podman info - - name: Build container stage1 - working-directory: ./.github/workflows/containers/github-action-ci/ - run: | - podman build -t stage1-toolchain --target stage1-toolchain -f stage1.Dockerfile . 
- - name: Save container image - run: | - podman save stage1-toolchain > stage1-toolchain.tar - - name: Upload container image - uses: actions/upload-artifact@v4 - with: - name: stage1-toolchain - path: stage1-toolchain.tar - retention-days: 1 - build-ci-container-stage2: - if: github.repository_owner == 'llvm' - runs-on: ubuntu-latest - needs: build-ci-container-stage1 - permissions: - packages: write - steps: - name: Write Variables id: vars run: | tag=`date +%s` container_name="ghcr.io/$GITHUB_REPOSITORY_OWNER/ci-ubuntu-22.04" echo "container-name=$container_name" >> $GITHUB_OUTPUT + echo "container-name-agent=$container_name-agent" >> $GITHUB_OUTPUT echo "container-name-tag=$container_name:$tag" >> $GITHUB_OUTPUT - - - name: Checkout LLVM - uses: actions/checkout@v4 - with: - sparse-checkout: .github/workflows/containers/github-action-ci/ - - - name: Change podman Root Direcotry + echo "container-name-agent-tag=$container_name-agent:$tag" >> $GITHUB_OUTPUT + echo "container-filename=$(echo $container_name:$tag | sed -e 's/\//-/g' -e 's/:/-/g').tar" >> $GITHUB_OUTPUT + echo "container-agent-filename=$(echo $container_name-agent:$tag | sed -e 's/\//-/g' -e 's/:/-/g').tar" >> $GITHUB_OUTPUT + - name: Build container + working-directory: ./.github/workflows/containers/github-action-ci/ run: | - mkdir -p ~/.config/containers - sudo mkdir -p /mnt/podman - sudo chown `whoami`:`whoami` /mnt/podman - cp ./.github/workflows/containers/github-action-ci/storage.conf ~/.config/containers/storage.conf - podman info + podman build --target ci-container -t ${{ steps.vars.outputs.container-name-tag }} . + podman build --target ci-container-agent -t ${{ steps.vars.outputs.container-name-agent-tag }} . - # Download the container image into /mnt/podman rather than - # $GITHUB_WORKSPACE to avoid space limitations on the default drive - # and use the permissions setup for /mnt/podman. 
- - name: Download stage1-toolchain - uses: actions/download-artifact@v4 - with: - name: stage1-toolchain - path: /mnt/podman - - - name: Load stage1-toolchain + # Save the container so we have it in case the push fails. This also + # allows us to separate the push step into a different job so we can + # maintain minimal permissions while building the container. + - name: Save container image run: | - podman load -i /mnt/podman/stage1-toolchain.tar + podman save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} + podman save ${{ steps.vars.outputs.container-name-agent-tag }} > ${{ steps.vars.outputs.container-agent-filename }} - - name: Build Container - working-directory: ./.github/workflows/containers/github-action-ci/ - run: | - podman build -t ${{ steps.vars.outputs.container-name-tag }} -f stage2.Dockerfile . - podman tag ${{ steps.vars.outputs.container-name-tag }} ${{ steps.vars.outputs.container-name }}:latest + - name: Upload container image + uses: actions/upload-artifact@v4 + with: + name: container + path: "*.tar" + retention-days: 14 - name: Test Container run: | - for image in ${{ steps.vars.outputs.container-name-tag }} ${{ steps.vars.outputs.container-name }}; do - podman run --rm -it $image /usr/bin/bash -x -c 'printf '\''#include \nint main(int argc, char **argv) { std::cout << "Hello\\n"; }'\'' | clang++ -x c++ - && ./a.out | grep Hello' + for image in ${{ steps.vars.outputs.container-name-tag }}; do + # Use --pull=never to ensure we are testing the just built image. 
+ podman run --pull=never --rm -it $image /usr/bin/bash -x -c 'cd $HOME && printf '\''#include \nint main(int argc, char **argv) { std::cout << "Hello\\n"; }'\'' | clang++ -x c++ - && ./a.out | grep Hello' done + push-ci-container: + if: github.event_name == 'push' + needs: + - build-ci-container + permissions: + packages: write + runs-on: ubuntu-24.04 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Download container + uses: actions/download-artifact@v4 + with: + name: container + - name: Push Container - if: github.event_name == 'push' - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | + podman load -i ${{ needs.build-ci-container.outputs.container-filename }} + podman tag ${{ needs.build-ci-container.outputs.container-name-tag }} ${{ needs.build-ci-container.outputs.container-name }}:latest podman login -u ${{ github.actor }} -p $GITHUB_TOKEN ghcr.io - podman push ${{ steps.vars.outputs.container-name-tag }} - podman push ${{ steps.vars.outputs.container-name }}:latest + podman push ${{ needs.build-ci-container.outputs.container-name-tag }} + podman push ${{ needs.build-ci-container.outputs.container-name }}:latest + + podman load -i ${{ needs.build-ci-container.outputs.container-agent-filename }} + podman tag ${{ needs.build-ci-container.outputs.container-name-agent-tag }} ${{ needs.build-ci-container.outputs.container-name-agent }}:latest + podman push ${{ needs.build-ci-container.outputs.container-name-agent-tag }} + podman push ${{ needs.build-ci-container.outputs.container-name-agent }}:latest diff --git a/.github/workflows/build-metrics-container.yml b/.github/workflows/build-metrics-container.yml new file mode 100644 index 0000000000000..751ab679411dc --- /dev/null +++ b/.github/workflows/build-metrics-container.yml @@ -0,0 +1,78 @@ +name: Build Metrics Container + +permissions: + contents: read + +on: + push: + branches: + - main + paths: + - .github/workflows/build-metrics-container.yml + - '.ci/metrics/**' + pull_request: + 
branches: + - main + paths: + - .github/workflows/build-metrics-container.yml + - '.ci/metrics/**' + +jobs: + build-metrics-container: + if: github.repository_owner == 'llvm' + runs-on: ubuntu-latest + outputs: + container-name: ${{ steps.vars.outputs.container-name }} + container-name-tag: ${{ steps.vars.outputs.container-name-tag }} + container-filename: ${{ steps.vars.outputs.container-filename }} + steps: + - name: Checkout LLVM + uses: actions/checkout@v4 + with: + sparse-checkout: .ci/metrics/ + - name: Write Variables + id: vars + run: | + tag=`date +%s` + container_name="ghcr.io/$GITHUB_REPOSITORY_OWNER/metrics" + echo "container-name=$container_name" >> $GITHUB_OUTPUT + echo "container-name-tag=$container_name:$tag" >> $GITHUB_OUTPUT + echo "container-filename=$(echo $container_name:$tag | sed -e 's/\//-/g' -e 's/:/-/g').tar" >> $GITHUB_OUTPUT + - name: Build Container + working-directory: ./.ci/metrics + run: | + podman build -t ${{ steps.vars.outputs.container-name-tag }} -f Dockerfile . + # Save the container so we have it in case the push fails. This also + # allows us to separate the push step into a different job so we can + # maintain minimal permissions while building the container. 
+ - name: Save Container Image + run: | + podman save ${{ steps.vars.outputs.container-name-tag }} > ${{ steps.vars.outputs.container-filename }} + - name: Upload Container Image + uses: actions/upload-artifact@v4 + with: + name: container + path: ${{ steps.vars.outputs.container-filename }} + retention-days: 14 + + push-metrics-container: + if: github.event_name == 'push' + needs: + - build-metrics-container + permissions: + packages: write + runs-on: ubuntu-24.04 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Download Container + uses: actions/download-artifact@v4 + with: + name: container + - name: Push Container + run: | + podman load -i ${{ needs.build-metrics-container.outputs.container-filename }} + podman tag ${{ needs.build-metrics-container.outputs.container-name-tag }} ${{ needs.build-metrics-container.outputs.container-name }}:latest + podman login -u ${{ github.actor }} -p $GITHUB_TOKEN ghcr.io + podman push ${{ needs.build-metrics-container.outputs.container-name-tag }} + podman push ${{ needs.build-metrics-container.outputs.container-name }}:latest diff --git a/.github/workflows/commit-access-review.py b/.github/workflows/commit-access-review.py index 8ea9b1fcc2fb0..4f539fe98004a 100644 --- a/.github/workflows/commit-access-review.py +++ b/.github/workflows/commit-access-review.py @@ -62,93 +62,56 @@ def __repr__(self): ) -def run_graphql_query( - query: str, variables: dict, token: str, retry: bool = True -) -> dict: - """ - This function submits a graphql query and returns the results as a - dictionary. 
- """ - s = requests.Session() - retries = requests.adapters.Retry(total=8, backoff_factor=2, status_forcelist=[504]) - s.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries)) - - headers = { - "Authorization": "bearer {}".format(token), - # See - # https://github.blog/2021-11-16-graphql-global-id-migration-update/ - "X-Github-Next-Global-ID": "1", - } - request = s.post( - url="https://api.github.com/graphql", - json={"query": query, "variables": variables}, - headers=headers, - ) - - rate_limit = request.headers.get("X-RateLimit-Remaining") - print(rate_limit) - if rate_limit and int(rate_limit) < 10: - reset_time = int(request.headers["X-RateLimit-Reset"]) - while reset_time - int(time.time()) > 0: - time.sleep(60) - print( - "Waiting until rate limit reset", - reset_time - int(time.time()), - "seconds remaining", - ) - - if request.status_code == 200: - if "data" not in request.json(): - print(request.json()) - sys.exit(1) - return request.json()["data"] - elif retry: - return run_graphql_query(query, variables, token, False) - else: - raise Exception( - "Failed to run graphql query\nquery: {}\nerror: {}".format( - query, request.json() - ) - ) - - -def check_manual_requests(start_date: datetime.datetime, token: str) -> list[str]: +def check_manual_requests( + gh: github.Github, start_date: datetime.datetime +) -> list[str]: """ Return a list of users who have been asked since ``start_date`` if they - want to keep their commit access. + want to keep their commit access or if they have applied for commit + access since ``start_date`` """ + query = """ - query ($query: String!) { - search(query: $query, type: ISSUE, first: 100) { + query ($query: String!, $after: String) { + search(query: $query, type: ISSUE, first: 100, after: $after) { nodes { ... 
on Issue { - body - comments (first: 100) { - nodes { - author { - login - } - } + author { + login } + body } } + pageInfo { + hasNextPage + endCursor + } } } """ formatted_start_date = start_date.strftime("%Y-%m-%dT%H:%M:%S") variables = { - "query": f"type:issue created:>{formatted_start_date} org:llvm repo:llvm-project label:infrastructure:commit-access" + "query": f"type:issue created:>{formatted_start_date} org:llvm repo:llvm-project label:infra:commit-access,infra:commit-access-request" } - data = run_graphql_query(query, variables, token) + has_next_page = True users = [] - for issue in data["search"]["nodes"]: - users.extend([user[1:] for user in re.findall("@[^ ,\n]+", issue["body"])]) - + while has_next_page: + res_header, res_data = gh._Github__requester.graphql_query( + query=query, variables=variables + ) + data = res_data["data"] + for issue in data["search"]["nodes"]: + users.extend([user[1:] for user in re.findall("@[^ ,\n]+", issue["body"])]) + if issue["author"]: + users.append(issue["author"]["login"]) + has_next_page = data["search"]["pageInfo"]["hasNextPage"] + if has_next_page: + variables["after"] = data["search"]["pageInfo"]["endCursor"] return users -def get_num_commits(user: str, start_date: datetime.datetime, token: str) -> int: +def get_num_commits(gh: github.Github, user: str, start_date: datetime.datetime) -> int: """ Get number of commits that ``user`` has been made since ``start_date`. 
""" @@ -166,7 +129,10 @@ def get_num_commits(user: str, start_date: datetime.datetime, token: str) -> int } """ - data = run_graphql_query(user_query, variables, token) + res_header, res_data = gh._Github__requester.graphql_query( + query=user_query, variables=variables + ) + data = res_data["data"] variables["user_id"] = data["user"]["id"] query = """ @@ -193,7 +159,10 @@ def get_num_commits(user: str, start_date: datetime.datetime, token: str) -> int } """ count = 0 - data = run_graphql_query(query, variables, token) + res_header, res_data = gh._Github__requester.graphql_query( + query=query, variables=variables + ) + data = res_data["data"] for repo in data["organization"]["teams"]["nodes"][0]["repositories"]["nodes"]: count += int(repo["ref"]["target"]["history"]["totalCount"]) if count >= User.THRESHOLD: @@ -202,7 +171,7 @@ def get_num_commits(user: str, start_date: datetime.datetime, token: str) -> int def is_new_committer_query_repo( - user: str, start_date: datetime.datetime, token: str + gh: github.Github, user: str, start_date: datetime.datetime ) -> bool: """ Determine if ``user`` is a new committer. 
A new committer can keep their @@ -220,7 +189,10 @@ def is_new_committer_query_repo( } """ - data = run_graphql_query(user_query, variables, token) + res_header, res_data = gh._Github__requester.graphql_query( + query=user_query, variables=variables + ) + data = res_data["data"] variables["owner"] = "llvm" variables["user_id"] = data["user"]["id"] variables["start_date"] = start_date.strftime("%Y-%m-%dT%H:%M:%S") @@ -245,7 +217,10 @@ def is_new_committer_query_repo( } """ - data = run_graphql_query(query, variables, token) + res_header, res_data = gh._Github__requester.graphql_query( + query=query, variables=variables + ) + data = res_data["data"] repo = data["organization"]["repository"] commits = repo["ref"]["target"]["history"]["nodes"] if len(commits) == 0: @@ -256,18 +231,22 @@ def is_new_committer_query_repo( return True -def is_new_committer(user: str, start_date: datetime.datetime, token: str) -> bool: +def is_new_committer( + gh: github.Github, user: str, start_date: datetime.datetime +) -> bool: """ Wrapper around is_new_commiter_query_repo to handle exceptions. """ try: - return is_new_committer_query_repo(user, start_date, token) + return is_new_committer_query_repo(gh, user, start_date) except: pass return True -def get_review_count(user: str, start_date: datetime.datetime, token: str) -> int: +def get_review_count( + gh: github.Github, user: str, start_date: datetime.datetime +) -> int: """ Return the number of reviews that ``user`` has done since ``start_date``. 
""" @@ -286,11 +265,14 @@ def get_review_count(user: str, start_date: datetime.datetime, token: str) -> in "query": f"type:pr commenter:{user} -author:{user} merged:>{formatted_start_date} org:llvm", } - data = run_graphql_query(query, variables, token) + res_header, res_data = gh._Github__requester.graphql_query( + query=query, variables=variables + ) + data = res_data["data"] return int(data["search"]["issueCount"]) -def count_prs(triage_list: dict, start_date: datetime.datetime, token: str): +def count_prs(gh: github.Github, triage_list: dict, start_date: datetime.datetime): """ Fetch all the merged PRs for the project since ``start_date`` and update ``triage_list`` with the number of PRs merged for each user. @@ -329,7 +311,10 @@ def count_prs(triage_list: dict, start_date: datetime.datetime, token: str): has_next_page = True while has_next_page: print(variables) - data = run_graphql_query(query, variables, token) + res_header, res_data = gh._Github__requester.graphql_query( + query=query, variables=variables + ) + data = res_data["data"] for pr in data["search"]["nodes"]: # Users can be None if the user has been deleted. if not pr["author"]: @@ -365,14 +350,14 @@ def main(): print("Start:", len(triage_list), "triagers") # Step 0 Check if users have requested commit access in the last year. 
- for user in check_manual_requests(one_year_ago, token): + for user in check_manual_requests(gh, one_year_ago): if user in triage_list: print(user, "requested commit access in the last year.") del triage_list[user] print("After Request Check:", len(triage_list), "triagers") # Step 1 count all PRs authored or merged - count_prs(triage_list, one_year_ago, token) + count_prs(gh, triage_list, one_year_ago) print("After PRs:", len(triage_list), "triagers") @@ -381,7 +366,7 @@ def main(): # Step 2 check for reviews for user in list(triage_list.keys()): - review_count = get_review_count(user, one_year_ago, token) + review_count = get_review_count(gh, user, one_year_ago) triage_list[user].add_reviewed(review_count) print("After Reviews:", len(triage_list), "triagers") @@ -391,7 +376,7 @@ def main(): # Step 3 check for number of commits for user in list(triage_list.keys()): - num_commits = get_num_commits(user, one_year_ago, token) + num_commits = get_num_commits(gh, user, one_year_ago) # Override the total number of commits to not double count commits and # authored PRs. triage_list[user].set_authored(num_commits) @@ -401,7 +386,7 @@ def main(): # Step 4 check for new committers for user in list(triage_list.keys()): print("Checking", user) - if is_new_committer(user, one_year_ago, token): + if is_new_committer(gh, user, one_year_ago): print("Removing new committer: ", user) del triage_list[user] diff --git a/.github/workflows/containers/github-action-ci-windows/Dockerfile b/.github/workflows/containers/github-action-ci-windows/Dockerfile new file mode 100644 index 0000000000000..9a1fab694c9df --- /dev/null +++ b/.github/workflows/containers/github-action-ci-windows/Dockerfile @@ -0,0 +1,118 @@ +# Agent image for LLVM org cluster. +# .net 4.8 is required by chocolately package manager. +FROM mcr.microsoft.com/dotnet/framework/sdk:4.8-windowsservercore-ltsc2019 + +# Restore the default Windows shell for correct batch processing. 
+SHELL ["cmd", "/S", "/C"] + +# Download the Build Tools bootstrapper. +ADD https://aka.ms/vs/16/release/vs_buildtools.exe /TEMP/vs_buildtools.exe + +RUN powershell -Command Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + +# Download channel for fixed install. +ARG CHANNEL_URL=https://aka.ms/vs/16/release/channel +ADD ${CHANNEL_URL} /TEMP/VisualStudio.chman + +# Install Build Tools with C++ workload. +# - Documentation for docker installation +# https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container?view=vs-2019 +# - Documentation on workloads +# https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-build-tools?view=vs-2019#c-build-tools +# - Documentation on flags +# https://docs.microsoft.com/en-us/visualstudio/install/use-command-line-parameters-to-install-visual-studio?view=vs-2019 +RUN /TEMP/vs_buildtools.exe --quiet --wait --norestart --nocache \ + --channelUri C:\TEMP\VisualStudio.chman \ + --installChannelUri C:\TEMP\VisualStudio.chman \ + --installPath C:\BuildTools \ + --add Microsoft.VisualStudio.Workload.VCTools \ + --add Microsoft.VisualStudio.Component.VC.ATL \ + --includeRecommended \ + || IF "%ERRORLEVEL%"=="3010" EXIT 0 + +# Register DIA dll (Debug Interface Access) so it can be used to symbolize +# the stack traces. Register dll for 32 and 64 bit. +# see https://developercommunity.visualstudio.com/content/problem/290674/msdia140dll-is-not-registered-on-vs2017-hosts.html + +RUN regsvr32 /S "C:\BuildTools\DIA SDK\bin\amd64\msdia140.dll" & \ + regsvr32 /S "C:\BuildTools\DIA SDK\bin\msdia140.dll" + +# install tools as described in https://llvm.org/docs/GettingStartedVS.html +# and a few more that were not documented... 
+RUN choco install -y ninja git +# Pin an older version of Python; the current Python 3.10 fails when +# doing "pip install" for the other dependencies, as it fails to find libxml +# while compiling some package. +RUN choco install -y python3 --version 3.9.7 + +# ActivePerl is currently not installable via Chocolatey, see +# http://disq.us/p/2ipditb. Install StrawberryPerl instead. Unfortunately, +# StrawberryPerl not only installs Perl, but also a redundant C/C++ compiler +# toolchain, and a copy of pkg-config which can cause misdetections for other +# built products, see +# https://github.com/StrawberryPerl/Perl-Dist-Strawberry/issues/11 for further +# details. Remove the redundant and unnecessary parts of the StrawberryPerl +# install. +RUN choco install -y strawberryperl && \ + rmdir /q /s c:\strawberry\c && \ + del /q c:\strawberry\perl\bin\pkg-config* + +# libcxx requires clang(-cl) to be available +RUN choco install -y sccache llvm +RUN pip install psutil + +RUN curl -LO https://github.com/mstorsjo/llvm-mingw/releases/download/20230320/llvm-mingw-20230320-ucrt-x86_64.zip && \ + powershell Expand-Archive llvm-mingw-*-ucrt-x86_64.zip -DestinationPath . && \ + del llvm-mingw-*-ucrt-x86_64.zip && \ + ren llvm-mingw-20230320-ucrt-x86_64 llvm-mingw + +# configure Python encoding +ENV PYTHONIOENCODING=UTF-8 + +# update the path variable +# C:\Program Files\Git\usr\bin contains a usable bash and other unix tools. +# C:\llvm-mingw\bin contains Clang configured for mingw targets and +# corresponding sysroots. Both the 'llvm' package (with Clang defaulting +# to MSVC targets) and this directory contains executables named +# 'clang.exe' - add this last to let the other one have precedence. +# To use these compilers, use the triple prefixed form, e.g. +# x86_64-w64-mingw32-clang. 
+# C:\buildtools and SDK paths are ones that are set by c:\BuildTools\Common7\Tools\VsDevCmd.bat -arch=amd64 -host_arch=amd64 +RUN powershell -Command \ + [System.Environment]::SetEnvironmentVariable('PATH', \ + [System.Environment]::GetEnvironmentVariable('PATH', 'machine') + ';C:\Program Files\Git\usr\bin;C:\llvm-mingw\bin' \ + + ';C:\BuildTools\Common7\IDE\' \ + + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\TeamFoundation\Team Explorer' \ + + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin' \ + + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja' \ + + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\TeamFoundation\Team Explorer' \ + + ';C:\BuildTools\Common7\IDE\CommonExtensions\Microsoft\TestWindow' \ + + ';C:\BuildTools\Common7\IDE\VC\VCPackages' \ + + ';C:\BuildTools\Common7\Tools\' \ + + ';C:\BuildTools\Common7\Tools\devinit' \ + + ';C:\BuildTools\MSBuild\Current\Bin' \ + + ';C:\BuildTools\MSBuild\Current\bin\Roslyn' \ + + ';C:\BuildTools\VC\Tools\MSVC\14.29.30133\bin\HostX64\x64' \ + + ';C:\Program Files (x86)\Microsoft SDKs\Windows\v10.0A\bin\NETFX 4.8 Tools\x64\' \ + + ';C:\Program Files (x86)\Windows Kits\10\bin\10.0.19041.0\x64' \ + + ';C:\Program Files (x86)\Windows Kits\10\bin\x64' \ + + ';C:\Windows\Microsoft.NET\Framework64\v4.0.30319' \ + ,'machine') + +# support long file names during git checkout +RUN git config --system core.longpaths true & \ + git config --global core.autocrlf false + +# handle for debugging of files being locked by some processes.
+RUN choco install -y handle + +RUN pip3 install pywin32 buildbot-worker==2.8.4 + +ARG RUNNER_VERSION=2.322.0 +ENV RUNNER_VERSION=$RUNNER_VERSION + +RUN powershell -Command \ + Invoke-WebRequest -Uri https://github.com/actions/runner/releases/download/v${env:RUNNER_VERSION}/actions-runner-win-x64-${env:RUNNER_VERSION}.zip -OutFile actions-runner-win.zip ; \ + Add-Type -AssemblyName System.IO.Compression.FileSystem ; \ + [System.IO.Compression.ZipFile]::ExtractToDirectory('actions-runner-win.zip', $PWD) ;\ + rm actions-runner-win.zip diff --git a/.github/workflows/containers/github-action-ci/Dockerfile b/.github/workflows/containers/github-action-ci/Dockerfile new file mode 100644 index 0000000000000..377b8f14402ee --- /dev/null +++ b/.github/workflows/containers/github-action-ci/Dockerfile @@ -0,0 +1,106 @@ +FROM docker.io/library/ubuntu:22.04 as base +ENV LLVM_SYSROOT=/opt/llvm + +FROM base as stage1-toolchain +ENV LLVM_VERSION=19.1.5 + +RUN apt-get update && \ + apt-get install -y \ + wget \ + gcc \ + g++ \ + cmake \ + ninja-build \ + python3 \ + git \ + curl \ + zlib1g-dev + +RUN curl -O -L https://github.com/llvm/llvm-project/archive/refs/tags/llvmorg-$LLVM_VERSION.tar.gz && tar -xf llvmorg-$LLVM_VERSION.tar.gz + +WORKDIR /llvm-project-llvmorg-$LLVM_VERSION + +# Patch to enable better PGO profile data. +# TODO: Remove this for llvm 20 +ADD https://github.com/llvm/llvm-project/commit/738250989ce516f02f809bdfde474a039c77e81f.patch . 
+ +RUN patch -p1 < 738250989ce516f02f809bdfde474a039c77e81f.patch + +RUN cmake -B ./build -G Ninja ./llvm \ + -C ./clang/cmake/caches/BOLT-PGO.cmake \ + -DBOOTSTRAP_LLVM_ENABLE_LLD=ON \ + -DBOOTSTRAP_BOOTSTRAP_LLVM_ENABLE_LLD=ON \ + -DPGO_INSTRUMENT_LTO=Thin \ + -DLLVM_ENABLE_RUNTIMES="compiler-rt" \ + -DCMAKE_INSTALL_PREFIX="$LLVM_SYSROOT" \ + -DLLVM_ENABLE_PROJECTS="bolt;clang;lld;clang-tools-extra" \ + -DLLVM_DISTRIBUTION_COMPONENTS="lld;compiler-rt;clang-format;scan-build" \ + -DCLANG_DEFAULT_LINKER="lld" + +RUN ninja -C ./build stage2-clang-bolt stage2-install-distribution && ninja -C ./build install-distribution + +FROM base as ci-container + +COPY --from=stage1-toolchain $LLVM_SYSROOT $LLVM_SYSROOT + +# Need to install curl for hendrikmuhs/ccache-action +# Need nodejs for some of the GitHub actions. +# Need perl-modules for clang analyzer tests. +# Need git for SPIRV-Tools tests. +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + binutils \ + cmake \ + curl \ + git \ + libstdc++-11-dev \ + ninja-build \ + nodejs \ + perl-modules \ + python3-psutil \ + sudo \ + + # These are needed by the premerge pipeline. Pip is used to install + # dependent python packages and ccache is used for build caching. File and + # tzdata are used for tests. + python3-pip \ + ccache \ + file \ + tzdata + +# Install sccache as it is needed by most of the project test workflows and +# cannot be installed by the ccache action when executing as a non-root user. +# TODO(boomanaiden154): This should be switched to being installed with apt +# once we bump to Ubuntu 24.04. 
+RUN curl -L 'https://github.com/mozilla/sccache/releases/download/v0.7.6/sccache-v0.7.6-x86_64-unknown-linux-musl.tar.gz' > /tmp/sccache.tar.gz && \ + echo "2902a5e44c3342132f07b62e70cca75d9b23252922faf3b924f449808cc1ae58 /tmp/sccache.tar.gz" | sha256sum -c && \ + tar xzf /tmp/sccache.tar.gz -O --wildcards '*/sccache' > '/usr/local/bin/sccache' && \ + rm /tmp/sccache.tar.gz && \ + chmod +x /usr/local/bin/sccache + +ENV LLVM_SYSROOT=$LLVM_SYSROOT +ENV PATH=${LLVM_SYSROOT}/bin:${PATH} + +# Create a new user to avoid test failures related to a lack of expected +# permissions issues in some tests. Set the user id to 1001 as that is the +# user id that Github Actions uses to perform the checkout action. +RUN useradd gha -u 1001 -m -s /bin/bash + +# Also add the user to passwordless sudoers so that we can install software +# later on without having to rebuild the container. +RUN adduser gha sudo +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers + +USER gha +WORKDIR /home/gha + +FROM ci-container as ci-container-agent + +ENV GITHUB_RUNNER_VERSION=2.322.0 + +RUN mkdir actions-runner && \ + cd actions-runner && \ + curl -O -L https://github.com/actions/runner/releases/download/v$GITHUB_RUNNER_VERSION/actions-runner-linux-x64-$GITHUB_RUNNER_VERSION.tar.gz && \ + tar xzf ./actions-runner-linux-x64-$GITHUB_RUNNER_VERSION.tar.gz && \ + rm ./actions-runner-linux-x64-$GITHUB_RUNNER_VERSION.tar.gz + diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml new file mode 100644 index 0000000000000..83138e4a67836 --- /dev/null +++ b/.github/workflows/coverity.yml @@ -0,0 +1,83 @@ +name: Coverity +on: + workflow_dispatch: + schedule: + - cron: '0 0 * * 0' + +permissions: read-all + +jobs: + coverity: + if: github.repository == 'intel/llvm' + name: Coverity + runs-on: [Linux, build] + container: + image: ghcr.io/intel/llvm/ubuntu2404_intel_drivers:alldeps + options: -u 1001:1001 + + steps: + - uses: actions/checkout@v4 + with: + sparse-checkout: | + 
devops/actions + + - name: Register cleanup after job is finished + uses: ./devops/actions/cleanup + + - uses: ./devops/actions/cached_checkout + with: + path: src + ref: ${{ github.sha }} + cache_path: "/__w/repo_cache/" + + - name: Get coverity tool + run: | + wget https://scan.coverity.com/download/linux64 --post-data "token=${{ secrets.COVERITY_TOKEN }}&project=intel%2Fllvm" -O coverity_tool.tgz + tar -xf coverity_tool.tgz + + - name: Configure + env: + CC: gcc + CXX: g++ + CUDA_LIB_PATH: "/usr/local/cuda/lib64/stubs" + run: | + mkdir -p $GITHUB_WORKSPACE/build + cd $GITHUB_WORKSPACE/build + python3 $GITHUB_WORKSPACE/src/buildbot/configure.py -w $GITHUB_WORKSPACE \ + -s $GITHUB_WORKSPACE/src -o $GITHUB_WORKSPACE/build -t Release \ + --ci-defaults --hip --cuda \ + -DNATIVECPU_USE_OCK=Off \ + -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV + + - name: Build with coverity + run: $GITHUB_WORKSPACE/cov-analysis-linux64-*/bin/cov-build --dir cov-int cmake --build $GITHUB_WORKSPACE/build --target sycl-toolchain + + - name: Compress results + run: tar -I pigz -cf intel_llvm.tgz cov-int + + - name: Submit build + run: | + # Initialize a build. Fetch a cloud upload url. + curl -X POST \ + -d version="sycl: ${{ github.sha }}" \ + -d description="Regular build" \ + -d email=${{ secrets.COVERITY_EMAIL }} \ + -d token=${{ secrets.COVERITY_TOKEN }} \ + -d file_name="intel_llvm.tgz" \ + https://scan.coverity.com/projects/31090/builds/init \ + | tee response + + # Store response data to use in later stages. + upload_url=$(jq -r '.url' response) + build_id=$(jq -r '.build_id' response) + + # Upload the tarball to the Cloud. + curl -X PUT \ + --header 'Content-Type: application/json' \ + --upload-file $PWD/intel_llvm.tgz \ + $upload_url + + # Trigger the build on Scan. 
+ curl -X PUT \ + -d token=${{ secrets.COVERITY_TOKEN }} \ + https://scan.coverity.com/projects/31090/builds/$build_id/enqueue diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 54f82bd4e4c0b..5be65141d04b7 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -65,7 +65,7 @@ jobs: fetch-depth: 1 - name: Get subprojects that have doc changes id: docs-changed-subprojects - uses: tj-actions/changed-files@v45 + uses: step-security/changed-files@3dbe17c78367e7d60f00d78ae6781a35be47b4a1 # v45.0.1 with: files_yaml: | llvm: @@ -94,76 +94,129 @@ jobs: flang: - 'flang/docs/**' - 'flang/include/flang/Optimizer/Dialect/FIROps.td' + workflow: + - '.github/workflows/docs.yml' - name: Fetch LLVM sources (PR) if: ${{ github.event_name == 'pull_request' }} uses: actions/checkout@v4 with: fetch-depth: 1 - name: Setup Python env - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.4.0 with: python-version: '3.11' cache: 'pip' - cache-dependency-path: 'llvm/docs/requirements.txt' + cache-dependency-path: 'llvm/docs/requirements-hashed.txt' - name: Install python dependencies - run: pip install -r llvm/docs/requirements.txt + run: pip install -r llvm/docs/requirements-hashed.txt - name: Install system dependencies run: | sudo apt-get update # swig and graphviz are lldb specific dependencies sudo apt-get install -y cmake ninja-build swig graphviz libhwloc-dev + - name: Setup output folder + run: mkdir built-docs - name: Build LLVM docs - if: steps.docs-changed-subprojects.outputs.llvm_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.llvm_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B llvm-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C llvm-build docs-llvm-html docs-llvm-man + mkdir built-docs/llvm + cp -r llvm-build/docs/* built-docs/llvm/ - name: Build Clang docs - if: 
steps.docs-changed-subprojects.outputs.clang_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.clang_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B clang-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang" -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C clang-build docs-clang-html docs-clang-man + mkdir built-docs/clang + cp -r clang-build/docs/* built-docs/clang/ - name: Build clang-tools-extra docs - if: steps.docs-changed-subprojects.outputs.clang-tools-extra_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.clang-tools-extra_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B clang-tools-extra-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra" -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C clang-tools-extra-build docs-clang-tools-html docs-clang-tools-man + mkdir built-docs/clang-tools-extra + cp -r clang-tools-extra-build/docs/* built-docs/clang-tools-extra/ - name: Build LLDB docs - if: steps.docs-changed-subprojects.outputs.lldb_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.lldb_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B lldb-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;lldb" -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C lldb-build docs-lldb-html docs-lldb-man + mkdir built-docs/lldb + cp -r lldb-build/docs/* built-docs/lldb/ - name: Build libunwind docs - if: steps.docs-changed-subprojects.outputs.libunwind_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.libunwind_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B libunwind-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_RUNTIMES="libunwind" -DLLVM_ENABLE_SPHINX=ON 
./runtimes TZ=UTC ninja -C libunwind-build docs-libunwind-html + mkdir built-docs/libunwind + cp -r libunwind-build/libunwind/docs/* built-docs/libunwind - name: Build libcxx docs - if: steps.docs-changed-subprojects.outputs.libcxx_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.libcxx_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B libcxx-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_RUNTIMES="libcxxabi;libcxx;libunwind" -DLLVM_ENABLE_SPHINX=ON ./runtimes TZ=UTC ninja -C libcxx-build docs-libcxx-html + mkdir built-docs/libcxx + cp -r libcxx-build/libcxx/docs/* built-docs/libcxx/ - name: Build libc docs - if: steps.docs-changed-subprojects.outputs.libc_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.libc_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B libc-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_RUNTIMES="libc" -DLLVM_ENABLE_SPHINX=ON ./runtimes TZ=UTC ninja -C libc-build docs-libc-html + mkdir built-docs/libc + cp -r libc-build/libc/docs/* built-docs/libc/ - name: Build LLD docs - if: steps.docs-changed-subprojects.outputs.lld_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.lld_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B lld-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="lld" -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C lld-build docs-lld-html + mkdir built-docs/lld + cp -r lld-build/docs/* built-docs/lld/ - name: Build OpenMP docs - if: steps.docs-changed-subprojects.outputs.openmp_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.openmp_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B openmp-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;openmp" 
-DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C openmp-build docs-openmp-html + mkdir built-docs/openmp + cp -r openmp-build/docs/* built-docs/openmp/ - name: Build Polly docs - if: steps.docs-changed-subprojects.outputs.polly_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.polly_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B polly-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="polly" -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C polly-build docs-polly-html docs-polly-man + mkdir built-docs/polly + cp -r polly-build/docs/* built-docs/polly/ - name: Build Flang docs - if: steps.docs-changed-subprojects.outputs.flang_any_changed == 'true' + if: | + steps.docs-changed-subprojects.outputs.flang_any_changed == 'true' || + steps.docs-changed-subprojects.outputs.workflow_any_changed == 'true' run: | cmake -B flang-build -GNinja -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;mlir;flang" -DLLVM_ENABLE_SPHINX=ON ./llvm TZ=UTC ninja -C flang-build docs-flang-html + mkdir built-docs/flang + cp -r flang-build/docs/* built-docs/flang/ + - name: Upload docs + uses: actions/upload-artifact@v4 + with: + name: docs-output + path: built-docs/ diff --git a/.github/workflows/email-check.yaml b/.github/workflows/email-check.yaml index 4d4c419595b5c..4b70bf2db2cbb 100644 --- a/.github/workflows/email-check.yaml +++ b/.github/workflows/email-check.yaml @@ -2,8 +2,9 @@ name: "Check for private emails used in PRs" on: pull_request: - types: - - opened + branches: + - sycl + - sycl-rel-** permissions: contents: read diff --git a/.github/workflows/hlsl-matrix.yaml b/.github/workflows/hlsl-matrix.yaml new file mode 100644 index 0000000000000..c63a32acd2b3e --- /dev/null +++ b/.github/workflows/hlsl-matrix.yaml @@ -0,0 +1,30 @@ +name: HLSL Tests + +permissions: + contents: read + +on: + workflow_dispatch: + pull_request: + branches: + - main + paths: + - llvm/**/DirectX/** + - 
.github/workflows/hlsl* + - clang/*HLSL*/**/* + - clang/**/*HLSL* + - llvm/**/Frontend/HLSL/**/* + +jobs: + HLSL-Tests: + strategy: + fail-fast: false + matrix: + runs-on: + - hlsl-macos + + uses: ./.github/workflows/hlsl-test-all.yaml + with: + SKU: hlsl-macos + TestTarget: check-hlsl-clang-mtl # TODO: This target changes based on SKU + LLVM-ref: ${{ github.ref }} diff --git a/.github/workflows/hlsl-test-all.yaml b/.github/workflows/hlsl-test-all.yaml new file mode 100644 index 0000000000000..93a1c6d2662d4 --- /dev/null +++ b/.github/workflows/hlsl-test-all.yaml @@ -0,0 +1,87 @@ +name: HLSL Test + +permissions: + contents: read + +on: + workflow_call: + inputs: + OffloadTest-branch: + description: 'Test Suite Branch' + required: false + default: 'main' + type: string + LLVM-ref: + description: 'LLVM Branch' + required: false + default: 'main' + type: string + SKU: + required: true + type: string + TestTarget: + required: false + default: 'check-hlsl' + type: string + +jobs: + build: + runs-on: ${{ inputs.SKU }} + steps: + - name: Checkout DXC + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: Microsoft/DirectXShaderCompiler + ref: main + path: DXC + submodules: true + - name: Checkout LLVM + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + ref: ${{ inputs.LLVM-branch }} + path: llvm-project + - name: Checkout OffloadTest + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: llvm-beanz/offload-test-suite + ref: main + path: OffloadTest + - name: Checkout Golden Images + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: llvm-beanz/offload-golden-images + ref: main + path: golden-images + - name: Setup Windows + if: runner.os == 'Windows' + uses: llvm/actions/setup-windows@main + with: + arch: amd64 + - name: Build DXC + run: | + cd DXC + mkdir build + cd build + cmake -G Ninja 
-DCMAKE_BUILD_TYPE=Release -C ${{ github.workspace }}/DXC/cmake/caches/PredefinedParams.cmake -C ${{ github.workspace }}/OffloadTest/cmake/caches/sccache.cmake -DHLSL_DISABLE_SOURCE_GENERATION=On ${{ github.workspace }}/DXC/ + ninja dxv llvm-dis + - name: Build LLVM + run: | + cd llvm-project + mkdir build + cd build + cmake -G Ninja -DDXIL_DIS=${{ github.workspace }}/DXC/build/bin/llvm-dis -DLLVM_INCLUDE_DXIL_TESTS=On -DCMAKE_BUILD_TYPE=Release -C ${{ github.workspace }}/llvm-project/clang/cmake/caches/HLSL.cmake -C ${{ github.workspace }}/OffloadTest/cmake/caches/sccache.cmake -DDXC_DIR=${{ github.workspace }}/DXC/build/bin -DLLVM_EXTERNAL_OFFLOADTEST_SOURCE_DIR=${{ github.workspace }}/OffloadTest -DLLVM_EXTERNAL_PROJECTS="OffloadTest" -DLLVM_LIT_ARGS="--xunit-xml-output=testresults.xunit.xml -v" -DGOLDENIMAGE_DIR=${{ github.workspace }}/golden-images ${{ github.workspace }}/llvm-project/llvm/ + ninja hlsl-test-depends llvm-test-depends clang-test-depends + - name: Run HLSL Tests + run: | + cd llvm-project + cd build + ninja check-llvm + ninja check-clang + ninja check-hlsl-unit + ninja ${{ inputs.TestTarget }} + - name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action/macos@170bf24d20d201b842d7a52403b73ed297e6645b # v2 + if: always() && runner.os == 'macOS' + with: + comment_mode: off + files: llvm-project/build/**/testresults.xunit.xml diff --git a/.github/workflows/libc-fullbuild-tests.yml b/.github/workflows/libc-fullbuild-tests.yml new file mode 100644 index 0000000000000..2c88da653aae4 --- /dev/null +++ b/.github/workflows/libc-fullbuild-tests.yml @@ -0,0 +1,96 @@ +# This workflow is for pre-commit testing of the LLVM-libc project. 
+name: LLVM-libc Pre-commit Fullbuild Tests +permissions: + contents: read +on: + pull_request: + branches: [ "main" ] + paths: + - 'libc/**' + - '.github/workflows/libc-fullbuild-tests.yml' + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-24.04 + ccache-variant: sccache + c_compiler: clang + cpp_compiler: clang++ + # TODO: remove ccache logic when https://github.com/hendrikmuhs/ccache-action/issues/279 is resolved. + - os: ubuntu-24.04-arm + ccache-variant: ccache + c_compiler: clang + cpp_compiler: clang++ + # TODO: add back gcc build when it is fixed + # - c_compiler: gcc + # cpp_compiler: g++ + steps: + - uses: actions/checkout@v4 + + # Libc's build is relatively small comparing with other components of LLVM. + # A fresh fullbuild takes about 190MiB of uncompressed disk space, which can + # be compressed into ~40MiB. Limiting the cache size to 1G should be enough. + # Prefer sccache as it is more modern. + # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects + # frequent small object writes. + - name: Setup ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + max-size: 1G + key: libc_fullbuild_${{ matrix.c_compiler }} + variant: ${{ matrix.ccache-variant }} + + # Notice: + # - MPFR is required by some of the mathlib tests. + # - Debian has a multilib setup, so we need to symlink the asm directory. 
+ # For more information, see https://wiki.debian.org/Multiarch/LibraryPathOverview + - name: Prepare dependencies (Ubuntu) + run: | + sudo apt-get update + sudo apt-get install -y libmpfr-dev libgmp-dev libmpc-dev ninja-build linux-libc-dev + sudo ln -sf /usr/include/$(uname -p)-linux-gnu/asm /usr/include/asm + + - name: Set reusable strings + id: strings + shell: bash + run: | + echo "build-output-dir=${{ github.workspace }}/build" >> "$GITHUB_OUTPUT" + echo "build-install-dir=${{ github.workspace }}/install" >> "$GITHUB_OUTPUT" + + # Configure libc fullbuild with scudo. + # Use MinSizeRel to reduce the size of the build. + - name: Configure CMake + run: > + cmake -B ${{ steps.strings.outputs.build-output-dir }} + -DCMAKE_CXX_COMPILER=${{ matrix.cpp_compiler }} + -DCMAKE_C_COMPILER=${{ matrix.c_compiler }} + -DCMAKE_BUILD_TYPE=MinSizeRel + -DCMAKE_C_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} + -DCMAKE_INSTALL_PREFIX=${{ steps.strings.outputs.build-install-dir }} + -DLLVM_ENABLE_RUNTIMES="libc;compiler-rt" + -DLLVM_LIBC_FULL_BUILD=ON + -DLLVM_LIBC_INCLUDE_SCUDO=ON + -DCOMPILER_RT_BUILD_SCUDO_STANDALONE_WITH_LLVM_LIBC=ON + -DCOMPILER_RT_BUILD_GWP_ASAN=OFF + -DCOMPILER_RT_SCUDO_STANDALONE_BUILD_SHARED=OFF + -G Ninja + -S ${{ github.workspace }}/runtimes + + - name: Build + run: > + cmake + --build ${{ steps.strings.outputs.build-output-dir }} + --parallel + --target install + + - name: Test + run: > + cmake + --build ${{ steps.strings.outputs.build-output-dir }} + --parallel + --target check-libc diff --git a/.github/workflows/libc-overlay-tests.yml b/.github/workflows/libc-overlay-tests.yml new file mode 100644 index 0000000000000..0a0916084b18c --- /dev/null +++ b/.github/workflows/libc-overlay-tests.yml @@ -0,0 +1,120 @@ +# This workflow is for pre-commit testing of the LLVM-libc project. 
+name: LLVM-libc Pre-commit Overlay Tests +permissions: + contents: read +on: + pull_request: + branches: [ "main" ] + paths: + - 'libc/**' + - '.github/workflows/libc-overlay-tests.yml' + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + # Set fail-fast to false to ensure that feedback is delivered for all matrix combinations. + fail-fast: false + matrix: + include: + # TODO: add linux gcc when it is fixed + - os: ubuntu-24.04 + ccache-variant: sccache + compiler: + c_compiler: clang + cpp_compiler: clang++ + # TODO: remove ccache logic when https://github.com/hendrikmuhs/ccache-action/issues/279 is resolved. + - os: ubuntu-24.04-arm + ccache-variant: ccache + compiler: + c_compiler: clang + cpp_compiler: clang++ + - os: windows-2022 + ccache-variant: sccache + compiler: + c_compiler: clang-cl + cpp_compiler: clang-cl + - os: windows-2025 + ccache-variant: sccache + compiler: + c_compiler: clang-cl + cpp_compiler: clang-cl + - os: macos-14 + ccache-variant: sccache + compiler: + c_compiler: clang + cpp_compiler: clang++ + + steps: + - uses: actions/checkout@v4 + + # Libc's build is relatively small comparing with other components of LLVM. + # A fresh linux overlay takes about 180MiB of uncompressed disk space, which can + # be compressed into ~40MiB. MacOS and Windows overlay builds are less than 10MiB + # after compression. Limiting the cache size to 1G should be enough. + # Prefer sccache as it is modern and it has a guarantee to work with MSVC. + # Do not use direct GHAC access even though it is supported by sccache. GHAC rejects + # frequent small object writes. + - name: Setup ccache + uses: hendrikmuhs/ccache-action@v1 + with: + max-size: 1G + key: libc_overlay_build_${{ matrix.os }}_${{ matrix.compiler.c_compiler }} + variant: ${{ matrix.ccache-variant }} + + # MPFR is required by some of the mathlib tests. 
+ - name: Prepare dependencies (Ubuntu) + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y libmpfr-dev libgmp-dev libmpc-dev ninja-build + + # Chocolatey is shipped with Windows runners. Windows Server 2025 recommends WinGet. + # Consider migrating to WinGet when Windows Server 2025 is available. + - name: Prepare dependencies (Windows) + if: runner.os == 'Windows' + run: | + choco install ninja + + - name: Prepare dependencies (macOS) + if: runner.os == 'macOS' + run: | + brew install ninja + + - name: Set reusable strings + id: strings + shell: bash + run: | + echo "build-output-dir=${{ github.workspace }}/build" >> "$GITHUB_OUTPUT" + + # Use MinSizeRel to reduce the size of the build. + # Notice that CMP0141=NEW and MSVC_DEBUG_INFORMATION_FORMAT=Embedded are required + # by the sccache tool. + - name: Configure CMake + run: > + cmake -B ${{ steps.strings.outputs.build-output-dir }} + -DCMAKE_CXX_COMPILER=${{ matrix.compiler.cpp_compiler }} + -DCMAKE_C_COMPILER=${{ matrix.compiler.c_compiler }} + -DCMAKE_BUILD_TYPE=MinSizeRel + -DCMAKE_C_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ matrix.ccache-variant }} + -DCMAKE_POLICY_DEFAULT_CMP0141=NEW + -DCMAKE_MSVC_DEBUG_INFORMATION_FORMAT=Embedded + -DLLVM_ENABLE_RUNTIMES=libc + -G Ninja + -S ${{ github.workspace }}/runtimes + + - name: Build + run: > + cmake + --build ${{ steps.strings.outputs.build-output-dir }} + --parallel + --config MinSizeRel + --target libc + + - name: Test + run: > + cmake + --build ${{ steps.strings.outputs.build-output-dir }} + --parallel + --target check-libc diff --git a/.github/workflows/libclang-abi-tests.yml b/.github/workflows/libclang-abi-tests.yml index 8b04d8a086db1..41b3075288d2d 100644 --- a/.github/workflows/libclang-abi-tests.yml +++ b/.github/workflows/libclang-abi-tests.yml @@ -130,7 +130,7 @@ jobs: sed -i 's/LLVM_[0-9]\+/LLVM_NOVERSION/' $lib-${{ matrix.ref }}.abi done - name: Upload ABI file - uses: 
actions/upload-artifact@v4 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 with: name: ${{ matrix.name }} path: '*${{ matrix.ref }}.abi' @@ -143,12 +143,12 @@ jobs: - abi-dump steps: - name: Download baseline - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 with: name: build-baseline path: build-baseline - name: Download latest - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 with: name: build-latest path: build-latest @@ -162,7 +162,7 @@ jobs: done - name: Upload ABI Comparison if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 with: name: compat-report-${{ github.sha }} path: compat_reports/ diff --git a/.github/workflows/libclang-python-tests.yml b/.github/workflows/libclang-python-tests.yml index 801a701724789..d8f58c5b8d1ce 100644 --- a/.github/workflows/libclang-python-tests.yml +++ b/.github/workflows/libclang-python-tests.yml @@ -37,5 +37,5 @@ jobs: projects: clang # There is an issue running on "windows-2019". # See https://github.com/llvm/llvm-project/issues/76601#issuecomment-1873049082. - os_list: '["ubuntu-latest"]' + os_list: '["ubuntu-22.04"]' python_version: ${{ matrix.python-version }} diff --git a/.github/workflows/libcxx-build-and-test.yaml b/.github/workflows/libcxx-build-and-test.yaml index 184fed2268e81..ee77e83363d37 100644 --- a/.github/workflows/libcxx-build-and-test.yaml +++ b/.github/workflows/libcxx-build-and-test.yaml @@ -33,34 +33,23 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number }} cancel-in-progress: true - -env: - # LLVM POST-BRANCH bump version - # LLVM POST-BRANCH add compiler test for ToT - 1, e.g. "Clang 17" - # LLVM RELEASE bump remove compiler ToT - 3, e.g. "Clang 15" - LLVM_HEAD_VERSION: "19" # Used compiler, update POST-BRANCH. 
- LLVM_PREVIOUS_VERSION: "18" - LLVM_OLDEST_VERSION: "17" - GCC_STABLE_VERSION: "13" - LLVM_SYMBOLIZER_PATH: "/usr/bin/llvm-symbolizer-19" - CLANG_CRASH_DIAGNOSTICS_DIR: "crash_diagnostics" - - jobs: stage1: if: github.repository_owner == 'llvm' - runs-on: libcxx-runners-8-set + runs-on: libcxx-self-hosted-linux + container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 continue-on-error: false strategy: fail-fast: false matrix: config: [ + 'frozen-cxx03-headers', 'generic-cxx03', 'generic-cxx26', 'generic-modules' ] - cc: [ 'clang-19' ] - cxx: [ 'clang++-19' ] + cc: [ 'clang-20' ] + cxx: [ 'clang++-20' ] include: - config: 'generic-gcc' cc: 'gcc-14' @@ -79,12 +68,14 @@ jobs: path: | **/test-results.xml **/*.abilist + **/CMakeConfigureLog.yaml **/CMakeError.log **/CMakeOutput.log **/crash_diagnostics/* stage2: if: github.repository_owner == 'llvm' - runs-on: libcxx-runners-8-set + runs-on: libcxx-self-hosted-linux + container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 needs: [ stage1 ] continue-on-error: false strategy: @@ -97,18 +88,18 @@ jobs: 'generic-cxx20', 'generic-cxx23' ] - cc: [ 'clang-19' ] - cxx: [ 'clang++-19' ] + cc: [ 'clang-20' ] + cxx: [ 'clang++-20' ] include: - config: 'generic-gcc-cxx11' cc: 'gcc-14' cxx: 'g++-14' - config: 'generic-cxx23' - cc: 'clang-17' - cxx: 'clang++-17' - - config: 'generic-cxx26' cc: 'clang-18' cxx: 'clang++-18' + - config: 'generic-cxx26' + cc: 'clang-19' + cxx: 'clang++-19' steps: - uses: actions/checkout@v4 - name: ${{ matrix.config }} @@ -123,6 +114,7 @@ jobs: path: | **/test-results.xml **/*.abilist + **/CMakeConfigureLog.yaml **/CMakeError.log **/CMakeOutput.log **/crash_diagnostics/* @@ -155,32 +147,30 @@ jobs: 'generic-no-rtti', 'generic-optimized-speed', 'generic-static', - # TODO Find a better place for the benchmark and bootstrapping builds to live. 
They're either very expensive - # or don't provide much value since the benchmark run results are too noisy on the bots. - 'benchmarks', 'bootstrapping-build' ] - machine: [ 'libcxx-runners-8-set' ] + machine: [ 'libcxx-self-hosted-linux' ] include: - config: 'generic-cxx26' - machine: libcxx-runners-8-set + machine: libcxx-self-hosted-linux - config: 'generic-asan' - machine: libcxx-runners-8-set + machine: libcxx-self-hosted-linux - config: 'generic-tsan' - machine: libcxx-runners-8-set + machine: libcxx-self-hosted-linux - config: 'generic-ubsan' - machine: libcxx-runners-8-set + machine: libcxx-self-hosted-linux # Use a larger machine for MSAN to avoid timeout and memory allocation issues. - config: 'generic-msan' - machine: libcxx-runners-8-set + machine: libcxx-self-hosted-linux runs-on: ${{ matrix.machine }} + container: ghcr.io/llvm/libcxx-linux-builder:d8a0709b1090350a7fe3604d8ab78c7d62f10698 steps: - uses: actions/checkout@v4 - name: ${{ matrix.config }} run: libcxx/utils/ci/run-buildbot ${{ matrix.config }} env: - CC: clang-19 - CXX: clang++-19 + CC: clang-20 + CXX: clang++-20 - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 if: always() with: @@ -188,6 +178,7 @@ jobs: path: | **/test-results.xml **/*.abilist + **/CMakeConfigureLog.yaml **/CMakeError.log **/CMakeOutput.log **/crash_diagnostics/* @@ -199,13 +190,13 @@ jobs: matrix: include: - config: generic-cxx03 - os: macos-latest + os: macos-15 - config: generic-cxx23 - os: macos-latest + os: macos-15 - config: generic-modules - os: macos-latest + os: macos-15 - config: apple-configuration - os: macos-latest + os: macos-15 - config: apple-system os: macos-13 - config: apple-system-hardened @@ -230,6 +221,7 @@ jobs: path: | **/test-results.xml **/*.abilist + **/CMakeConfigureLog.yaml **/CMakeError.log **/CMakeOutput.log **/crash_diagnostics/* diff --git a/.github/workflows/libcxx-build-containers.yml b/.github/workflows/libcxx-build-containers.yml new file mode 100644 
index 0000000000000..2d040f712ce59 --- /dev/null +++ b/.github/workflows/libcxx-build-containers.yml @@ -0,0 +1,71 @@ +# This file defines an action that builds the various Docker images used to run +# libc++ CI whenever modifications to those Docker files are pushed to `main`. +# +# The images are pushed to the LLVM package registry at https://github.com/orgs/llvm/packages +# and tagged appropriately. The selection of which Docker image version is used by the libc++ +# CI nodes at any given point is controlled from the workflow files themselves. + +name: Build Docker images for libc++ CI + +permissions: + contents: read + packages: write + +on: + push: + branches: + - main + paths: + - 'libcxx/utils/ci/**' + - '.github/workflows/libcxx-build-containers.yml' + pull_request: + branches: + - main + paths: + - 'libcxx/utils/ci/**' + - '.github/workflows/libcxx-build-containers.yml' + +jobs: + build-and-push: + runs-on: ubuntu-latest + if: github.repository_owner == 'llvm' + permissions: + packages: write + + steps: + - uses: actions/checkout@v4 + + - name: Build the Linux builder image + working-directory: libcxx/utils/ci + run: docker compose build actions-builder + env: + TAG: ${{ github.sha }} + + # - name: Build the Android builder image + # working-directory: libcxx/utils/ci + # run: docker compose build android-buildkite-builder + # env: + # TAG: ${{ github.sha }} + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Push the Linux builder image + if: github.event_name == 'push' + working-directory: libcxx/utils/ci + run: | + docker compose push actions-builder + env: + TAG: ${{ github.sha }} + + # - name: Push the Android builder image + # if: github.event_name == 'push' + # working-directory: libcxx/utils/ci + # run: | + # docker compose push android-buildkite-builder + # env: + # TAG: ${{ github.sha }} diff --git 
a/.github/workflows/libcxx-restart-preempted-jobs.yaml b/.github/workflows/libcxx-restart-preempted-jobs.yaml index 21879ce19c27c..e7e3772d4de22 100644 --- a/.github/workflows/libcxx-restart-preempted-jobs.yaml +++ b/.github/workflows/libcxx-restart-preempted-jobs.yaml @@ -92,6 +92,12 @@ jobs: check_run_id: check_run_id }) + // For temporary debugging purposes to see the structure of the annotations. + console.log(annotations); + + has_failed_job = false; + saved_failure_message = null; + for (annotation of annotations.data) { if (annotation.annotation_level != 'failure') { continue; } @@ -106,15 +112,32 @@ jobs: const failure_match = annotation.message.match(failure_regex); if (failure_match != null) { - // We only want to restart the workflow if all of the failures were due to preemption. - We don't want to restart the workflow if there were other failures. - core.notice('Choosing not to rerun workflow because we found a non-preemption failure' + - 'Failure message: "' + annotation.message + '"'); - await create_check_run('skipped', 'Choosing not to rerun workflow because we found a non-preemption failure\n' - + 'Failure message: ' + annotation.message) - return; + has_failed_job = true; + saved_failure_message = annotation.message; } } + if (has_failed_job && (! has_preempted_job)) { + // We only want to restart the workflow if all of the failures were due to preemption. + // We don't want to restart the workflow if there were other failures. + // + // However, libcxx runners running inside docker containers produce both a preemption message and failure message. + // + // The desired approach is to ignore failure messages which appear on the same job as a preemption message + // (A job is a single run with a specific configuration, ex generic-gcc, gcc-14). + // + // However, it's unclear that this code achieves the desired approach, and it may ignore all failures + // if a preemption message is found at all on any run. 
+ // + // For now, it's more important to restart preempted workflows than to avoid restarting workflows with + // non-preemption failures. + // + // TODO Figure this out. + core.notice('Choosing not to rerun workflow because we found a non-preemption failure' + + 'Failure message: "' + saved_failure_message + '"'); + await create_check_run('skipped', 'Choosing not to rerun workflow because we found a non-preemption failure\n' + + 'Failure message: ' + saved_failure_message) + return; + } } if (!has_preempted_job) { @@ -130,3 +153,91 @@ jobs: run_id: context.payload.workflow_run.id }) await create_check_run('success', 'Restarted workflow run due to preempted job') + + restart-test: + if: github.repository_owner == 'llvm' && (github.event.workflow_run.conclusion == 'failure' || github.event.workflow_run.conclusion == 'cancelled') && github.event.actor.login == 'ldionne' # TESTING ONLY + name: "Restart Job (test)" + permissions: + statuses: read + checks: write + actions: write + runs-on: ubuntu-latest + steps: + - name: "Restart Job (test)" + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea #v7.0.1 + with: + script: | + const FAILURE_REGEX = /Process completed with exit code 1./ + const PREEMPTION_REGEX = /(The runner has received a shutdown signal)|(The operation was canceled)/ + + function log(msg) { + core.notice(msg) + } + + const wf_run = context.payload.workflow_run + log(`Running on "${wf_run.display_title}" by @${wf_run.actor.login} (event: ${wf_run.event})\nWorkflow run URL: ${wf_run.html_url}`) + + log('Listing check runs for suite') + const check_suites = await github.rest.checks.listForSuite({ + owner: context.repo.owner, + repo: context.repo.repo, + check_suite_id: context.payload.workflow_run.check_suite_id, + per_page: 100 // FIXME: We don't have 100 check runs yet, but we should handle this better. 
+ }) + + preemptions = []; + legitimate_failures = []; + for (check_run of check_suites.data.check_runs) { + log(`Checking check run: ${check_run.id}`); + if (check_run.status != 'completed') { + log('Check run was not completed. Skipping.'); + continue; + } + + if (check_run.conclusion != 'failure' && check_run.conclusion != 'cancelled') { + log(`Check run had conclusion: ${check_run.conclusion}. Skipping.`); + continue; + } + + annotations = await github.rest.checks.listAnnotations({ + owner: context.repo.owner, + repo: context.repo.repo, + check_run_id: check_run.id + }) + + preemption_annotation = annotations.data.find(function(annotation) { + return annotation.annotation_level == 'failure' && + annotation.message.match(PREEMPTION_REGEX) != null; + }); + if (preemption_annotation != null) { + log(`Found preemption message: ${preemption_annotation.message}`); + preemptions.push(check_run); + break; + } + + failure_annotation = annotations.data.find(function(annotation) { + return annotation.annotation_level == 'failure' && + annotation.message.match(FAILURE_REGEX) != null; + }); + if (failure_annotation != null) { + log(`Found legitimate failure annotation: ${failure_annotation.message}`); + legitimate_failures.push(check_run); + break; + } + } + + if (preemptions) { + log('Found some preempted jobs'); + if (legitimate_failures) { + log('Also found some legitimate failures, so not restarting the workflow.'); + } else { + log('Did not find any legitimate failures. Restarting workflow.'); + await github.rest.actions.reRunWorkflowFailedJobs({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.payload.workflow_run.id + }) + } + } else { + log('Did not find any preempted jobs. 
Not restarting the workflow.'); + } diff --git a/.github/workflows/llvm-project-tests.yml b/.github/workflows/llvm-project-tests.yml index 95a3890c0d2dc..a6dd37cc0887d 100644 --- a/.github/workflows/llvm-project-tests.yml +++ b/.github/workflows/llvm-project-tests.yml @@ -39,7 +39,12 @@ on: type: string # Use windows-2019 due to: # https://developercommunity.visualstudio.com/t/Prev-Issue---with-__assume-isnan-/1597317 - default: '["ubuntu-latest", "windows-2019", "macOS-13"]' + # Use ubuntu-22.04 rather than ubuntu-latest to match the ubuntu + # version in the CI container. Without this, setup-python tries + # to install a python version linked against a newer version of glibc. + # TODO(boomanaiden154): Bump the Ubuntu version once the version in the + # container is bumped. + default: '["ubuntu-22.04", "windows-2019", "macOS-13"]' python_version: required: false @@ -77,7 +82,7 @@ jobs: # lldb. Using this setup-python action to make 3.10 the default # python fixes this. - name: Setup Python - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.4.0 with: python-version: ${{ inputs.python_version }} - name: Install Ninja @@ -113,7 +118,8 @@ jobs: run: | if [ "${{ runner.os }}" == "Linux" ]; then builddir="/mnt/build/" - mkdir -p $builddir + sudo mkdir -p $builddir + sudo chown gha $builddir extra_cmake_args="-DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang" else builddir="$(pwd)"/build diff --git a/.github/workflows/llvm-tests.yml b/.github/workflows/llvm-tests.yml index d1a7fddb9d2b7..4e570a7cb1455 100644 --- a/.github/workflows/llvm-tests.yml +++ b/.github/workflows/llvm-tests.yml @@ -137,14 +137,14 @@ jobs: # Remove symbol versioning from dumps, so we can compare across major versions. 
sed -i 's/LLVM_${{ matrix.llvm_version_major }}/LLVM_NOVERSION/' ${{ matrix.ref }}.abi - name: Upload ABI file - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 with: name: ${{ matrix.name }} path: ${{ matrix.ref }}.abi - name: Upload symbol list file if: matrix.name == 'build-baseline' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 with: name: symbol-list path: llvm.symbols @@ -157,17 +157,17 @@ jobs: - abi-dump steps: - name: Download baseline - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 with: name: build-baseline path: build-baseline - name: Download latest - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 with: name: build-latest path: build-latest - name: Download symbol list - uses: actions/download-artifact@v4 + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # 4.1.8 with: name: symbol-list path: symbol-list @@ -186,7 +186,7 @@ jobs: abi-compliance-checker $EXTRA_ARGS -l libLLVM.so -old build-baseline/*.abi -new build-latest/*.abi || test "${{ needs.abi-dump-setup.outputs.ABI_HEADERS }}" = "llvm-c" - name: Upload ABI Comparison if: always() - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 with: name: compat-report-${{ github.sha }} path: compat_reports/ diff --git a/.github/workflows/new-issues.yml b/.github/workflows/new-issues.yml index ed15fdb9fba6e..3cac57e268513 100644 --- a/.github/workflows/new-issues.yml +++ b/.github/workflows/new-issues.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: llvm/actions/issue-labeler@main with: - repo-token: ${{ secrets.GITHUB_TOKEN }} + repo-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} configuration-path: .github/new-issues-labeler.yml include-title: 1 
include-body: 0 diff --git a/.github/workflows/pr-code-format.yml b/.github/workflows/pr-code-format.yml index 67c80441dd3a9..789a8f92b98a6 100644 --- a/.github/workflows/pr-code-format.yml +++ b/.github/workflows/pr-code-format.yml @@ -8,7 +8,6 @@ on: branches: - main - sycl - - sycl-devops-pr/** - sycl-rel-** - 'users/**' @@ -35,7 +34,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v45 + uses: step-security/changed-files@3dbe17c78367e7d60f00d78ae6781a35be47b4a1 # v45.0.1 with: separator: "," skip_initial_fetch: true @@ -63,10 +62,10 @@ jobs: - name: Install clang-format uses: aminya/setup-cpp@v1 with: - clangformat: 18.1.7 + clangformat: 19.1.6 - name: Setup Python env - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.4.0 with: python-version: '3.11' cache: 'pip' diff --git a/.github/workflows/premerge.yaml b/.github/workflows/premerge.yaml new file mode 100644 index 0000000000000..54d6e1bf092cf --- /dev/null +++ b/.github/workflows/premerge.yaml @@ -0,0 +1,134 @@ +name: LLVM Premerge Checks + +permissions: + contents: read + +on: + pull_request: + paths: + - .github/workflows/premerge.yaml + push: + branches: + - 'main' + +jobs: + premerge-checks-linux: + if: github.repository_owner == 'llvm' + runs-on: llvm-premerge-linux-runners + concurrency: + group: ${{ github.workflow }}-linux-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + steps: + - name: Checkout LLVM + uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Setup ccache + uses: hendrikmuhs/ccache-action@v1.2.14 + with: + max-size: "2000M" + - name: Build and Test + # Mark the job as a success even if the step fails so that people do + # not get notified while the new premerge pipeline is in an + # experimental state. + # TODO(boomanaiden154): Remove this once the pipeline is stable and we + # are ready for people to start receiving notifications. 
+ continue-on-error: true + run: | + git config --global --add safe.directory '*' + + modified_files=$(git diff --name-only HEAD~1...HEAD) + modified_dirs=$(echo "$modified_files" | cut -d'/' -f1 | sort -u) + + echo $modified_files + echo $modified_dirs + + . ./.ci/compute-projects.sh + + all_projects="bolt clang clang-tools-extra compiler-rt cross-project-tests flang libc libclc lld lldb llvm mlir openmp polly pstl" + modified_projects="$(keep-modified-projects ${all_projects})" + + linux_projects_to_test=$(exclude-linux $(compute-projects-to-test 0 ${modified_projects})) + linux_check_targets=$(check-targets ${linux_projects_to_test} | sort | uniq) + linux_projects=$(add-dependencies ${linux_projects_to_test} | sort | uniq) + + linux_runtimes_to_test=$(compute-runtimes-to-test ${linux_projects_to_test}) + linux_runtime_check_targets=$(check-targets ${linux_runtimes_to_test} | sort | uniq) + linux_runtimes=$(echo ${linux_runtimes_to_test} | sort | uniq) + + if [[ "${linux_projects}" == "" ]]; then + echo "No projects to build" + exit 0 + fi + + echo "Building projects: ${linux_projects}" + echo "Running project checks targets: ${linux_check_targets}" + echo "Building runtimes: ${linux_runtimes}" + echo "Running runtimes checks targets: ${linux_runtime_check_targets}" + + export CC=/opt/llvm/bin/clang + export CXX=/opt/llvm/bin/clang++ + + ./.ci/monolithic-linux.sh "$(echo ${linux_projects} | tr ' ' ';')" "$(echo ${linux_check_targets})" "$(echo ${linux_runtimes} | tr ' ' ';')" "$(echo ${linux_runtime_check_targets})" + + premerge-checks-windows: + if: github.repository_owner == 'llvm' + runs-on: llvm-premerge-windows-runners + concurrency: + group: ${{ github.workflow }}-windows-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true + defaults: + run: + shell: bash + steps: + - name: Checkout LLVM + uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Setup ccache + uses: hendrikmuhs/ccache-action@v1.2.14 + with: + variant: 
"sccache" + max-size: "2000M" + - name: Compute Projects + id: vars + run: | + modified_files=$(git diff --name-only HEAD~1...HEAD) + modified_dirs=$(echo "$modified_files" | cut -d'/' -f1 | sort | uniq) + + echo $modified_files + echo $modified_dirs + + . ./.ci/compute-projects.sh + + all_projects="bolt clang clang-tools-extra compiler-rt cross-project-tests flang libc libclc lld lldb llvm mlir openmp polly pstl" + modified_projects="$(keep-modified-projects ${all_projects})" + + windows_projects_to_test=$(exclude-windows $(compute-projects-to-test 1 ${modified_projects})) + windows_check_targets=$(check-targets ${windows_projects_to_test} | sort | uniq | tr -d '\r' | tr '\n' ' ') + windows_projects=$(add-dependencies ${windows_projects_to_test} | sort | uniq | tr -d '\r' | tr '\n' ';') + + if [[ "${windows_projects}" == "" ]]; then + echo "No projects to build" + fi + + echo "Building projects: ${windows_projects}" + echo "Running project checks targets: ${windows_check_targets}" + + echo "windows-projects=${windows_projects}" >> $GITHUB_OUTPUT + echo "windows-check-targets=${windows_check_targets}" >> $GITHUB_OUTPUT + - name: Build and Test + # Mark the job as a success even if the step fails so that people do + # not get notified while the new premerge pipeline is in an + # experimental state. + # TODO(boomanaiden154): Remove this once the pipeline is stable and we + # are ready for people to start receiving notifications. 
+ continue-on-error: true + if: ${{ steps.vars.outputs.windows-projects != '' }} + shell: cmd + run: | + set MAX_PARALLEL_COMPILE_JOBS=64 + set MAX_PARALLEL_LINK_JOBS=64 + call C:\\BuildTools\\Common7\\Tools\\VsDevCmd.bat -arch=amd64 -host_arch=amd64 + bash .ci/monolithic-windows.sh "${{ steps.vars.outputs.windows-projects }}" "${{ steps.vars.outputs.windows-check-targets }}" + diff --git a/.github/workflows/release-binaries-all.yml b/.github/workflows/release-binaries-all.yml index f5318aecc53a7..d18b9b0b5c2ff 100644 --- a/.github/workflows/release-binaries-all.yml +++ b/.github/workflows/release-binaries-all.yml @@ -83,7 +83,7 @@ jobs: matrix: runs-on: - ubuntu-22.04 - - windows-2022 + - ubuntu-22.04-arm - macos-13 - macos-14 diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index dc58089653763..9ee0b3f928b5c 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -18,7 +18,7 @@ on: type: choice options: - ubuntu-22.04 - - windows-2022 + - ubuntu-22.04-arm - macos-13 - macos-14 @@ -50,21 +50,24 @@ jobs: prepare: name: Prepare to build binaries runs-on: ${{ inputs.runs-on }} - if: github.repository == 'llvm/llvm-project' + if: github.repository_owner == 'llvm' outputs: release-version: ${{ steps.vars.outputs.release-version }} ref: ${{ steps.vars.outputs.ref }} upload: ${{ steps.vars.outputs.upload }} target-cmake-flags: ${{ steps.vars.outputs.target-cmake-flags }} + ccache: ${{ steps.vars.outputs.ccache }} build-flang: ${{ steps.vars.outputs.build-flang }} enable-pgo: ${{ steps.vars.outputs.enable-pgo }} release-binary-basename: ${{ steps.vars.outputs.release-binary-basename }} release-binary-filename: ${{ steps.vars.outputs.release-binary-filename }} + build-runs-on: ${{ steps.vars.outputs.build-runs-on }} + test-runs-on: ${{ steps.vars.outputs.build-runs-on }} steps: # It's good practice to use setup-python, but this is also required on macos-14 # due to 
https://github.com/actions/runner-images/issues/10385 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f + - uses: actions/setup-python@6ca8e8598faa206f7140a65ba31b899bebe16f58 with: python-version: '3.12' @@ -83,7 +86,7 @@ jobs: USER_TOKEN: ${{ secrets.RELEASE_TASKS_USER_TOKEN }} shell: bash run: | - ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user ${{ github.actor }} --user-token "$USER_TOKEN" check-permissions + ./llvm/utils/release/./github-upload-release.py --token "$GITHUB_TOKEN" --user "$GITHUB_ACTOR" --user-token "$USER_TOKEN" check-permissions - name: Collect Variables id: vars @@ -102,8 +105,8 @@ jobs: release_version="$trimmed" ref="llvmorg-$release_version" else - release_version="${{ (github.event_name == 'pull_request' && format('PR{0}', github.event.pull_request.number)) || 'CI'}}-${{ github.sha }}" - ref=${{ github.sha }} + release_version="${{ (github.event_name == 'pull_request' && format('PR{0}', github.event.pull_request.number)) || 'CI'}}-$GITHUB_SHA" + ref="$GITHUB_SHA" fi if [ -n "${{ inputs.upload }}" ]; then upload="${{ inputs.upload }}" @@ -114,20 +117,28 @@ jobs: echo "ref=$ref" >> $GITHUB_OUTPUT echo "upload=$upload" >> $GITHUB_OUTPUT - release_binary_basename="LLVM-$release_version-${{ runner.os }}-${{ runner.arch }}" + release_binary_basename="LLVM-$release_version-$RUNNER_OS-$RUNNER_ARCH" echo "release-binary-basename=$release_binary_basename" >> $GITHUB_OUTPUT echo "release-binary-filename=$release_binary_basename.tar.xz" >> $GITHUB_OUTPUT + target="$RUNNER_OS-$RUNNER_ARCH" + # The hendrikmuhs/ccache-action action does not support installing sccache + # on arm64 Linux. 
+ if [ "$target" = "Linux-ARM64" ]; then + echo ccache=ccache >> $GITHUB_OUTPUT + else + echo ccache=sccache >> $GITHUB_OUTPUT + fi + # Detect necessary CMake flags - target="${{ runner.os }}-${{ runner.arch }}" echo "enable-pgo=false" >> $GITHUB_OUTPUT target_cmake_flags="-DLLVM_RELEASE_ENABLE_PGO=OFF" # The macOS builds try to cross compile some libraries so we need to # add extra CMake args to disable them. # See https://github.com/llvm/llvm-project/issues/99767 - if [ "${{ runner.os }}" = "macOS" ]; then + if [ "$RUNNER_OS" = "macOS" ]; then target_cmake_flags="$target_cmake_flags -DBOOTSTRAP_COMPILER_RT_ENABLE_IOS=OFF" - if [ "${{ runner.arch }}" = "ARM64" ]; then + if [ "$RUNNER_ARCH" = "ARM64" ]; then arches=arm64 else arches=x86_64 @@ -137,19 +148,47 @@ jobs: build_flang="true" - if [ "${{ runner.os }}" = "Windows" ]; then + if [ "$RUNNER_OS" = "Windows" ]; then # The build times out on Windows, so we need to disable LTO. target_cmake_flags="$target_cmake_flags -DLLVM_RELEASE_ENABLE_LTO=OFF" fi echo "target-cmake-flags=$target_cmake_flags" >> $GITHUB_OUTPUT echo "build-flang=$build_flang" >> $GITHUB_OUTPUT - - build-stage1: - name: "Build Stage 1" + case "${{ inputs.runs-on }}" in + ubuntu-22.04*) + build_runs_on="depot-${{ inputs.runs-on }}-16" + test_runs_on=$build_runs_on + ;; + macos-13) + if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then + build_runs_on="${{ inputs.runs-on }}" + else + build_runs_on="macos-13-large" + fi + test_runs_on="${{ inputs.runs-on }}" + ;; + macos-14) + if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then + build_runs_on="${{ inputs.runs-on }}" + else + build_runs_on="depot-macos-14" + fi + test_runs_on="${{ inputs.runs-on }}" + ;; + *) + test_runs_on="${{ inputs.runs-on }}" + build_runs_on=$test_runs_on + ;; + esac + echo "build-runs-on=$build_runs_on" >> $GITHUB_OUTPUT + echo "test-runs-on=$test_runs_on" >> $GITHUB_OUTPUT + + build-release-package: + name: "Build Release Package" needs: prepare - if: github.repository == 
'llvm/llvm-project' - runs-on: ${{ inputs.runs-on }} + if: github.repository_owner == 'llvm' + runs-on: ${{ needs.prepare.outputs.build-runs-on }} steps: - name: Checkout Actions @@ -187,17 +226,11 @@ jobs: id: setup-stage uses: ./workflows-main/.github/workflows/release-binaries-setup-stage - - name: Setup sccache - uses: hendrikmuhs/ccache-action@ca3acd2731eef11f1572ccb126356c2f9298d35e # v1.2.9 - with: - # Default to 2G to workaround: https://github.com/hendrikmuhs/ccache-action/issues/174 - max-size: 2G - key: sccache-${{ runner.os }}-${{ runner.arch }}-release - variant: sccache - - - name: Build Stage 1 Clang + - name: Configure id: build shell: bash + env: + CCACHE_BIN: ${{ needs.prepare.outputs.ccache }} run: | # There were some issues on the ARM64 MacOS runners with trying to build x86 object, # so we need to set some extra cmake flags to disable this. @@ -205,185 +238,14 @@ jobs: ${{ needs.prepare.outputs.target-cmake-flags }} \ -C clang/cmake/caches/Release.cmake \ -DBOOTSTRAP_LLVM_PARALLEL_LINK_JOBS=1 \ - -DBOOTSTRAP_CPACK_PACKAGE_FILE_NAME="${{ needs.prepare.outputs.release-binary-basename }}" \ - -DCMAKE_C_COMPILER_LAUNCHER=sccache \ - -DCMAKE_CXX_COMPILER_LAUNCHER=sccache - ninja -v -C ${{ steps.setup-stage.outputs.build-prefix }}/build - # There is a race condition on the MacOS builders and this command is here - # to help debug that when it happens. 
- ls -ltr ${{ steps.setup-stage.outputs.build-prefix }}/build - - - name: Save Stage - uses: ./workflows-main/.github/workflows/release-binaries-save-stage - with: - build-prefix: ${{ steps.setup-stage.outputs.build-prefix }} + -DBOOTSTRAP_CPACK_PACKAGE_FILE_NAME="${{ needs.prepare.outputs.release-binary-basename }}" - build-stage2: - name: "Build Stage 2" - needs: - - prepare - - build-stage1 - if: github.repository == 'llvm/llvm-project' - runs-on: ${{ inputs.runs-on }} - steps: - - name: Checkout Actions - uses: actions/checkout@v4 - with: - ref: ${{ (github.event_name == 'pull_request' && github.sha) || 'main' }} - sparse-checkout: | - .github/workflows/ - sparse-checkout-cone-mode: false - path: workflows - - name: Setup Stage - id: setup-stage - uses: ./workflows/.github/workflows/release-binaries-setup-stage - with: - previous-artifact: build-stage1 - - - name: Build Stage 2 - # Re-enable once PGO builds are supported. - if: needs.prepare.outputs.enable-pgo == 'true' + - name: Build shell: bash run: | - ninja -C ${{ steps.setup-stage.outputs.build-prefix}}/build stage2-instrumented - - - name: Save Stage - uses: ./workflows/.github/workflows/release-binaries-save-stage - with: - build-prefix: ${{ steps.setup-stage.outputs.build-prefix }} - - build-stage3-clang: - name: "Build Stage 3 LLVM/Clang" - needs: - - prepare - - build-stage2 - if: github.repository == 'llvm/llvm-project' - runs-on: ${{ inputs.runs-on }} - steps: - - name: Checkout Actions - uses: actions/checkout@v4 - with: - ref: ${{ (github.event_name == 'pull_request' && github.sha) || 'main' }} - sparse-checkout: | - .github/workflows/ - sparse-checkout-cone-mode: false - path: workflows - - name: Setup Stage - id: setup-stage - uses: ./workflows/.github/workflows/release-binaries-setup-stage - with: - previous-artifact: build-stage2 - - - name: Build LLVM/Clang - shell: bash - run: | - # There is a race condition on the MacOS builders and this command is here - # to help debug that when it 
happens. - ls -ltr ${{ steps.setup-stage.outputs.build-prefix }}/build - ninja -C ${{ steps.setup-stage.outputs.build-prefix }}/build stage2-clang - # Build some of the larger binaries here too. - ninja -C ${{ steps.setup-stage.outputs.build-prefix }}/build/tools/clang/stage2-bins/ \ - clang-scan-deps \ - modularize clangd \ - clangd-indexer \ - clang-check \ - ${{ (runner.os == 'Linux' && 'clangd-fuzzer') || '' }} \ - clang-tidy \ - llc \ - lli \ - llvm-exegesis \ - llvm-opt-fuzzer \ - llvm-reduce \ - llvm-lto \ - dsymutil - - - name: Save Stage - uses: ./workflows/.github/workflows/release-binaries-save-stage - with: - build-prefix: ${{ steps.setup-stage.outputs.build-prefix }} - - build-stage3-flang: - name: "Build Stage 3 Flang/MLIR/Bolt" - needs: - - prepare - - build-stage3-clang - runs-on: ${{ inputs.runs-on }} - steps: - - name: Checkout Actions - uses: actions/checkout@v4 - with: - ref: ${{ (github.event_name == 'pull_request' && github.sha) || 'main' }} - sparse-checkout: | - .github/workflows/ - sparse-checkout-cone-mode: false - path: workflows - - name: Setup Stage - id: setup-stage - uses: ./workflows/.github/workflows/release-binaries-setup-stage - with: - previous-artifact: build-stage3-clang - - - name: Build Flang / MLIR / Bolt - shell: bash - run: | - # Build some of the mlir tools that take a long time to link - if [ "${{ needs.prepare.outputs.build-flang }}" = "true" ]; then - ninja -C ${{ steps.setup-stage.outputs.build-prefix }}/build/tools/clang/stage2-bins/ -j2 flang bbc - fi - ninja -C ${{ steps.setup-stage.outputs.build-prefix }}/build/tools/clang/stage2-bins/ \ - mlir-bytecode-parser-fuzzer \ - mlir-cpu-runner \ - mlir-lsp-server \ - mlir-opt \ - mlir-query \ - mlir-reduce \ - mlir-text-parser-fuzzer \ - mlir-translate \ - mlir-transform-opt \ - mlir-cat \ - mlir-minimal-opt \ - mlir-minimal-opt-canonicalize \ - mlir-pdll-lsp-server \ - llvm-bolt \ - llvm-bolt-heatmap - - - name: Save Stage - uses: 
./workflows/.github/workflows/release-binaries-save-stage - with: - build-prefix: ${{ steps.setup-stage.outputs.build-prefix }} - - build-stage3-all: - name: "Build Stage 3" - needs: - - prepare - - build-stage3-flang - runs-on: ${{ inputs.runs-on }} - steps: - - name: Checkout Actions - uses: actions/checkout@v4 - with: - ref: ${{ (github.event_name == 'pull_request' && github.sha) || 'main' }} - sparse-checkout: | - .github/workflows/ - sparse-checkout-cone-mode: false - path: workflows - - name: Setup Stage - id: setup-stage - uses: ./workflows/.github/workflows/release-binaries-setup-stage - with: - previous-artifact: build-stage3-flang - - - name: Build Release Package - shell: bash - run: | - which cmake - ninja -C ${{ steps.setup-stage.outputs.build-prefix }}/build stage2-package - # Copy Release artifact to the workspace so it is easier to upload. - # This is necessary, because on Windows, the build-prefix path can - # only be used on bash steps, because it uses the form of /d/files/ - # and other steps expect D:\files. + ninja -v -C ${{ steps.setup-stage.outputs.build-prefix }}/build stage2-package mv ${{ steps.setup-stage.outputs.build-prefix }}/build/tools/clang/stage2-bins/${{ needs.prepare.outputs.release-binary-filename }} . 
- + - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0 with: name: ${{ runner.os }}-${{ runner.arch }}-release-binary @@ -398,9 +260,9 @@ jobs: run: | find ${{ steps.setup-stage.outputs.build-prefix }}/build -iname ${{ needs.prepare.outputs.release-binary-filename }} -delete rm -Rf ${{ steps.setup-stage.outputs.build-prefix }}/build/tools/clang/stage2-bins/_CPack_Packages - + - name: Save Stage - uses: ./workflows/.github/workflows/release-binaries-save-stage + uses: ./workflows-main/.github/workflows/release-binaries-save-stage with: build-prefix: ${{ steps.setup-stage.outputs.build-prefix }} @@ -408,9 +270,8 @@ jobs: name: "Upload Release Binaries" needs: - prepare - - build-stage3-all + - build-release-package if: >- - always() && github.event_name != 'pull_request' && needs.prepare.outputs.upload == 'true' runs-on: ubuntu-22.04 @@ -436,7 +297,7 @@ jobs: - name: Attest Build Provenance id: provenance - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@bd77c077858b8d561b7a36cbe48ef4cc642ca39d # v2.2.2 with: subject-path: ${{ needs.prepare.outputs.release-binary-filename }} @@ -463,14 +324,14 @@ jobs: upload \ --files ${{ needs.prepare.outputs.release-binary-filename }}* - test-stage3: - name: "Test Stage 3" + test-release: + name: "Test Release" needs: - prepare - - build-stage3-all + - build-release-package if: >- - github.repository == 'llvm/llvm-project' - runs-on: ${{ inputs.runs-on }} + github.repository_owner == 'llvm' + runs-on: ${{ needs.prepare.outputs.test-runs-on }} steps: - name: Checkout Actions uses: actions/checkout@v4 @@ -484,7 +345,7 @@ jobs: id: setup-stage uses: ./workflows/.github/workflows/release-binaries-setup-stage with: - previous-artifact: build-stage3-all + previous-artifact: build-release-package - name: Run Tests shell: bash diff --git a/.github/workflows/release-documentation.yml b/.github/workflows/release-documentation.yml 
index ea64cfd4c17a4..e7fd01f19e894 100644 --- a/.github/workflows/release-documentation.yml +++ b/.github/workflows/release-documentation.yml @@ -37,7 +37,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Python env - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.4.0 with: cache: 'pip' cache-dependency-path: './llvm/docs/requirements.txt' @@ -60,7 +60,7 @@ jobs: ./llvm/utils/release/build-docs.sh -release "${{ inputs.release-version }}" -no-doxygen - name: Create Release Notes Artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # 4.6.0 with: name: release-notes path: docs-build/html-export/ diff --git a/.github/workflows/release-doxygen.yml b/.github/workflows/release-doxygen.yml index e6efe56f0c0c4..7db14d02c95ad 100644 --- a/.github/workflows/release-doxygen.yml +++ b/.github/workflows/release-doxygen.yml @@ -43,7 +43,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Setup Python env - uses: actions/setup-python@v5 + uses: actions/setup-python@v5.4.0 with: cache: 'pip' cache-dependency-path: './llvm/docs/requirements.txt' diff --git a/.github/workflows/release-sources.yml b/.github/workflows/release-sources.yml index f62d73e5e8954..cc581e1fbaf84 100644 --- a/.github/workflows/release-sources.yml +++ b/.github/workflows/release-sources.yml @@ -92,7 +92,7 @@ jobs: - name: Attest Build Provenance if: github.event_name != 'pull_request' id: provenance - uses: actions/attest-build-provenance@1c608d11d69870c2092266b3f9a6f3abbf17002c # v1.4.3 + uses: actions/attest-build-provenance@bd77c077858b8d561b7a36cbe48ef4cc642ca39d # v2.2.2 with: subject-path: "*.xz" - if: github.event_name != 'pull_request' diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index b6bd85f3d556b..d8e290896bfab 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -36,7 
+36,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 + uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 with: results_file: results.sarif results_format: sarif @@ -57,6 +57,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@662472033e021d55d94146f66f6058822b0b39fd # v3.27.0 + uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0 with: sarif_file: results.sarif diff --git a/.github/workflows/spirv-tests.yml b/.github/workflows/spirv-tests.yml index 75918e73e8973..ea466dc6c52e5 100644 --- a/.github/workflows/spirv-tests.yml +++ b/.github/workflows/spirv-tests.yml @@ -25,5 +25,5 @@ jobs: with: build_target: check-llvm-codegen-spirv projects: - extra_cmake_args: '-DLLVM_TARGETS_TO_BUILD="" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="SPIRV" -DLLVM_INCLUDE_SPIRV_TOOLS_TESTS=ON' - os_list: '["ubuntu-latest"]' + extra_cmake_args: '-DLLVM_TARGETS_TO_BUILD="SPIRV" -DLLVM_INCLUDE_SPIRV_TOOLS_TESTS=ON' + os_list: '["ubuntu-22.04"]' diff --git a/.github/workflows/sycl-aws.yml b/.github/workflows/sycl-aws.yml index 869b734df541d..47e0cc602245c 100644 --- a/.github/workflows/sycl-aws.yml +++ b/.github/workflows/sycl-aws.yml @@ -15,15 +15,19 @@ on: description: "JSON string with array of objects with aws-type, runs-on, aws-ami, aws-spot, aws-disk, aws-timebomb, one-job properties" type: string default: '[{"runs-on":"aws_cuda-${{ github.run_id }}-${{ github.run_attempt }}","aws-ami":"ami-01cb0573cb039ab24","aws-type":["g5.2xlarge","g5.4xlarge"],"aws-disk":"/dev/sda1:64","aws-spot":"false"}]' + ref: + type: string + required: false jobs: aws: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest environment: aws steps: - uses: actions/checkout@v4 with: sparse-checkout: devops/actions/aws-ec2 + ref: ${{ inputs.ref || github.sha }} - run: npm 
install ./devops/actions/aws-ec2 - uses: ./devops/actions/aws-ec2 with: diff --git a/.github/workflows/sycl-benchmark-aggregate.yml b/.github/workflows/sycl-benchmark-aggregate.yml new file mode 100644 index 0000000000000..87f7ef718160a --- /dev/null +++ b/.github/workflows/sycl-benchmark-aggregate.yml @@ -0,0 +1,52 @@ +name: Aggregate compute-benchmark averages from historical data + +# The benchmarking workflow in sycl-linux-run-tests.yml passes or fails based on +# how the benchmark results compare to a historical average: This historical +# average is calculated in this workflow, which aggregates historical data and +# produces measures of central tendency (median in this case) used for this +# purpose. + +on: + workflow_dispatch: + inputs: + lookback_days: + description: | + Number of days from today to look back in historical results for: + This sets the age limit of data used in average calculation: Any + benchmark results created before `lookback_days` from today is + excluded from being aggregated in the historical average. + type: number + required: true + workflow_call: + inputs: + lookback_days: + type: number + required: true + secrets: + LLVM_SYCL_BENCHMARK_TOKEN: + description: | + Github token used by the faceless account to push newly calculated + medians. 
+ required: true + + +permissions: + contents: read + +jobs: + aggregate: + name: Aggregate average (median) value for all metrics + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + sparse-checkout: | + devops/scripts/benchmarking + devops/benchmarking + devops/actions/benchmarking + - name: Aggregate benchmark results and produce historical average + uses: ./devops/actions/benchmarking/aggregate + with: + lookback_days: ${{ inputs.lookback_days }} + env: + GITHUB_TOKEN: ${{ secrets.LLVM_SYCL_BENCHMARK_TOKEN }} diff --git a/.github/workflows/sycl-containers-igc-dev.yaml b/.github/workflows/sycl-containers-igc-dev.yaml index 8bf8def9d8148..76a9b4e544f4a 100644 --- a/.github/workflows/sycl-containers-igc-dev.yaml +++ b/.github/workflows/sycl-containers-igc-dev.yaml @@ -6,11 +6,13 @@ on: - sycl paths: - 'devops/actions/build_container/**' + - 'devops/scripts/**' - 'devops/dependencies-igc-dev.json' - '.github/workflows/sycl-containers-igc-dev.yaml' pull_request: paths: - 'devops/actions/build_container/**' + - 'devops/scripts/**' - 'devops/dependencies-igc-dev.json' - '.github/workflows/sycl-containers-igc-dev.yaml' @@ -20,18 +22,18 @@ jobs: build_and_push_images: if: github.repository == 'intel/llvm' name: Build and Push IGC Dev Docker Images - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest permissions: packages: write strategy: matrix: include: - - name: Intel Drivers Ubuntu 22.04 Docker image with dev IGC - dockerfile: ubuntu2204_intel_drivers_igc_dev - imagefile: ubuntu2204_intel_drivers + - name: Intel Drivers Ubuntu 24.04 Docker image with dev IGC + dockerfile: ubuntu2404_intel_drivers_igc_dev + imagefile: ubuntu2404_intel_drivers tag: devigc build_args: | - "use_latest=false" + "use_unstable_driver=false" "use_igc_dev=true" steps: - name: Checkout diff --git a/.github/workflows/sycl-containers.yaml b/.github/workflows/sycl-containers.yaml index 7abeee17df01b..7ba768a79701c 100644 --- a/.github/workflows/sycl-containers.yaml +++ 
b/.github/workflows/sycl-containers.yaml @@ -29,7 +29,7 @@ jobs: build_and_push_images: if: github.repository == 'intel/llvm' name: Build and Push Docker Images - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest permissions: packages: write strategy: @@ -39,25 +39,44 @@ jobs: file: ubuntu2204_base tag: latest build_args: "" - - name: Build Ubuntu Docker image + - name: Base Ubuntu 24.04 Docker image + file: ubuntu2404_base + tag: latest + build_args: "" + - name: Build Ubuntu 22.04 Docker image file: ubuntu2204_build tag: latest build_args: "" + - name: Build Ubuntu 24.04 Docker image + file: ubuntu2404_build + tag: latest + build_args: "" - name: Intel Drivers Ubuntu 22.04 Docker image file: ubuntu2204_intel_drivers tag: latest - build_args: "use_latest=false" - - name: Intel Drivers (unstable) Ubuntu 22.04 Docker image - file: ubuntu2204_intel_drivers + build_args: "use_unstable_driver=false" + - name: Intel Drivers Ubuntu 24.04 Docker image + file: ubuntu2404_intel_drivers + tag: latest + build_args: "use_unstable_driver=false" + - name: Intel Drivers (unstable) Ubuntu 24.04 Docker image + file: ubuntu2404_intel_drivers tag: unstable - build_args: "use_latest=true" + build_args: "use_unstable_driver=true" - name: Build + Intel Drivers Ubuntu 22.04 Docker image file: ubuntu2204_intel_drivers tag: alldeps build_args: | base_image=ghcr.io/intel/llvm/ubuntu2204_build base_tag=latest - use_latest=false + use_unstable_driver=false + - name: Build + Intel Drivers Ubuntu 24.04 Docker image + file: ubuntu2404_intel_drivers + tag: alldeps + build_args: | + base_image=ghcr.io/intel/llvm/ubuntu2404_build + base_tag=latest + use_unstable_driver=false steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/sycl-detect-changes.yml b/.github/workflows/sycl-detect-changes.yml index 396cd424ff4ac..33152fb3f55e1 100644 --- a/.github/workflows/sycl-detect-changes.yml +++ b/.github/workflows/sycl-detect-changes.yml @@ -72,6 +72,9 @@ jobs: - 
'sycl/include/sycl/ext/oneapi/experimental/invoke_simd.hpp' - 'sycl/include/sycl/ext/oneapi/experimental/detail/invoke_simd_types.hpp' - 'sycl/test-e2e/(ESIMD|InvokeSimd)/**' + ur: + - 'unified-runtime/**' + - .github/workflows/ur-* - name: Set output id: result @@ -84,7 +87,7 @@ jobs: return '${{ steps.changes.outputs.changes }}'; } // Treat everything as changed for huge PRs. - return ["llvm", "llvm_spirv", "clang", "sycl_jit", "xptifw", "libclc", "sycl", "ci", "esimd"]; + return ["llvm", "llvm_spirv", "clang", "sycl_jit", "xptifw", "libclc", "sycl", "ci", "esimd", "ur"]; - run: echo '${{ steps.result.outputs.result }}' diff --git a/.github/workflows/sycl-issues-ping-assignee.yml b/.github/workflows/sycl-issues-ping-assignee.yml index c809d67586e71..adb4c2e5b658d 100644 --- a/.github/workflows/sycl-issues-ping-assignee.yml +++ b/.github/workflows/sycl-issues-ping-assignee.yml @@ -20,7 +20,7 @@ jobs: run: permissions: issues: write - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} REPO: ${{ github.repository }} @@ -39,7 +39,7 @@ jobs: - name: Filter issues and ping run: | - days_to_stale=60 + days_to_stale=90 current_time=$(date +%s) cat issues.json | jq -c '.[]' | while read -r issue; do diff --git a/.github/workflows/sycl-linux-build.yml b/.github/workflows/sycl-linux-build.yml index 0dc956dfea752..b76609f9d66bd 100644 --- a/.github/workflows/sycl-linux-build.yml +++ b/.github/workflows/sycl-linux-build.yml @@ -14,7 +14,7 @@ on: build_image: type: string required: false - default: "ghcr.io/intel/llvm/ubuntu2204_build:latest" + default: "ghcr.io/intel/llvm/ubuntu2404_intel_drivers:alldeps" build_ref: type: string required: false @@ -40,16 +40,13 @@ on: description: 'Filter matches for the changed files in the PR' default: '[llvm, clang, sycl, llvm_spirv, xptifw, libclc, libdevice]' required: false - merge_ref: - description: | - Commit-ish to merge post-checkout if non-empty. 
Must be reachable from - the default_branch input paramter. - type: string - default: 'FETCH_HEAD' retention-days: description: 'Artifacts retention period' type: string default: 3 + e2e_binaries_artifact: + type: string + required: False outputs: build_conclusion: @@ -70,7 +67,7 @@ on: build_image: type: choice options: - - "ghcr.io/intel/llvm/sycl_ubuntu2204_nightly:build" + - 'ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest' cc: type: choice options: @@ -150,13 +147,14 @@ jobs: with: path: src ref: ${{ inputs.build_ref || github.sha }} - merge_ref: ${{ inputs.merge_ref }} cache_path: "/__w/repo_cache/" + - name: Setup oneAPI env + if: ${{ inputs.cc == 'icx' || inputs.cxx == 'icpx' }} + uses: ./devops/actions/setup_linux_oneapi_env - name: Configure env: CC: ${{ inputs.cc }} CXX: ${{ inputs.cxx }} - ARGS: ${{ inputs.build_configure_extra_args }} CUDA_LIB_PATH: "/usr/local/cuda/lib64/stubs" run: | mkdir -p $CCACHE_DIR @@ -164,15 +162,15 @@ jobs: cd $GITHUB_WORKSPACE/build python3 $GITHUB_WORKSPACE/src/buildbot/configure.py -w $GITHUB_WORKSPACE \ -s $GITHUB_WORKSPACE/src -o $GITHUB_WORKSPACE/build -t Release \ - --ci-defaults $ARGS \ - --cmake-opt=-DCMAKE_C_COMPILER_LAUNCHER=ccache \ - --cmake-opt=-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ - --cmake-opt="-DLLVM_INSTALL_UTILS=ON" \ - --cmake-opt="-DNATIVECPU_USE_OCK=Off" \ - --cmake-opt="-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV" + --ci-defaults ${{ inputs.build_configure_extra_args }} \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -DLLVM_INSTALL_UTILS=ON \ + -DNATIVECPU_USE_OCK=Off \ + -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV - name: Compile id: build - run: cmake --build $GITHUB_WORKSPACE/build + run: cmake --build $GITHUB_WORKSPACE/build --target sycl-toolchain - name: check-llvm if: always() && !cancelled() && contains(inputs.changes, 'llvm') run: | @@ -247,3 +245,53 @@ jobs: name: sycl_linux_${{ inputs.build_artifact_suffix }} path: ${{ 
steps.artifact_info.outputs.ARCHIVE_NAME }} retention-days: ${{ inputs.retention-days }} + + - name: Copy toolchain + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + # We must have the compiler in the same location as it will be in the E2E + # run-tests job. + run: cp -r $GITHUB_WORKSPACE/build/install $GITHUB_WORKSPACE/toolchain + + - name: Source OneAPI TBB vars.sh + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + shell: bash + run: | + # https://github.com/actions/runner/issues/1964 prevents us from using + # the ENTRYPOINT in the image. + env | sort > env_before + if [ -e /runtimes/oneapi-tbb/env/vars.sh ]; then + source /runtimes/oneapi-tbb/env/vars.sh; + elif [ -e /opt/runtimes/oneapi-tbb/env/vars.sh ]; then + source /opt/runtimes/oneapi-tbb/env/vars.sh; + else + echo "no TBB vars in /opt/runtimes or /runtimes"; + fi + env | sort > env_after + comm -13 env_before env_after >> $GITHUB_ENV + rm env_before env_after + + - name: Build E2E tests + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + uses: ./devops/actions/run-tests/e2e + with: + ref: ${{ inputs.ref || github.sha }} + testing_mode: build-only + target_devices: all + binaries_artifact: ${{ inputs.e2e_binaries_artifact }} + cxx_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ + extra_lit_opts: --param sycl_build_targets="spir;nvidia;amd" + + - name: Remove E2E tests before spirv-backend run + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + run: rm -rf build-e2e + + - name: Build E2E tests with SPIR-V Backend + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + uses: ./devops/actions/run-tests/e2e + with: + ref: ${{ inputs.ref || github.sha }} + testing_mode: build-only + target_devices: all + 
binaries_artifact: ${{ inputs.e2e_binaries_artifact }}_spirv_backend + cxx_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ + extra_lit_opts: --param spirv-backend=True diff --git a/.github/workflows/sycl-linux-precommit-aws.yml b/.github/workflows/sycl-linux-precommit-aws.yml index 07d309900d764..e8bd368556aa9 100644 --- a/.github/workflows/sycl-linux-precommit-aws.yml +++ b/.github/workflows/sycl-linux-precommit-aws.yml @@ -45,7 +45,7 @@ jobs: }) aws-start: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest environment: aws steps: - uses: actions/checkout@v4 @@ -68,11 +68,10 @@ jobs: runner: '["aws_cuda-${{ github.event.workflow_run.id }}-${{ github.event.workflow_run.run_attempt }}"]' image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --gpus all --cap-add SYS_ADMIN --env NVIDIA_DISABLE_REQUIRE=1 - target_devices: ext_oneapi_cuda:gpu + target_devices: cuda:gpu # No idea why but that seems to work and be in sync with the main # pre-commit workflow. - ref: ${{ github.event.workflow_run.referenced_workflows[0].sha }} - merge_ref: '' + repo_ref: ${{ github.event.workflow_run.referenced_workflows[0].sha }} sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: llvm_sycl.tar.zst @@ -107,7 +106,7 @@ jobs: aws-stop: needs: [aws-start, e2e-cuda] if: always() - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest environment: aws steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/sycl-linux-precommit.yml b/.github/workflows/sycl-linux-precommit.yml index 231d49b0500c5..48e4befe65b0e 100644 --- a/.github/workflows/sycl-linux-precommit.yml +++ b/.github/workflows/sycl-linux-precommit.yml @@ -7,12 +7,12 @@ on: pull_request: branches: - sycl - - sycl-devops-pr/** - sycl-rel-** # Do not run builds if changes are only in the following locations paths-ignore: - '.github/ISSUE_TEMPLATE/**' - '.github/CODEOWNERS' + - 'sycl/cts_exclude_filter/**' - 'sycl/doc/**' - 'sycl/gdb/**' - 'clang/docs/**' @@ -21,6 +21,7 @@ on: - 
'.github/workflows/sycl-windows-*.yml' - '.github/workflows/sycl-macos-*.yml' - '.github/workflows/sycl-nightly.yml' + - '.github/workflows/sycl-rel-nightly.yml' - 'devops/containers/**' - 'devops/actions/build_container/**' @@ -41,108 +42,109 @@ jobs: uses: ./.github/workflows/sycl-linux-build.yml with: build_ref: ${{ github.sha }} - merge_ref: '' build_cache_root: "/__w/" build_artifact_suffix: "default" build_cache_suffix: "default" + # Docker image has last nightly pre-installed and added to the PATH + build_image: "ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest" + cc: clang + cxx: clang++ changes: ${{ needs.detect_changes.outputs.filters }} + e2e_binaries_artifact: sycl_e2e_bin_default - determine_arc_tests: - name: Decide which Arc tests to run + run_prebuilt_e2e_tests: needs: [build, detect_changes] if: ${{ always() && !cancelled() && needs.build.outputs.build_conclusion == 'success' }} - runs-on: [Linux, aux-tasks] - timeout-minutes: 3 - outputs: - arc_tests: ${{ steps.arc_tests.outputs.arc_tests }} - steps: - - name: Determine Arc tests - id: arc_tests - run: | - if [ "${{ contains(needs.detect_changes.outputs.filters, 'devigccfg') }}" == "true" ]; then - echo 'arc_tests="(ESIMD|InvokeSimd|Matrix)/"' >> "$GITHUB_OUTPUT" - elif [ "${{ contains(needs.detect_changes.outputs.filters, 'drivers') }}" == "true" ]; then - echo 'arc_tests=""' >> "$GITHUB_OUTPUT" - elif [ "${{ contains(needs.detect_changes.outputs.filters, 'esimd') }}" == "true" ]; then - echo 'arc_tests="(ESIMD|InvokeSimd|Matrix)/"' >> "$GITHUB_OUTPUT" - else - echo 'arc_tests="Matrix/"' >> "$GITHUB_OUTPUT" - fi - test: - needs: [build, detect_changes, determine_arc_tests] - if: ${{ always() && !cancelled() && needs.build.outputs.build_conclusion == 'success' }} strategy: fail-fast: false matrix: include: + - name: GEN 12 Integrated + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: 
level_zero:gpu;opencl:gpu;opencl:cpu + reset_intel_gpu: true + extra_lit_opts: --param gpu-intel-gen12=True - name: NVIDIA/CUDA runner: '["Linux", "cuda"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --gpus all --cap-add SYS_ADMIN - target_devices: ext_oneapi_cuda:gpu + target_devices: cuda:gpu - name: AMD/HIP runner: '["Linux", "amdgpu"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --device=/dev/dri --device=/dev/kfd - target_devices: ext_oneapi_hip:gpu + target_devices: hip:gpu reset_intel_gpu: false - - name: Intel - runner: '["Linux", "gen12"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest + extra_lit_opts: -j 1 + - name: Intel Arc A-Series Graphics + runner: '["Linux", "arc"]' image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN - target_devices: level_zero:gpu;opencl:gpu;opencl:cpu + target_devices: level_zero:gpu;opencl:gpu;level_zero_v2:gpu reset_intel_gpu: true - extra_lit_opts: --param gpu-intel-gen12=True - - name: E2E tests on Intel Arc A-Series Graphics + extra_lit_opts: --param matrix-xmx8=True + - name: E2E tests with dev igc on Intel Arc A-Series Graphics runner: '["Linux", "arc"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest + image: ghcr.io/intel/llvm/ubuntu2404_intel_drivers:devigc image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: level_zero:gpu;opencl:gpu reset_intel_gpu: true - extra_lit_opts: --param matrix-xmx8=True --param gpu-intel-dg2=True - env: '{"LIT_FILTER":${{ needs.determine_arc_tests.outputs.arc_tests }} }' - - name: E2E tests with dev igc on Intel Arc A-Series Graphics - runner: '["Linux", "arc"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:devigc + extra_lit_opts: --param matrix-xmx8=True + use_igc_dev: true + - name: E2E tests on Intel Ponte Vecchio GPU + runner: '["Linux", "pvc"]' + image_options: 
-u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu;opencl:gpu + reset_intel_gpu: true + extra_lit_opts: -j 50 + - name: Dev IGC on Intel Ponte Vecchio GPU + runner: '["Linux", "pvc"]' + image: ghcr.io/intel/llvm/ubuntu2404_intel_drivers:devigc image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: level_zero:gpu;opencl:gpu reset_intel_gpu: true - extra_lit_opts: --param matrix-xmx8=True --param gpu-intel-dg2=True - env: '{"LIT_FILTER":${{ needs.determine_arc_tests.outputs.arc_tests }} }' use_igc_dev: true - + extra_lit_opts: -j 50 + - name: Intel Battlemage Graphics + runner: '["Linux", "bmg"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + reset_intel_gpu: true + - name: SPIR-V Backend / Intel Battlemage Graphics + runner: '["Linux", "bmg"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu;opencl:gpu;opencl:cpu + reset_intel_gpu: true + extra_lit_opts: --param spirv-backend=True + e2e_binaries_artifact: sycl_e2e_bin_default_spirv_backend uses: ./.github/workflows/sycl-linux-run-tests.yml with: name: ${{ matrix.name }} - runner: ${{ matrix. 
runner }} + runner: ${{ matrix.runner }} image: ${{ matrix.image }} image_options: ${{ matrix.image_options }} target_devices: ${{ matrix.target_devices }} - reset_intel_gpu: ${{ matrix.reset_intel_gpu }} extra_lit_opts: ${{ matrix.extra_lit_opts }} - env: ${{ matrix.env || '{}' }} + reset_intel_gpu: ${{ matrix.reset_intel_gpu }} + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.build.outputs.artifact_decompress_command }} + e2e_binaries_artifact: ${{ matrix.e2e_binaries_artifact || 'sycl_e2e_bin_default' }} + e2e_testing_mode: 'run-only' # Do not install drivers on AMD and CUDA runners. install_igc_driver: >- - ${{ !contains(matrix.target_devices, 'ext_oneapi_cuda') && - !contains(matrix.target_devices, 'ext_oneapi_hip') && + ${{ !contains(matrix.target_devices, 'cuda') && + !contains(matrix.target_devices, 'hip') && contains(needs.detect_changes.outputs.filters, 'drivers') }} install_dev_igc_driver: >- - ${{ !contains(matrix.target_devices, 'ext_oneapi_cuda') && - !contains(matrix.target_devices, 'ext_oneapi_hip') && - matrix.use_igc_dev && contains(needs.detect_changes.outputs.filters, 'devigccfg') || + ${{ !contains(matrix.target_devices, 'cuda') && + !contains(matrix.target_devices, 'hip') && + matrix.use_igc_dev && + (contains(needs.detect_changes.outputs.filters, 'devigccfg') || contains(needs.detect_changes.outputs.filters, 'drivers')) || 'false' }} # Run only if the PR does not have the 'ci-no-devigc' label. 
skip_run: ${{matrix.use_igc_dev && contains(github.event.pull_request.labels.*.name, 'ci-no-devigc') || 'false'}} - ref: ${{ github.sha }} - merge_ref: '' - - sycl_toolchain_artifact: sycl_linux_default - sycl_toolchain_archive: ${{ needs.build.outputs.artifact_archive_name }} - sycl_toolchain_decompress_command: ${{ needs.build.outputs.artifact_decompress_command }} - - test-perf: needs: [build, detect_changes] if: | @@ -156,37 +158,30 @@ jobs: include: - name: Intel GEN12 Graphics system runner: '["Linux", "gen12"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest image_extra_opts: --device=/dev/dri reset_intel_gpu: true - name: Intel Arc A-Series Graphics system runner: '["Linux", "arc"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest image_extra_opts: --device=/dev/dri reset_intel_gpu: true - name: AMD system runner: '["Linux", "amdgpu"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_extra_opts: --device=/dev/dri --device=/dev/kfd - name: CUDA system runner: '["Linux", "cuda"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_extra_opts: --gpus all uses: ./.github/workflows/sycl-linux-run-tests.yml with: name: Perf tests on ${{ matrix.name }} runner: ${{ matrix. 
runner }} - image: ${{ matrix.image }} image_options: -u 1001 --privileged --cap-add SYS_ADMIN ${{ matrix.image_extra_opts }} target_devices: all reset_intel_gpu: ${{ matrix.reset_intel_gpu }} env: '{"LIT_FILTER":"PerformanceTests/"}' extra_lit_opts: -a -j 1 --param enable-perf-tests=True - extra_cmake_args: ${{ matrix.extra_cmake_args }} - ref: ${{ github.sha }} - merge_ref: '' + repo_ref: ${{ github.sha }} sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.build.outputs.artifact_archive_name }} diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml index 73b2a1f336db7..2f3c02bf334ed 100644 --- a/.github/workflows/sycl-linux-run-tests.yml +++ b/.github/workflows/sycl-linux-run-tests.yml @@ -12,20 +12,20 @@ on: required: True image: type: string - required: True + required: False image_options: type: string required: True target_devices: type: string - required: True + required: False extra_cmake_args: type: string required: False tests_selector: description: | - Two possible options: "e2e" and "cts". + Three possible options: "e2e", "cts", and "compute-benchmarks". type: string default: "e2e" @@ -35,16 +35,20 @@ on: type: string default: '' - ref: + repo_ref: type: string required: True - merge_ref: description: | - Commit-ish to merge post-checkout if non-empty. Must be reachable from - the default_branch input paramter. + Commit SHA or branch to checkout the intel/llvm repo. + devops_ref: type: string - default: 'FETCH_HEAD' required: False + description: | + Commit SHA or branch to checkout the devops directory. + tests_ref: + type: string + required: False + description: Commit SHA or branch to checkout e2e/cts tests. sycl_toolchain_artifact: type: string @@ -59,6 +63,25 @@ on: default: '' required: False + e2e_binaries_artifact: + description: | + Must be set if `e2e_testing_mode` is equal to `run-only` and the + artifact must exist. 
Can be set in other modes resulting in artifact + upload. + type: string + default: '' + required: False + e2e_testing_mode: + description: | + Testing mode to run E2E tests in, can be either `full`, `build-only` + or `run-only`. + type: string + default: 'full' + retention-days: + description: 'E2E/SYCL-CTS binaries artifact retention period.' + type: string + default: 1 + reset_intel_gpu: type: string required: False @@ -78,6 +101,19 @@ on: default: 'false' required: False + cts_testing_mode: + description: | + Testing mode to run SYCL-CTS in, can be either `full`, `build-only` + or `run-only`. In `build-only` mode an artifact of the CTS binaries + will be uploaded. + type: string + default: 'full' + + sycl_cts_artifact: + type: string + default: '' + required: False + workflow_dispatch: inputs: runner: @@ -86,34 +122,38 @@ on: - '["Linux", "gen12"]' - '["amdgpu"]' - '["Linux", "arc"]' + - '["Linux", "pvc"]' - '["cts-cpu"]' + - '["Linux", "build"]' + - '["cuda"]' + - '["PVC_PERF"]' image: - description: | - Use option ending with ":build" for AMDGPU, ":latest" for the rest. type: choice options: - - 'ghcr.io/intel/llvm/sycl_ubuntu2204_nightly:latest' - - 'ghcr.io/intel/llvm/sycl_ubuntu2204_nightly:build' + - 'ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest' + - 'ghcr.io/intel/llvm/ubuntu2404_intel_drivers:alldeps' image_options: description: | Use option with "--device=/dev/kfd" for AMDGPU, without it for the rest. 
type: choice options: - - '-u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN' - '-u 1001 --device=/dev/dri --device=/dev/kfd --privileged --cap-add SYS_ADMIN' + - '-u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN' + - '-u 1001 --gpus all --cap-add SYS_ADMIN' target_devices: type: choice options: + - 'level_zero:gpu' - 'opencl:cpu' - 'opencl:gpu' - - 'opencl:fpga' - - 'level_zero:gpu' - - 'ext_oneapi_hip:gpu' + - 'hip:gpu' + - 'cuda:gpu' tests_selector: type: choice options: - e2e - cts + - compute-benchmarks env: description: | @@ -121,7 +161,7 @@ on: LIT_OPTS won't work as we redefine it as part of this workflow. For SYCL CTS - CTS_TESTS_TO_BUILD to specify which categories to - build. + build, e.g. {"CTS_TESTS_TO_BUILD":"test_category1 test_category2..."}. Format: '{"VAR1":"VAL1","VAR2":"VAL2",...}' default: '{}' @@ -131,20 +171,23 @@ on: Extra options to be added to LIT_OPTS. default: '' - install_igc_driver: + reset_intel_gpu: + description: | + Reset Intel GPUs type: choice options: - false - true - install_dev_igc_driver: + e2e_testing_mode: type: choice options: - - false - - true + - "full" + - "build-only" permissions: contents: read + packages: read jobs: run: @@ -152,43 +195,35 @@ jobs: name: ${{ inputs.name }} runs-on: ${{ fromJSON(inputs.runner) }} container: - image: ${{ inputs.image }} + image: ${{ inputs.image || 'ghcr.io/intel/llvm/ubuntu2404_intel_drivers:alldeps'}} options: ${{ inputs.image_options }} env: ${{ fromJSON(inputs.env) }} steps: - name: Reset Intel GPU if: inputs.reset_intel_gpu == 'true' + shell: bash run: | - sudo mount -t debugfs none /sys/kernel/debug - sudo bash -c 'echo 1 > /sys/kernel/debug/dri/0/i915_wedged' + if [[ '${{ inputs.runner }}' == '["Linux", "bmg"]' ]]; then + sudo bash -c 'echo 0000:05:00.0 > /sys/bus/pci/drivers/xe/unbind' + sudo bash -c 'echo 1 > /sys/bus/pci/devices/0000:05:00.0/reset' + sudo bash -c 'echo 0000:05:00.0 > /sys/bus/pci/drivers/xe/bind' + else + sudo mount -t debugfs none 
/sys/kernel/debug + base_dir="/sys/kernel/debug/dri" + + for dir in "$base_dir"/*; do + if [ -f "$dir/i915_wedged" ]; then + sudo bash -c 'echo 1 > $0/i915_wedged' $dir + fi + done + fi - uses: actions/checkout@v4 with: - ref: ${{ inputs.ref }} + ref: ${{ inputs.devops_ref || inputs.repo_ref }} sparse-checkout: | devops - name: Register cleanup after job is finished uses: ./devops/actions/cleanup - - name: Checkout E2E tests - if: inputs.tests_selector == 'e2e' - uses: ./devops/actions/cached_checkout - with: - path: llvm - ref: ${{ inputs.ref || github.sha }} - merge_ref: ${{ inputs.merge_ref }} - cache_path: "/__w/repo_cache/" - - name: Checkout SYCL CTS tests - if: inputs.tests_selector == 'cts' - uses: ./devops/actions/cached_checkout - with: - path: khronos_sycl_cts - repository: 'KhronosGroup/SYCL-CTS' - ref: 'main' - default_branch: 'main' - cache_path: "/__w/repo_cache/" - - name: SYCL CTS GIT submodules init - if: inputs.tests_selector == 'cts' - run: | - git -C khronos_sycl_cts submodule update --init - name: Install drivers if: inputs.install_igc_driver == 'true' || inputs.install_dev_igc_driver == 'true' env: @@ -198,7 +233,7 @@ jobs: # If libllvm14 is already installed (dev igc docker), still return true. 
sudo apt-get install -yqq libllvm14 || true; fi - sudo -E bash devops/scripts/install_drivers.sh llvm/devops/dependencies.json ${{ inputs.install_dev_igc_driver == 'true' && 'llvm/devops/dependencies-igc-dev.json --use-dev-igc' || '' }} --all + sudo -E bash devops/scripts/install_drivers.sh devops/dependencies.json ${{ inputs.install_dev_igc_driver == 'true' && 'devops/dependencies-igc-dev.json --use-dev-igc' || '' }} --all - name: Source OneAPI TBB vars.sh shell: bash run: | @@ -270,108 +305,35 @@ jobs: cat /usr/local/lib/igc/IGCTAG.txt fi - - name: Deduce E2E CMake options - if: inputs.tests_selector == 'e2e' - id: cmake_opts - shell: bash - env: - CMAKE_EXTRA_ARGS: ${{ inputs.extra_cmake_args }} - run: | - if [ -n "$CMAKE_EXTRA_ARGS" ]; then - echo "opts=$CMAKE_EXTRA_ARGS" >> $GITHUB_OUTPUT - fi - - name: Configure E2E tests - if: inputs.tests_selector == 'e2e' - run: | - cmake -GNinja -B./build-e2e -S./llvm/sycl/test-e2e -DSYCL_TEST_E2E_TARGETS="${{ inputs.target_devices }}" -DCMAKE_CXX_COMPILER="$(which clang++)" -DLLVM_LIT="$PWD/llvm/llvm/utils/lit/lit.py" ${{ steps.cmake_opts.outputs.opts }} - - name: SYCL End-to-end tests - shell: bash {0} + - name: Run E2E Tests if: inputs.tests_selector == 'e2e' - env: - LIT_OPTS: -v --no-progress-bar --show-unsupported --show-pass --show-xfail --max-time 3600 --time-tests ${{ inputs.extra_lit_opts }} - run: | - ninja -C build-e2e check-sycl-e2e > e2e.log 2>&1 - exit_code=$? 
- cat e2e.log - if [ $exit_code -ne 0 ]; then - awk '/^Failed Tests|Unexpectedly Passed Tests|Unresolved tests|Testing Time/{flag=1}/FAILED: CMakeFiles/{flag=0}flag' e2e.log >> $GITHUB_STEP_SUMMARY - fi - exit $exit_code - - name: Build SYCL CTS tests - if: inputs.tests_selector == 'cts' - env: - CMAKE_EXTRA_ARGS: ${{ inputs.extra_cmake_args }} - run: | - cts_exclude_filter="" - # If CTS_TESTS_TO_BUILD is null - use filter - if [ -z "$CTS_TESTS_TO_BUILD" ]; then - if [ "${{ contains(inputs.target_devices, 'opencl:cpu') }}" = "true" ]; then - cts_exclude_filter=$PWD/devops/cts_exclude_filter_OCL_CPU - elif [ "${{ contains(inputs.target_devices, 'level_zero:gpu') }}" = "true" ]; then - cts_exclude_filter=$PWD/devops/cts_exclude_filter_L0_GPU - fi - - # List excluded SYCL CTS categories: - # SYCL_CTS_EXCLUDE_TEST_CATEGORIES - Optional file specifying a list - # of test categories to be excluded from the build. - echo "::group::Excluded test categories" - cat $cts_exclude_filter - echo "::endgroup::" - fi - - cmake -GNinja -B./build-cts -S./khronos_sycl_cts -DCMAKE_CXX_COMPILER=$(which clang++) \ - -DSYCL_IMPLEMENTATION=DPCPP \ - -DSYCL_CTS_EXCLUDE_TEST_CATEGORIES="$cts_exclude_filter" \ - -DSYCL_CTS_ENABLE_OPENCL_INTEROP_TESTS=OFF \ - -DDPCPP_INSTALL_DIR="$(dirname $(which clang++))/.." \ - $CMAKE_EXTRA_ARGS - # Ignore errors so that if one category build fails others still have a - # chance to finish and be executed at the run stage. Note that - # "test_conformance" target skips building "test_all" executable. 
- ninja -C build-cts -k0 $( [ -n "$CTS_TESTS_TO_BUILD" ] && echo "$CTS_TESTS_TO_BUILD" || echo "test_conformance") + uses: ./devops/actions/run-tests/e2e + with: + ref: ${{ inputs.tests_ref || inputs.repo_ref || github.sha }} + binaries_artifact: ${{ inputs.e2e_binaries_artifact }} + testing_mode: ${{ inputs.e2e_testing_mode }} + extra_cmake_args: ${{ inputs.extra_cmake_args }} + target_devices: ${{ inputs.target_devices }} + extra_lit_opts: ${{ inputs.extra_lit_opts }} + retention-days: ${{ inputs.retention-days }} - - name: SYCL CTS List devices - # Proceed with execution even if the 'build' step did not succeed. - if: inputs.tests_selector == 'cts' && (success() || failure()) - env: - ONEAPI_DEVICE_SELECTOR: ${{ inputs.target_devices }} - run: | - ./build-cts/bin/* --list-devices + - name: Run SYCL CTS Tests + if: inputs.tests_selector == 'cts' + uses: ./devops/actions/run-tests/cts + with: + ref: ${{ inputs.tests_ref || 'main' }} + cts_exclude_ref: ${{ inputs.repo_ref }} + extra_cmake_args: ${{ inputs.extra_cmake_args }} + cts_testing_mode: ${{ inputs.cts_testing_mode }} + sycl_cts_artifact: ${{ inputs.sycl_cts_artifact }} + target_devices: ${{ inputs.target_devices }} + retention-days: ${{ inputs.retention-days }} - - name: Run SYCL CTS tests - # Proceed with execution even if the previous two steps did not succeed. - if: inputs.tests_selector == 'cts' && (success() || failure()) + - name: Run compute-benchmarks on SYCL + if: inputs.tests_selector == 'compute-benchmarks' + uses: ./devops/actions/run-tests/benchmark + with: + target_devices: ${{ inputs.target_devices }} env: - ONEAPI_DEVICE_SELECTOR: ${{ inputs.target_devices }} - # This job takes ~100min usually. But sometimes some test isn't - # responding, so the job reaches the 360min limit. Setting a lower one. - timeout-minutes: 150 - # By-default GitHub actions execute the "run" shell script with -e option, - # so the execution terminates if any command returns a non-zero status. 
- # Since we're using a loop to run all test-binaries separately, some test - # may fail and terminate the execution. Setting "shell" value to override - # the default behavior. - # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#custom-shell - shell: bash {0} - run: | - # Run each test category separately so that - # - crash on one would not affect others - # - multiple tests could be run in parallel - mkdir logs - find build-cts/bin/ -type f -print | \ - xargs -t -I % -P 8 sh -c 'log=logs/$(basename %).log ; echo % >$log ; date >>$log ; timeout 60m % >>$log 2>&1 ; ret=$? ; echo "exit code: $ret" >>$log ; date >>$log ; exit $ret' - ret=$? - - for f in logs/* ; do - echo "::group::$f" - cat $f - echo "::endgroup::" - done - - echo "::group::Fails:" - grep 'exit code: [^0]' -r logs - echo "::endgroup::" - - grep 'exit code: [^0]' -r logs >> $GITHUB_STEP_SUMMARY - - exit $ret + RUNNER_TAG: ${{ inputs.runner }} + GITHUB_TOKEN: ${{ secrets.LLVM_SYCL_BENCHMARK_TOKEN }} diff --git a/.github/workflows/sycl-macos-build-and-test.yml b/.github/workflows/sycl-macos-build-and-test.yml index f25e847d8a341..9831368b413b5 100644 --- a/.github/workflows/sycl-macos-build-and-test.yml +++ b/.github/workflows/sycl-macos-build-and-test.yml @@ -50,8 +50,8 @@ jobs: python3 $GITHUB_WORKSPACE/src/buildbot/configure.py -w $GITHUB_WORKSPACE \ -s $GITHUB_WORKSPACE/src -o $GITHUB_WORKSPACE/build -t Release \ --ci-defaults $ARGS \ - --cmake-opt=-DCMAKE_C_COMPILER_LAUNCHER=ccache \ - --cmake-opt=-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ - --cmake-opt="-DLLVM_INSTALL_UTILS=ON" + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \ + -DLLVM_INSTALL_UTILS=ON - name: Compile run: cmake --build $GITHUB_WORKSPACE/build --target deploy-sycl-toolchain diff --git a/.github/workflows/sycl-nightly.yml b/.github/workflows/sycl-nightly.yml index 4becda0c89f82..331911824ef3c 100644 --- a/.github/workflows/sycl-nightly.yml +++ 
b/.github/workflows/sycl-nightly.yml @@ -16,7 +16,7 @@ jobs: build_cache_root: "/__w/" build_artifact_suffix: default build_configure_extra_args: '--hip --cuda' - merge_ref: '' + build_image: ghcr.io/intel/llvm/ubuntu2204_build:latest retention-days: 90 # We upload the build for people to download/use, override its name and @@ -32,10 +32,23 @@ jobs: build_cache_suffix: sprod_shared build_artifact_suffix: sprod_shared build_configure_extra_args: '--shared-libs --hip --cuda --native_cpu' - merge_ref: '' artifact_archive_name: sycl_linux_shared.tar.zst + ubuntu2404_oneapi_build: + if: github.repository == 'intel/llvm' + uses: ./.github/workflows/sycl-linux-build.yml + secrets: inherit + with: + build_cache_root: "/__w/" + build_cache_suffix: oneapi + build_artifact_suffix: oneapi + build_configure_extra_args: -DCMAKE_C_FLAGS="-no-intel-lib -ffp-model=precise" -DCMAKE_CXX_FLAGS="-no-intel-lib -ffp-model=precise" + cc: icx + cxx: icpx + + artifact_archive_name: sycl_linux_oneapi.tar.zst + ubuntu2204_test: needs: [ubuntu2204_build] if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} @@ -45,79 +58,96 @@ jobs: include: - name: AMD/HIP runner: '["Linux", "amdgpu"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --device=/dev/dri --device=/dev/kfd - target_devices: ext_oneapi_hip:gpu - tests_selector: e2e + target_devices: hip:gpu + + - name: NVIDIA/CUDA + runner: '["Linux", "cuda"]' + image_options: -u 1001 --gpus all --cap-add SYS_ADMIN + target_devices: cuda:gpu - - name: Intel L0 GPU + - name: Intel L0 Gen12 GPU runner: '["Linux", "gen12"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: level_zero:gpu reset_intel_gpu: true - tests_selector: e2e - extra_lit_opts: --param gpu-intel-gen12=True - - name: Intel OCL GPU + - name: Intel L0 Ponte Vecchio GPU + 
runner: '["Linux", "pvc"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + reset_intel_gpu: true + + - name: Intel L0 Battlemage GPU + runner: '["Linux", "bmg"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + reset_intel_gpu: true + + - name: Intel L0 Arc A-Series GPU + runner: '["Linux", "arc"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + reset_intel_gpu: true + + - name: Intel OCL Gen12 GPU runner: '["Linux", "gen12"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: opencl:gpu reset_intel_gpu: true - tests_selector: e2e - extra_lit_opts: --param gpu-intel-gen12=True - name: OCL CPU (AMD) - runner: '["Linux", "amdgpu"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest + runner: '["Linux", "amdcpu"]' image_options: -u 1001 target_devices: opencl:cpu - tests_selector: e2e - name: OCL CPU (Intel/GEN12) runner: '["Linux", "gen12"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest image_options: -u 1001 --privileged --cap-add SYS_ADMIN target_devices: opencl:cpu - tests_selector: e2e - name: OCL CPU (Intel/Arc) runner: '["Linux", "arc"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest image_options: -u 1001 target_devices: opencl:cpu - tests_selector: e2e - - name: SYCL-CTS on OCL CPU - runner: '["Linux", "gen12"]' - image: ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest - image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN - target_devices: opencl:cpu - tests_selector: cts - - - name: SYCL-CTS on L0 gen12 - runner: '["Linux", "gen12"]' - image: 
ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest - image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + - name: Preview mode on SPR/PVC + runner: '["Linux", "pvc"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: level_zero:gpu - tests_selector: cts + reset_intel_gpu: true + extra_lit_opts: --param test-preview-mode=True + uses: ./.github/workflows/sycl-linux-run-tests.yml with: name: ${{ matrix.name }} runner: ${{ matrix.runner }} - image: ${{ matrix.image }} image_options: ${{ matrix.image_options }} target_devices: ${{ matrix.target_devices }} - tests_selector: ${{ matrix.tests_selector }} - extra_lit_opts: ${{ matrix.extra_lit_opts }} + tests_selector: e2e + extra_lit_opts: "--param 'cxx_flags=-D_GLIBCXX_USE_CXX11_ABI=0' ${{ matrix.extra_lit_opts }}" reset_intel_gpu: ${{ matrix.reset_intel_gpu }} - ref: ${{ github.sha }} - merge_ref: '' + repo_ref: ${{ github.sha }} sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + ubuntu2404_oneapi_test: + needs: [ubuntu2404_oneapi_build] + if: ${{ always() && !cancelled() && needs.ubuntu2404_oneapi_build.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: Intel PVC L0 oneAPI + runner: '["Linux", "pvc"]' + target_devices: level_zero:gpu + reset_intel_gpu: true + extra_lit_opts: -j 50 + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_oneapi + sycl_toolchain_archive: ${{ needs.ubuntu2404_oneapi_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2404_oneapi_build.outputs.artifact_decompress_command }} + build-win: uses: 
./.github/workflows/sycl-windows-build.yml if: github.repository == 'intel/llvm' @@ -139,7 +169,6 @@ jobs: name: Intel GEN12 Graphics with Level Zero runner: '["Windows","gen12"]' sycl_toolchain_archive: ${{ needs.build-win.outputs.artifact_archive_name }} - extra_lit_opts: --param gpu-intel-gen12=True cuda-aws-start: needs: [ubuntu2204_build] @@ -156,11 +185,10 @@ jobs: with: name: CUDA E2E runner: '["aws_cuda-${{ github.run_id }}-${{ github.run_attempt }}"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest-0300ac924620a51f76c4929794637b82790f12ab + image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --gpus all --cap-add SYS_ADMIN --env NVIDIA_DISABLE_REQUIRE=1 - target_devices: ext_oneapi_cuda:gpu - ref: ${{ github.sha }} - merge_ref: '' + target_devices: cuda:gpu + repo_ref: ${{ github.sha }} sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} @@ -168,12 +196,127 @@ jobs: cuda-aws-stop: needs: [cuda-aws-start, cuda-run-tests] - if: always() + if: always() && ${{ needs.cuda-aws-start.result != 'skipped' }} uses: ./.github/workflows/sycl-aws.yml secrets: inherit with: mode: stop + build-sycl-cts-linux: + needs: ubuntu2204_build + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: Build SYCL-CTS for Linux + runner: '["Linux", "build"]' + cts_testing_mode: 'build-only' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + tests_selector: cts + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + sycl_cts_artifact: sycl_cts_bin_linux + + run-sycl-cts-linux: + needs: [ubuntu2204_build, build-sycl-cts-linux] + if: ${{ 
always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + include: + - name: SYCL-CTS on OCL CPU + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + target_devices: opencl:cpu + + - name: SYCL-CTS on L0 gen12 + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + cts_testing_mode: 'run-only' + image_options: ${{ matrix.image_options }} + target_devices: ${{ matrix.target_devices }} + tests_selector: cts + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + sycl_cts_artifact: sycl_cts_bin_linux + + build-sycl-cts-win: + needs: build-win + if: ${{ always() && !cancelled() && needs.build-win.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-windows-run-tests.yml + with: + name: Build SYCL-CTS for Windows + runner: '["Windows", "build-e2e"]' + cts_testing_mode: 'build-only' + tests_selector: cts + repo_ref: ${{ github.sha }} + sycl_toolchain_archive: ${{ needs.build-win.outputs.artifact_archive_name }} + sycl_cts_artifact: sycl_cts_bin_win + + run-sycl-cts-win: + needs: [build-win, build-sycl-cts-win] + if: ${{ always() && !cancelled() && needs.build-win.outputs.build_conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + include: + - name: SYCL-CTS on L0 gen12 + runner: '["Windows", "gen12"]' + target_devices: level_zero:gpu + uses: ./.github/workflows/sycl-windows-run-tests.yml + with: + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + cts_testing_mode: 'run-only' + 
target_devices: ${{ matrix.target_devices }} + tests_selector: cts + repo_ref: ${{ github.sha }} + sycl_toolchain_archive: ${{ needs.build-win.outputs.artifact_archive_name }} + sycl_cts_artifact: sycl_cts_bin_win + + aggregate_benchmark_results: + if: github.repository == 'intel/llvm' && !cancelled() + name: Aggregate benchmark results and produce historical averages + uses: ./.github/workflows/sycl-benchmark-aggregate.yml + secrets: + LLVM_SYCL_BENCHMARK_TOKEN: ${{ secrets.LLVM_SYCL_BENCHMARK_TOKEN }} + with: + lookback_days: 100 + + run-sycl-benchmarks: + needs: [ubuntu2204_build, aggregate_benchmark_results] + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + include: + - name: Run compute-benchmarks on L0 PVC + runner: '["PVC_PERF"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + reset_intel_gpu: true + uses: ./.github/workflows/sycl-linux-run-tests.yml + secrets: inherit + with: + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + image_options: ${{ matrix.image_options }} + target_devices: ${{ matrix.target_devices }} + tests_selector: compute-benchmarks + reset_intel_gpu: ${{ matrix.reset_intel_gpu }} + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + nightly_build_upload: name: Nightly Build Upload if: ${{ github.ref_name == 'sycl' }} @@ -198,7 +341,7 @@ jobs: echo "TAG=$(date +'%Y-%m-%d')-${GITHUB_SHA::7}" >> "$GITHUB_OUTPUT" fi - name: Upload binaries - uses: softprops/action-gh-release@e7a8f85e1c67a31e6ed99a94b41bd0b71bbee6b8 + uses: softprops/action-gh-release@v2.2.1 with: files: | sycl_linux.tar.gz @@ -209,7 +352,7 @@ jobs: body: "Daily build 
${{ steps.tag.outputs.TAG }}" target_commitish: ${{ github.sha }} - ubuntu2204_docker_build_push: + docker_build_push: if: github.repository == 'intel/llvm' runs-on: [Linux, build] permissions: @@ -221,42 +364,16 @@ jobs: with: name: sycl_linux_default path: devops/ - - name: Build and Push Container (with drivers) - uses: ./devops/actions/build_container - with: - push: ${{ github.ref_name == 'sycl' }} - file: ubuntu2204_preinstalled - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - build-args: | - base_image=ghcr.io/intel/llvm/ubuntu2204_intel_drivers - base_tag=latest - tags: | - ghcr.io/${{ github.repository }}/sycl_ubuntu2204_nightly:${{ github.sha }} - ghcr.io/${{ github.repository }}/sycl_ubuntu2204_nightly:latest - - name: Build and Push Container (no drivers) - uses: ./devops/actions/build_container - with: - push: ${{ github.ref_name == 'sycl' }} - file: ubuntu2204_preinstalled - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - build-args: | - base_image=ghcr.io/intel/llvm/ubuntu2204_base - base_tag=latest - tags: | - ghcr.io/${{ github.repository }}/sycl_ubuntu2204_nightly:no-drivers-${{ github.sha }} - ghcr.io/${{ github.repository }}/sycl_ubuntu2204_nightly:no-drivers - - name: Build and Push Container (Build image) + - name: Build and Push Container uses: ./devops/actions/build_container with: push: ${{ github.ref_name == 'sycl' }} - file: ubuntu2204_preinstalled + file: nightly username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} build-args: | - base_image=ghcr.io/intel/llvm/ubuntu2204_build - base_tag=latest + base_image=ghcr.io/intel/llvm/ubuntu2404_intel_drivers + base_tag=alldeps tags: | - ghcr.io/${{ github.repository }}/sycl_ubuntu2204_nightly:build-${{ github.sha }} - ghcr.io/${{ github.repository }}/sycl_ubuntu2204_nightly:build + ghcr.io/${{ github.repository }}/sycl_ubuntu2404_nightly:${{ github.sha }} + ghcr.io/${{ github.repository 
}}/sycl_ubuntu2404_nightly:latest diff --git a/.github/workflows/sycl-post-commit.yml b/.github/workflows/sycl-post-commit.yml index 1abd497a9c97c..6c97dedf21fc2 100644 --- a/.github/workflows/sycl-post-commit.yml +++ b/.github/workflows/sycl-post-commit.yml @@ -1,16 +1,16 @@ name: SYCL Post Commit on: + workflow_dispatch: + push: branches: - sycl - - sycl-devops-pr/** - sycl-rel-** pull_request: branches: - sycl - - sycl-devops-pr/** paths: - .github/workflows/sycl-post-commit.yml - .github/workflows/sycl-linux-build.yml @@ -18,6 +18,8 @@ on: - .github/workflows/sycl-macos-build-and-test.yml - ./devops/actions/cleanup - ./devops/actions/cached_checkout + - ./devops/dependencies.json + - ./devops/dependencies-igc-dev.json concurrency: # Cancel a currently running workflow from the same PR or commit hash. @@ -27,6 +29,10 @@ concurrency: permissions: read-all jobs: + detect_changes: + if: ${{ github.event_name == 'pull_request' }} + uses: ./.github/workflows/sycl-detect-changes.yml + build-lin: name: Linux (Self build + no-assertions) if: github.repository == 'intel/llvm' @@ -35,15 +41,10 @@ jobs: build_cache_root: "/__w/llvm" build_cache_suffix: default build_artifact_suffix: default - build_configure_extra_args: --no-assertions --hip --cuda --native_cpu --cmake-opt="-DSYCL_ENABLE_STACK_PRINTING=ON" --cmake-opt="-DSYCL_LIB_WITH_DEBUG_SYMBOL=ON" - # Docker image has last nightly pre-installed and added to the PATH - build_image: "ghcr.io/intel/llvm/sycl_ubuntu2204_nightly:build" - cc: clang - cxx: clang++ - merge_ref: '' + build_configure_extra_args: --no-assertions --hip --cuda --native_cpu -DSYCL_ENABLE_STACK_PRINTING=ON -DSYCL_LIB_WITH_DEBUG_SYMBOL=ON e2e-lin: - needs: [build-lin] + needs: [detect_changes, build-lin] if: ${{ always() && !cancelled() && needs.build-lin.outputs.build_conclusion == 'success' }} strategy: fail-fast: false @@ -51,18 +52,16 @@ jobs: include: - name: Intel GEN12 Graphics with Level Zero runner: '["Linux", "gen12"]' - extra_lit_opts: 
--param gpu-intel-gen12=True - target_devices: level_zero:gpu;opencl:fpga + target_devices: level_zero:gpu reset_intel_gpu: true - name: Intel Arc A-Series Graphics with Level Zero runner: '["Linux", "arc"]' - extra_lit_opts: --param matrix-xmx8=True --param gpu-intel-dg2=True + extra_lit_opts: --param matrix-xmx8=True reset_intel_gpu: true - name: AMD/HIP runner: '["Linux", "amdgpu"]' - image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --device=/dev/dri --device=/dev/kfd - target_devices: ext_oneapi_hip:gpu + target_devices: hip:gpu reset_intel_gpu: false # Performance tests below. Specifics: # - only run performance tests (use LIT_FILTER env) @@ -73,7 +72,7 @@ jobs: - name: Perf tests on Intel GEN12 Graphics system runner: '["Linux", "gen12"]' env: '{"LIT_FILTER":"PerformanceTests/"}' - extra_lit_opts: -a -j 1 --param enable-perf-tests=True --param gpu-intel-gen12=True + extra_lit_opts: -a -j 1 --param enable-perf-tests=True target_devices: all reset_intel_gpu: true - name: Perf tests on Intel Arc A-Series Graphics system @@ -86,7 +85,6 @@ jobs: with: name: ${{ matrix.name }} runner: ${{ matrix. runner }} - image: ${{ matrix.image || 'ghcr.io/intel/llvm/ubuntu2204_intel_drivers:latest' }} image_options: ${{ matrix.image_options || '-u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN' }} target_devices: ${{ matrix.target_devices || 'level_zero:gpu' }} reset_intel_gpu: ${{ matrix.reset_intel_gpu }} @@ -94,19 +92,29 @@ jobs: extra_lit_opts: ${{ matrix.extra_lit_opts }} env: ${{ matrix.env || '{}' }} - ref: ${{ github.sha }} - merge_ref: '' + repo_ref: ${{ github.sha }} sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.build-lin.outputs.artifact_archive_name }} sycl_toolchain_decompress_command: ${{ needs.build-lin.outputs.artifact_decompress_command }} + # Do not install drivers on AMD and CUDA runners. 
+ install_igc_driver: >- + ${{ github.event_name == 'pull_request' }} && + ${{ !contains(matrix.target_devices, 'cuda') && + !contains(matrix.target_devices, 'hip') && + contains(needs.detect_changes.outputs.filters, 'drivers') }} + build-win: if: | always() && success() && github.repository == 'intel/llvm' uses: ./.github/workflows/sycl-windows-build.yml + with: + compiler: icx + build_configure_extra_args: -DCMAKE_C_FLAGS="/fp:precise /clang:-Wno-nonportable-include-path /clang:-Wno-cast-function-type-mismatch" -DCMAKE_CXX_FLAGS="/fp:precise /clang:-Wno-nonportable-include-path /clang:-Wno-cast-function-type-mismatch" -DCMAKE_EXE_LINKER_FLAGS=/manifest:no -DCMAKE_MODULE_LINKER_FLAGS=/manifest:no -DCMAKE_SHARED_LINKER_FLAGS=/manifest:no + build_cache_suffix: icx e2e-win: needs: build-win @@ -120,7 +128,7 @@ jobs: name: Intel GEN12 Graphics with Level Zero runner: '["Windows","gen12"]' sycl_toolchain_archive: ${{ needs.build-win.outputs.artifact_archive_name }} - extra_lit_opts: --param gpu-intel-gen12=True + compiler: icx macos_default: name: macOS diff --git a/.github/workflows/sycl-rel-nightly.yml b/.github/workflows/sycl-rel-nightly.yml new file mode 100644 index 0000000000000..b0487f6d435bc --- /dev/null +++ b/.github/workflows/sycl-rel-nightly.yml @@ -0,0 +1,207 @@ +name: SYCL Release Branch Nightly + +on: + workflow_dispatch: + schedule: + - cron: '0 3 * * *' + +permissions: read-all + +jobs: + # To avoid unnecessary scheduled runs this job checks if there are new commits + # since the last run. More precisely, it checks if the last commit is older + # than 24h. That means the previous Nightly already tested this commit. 
+ check_for_new_commits: + if: github.repository == 'intel/llvm' + runs-on: ubuntu-latest + name: Check for new commits + outputs: + is_new_commit: ${{ steps.is_new_commit.outputs.is_new_commit }} + steps: + - uses: actions/checkout@v4 + with: + ref: sycl-rel-6_1_0 + - run: git show --quiet | tee -a $GITHUB_STEP_SUMMARY + + - id: is_new_commit + if: ${{ github.event_name == 'schedule' }} + run: | + if [ -z "$(git rev-list --after="24 hours" HEAD)" ]; then + echo "is_new_commit=false" >> $GITHUB_OUTPUT + fi + + ubuntu2204_build: + needs: [check_for_new_commits] + if: ${{ github.repository == 'intel/llvm' && needs.check_for_new_commits.outputs.is_new_commit != 'false' }} + uses: ./.github/workflows/sycl-linux-build.yml + secrets: inherit + with: + build_cache_root: "/__w/" + build_artifact_suffix: default + build_configure_extra_args: '--hip --cuda' + build_image: ghcr.io/intel/llvm/ubuntu2204_build:latest + build_ref: sycl-rel-6_1_0 + + # We upload the build for people to download/use, override its name and + # prefer widespread gzip compression. 
+ artifact_archive_name: sycl_linux.tar.gz + + ubuntu2204_test: + needs: [ubuntu2204_build] + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + include: + - name: AMD/HIP + runner: '["Linux", "amdgpu"]' + image_options: -u 1001 --device=/dev/dri --device=/dev/kfd + target_devices: hip:gpu + tests_selector: e2e + + - name: Intel L0 GPU + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + reset_intel_gpu: true + tests_selector: e2e + extra_lit_opts: --param gpu-intel-gen12=True + + - name: Intel OCL GPU + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: opencl:gpu + reset_intel_gpu: true + tests_selector: e2e + extra_lit_opts: --param gpu-intel-gen12=True + + - name: Intel OCL CPU + runner: '["Linux", "gen12"]' + image_options: -u 1001 --privileged --cap-add SYS_ADMIN + target_devices: opencl:cpu + tests_selector: e2e + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + image_options: ${{ matrix.image_options }} + target_devices: ${{ matrix.target_devices }} + tests_selector: ${{ matrix.tests_selector }} + extra_lit_opts: ${{ matrix.extra_lit_opts }} + reset_intel_gpu: ${{ matrix.reset_intel_gpu }} + repo_ref: sycl-rel-6_1_0 + devops_ref: sycl + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + + build-win: + needs: [check_for_new_commits] + if: ${{ github.repository == 'intel/llvm' && needs.check_for_new_commits.outputs.is_new_commit != 'false' }} + uses: 
./.github/workflows/sycl-windows-build.yml + with: + ref: sycl-rel-6_1_0 + + # We upload both Linux/Windows build via Github's "Releases" + # functionality, make sure Linux/Windows names follow the same pattern. + artifact_archive_name: sycl_windows.tar.gz + + e2e-win: + needs: build-win + # Continue if build was successful. + if: | + always() + && !cancelled() + && needs.build-win.outputs.build_conclusion == 'success' + uses: ./.github/workflows/sycl-windows-run-tests.yml + with: + name: Intel GEN12 Graphics with Level Zero + runner: '["Windows","gen12"]' + sycl_toolchain_archive: ${{ needs.build-win.outputs.artifact_archive_name }} + extra_lit_opts: --param gpu-intel-gen12=True + repo_ref: sycl-rel-6_1_0 + devops_ref: sycl + + cuda-aws-start: + needs: [ubuntu2204_build] + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-aws.yml + secrets: inherit + with: + mode: start + ref: sycl-rel-6_1_0 + + cuda-run-tests: + needs: [ubuntu2204_build, cuda-aws-start] + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: CUDA E2E + runner: '["aws_cuda-${{ github.run_id }}-${{ github.run_attempt }}"]' + image: ghcr.io/intel/llvm/ubuntu2204_build:latest + image_options: -u 1001 --gpus all --cap-add SYS_ADMIN --env NVIDIA_DISABLE_REQUIRE=1 + target_devices: cuda:gpu + repo_ref: sycl-rel-6_1_0 + devops_ref: sycl + + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + + cuda-aws-stop: + needs: [cuda-aws-start, cuda-run-tests] + if: always() + uses: ./.github/workflows/sycl-aws.yml + secrets: inherit + with: + mode: stop + ref: sycl-rel-6_1_0 + + build-sycl-cts: + needs: ubuntu2204_build + if: ${{ always() 
&& !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: Build SYCL-CTS + runner: '["Linux", "build"]' + cts_testing_mode: 'build-only' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + tests_selector: cts + repo_ref: sycl-rel-6_1_0 + devops_ref: sycl + tests_ref: ead7474b9cb2189ce48025550912ccad5a72bd30 + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + sycl_cts_artifact: sycl_cts_bin_linux + + run-sycl-cts: + needs: [ubuntu2204_build, build-sycl-cts] + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + include: + - name: SYCL-CTS on OCL CPU + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + target_devices: opencl:cpu + + - name: SYCL-CTS on L0 gen12 + runner: '["Linux", "gen12"]' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + cts_testing_mode: 'run-only' + image_options: ${{ matrix.image_options }} + target_devices: ${{ matrix.target_devices }} + tests_selector: cts + repo_ref: ${{ github.sha }} + devops_ref: sycl + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + sycl_cts_artifact: sycl_cts_bin_linux diff --git a/.github/workflows/sycl-stale-issues.yml b/.github/workflows/sycl-stale-issues.yml index 29afd18d22471..420dcd27a1d1c 100644 --- 
a/.github/workflows/sycl-stale-issues.yml +++ b/.github/workflows/sycl-stale-issues.yml @@ -11,7 +11,7 @@ jobs: permissions: issues: write # for actions/stale to close stale issues pull-requests: write # for actions/stale to close stale PRs - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - uses: actions/stale@v9 with: diff --git a/.github/workflows/sycl-sync-main.yml b/.github/workflows/sycl-sync-main.yml index 68f8578a15c3a..805ee74b1d3fd 100644 --- a/.github/workflows/sycl-sync-main.yml +++ b/.github/workflows/sycl-sync-main.yml @@ -9,7 +9,7 @@ jobs: sync: permissions: contents: write # for Git to git push - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest if: github.repository == 'intel/llvm' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/sycl-update-gpu-driver.yml b/.github/workflows/sycl-update-gpu-driver.yml index 6315f27641dce..cd998ded1b10d 100644 --- a/.github/workflows/sycl-update-gpu-driver.yml +++ b/.github/workflows/sycl-update-gpu-driver.yml @@ -11,7 +11,7 @@ jobs: update_driver_linux: permissions: contents: write # for Git to git push - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest if: github.repository == 'intel/llvm' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/sycl-update-igc-dev-driver.yml b/.github/workflows/sycl-update-igc-dev-driver.yml index 8a323a1e05221..3bc2407b1f7df 100644 --- a/.github/workflows/sycl-update-igc-dev-driver.yml +++ b/.github/workflows/sycl-update-igc-dev-driver.yml @@ -11,7 +11,7 @@ jobs: update_driver_linux: permissions: contents: write # for Git to git push - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest if: github.repository == 'intel/llvm' steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/sycl-weekly.yml b/.github/workflows/sycl-weekly.yml new file mode 100644 index 0000000000000..35da18c76ce9e --- /dev/null +++ b/.github/workflows/sycl-weekly.yml @@ -0,0 +1,67 @@ +name: SYCL Weekly + +on: + workflow_dispatch: + schedule: + # At 00:00 on Sunday. 
+ - cron: '0 0 * * 0' + +permissions: read-all + +jobs: + ubuntu2204_build: + if: github.repository == 'intel/llvm' + uses: ./.github/workflows/sycl-linux-build.yml + secrets: inherit + with: + build_cache_root: "/__w/" + build_artifact_suffix: default + build_configure_extra_args: '' + + # This job builds SYCL-CTS with -fsycl-use-spirv-backend-for-spirv-gen. + build-sycl-cts: + needs: ubuntu2204_build + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: Build SYCL-CTS + runner: '["Linux", "build"]' + cts_testing_mode: 'build-only' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + tests_selector: cts + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + extra_cmake_args: -DDPCPP_FLAGS=-fsycl-use-spirv-backend-for-spirv-gen + sycl_cts_artifact: sycl_cts_bin + + run-sycl-cts: + needs: [ubuntu2204_build, build-sycl-cts] + if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + include: + - name: SYCL-CTS on OCL CPU PVC w/ LLVM SPIR-V Backend + runner: '["Linux", "pvc"]' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + target_devices: opencl:cpu + + - name: SYCL-CTS on L0 GPU PVC w/ LLVM SPIR-V Backend + runner: '["Linux", "pvc"]' + image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + cts_testing_mode: 'run-only' + image_options: ${{ matrix.image_options }} + target_devices: ${{ matrix.target_devices }} + tests_selector: cts + 
repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + sycl_cts_artifact: sycl_cts_bin diff --git a/.github/workflows/sycl-windows-build.yml b/.github/workflows/sycl-windows-build.yml index 4bd537146bf31..dbf4bfd88922c 100644 --- a/.github/workflows/sycl-windows-build.yml +++ b/.github/workflows/sycl-windows-build.yml @@ -7,7 +7,7 @@ on: type: string required: false default: "default" - build_ref: + build_configure_extra_args: type: string required: false changes: @@ -15,6 +15,9 @@ on: description: 'Filter matches for the changed files in the PR' default: '[llvm, clang, sycl, llvm_spirv, xptifw, libclc, libdevice]' required: false + ref: + type: string + required: False artifact_archive_name: type: string default: llvm_sycl.tar.gz @@ -22,6 +25,10 @@ on: description: 'Artifacts retention period' type: string default: 3 + compiler: + type: string + required: false + default: "cl" outputs: build_conclusion: @@ -36,11 +43,15 @@ on: type: choice options: - "[]" + - '[sycl]' - '[llvm, clang, sycl, llvm_spirv, xptifw, libclc, libdevice]' build_cache_suffix: type: choice options: - "default" + build_configure_extra_args: + type: string + required: false artifact_archive_name: type: choice options: @@ -50,6 +61,12 @@ on: type: choice options: - 3 + compiler: + type: choice + options: + - cl + - icx + permissions: read-all jobs: @@ -57,28 +74,48 @@ jobs: name: Build + LIT runs-on: [Windows, build] environment: WindowsCILock - # TODO use cached checkout outputs: build_conclusion: ${{ steps.build.conclusion }} steps: + - name: Detect hung tests + if: always() + shell: powershell + continue-on-error: true + run: | + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile 
"windows_detect_hung_tests.ps1" + powershell.exe -File windows_detect_hung_tests.ps1 + $exitCode = $LASTEXITCODE + Remove-Item -Path "windows_detect_hung_tests.ps1" + exit $exitCode + - uses: actions/checkout@v4 + with: + sparse-checkout: | + devops/actions - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 with: arch: amd64 + - name: Setup oneAPI env + uses: ./devops/actions/setup_windows_oneapi_env + if: ${{ always() && !cancelled() && inputs.compiler == 'icx' }} - name: Set env run: | git config --system core.longpaths true git config --global core.autocrlf false echo "C:\Program Files\Git\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "SCCACHE_DIR=D:\github\_work\cache\${{ inputs.build_cache_suffix }}" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - - uses: actions/checkout@v4 + echo "CCACHE_DIR=D:\github\_work\cache\${{ inputs.build_cache_suffix }}" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "CCACHE_MAXSIZE=10G" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "LIT_OPTS='-j$env:NUMBER_OF_PROCESSORS $LIT_OPTS'" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + - name: Register cleanup after job is finished + uses: ./devops/actions/cleanup + - uses: ./devops/actions/cached_checkout with: path: src - ref: ${{ inputs.build_ref || github.sha }} - fetch-depth: 1 - - name: Register cleanup after job is finished - uses: ./src/devops/actions/cleanup + ref: ${{ inputs.ref || github.sha }} + cache_path: "D:\\\\github\\\\_work\\\\repo_cache\\\\" - name: Configure shell: cmd + env: + ARGS: ${{ inputs.build_configure_extra_args }} # TODO switch to clang-cl and lld when this is fixed https://github.com/oneapi-src/level-zero/issues/83 run: | mkdir build @@ -86,14 +123,14 @@ jobs: IF NOT EXIST D:\github\_work\cache MKDIR D:\github\_work\cache IF NOT EXIST D:\github\_work\cache\${{inputs.build_cache_suffix}} MKDIR 
D:\github\_work\cache\${{inputs.build_cache_suffix}} python.exe src/buildbot/configure.py -o build ^ - --ci-defaults ^ - --cmake-opt="-DCMAKE_C_COMPILER=cl" ^ - --cmake-opt="-DCMAKE_CXX_COMPILER=cl" ^ - --cmake-opt="-DCMAKE_INSTALL_PREFIX=%GITHUB_WORKSPACE%\install" ^ - --cmake-opt="-DCMAKE_CXX_COMPILER_LAUNCHER=sccache" ^ - --cmake-opt="-DCMAKE_C_COMPILER_LAUNCHER=sccache" ^ - --cmake-opt="-DLLVM_INSTALL_UTILS=ON" ^ - --cmake-opt="-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV" + --ci-defaults %ARGS% ^ + "-DCMAKE_C_COMPILER=${{inputs.compiler}}" ^ + "-DCMAKE_CXX_COMPILER=${{inputs.compiler}}" ^ + "-DCMAKE_INSTALL_PREFIX=%GITHUB_WORKSPACE%\install" ^ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ^ + -DCMAKE_C_COMPILER_LAUNCHER=ccache ^ + -DLLVM_INSTALL_UTILS=ON ^ + -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV - name: Build id: build shell: bash @@ -101,7 +138,8 @@ jobs: cmake --build build --target sycl-toolchain - name: check-llvm if: always() && !cancelled() && contains(inputs.changes, 'llvm') - run: | + shell: bash + run: | cmake --build build --target check-llvm - name: check-clang if: always() && !cancelled() && contains(inputs.changes, 'clang') @@ -109,8 +147,9 @@ jobs: cmake --build build --target check-clang - name: check-sycl if: always() && !cancelled() && contains(inputs.changes, 'sycl') - run: | - cmake --build build --target check-sycl + shell: bash + run: | + cmake --build build --target check-sycl - name: check-sycl-unittests if: always() && !cancelled() && contains(inputs.changes, 'sycl') run: | @@ -127,6 +166,19 @@ jobs: if: always() && !cancelled() && contains(inputs.changes, 'libdevice') run: | cmake --build build --target check-libdevice + - name: Generate/diff new ABI symbols + if: always() && !cancelled() && contains(inputs.changes, 'sycl') + shell: bash + run: | + LLVM_BIN_PATH="build/bin" python.exe src/sycl/tools/abi_check.py --mode dump_symbols --output build/new_sycl_symbols_windows.dump build/bin/sycl?.dll + diff -Naur 
src/sycl/test/abi/sycl_symbols_windows.dump build/new_sycl_symbols_windows.dump || true + - name: Upload new ABI symbols + if: always() && !cancelled() && contains(inputs.changes, 'sycl') + uses: actions/upload-artifact@v4 + with: + name: sycl_windows_abi_symbols + path: build/new_sycl_symbols_windows.dump + retention-days: ${{ inputs.retention-days }} - name: Install if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' }} shell: bash @@ -154,3 +206,12 @@ jobs: name: sycl_windows_default path: ${{ inputs.artifact_archive_name }} retention-days: ${{ inputs.retention-days }} + - name: Detect hung tests + if: always() + shell: powershell + run: | + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + powershell.exe -File windows_detect_hung_tests.ps1 + $exitCode = $LASTEXITCODE + Remove-Item -Path "windows_detect_hung_tests.ps1" + exit $exitCode diff --git a/.github/workflows/sycl-windows-precommit.yml b/.github/workflows/sycl-windows-precommit.yml index a82c2b7814d75..a9cb32731d49b 100644 --- a/.github/workflows/sycl-windows-precommit.yml +++ b/.github/workflows/sycl-windows-precommit.yml @@ -4,13 +4,13 @@ on: pull_request: branches: - sycl - - sycl-devops-pr/** - llvmspirv_pulldown - sycl-rel-** # Do not run builds if changes are only in the following locations paths-ignore: - '.github/ISSUE_TEMPLATE/**' - '.github/CODEOWNERS' + - 'sycl/cts_exclude_filter/**' - 'sycl/doc/**' - 'sycl/gdb/**' - 'clang/docs/**' @@ -20,6 +20,7 @@ on: - '.github/workflows/sycl-precommit-aws.yml' - '.github/workflows/sycl-macos-*.yml' - '.github/workflows/sycl-nightly.yml' + - '.github/workflows/sycl-rel-nightly.yml' - 'devops/containers/**' - 'devops/actions/build_container/**' @@ -50,9 +51,16 @@ jobs: always() && !cancelled() && needs.build.outputs.build_conclusion == 'success' + strategy: + fail-fast: false + matrix: + include: + - name: Intel 
GEN12 Graphics with Level Zero + runner: '["Windows","gen12"]' + - name: Intel Battlemage Graphics with Level Zero + runner: '["Windows","bmg"]' uses: ./.github/workflows/sycl-windows-run-tests.yml with: - name: Intel GEN12 Graphics with Level Zero - runner: '["Windows","gen12"]' + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} sycl_toolchain_archive: ${{ needs.build.outputs.artifact_archive_name }} - extra_lit_opts: --param gpu-intel-gen12=True diff --git a/.github/workflows/sycl-windows-run-tests.yml b/.github/workflows/sycl-windows-run-tests.yml index 9186392f8fa46..67d1a60f23c81 100644 --- a/.github/workflows/sycl-windows-run-tests.yml +++ b/.github/workflows/sycl-windows-run-tests.yml @@ -6,18 +6,43 @@ on: name: type: string required: True + runner: type: string required: True + target_devices: + type: string + required: False + extra_cmake_args: + type: string + required: False + tests_selector: + description: | + Two possible options: "e2e" and "cts". + type: string + default: "e2e" + extra_lit_opts: description: | Extra options to be added to LIT_OPTS. type: string default: '' - ref: + + repo_ref: + type: string + required: False + description: | + Commit SHA or branch to checkout the intel/llvm repo. + devops_ref: + type: string + required: False + description: | + Commit SHA or branch to checkout the devops directory. + tests_ref: type: string required: False + description: Commit SHA or branch to checkout e2e/cts tests. sycl_toolchain_artifact: type: string @@ -33,6 +58,28 @@ on: default: '{}' required: False + compiler: + type: string + required: false + default: "cl" + + cts_testing_mode: + description: | + Testing mode to run SYCL-CTS in, can be either `full`, `build-only` + or `run-only`. In `build-only` mode an artifact of the CTS binaries + will be uploaded. 
+ type: string + default: 'full' + + sycl_cts_artifact: + type: string + default: '' + required: False + artifact_retention_days: + description: 'E2E/SYCL-CTS binaries artifact retention period.' + type: string + default: 1 + permissions: read-all jobs: @@ -42,22 +89,40 @@ jobs: environment: WindowsCILock env: ${{ fromJSON(inputs.env) }} steps: + - name: Detect hung tests + if: always() + shell: powershell + continue-on-error: true + run: | + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + powershell.exe -File windows_detect_hung_tests.ps1 + $exitCode = $LASTEXITCODE + Remove-Item -Path "windows_detect_hung_tests.ps1" + exit $exitCode + - uses: actions/checkout@v4 + with: + sparse-checkout: | + devops/actions + ref: ${{ inputs.devops_ref|| inputs.repo_ref || github.sha }} - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 with: arch: amd64 + - name: Setup oneAPI env + uses: ./devops/actions/setup_windows_oneapi_env + if: ${{ always() && !cancelled() && inputs.compiler == 'icx' }} - name: Set env run: | git config --system core.longpaths true git config --global core.autocrlf false echo "C:\Program Files\Git\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - # TODO: use cached_checkout - - uses: actions/checkout@v4 + - name: Register cleanup after job is finished + uses: ./devops/actions/cleanup + - uses: ./devops/actions/cached_checkout + if: inputs.tests_selector == 'e2e' with: - persist-credentials: false - ref: ${{ inputs.ref || github.sha }} path: llvm - - name: Register cleanup after job is finished - uses: ./llvm/devops/actions/cleanup + ref: ${{ inputs.repo_ref || github.sha }} + cache_path: "D:\\\\github\\\\_work\\\\repo_cache\\\\" - name: Download compiler toolchain uses: actions/download-artifact@v4 with: @@ -71,36 +136,52 @@ jobs: - name: Setup SYCL toolchain run: | echo 
"PATH=$env:GITHUB_WORKSPACE\\install\\bin;$env:PATH" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "LIB=$env:GITHUB_WORKSPACE\\install\\lib;$env:LIB" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - run: | sycl-ls - run: | sycl-ls --verbose - name: Configure E2E with Level Zero target + if: inputs.tests_selector == 'e2e' shell: cmd run: | mkdir build-e2e - cmake -GNinja -B build-e2e -S.\llvm\sycl\test-e2e -DSYCL_TEST_E2E_TARGETS="level_zero:gpu" -DCMAKE_CXX_COMPILER="clang++" -DLEVEL_ZERO_LIBS_DIR="D:\github\level-zero_win-sdk\lib" -DLEVEL_ZERO_INCLUDE="D:\github\level-zero_win-sdk\include" -DLLVM_LIT="..\llvm\llvm\utils\lit\lit.py" + cmake -GNinja -B build-e2e -S.\llvm\sycl\test-e2e -DSYCL_TEST_E2E_TARGETS="level_zero:gpu" -DCMAKE_CXX_COMPILER="clang++" -DLEVEL_ZERO_LIBS_DIR="D:\\github\\level-zero_win-sdk\\lib" -DLEVEL_ZERO_INCLUDE="D:\\github\\level-zero_win-sdk\\include" -DLLVM_LIT="..\\llvm\\llvm\\utils\\lit\\lit.py" - name: Run End-to-End tests + if: inputs.tests_selector == 'e2e' shell: bash run: | # Run E2E tests. 
+ if [[ ${{inputs.compiler}} == 'icx' ]]; then + export LIT_FILTER_OUT="compile_on_win_with_mdd" + fi export LIT_OPTS="-v --no-progress-bar --show-unsupported --show-pass --show-xfail --max-time 3600 --time-tests ${{ inputs.extra_lit_opts }}" cmake --build build-e2e --target check-sycl-e2e - - name: Detect hung tests - shell: powershell + + - name: Run SYCL CTS Tests + if: inputs.tests_selector == 'cts' + uses: ./devops/actions/run-tests/windows/cts + with: + ref: ${{ inputs.tests_ref || 'main' }} + cts_exclude_ref: ${{ inputs.repo_ref }} + extra_cmake_args: ${{ inputs.extra_cmake_args }} + cts_testing_mode: ${{ inputs.cts_testing_mode }} + sycl_cts_artifact: ${{ inputs.sycl_cts_artifact }} + target_devices: ${{ inputs.target_devices }} + retention-days: ${{ inputs.artifact_retention_days }} + + - name: Detect hung tests if: always() + shell: powershell run: | - $exitCode = 0 - $hungTests = Get-Process | Where-Object { ($_.Path -match "llvm\\install") -or ($_.Path -match "llvm\\build-e2e") } - $hungTests | Foreach-Object { - $exitCode = 1 - echo "Test $($_.Path) hung!" - Stop-Process -Force $_ - } + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + powershell.exe -File windows_detect_hung_tests.ps1 + $exitCode = $LASTEXITCODE + Remove-Item -Path "windows_detect_hung_tests.ps1" exit $exitCode - name: Cleanup shell: cmd if: always() run: | rmdir /q /s install - rmdir /q /s build-e2e + if exist build-e2e rmdir /q /s build-e2e diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml new file mode 100644 index 0000000000000..26637cac9d5e3 --- /dev/null +++ b/.github/workflows/trivy.yml @@ -0,0 +1,37 @@ +# This workflow performs a trivy check of docker config files. 
+ +name: Trivy + +on: + pull_request: + paths: + - 'devops/containers/**' + - 'devops/.trivyignore.yaml' + - '.github/workflows/trivy.yml' + workflow_dispatch: + +jobs: + build: + name: Trivy + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + sparse-checkout: devops + + # There is a github action, but for some reason it ignores ignore-file. + - name: Install Trivy + run: | + curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh + ./bin/trivy --version + + - name: Run Trivy vulnerability scanner + run: ./bin/trivy config --format json --output trivy-report.json --ignorefile=devops/.trivyignore.yaml devops/containers --exit-code 1 + + - name: Upload report artifact + uses: actions/upload-artifact@v4 + with: + name: trivy-report + path: trivy-report.json + retention-days: 3 diff --git a/.github/workflows/ur-benchmarks-reusable.yml b/.github/workflows/ur-benchmarks-reusable.yml new file mode 100644 index 0000000000000..66ffcecd70314 --- /dev/null +++ b/.github/workflows/ur-benchmarks-reusable.yml @@ -0,0 +1,12 @@ +name: Benchmarks Reusable + +# This workflow is a WIP: This workflow file acts as a placeholder. + +on: [ workflow_call ] + +jobs: + do-nothing: + runs-on: ubuntu-latest + steps: + - run: echo 'This workflow is a WIP.' + diff --git a/.github/workflows/ur-benchmarks.yml b/.github/workflows/ur-benchmarks.yml new file mode 100644 index 0000000000000..23fbb1ad903b4 --- /dev/null +++ b/.github/workflows/ur-benchmarks.yml @@ -0,0 +1,12 @@ +name: Benchmarks + +# This workflow is a WIP: this workflow file acts as a placeholder. + +on: [ workflow_dispatch ] + +jobs: + do-nothing: + runs-on: ubuntu-latest + steps: + - run: echo 'This workflow is a WIP.' 
+ diff --git a/.github/workflows/ur-build-hw.yml b/.github/workflows/ur-build-hw.yml new file mode 100644 index 0000000000000..a0f94ab10f538 --- /dev/null +++ b/.github/workflows/ur-build-hw.yml @@ -0,0 +1,159 @@ +name: UR - Build adapters, test on HW + +on: + workflow_call: + inputs: + adapter_name: + required: true + type: string + other_adapter_name: + required: false + type: string + default: "" + runner_name: + required: true + type: string + platform: + description: "Platform string, `UR_CTS_ADAPTER_PLATFORM` will be set to this." + required: false + type: string + default: "" + static_loader: + required: false + type: string + default: OFF + static_adapter: + required: false + type: string + default: OFF + workflow_dispatch: + inputs: + adapter_name: + required: true + type: string + other_adapter_name: + required: false + type: string + default: "" + runner_name: + required: true + type: string + platform: + description: "Platform string, `UR_CTS_ADAPTER_PLATFORM` will be set to this." 
+ required: false + type: string + default: "" + static_loader: + required: false + type: string + default: OFF + static_adapter: + required: false + type: string + default: OFF + +permissions: + contents: read + +env: + UR_LOG_CUDA: "level:error;flush:error" + UR_LOG_HIP: "level:error;flush:error" + UR_LOG_LEVEL_ZERO: "level:error;flush:error" + UR_LOG_NATIVE_CPU: "level:error;flush:error" + UR_LOG_OPENCL: "level:error;flush:error" + +jobs: + adapter_build_hw: + name: Build & CTS + # run only on upstream; forks won't have the HW + if: github.repository == 'intel/llvm' + strategy: + fail-fast: false + matrix: + adapter: [ + { + name: "${{inputs.adapter_name}}", + other_name: "${{inputs.other_adapter_name}}", + platform: "${{inputs.platform}}", + static_Loader: "${{inputs.static_loader}}", + static_adapter: "${{inputs.static_loader}}" + } + ] + build_type: [Release] + compiler: [{c: gcc, cxx: g++}] + + runs-on: ${{inputs.runner_name}} + + steps: + # TODO: + # - investigate if DUR_CONFORMANCE_AMD_ARCH could be removed + # - find better way to handle platform param (e.g. "Intel(R) OpenCL" -> "opencl") + # - switch to Ninja generator in CMake + # - downloading DPC++ should be integrated somehow; most likely use nightly release. + # + - name: Checkout LLVM + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + # Latest distros do not allow global pip installation + - name: Install UR python dependencies in venv + working-directory: ${{github.workspace}}/unified-runtime + run: | + python3 -m venv .venv + . 
.venv/bin/activate + echo "$PATH" >> $GITHUB_PATH + pip install -r third_party/requirements.txt + + - name: Download DPC++ + run: | + wget -O ${{github.workspace}}/dpcpp_compiler.tar.gz https://github.com/intel/llvm/releases/download/nightly-2024-12-12/sycl_linux.tar.gz + mkdir dpcpp_compiler + tar -xvf ${{github.workspace}}/dpcpp_compiler.tar.gz -C dpcpp_compiler + + - name: Configure Unified Runtime project + working-directory: ${{github.workspace}}/unified-runtime + # ">" is used to avoid adding "\" at the end of each line; this command is quite long + run: > + cmake + -B${{github.workspace}}/build + -DCMAKE_C_COMPILER=${{matrix.compiler.c}} + -DCMAKE_CXX_COMPILER=${{matrix.compiler.cxx}} + -DCMAKE_BUILD_TYPE=${{matrix.build_type}} + -DUR_ENABLE_TRACING=ON + -DUR_DEVELOPER_MODE=ON + -DUR_BUILD_TESTS=ON + -DUR_BUILD_ADAPTER_${{matrix.adapter.name}}=ON + -DUR_CONFORMANCE_TEST_LOADER=${{ matrix.adapter.other_name != '' && 'ON' || 'OFF' }} + ${{ matrix.adapter.other_name != '' && format('-DUR_BUILD_ADAPTER_{0}=ON', matrix.adapter.other_name) || '' }} + -DUR_STATIC_LOADER=${{matrix.adapter.static_Loader}} + -DUR_STATIC_ADAPTER_${{matrix.adapter.name}}=${{matrix.adapter.static_adapter}} + -DUR_DPCXX=${{github.workspace}}/dpcpp_compiler/bin/clang++ + -DUR_SYCL_LIBRARY_DIR=${{github.workspace}}/dpcpp_compiler/lib + -DCMAKE_INSTALL_PREFIX=${{github.workspace}}/install + ${{ matrix.adapter.name == 'HIP' && '-DUR_CONFORMANCE_AMD_ARCH=gfx1030' || '' }} + ${{ matrix.adapter.name == 'HIP' && '-DUR_HIP_PLATFORM=AMD' || '' }} + + - name: Build + # This is so that device binaries can find the sycl runtime library + run: cmake --build ${{github.workspace}}/build -j $(nproc) + + - name: Install + # This is to check that install command does not fail + run: cmake --install ${{github.workspace}}/build + + - name: Test adapter specific + env: + ZE_ENABLE_LOADER_DEBUG_TRACE: 1 + ZE_DEBUG: 1 + run: ctest -C ${{matrix.build_type}} --test-dir ${{github.workspace}}/build 
--output-on-failure -L "adapter-specific" -E "memcheck" --timeout 600 -VV + # Don't run adapter specific tests when building multiple adapters + if: ${{ matrix.adapter.other_name == '' }} + + - name: Test adapters + env: + ZE_ENABLE_LOADER_DEBUG_TRACE: 1 + ZE_DEBUG: 1 + run: env UR_CTS_ADAPTER_PLATFORM="${{matrix.adapter.platform}}" ctest -C ${{matrix.build_type}} --test-dir ${{github.workspace}}/build --output-on-failure -L "conformance" --timeout 600 -VV + + - name: Get information about platform + if: ${{ always() }} + run: ${{github.workspace}}/unified-runtime/.github/scripts/get_system_info.sh diff --git a/.github/workflows/ur-precommit.yml b/.github/workflows/ur-precommit.yml new file mode 100644 index 0000000000000..32fedc832c89d --- /dev/null +++ b/.github/workflows/ur-precommit.yml @@ -0,0 +1,114 @@ +name: Unified Runtime Pre Commit +# Note: this is the very first version of UR workflow. +# It was pretty much copy-pasted from UR repository. +# Over time it will be most likely integrated more into existing workflows. + +# Note: the trigger is copy-pasted from sycl-linux-precommit.yml - probably to be fine-tuned. +on: + # We rely on "Fork pull request workflows from outside collaborators" - + # "Require approval for all outside collaborators" at + # https://github.com/intel/llvm/settings/actions for security. + pull_request: + branches: + - sycl + - sycl-rel-** + # Do not run builds if changes are only in the following locations + paths-ignore: + - '.github/ISSUE_TEMPLATE/**' + - '.github/CODEOWNERS' + - 'sycl/doc/**' + - 'sycl/gdb/**' + - 'clang/docs/**' + - '**.md' + - '**.rst' + - '.github/workflows/sycl-windows-*.yml' + - '.github/workflows/sycl-macos-*.yml' + - '.github/workflows/sycl-nightly.yml' + - '.github/workflows/sycl-rel-nightly.yml' + - 'devops/containers/**' + - 'devops/actions/build_container/**' + +concurrency: + # Cancel a currently running workflow from the same PR, branch or tag. 
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +permissions: read-all + +jobs: + detect_changes: + name: Detect Changes + uses: ./.github/workflows/sycl-detect-changes.yml + + source_checks: + name: Source Checks + needs: [detect_changes] + if: ${{ always() && !cancelled() && contains(needs.detect_changes.outputs.filters, 'ur') }} + uses: ./.github/workflows/ur-source-checks.yml + + adapters: + name: Adapters + needs: [detect_changes, source_checks] + if: ${{ always() && !cancelled() && contains(needs.detect_changes.outputs.filters, 'ur') }} + strategy: + matrix: + # Extra native CPU jobs are here to force the loader to be used. + # UR will not use the loader if there is only one target. + adapter: [ + {name: L0, runner: UR_L0}, + {name: L0_V2, runner: UR_L0}, + {name: L0, runner: UR_L0, static: ON}, + {name: OPENCL, runner: UR_OPENCL, platform: "Intel(R) OpenCL"}, + {name: CUDA, runner: UR_CUDA}, + {name: HIP, runner: UR_HIP}, + {name: NATIVE_CPU, runner: UR_NATIVE_CPU}, + {name: OPENCL, runner: UR_OPENCL, other_adapter: NATIVE_CPU, platform: "OPENCL:Intel(R) OpenCL"}, + {name: L0, runner: UR_L0, other_adapter: NATIVE_CPU}, + ] + uses: ./.github/workflows/ur-build-hw.yml + with: + adapter_name: ${{ matrix.adapter.name }} + runner_name: ${{ matrix.adapter.runner }} + static_loader: ${{ matrix.adapter.static || 'OFF' }} + static_adapter: ${{ matrix.adapter.static || 'OFF' }} + platform: ${{ matrix.adapter.platform || '' }} + other_adapter_name: ${{ matrix.adapter.other_adapter || '' }} + + macos: + name: MacOS build only + needs: [detect_changes, source_checks] + if: ${{ always() && !cancelled() && contains(needs.detect_changes.outputs.filters, 'ur') }} + strategy: + matrix: + os: ['macos-13'] + runs-on: ${{matrix.os}} + + steps: + - name: Checkout LLVM + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: actions/setup-python@6ca8e8598faa206f7140a65ba31b899bebe16f58 
# v5.0.0 + with: + python-version: 3.9 + + - name: Install prerequisites + working-directory: ${{github.workspace}}/unified-runtime + run: python3 -m pip install -r third_party/requirements.txt + + - name: Install hwloc + run: brew install hwloc + + - name: Configure Unified Runtime project + working-directory: ${{github.workspace}}/unified-runtime + run: > + cmake + -B${{github.workspace}}/build + -DUR_ENABLE_TRACING=ON + -DUR_DEVELOPER_MODE=ON + -DCMAKE_BUILD_TYPE=Release + -DUR_BUILD_TESTS=ON + -DUR_FORMAT_CPP_STYLE=ON + -DUMF_ENABLE_POOL_TRACKING=ON + + - name: Build + run: cmake --build ${{github.workspace}}/build -j $(sysctl -n hw.logicalcpu) diff --git a/.github/workflows/ur-source-checks.yml b/.github/workflows/ur-source-checks.yml new file mode 100644 index 0000000000000..bde0f9a1a952a --- /dev/null +++ b/.github/workflows/ur-source-checks.yml @@ -0,0 +1,64 @@ +name: UR - Check generated sources + +on: + workflow_call: + +permissions: + contents: read + +jobs: + source_checks: + strategy: + matrix: + os: ['ubuntu-22.04', 'windows-2022'] + + runs-on: ${{matrix.os}} + + steps: + # TODO: + # - split into separate jobs for each OS + # + - name: Checkout LLVM + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: actions/setup-python@6ca8e8598faa206f7140a65ba31b899bebe16f58 # v5.0.0 + with: + python-version: 3.9 + + - name: Install UR python dependencies + working-directory: ${{github.workspace}}/unified-runtime + run: pip install -r third_party/requirements.txt + + - name: "[Lin] Install doxygen" + if: matrix.os == 'ubuntu-22.04' + run: | + sudo apt-get update + sudo apt-get install -y doxygen + + - name: "[Win] Install doxygen" + if: matrix.os == 'windows-2022' + run: | + $WorkingDir = $PWD.Path + Invoke-WebRequest -Uri https://github.com/doxygen/doxygen/releases/download/Release_1_9_8/doxygen-1.9.8.windows.x64.bin.zip -OutFile "$WorkingDir\doxygen.zip" + Expand-Archive -Path "$WorkingDir\doxygen.zip" + Add-Content 
$env:GITHUB_PATH "$WorkingDir\doxygen" + + - name: Configure Unified Runtime project + working-directory: ${{github.workspace}}/unified-runtime + run: > + cmake + -B${{github.workspace}}/build + -DCMAKE_PREFIX_PATH="${{env.VCPKG_PATH}}" + -DUR_ENABLE_TRACING=OFF + -DCMAKE_BUILD_TYPE=Debug + -DUR_BUILD_TESTS=OFF + -DUR_FORMAT_CPP_STYLE=ON + -DUMF_DISABLE_HWLOC=ON + + # Verifying license should be enough on a single OS + - name: Verify that each source file contains a license + if: matrix.os == 'ubuntu-22.04' + run: cmake --build ${{github.workspace}}/build --target verify-licenses + + - name: Generate source from spec, check for uncommitted diff + run: cmake --build ${{github.workspace}}/build --target check-generated diff --git a/devops/.trivyignore.yaml b/devops/.trivyignore.yaml new file mode 100644 index 0000000000000..f942ef6ba2a9d --- /dev/null +++ b/devops/.trivyignore.yaml @@ -0,0 +1,5 @@ +misconfigurations: + - id: AVD-DS-0001 + statement: "We use our own containers, no uncontrolled behavior is expected when the image is updated" + - id: AVD-DS-0026 + statement: "Our containers do not provide running services, but only preinstalled tools, there is not much value in adding HEALTHCHECK directives" diff --git a/devops/actions/benchmarking/aggregate/action.yml b/devops/actions/benchmarking/aggregate/action.yml new file mode 100644 index 0000000000000..c062636684b1f --- /dev/null +++ b/devops/actions/benchmarking/aggregate/action.yml @@ -0,0 +1,95 @@ +name: 'Aggregate compute-benchmark results and produce historical averages' + +# The benchmarking workflow in sycl-linux-run-tests.yml passes or fails based on +# how the benchmark results compare to a historical average: This historical +# average is calculated in this composite workflow, which aggregates historical +# data and produces measures of central tendency (median in this case) used for +# this purpose. +# +# This action assumes that /devops has been checked out in ./devops. 
This action +# also assumes that GITHUB_TOKEN was properly set in env, because according to +# Github, that's apparently the recommended way to pass a secret into a github +# action: +# +# https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#accessing-your-secrets +# + +inputs: + lookback_days: + type: number + required: true + +runs: + using: "composite" + steps: + - name: Obtain oldest timestamp allowed for data in aggregation + shell: bash + run: | + # DO NOT use inputs.lookback_days directly, only use SANITIZED_TIMESTAMP. + SANITIZED_LOOKBACK_DAYS="$(echo '${{ inputs.lookback_days }}' | grep -oE '^[0-9]+$')" + if [ -z "$SANITIZED_LOOKBACK_DAYS" ]; then + echo "Please ensure inputs.lookback_days is a number." + exit 1 + fi + SANITIZED_TIMESTAMP="$(date -d "$SANITIZED_LOOKBACK_DAYS days ago" +%Y%m%d_%H%M%S)" + if [ -z "$(echo "$SANITIZED_TIMESTAMP" | grep -oE '^[0-9]{8}_[0-9]{6}$' )" ]; then + echo "Invalid timestamp generated: is inputs.lookback_days valid?" + exit 1 + fi + echo "SANITIZED_TIMESTAMP=$SANITIZED_TIMESTAMP" >> $GITHUB_ENV + - name: Load benchmarking configuration + shell: bash + run: | + $(python ./devops/scripts/benchmarking/load_config.py ./devops constants) + echo "SANITIZED_PERF_RES_GIT_REPO=$SANITIZED_PERF_RES_GIT_REPO" >> $GITHUB_ENV + echo "SANITIZED_PERF_RES_GIT_BRANCH=$SANITIZED_PERF_RES_GIT_BRANCH" >> $GITHUB_ENV + - name: Checkout historical performance results repository + shell: bash + run: | + if [ ! -d ./llvm-ci-perf-results ]; then + git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results + fi + - name: Run aggregator on historical results + shell: bash + run: | + # The current format of the historical results respository is: + # + # /// + # + # Thus, a min/max depth of 3 is used to enumerate all test cases in the + # repository. Test name is also derived from here. 
+ find ./llvm-ci-perf-results -mindepth 3 -maxdepth 3 -type d ! -path '*.git*' | + while read -r dir; do + test_name="$(basename "$dir")" + python ./devops/scripts/benchmarking/aggregate.py ./devops "$test_name" "$dir" "$SANITIZED_TIMESTAMP" + done + - name: Upload average to the repo + shell: bash + run: | + cd ./llvm-ci-perf-results + git config user.name "SYCL Benchmarking Bot" + git config user.email "sys_sycl_benchmarks@intel.com" + git pull + # Make sure changes have been made + if git diff --quiet && git diff --cached --quiet; then + echo "No changes to median, skipping push." + else + git add . + git commit -m "[GHA] Aggregate median data from $SANITIZED_TIMESTAMP to $(date +%Y%m%d_%H%M%S)" + git push "https://$GITHUB_TOKEN@github.com/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH" + fi + - name: Find aggregated average results artifact here + if: always() + shell: bash + run: | + cat << EOF + # + # Artifact link for aggregated averages here: + # + EOF + - name: Archive new medians + if: always() + uses: actions/upload-artifact@v4 + with: + name: llvm-ci-perf-results new medians + path: ./llvm-ci-perf-results/**/*-median.csv diff --git a/devops/actions/cached_checkout/action.yml b/devops/actions/cached_checkout/action.yml index f2c25bcbca0a5..6f58f1de825bf 100644 --- a/devops/actions/cached_checkout/action.yml +++ b/devops/actions/cached_checkout/action.yml @@ -10,7 +10,7 @@ inputs: description: | Commit-ish to merge post-checkout if non-empty. Must be reachable from the default_branch input paramter. - default: 'FETCH_HEAD' + default: '' path: description: 'Path to checkout repo to' fetch-depth: @@ -34,6 +34,7 @@ runs: git pull --prune else git clone https://github.com/${{ inputs.repository }}.git . 
+ git gc fi - name: Checkout env: diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml new file mode 100644 index 0000000000000..e357e2bddec30 --- /dev/null +++ b/devops/actions/run-tests/benchmark/action.yml @@ -0,0 +1,128 @@ +name: 'Run compute-benchmarks' + +# Run compute-benchmarks on SYCL +# +# This action assumes SYCL is in ./toolchain, and that /devops has been +# checked out in ./devops. This action also assumes that GITHUB_TOKEN +# was properly set in env, because according to Github, that's apparently the +# recommended way to pass a secret into a github action: +# +# https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#accessing-your-secrets +# +# This action also expects a RUNNER_TAG environment variable to be set to the +# runner tag used to run this workflow: Currently, only gen12 and pvc on Linux +# are fully supported. Although this workflow won't stop you from running other +# devices, note that only gen12 and pvc has been tested to work. +# + +inputs: + target_devices: + type: string + required: True + +runs: + using: "composite" + steps: + - name: Check specified runner type / target backend + shell: bash + env: + TARGET_DEVICE: ${{ inputs.target_devices }} + run: | + case "$RUNNER_TAG" in + '["Linux", "gen12"]' | '["Linux", "pvc"]') ;; + *) + echo "#" + echo "# WARNING: Only gen12/pvc on Linux is fully supported." + echo "# This workflow is not guaranteed to work with other runners." + echo "#" ;; + esac + + # input.target_devices is not directly used, as this allows code injection + case "$TARGET_DEVICE" in + level_zero:*) ;; + *) + echo "#" + echo "# WARNING: Only level_zero backend is fully supported." + echo "# This workflow is not guaranteed to work with other backends." 
+ echo "#" ;; + esac + - name: Compute CPU core range to run benchmarks on + shell: bash + run: | + # Taken from ur-benchmark-reusable.yml: + + # Compute the core range for the first NUMA node; second node is used by + # UMF. Skip the first 4 cores as the kernel is likely to schedule more + # work on these. + CORES="$(lscpu | awk ' + /NUMA node0 CPU|On-line CPU/ {line=$0} + END { + split(line, a, " ") + split(a[4], b, ",") + sub(/^0/, "4", b[1]) + print b[1] + }')" + echo "CPU core range to use: $CORES" + echo "CORES=$CORES" >> $GITHUB_ENV + + ZE_AFFINITY_MASK=0 + echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV + - name: Run compute-benchmarks + shell: bash + run: | + cat << EOF + # + # NOTE TO DEVELOPERS: + # + + Check latter steps of the workflow: This job produces an artifact with: + - benchmark results from passing/failing tests + - log containing all failing (too slow) benchmarks + - log containing all erroring benchmarks + + While this step in the workflow provides debugging output describing this + information, it might be easier to inspect the logs from the artifact + instead. + + EOF + export ONEAPI_DEVICE_SELECTOR="${{ inputs.target_devices }}" + export CMPLR_ROOT=./toolchain + echo "-----" + sycl-ls + echo "-----" + taskset -c "$CORES" ./devops/scripts/benchmarking/benchmark.sh -n '${{ runner.name }}' -s || exit 1 + - name: Push compute-benchmarks results + if: always() + shell: bash + run: | + # TODO -- waiting on security clearance + # Load configuration values + $(python ./devops/scripts/benchmarking/load_config.py ./devops constants) + + cd "./llvm-ci-perf-results" + git config user.name "SYCL Benchmarking Bot" + git config user.email "sys_sycl_benchmarks@intel.com" + git pull + git add . + # Make sure changes have been made + if git diff --quiet && git diff --cached --quiet; then + echo "No new results added, skipping push." 
+ else + git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}" + git push "https://$GITHUB_TOKEN@github.com/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH" + fi + - name: Find benchmark result artifact here + if: always() + shell: bash + run: | + cat << EOF + # + # Artifact link for benchmark results here: + # + EOF + - name: Archive compute-benchmark results + if: always() + uses: actions/upload-artifact@v4 + with: + name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }}) + path: ./artifact diff --git a/devops/actions/run-tests/cts/action.yml b/devops/actions/run-tests/cts/action.yml new file mode 100644 index 0000000000000..93fd2770e166d --- /dev/null +++ b/devops/actions/run-tests/cts/action.yml @@ -0,0 +1,176 @@ +name: 'Run SYCL CTS tests on Linux' + +inputs: + ref: + description: "Commit SHA or branch to checkout tests" + required: true + cts_exclude_ref: + description: "Commit SHA or branch to checkout the cts_exclude_filter dir" + required: true + extra_cmake_args: + required: false + cts_testing_mode: + required: true + sycl_cts_artifact: + required: false + target_devices: + required: true + retention-days: + required: false + +runs: + using: "composite" + steps: + - name: Checkout cts_exclude_filter folder + uses: actions/checkout@v4 + with: + ref: ${{ inputs.cts_exclude_ref }} + path: cts_exclude + sparse-checkout: | + sycl/cts_exclude_filter + - name: Move sycl to root + shell: bash + run: | + mv cts_exclude/sycl . 
+ rm -rf cts_exclude + - name: Checkout SYCL CTS tests + if: inputs.cts_testing_mode != 'run-only' + uses: ./devops/actions/cached_checkout + with: + path: khronos_sycl_cts + repository: 'KhronosGroup/SYCL-CTS' + ref: ${{ inputs.ref }} + cache_path: "/__w/repo_cache/" + - name: SYCL CTS GIT submodules init + if: inputs.cts_testing_mode != 'run-only' + shell: bash + run: | + git -C khronos_sycl_cts submodule update --init + - name: Build SYCL CTS tests + if: inputs.cts_testing_mode != 'run-only' + shell: bash + env: + CMAKE_EXTRA_ARGS: ${{ inputs.extra_cmake_args }} + run: | + cts_exclude_filter="" + # If CTS_TESTS_TO_BUILD is null - use filter + if [ -z "$CTS_TESTS_TO_BUILD" ]; then + if [ "${{ contains(inputs.cts_testing_mode, 'build-only') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/compfails + elif [ "${{ contains(inputs.target_devices, 'opencl:cpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/OCL_CPU + elif [ "${{ contains(inputs.target_devices, 'level_zero:gpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/L0_GPU + fi + + # List excluded SYCL CTS categories: + # SYCL_CTS_EXCLUDE_TEST_CATEGORIES - Optional file specifying a list + # of test categories to be excluded from the build. + echo "::group::Excluded test categories" + cat $cts_exclude_filter + echo "::endgroup::" + fi + + cmake -GNinja -B./build-cts -S./khronos_sycl_cts -DCMAKE_CXX_COMPILER=$(which clang++) \ + -DSYCL_IMPLEMENTATION=DPCPP \ + -DSYCL_CTS_EXCLUDE_TEST_CATEGORIES="$cts_exclude_filter" \ + -DSYCL_CTS_ENABLE_OPENCL_INTEROP_TESTS=OFF \ + -DDPCPP_INSTALL_DIR="$(dirname $(which clang++))/.." \ + $CMAKE_EXTRA_ARGS + # Ignore errors so that if one category build fails others still have a + # chance to finish and be executed at the run stage. Note that + # "test_conformance" target skips building "test_all" executable. 
+ ninja -C build-cts -k0 $( [ -n "$CTS_TESTS_TO_BUILD" ] && echo "$CTS_TESTS_TO_BUILD" || echo "test_conformance") + + - name: Pack SYCL-CTS binaries + if: always() && !cancelled() && inputs.cts_testing_mode == 'build-only' + shell: bash + run: tar -I 'zstd -9' -cf sycl_cts_bin.tar.zst -C ./build-cts/bin . + + - name: Upload SYCL-CTS binaries + if: always() && !cancelled() && inputs.cts_testing_mode == 'build-only' + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.sycl_cts_artifact }} + path: sycl_cts_bin.tar.zst + retention-days: ${{ inputs.retention-days }} + + - name: Download SYCL-CTS binaries + if: inputs.cts_testing_mode == 'run-only' + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.sycl_cts_artifact }} + + - name: Extract SYCL-CTS binaries + if: inputs.cts_testing_mode == 'run-only' + shell: bash + run: | + mkdir -p build-cts/bin + tar -I 'zstd' -xf sycl_cts_bin.tar.zst -C build-cts/bin + + - name: SYCL CTS List devices + # Proceed with execution even if the 'build' step did not succeed. + if: (always() && !cancelled()) && inputs.cts_testing_mode != 'build-only' + shell: bash + env: + ONEAPI_DEVICE_SELECTOR: ${{ inputs.target_devices }} + run: | + ./build-cts/bin/* --list-devices + + # If the suite was built on another machine then the build contains the full + # set of tests. We have special files to filter out some test categories, + # see "sycl/cts_exclude_filter/*". Each configuration has its own file, e.g. + # there is "cts_exclude_filter/OCL_CPU" for opencl:cpu device. Therefore, + # these files may differ from each other, so when there is a pre-built set of + # tests, we need to filter it according to the filter-file. 
+ - name: Filter SYCL CTS test categories + if: inputs.cts_testing_mode == 'run-only' + shell: bash + run: | + cts_exclude_filter="" + if [ "${{ contains(inputs.target_devices, 'opencl:cpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/OCL_CPU + elif [ "${{ contains(inputs.target_devices, 'level_zero:gpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/L0_GPU + fi + + while IFS= read -r line; do + if [[ $line != \#* ]]; then + rm "./build-cts/bin/test_$line" + fi + done < "$cts_exclude_filter" + + - name: Run SYCL CTS tests + # Proceed with execution even if the previous two steps did not succeed. + if: (always() && !cancelled()) && inputs.cts_testing_mode != 'build-only' + env: + ONEAPI_DEVICE_SELECTOR: ${{ inputs.target_devices }} + # By-default GitHub actions execute the "run" shell script with -e option, + # so the execution terminates if any command returns a non-zero status. + # Since we're using a loop to run all test-binaries separately, some test + # may fail and terminate the execution. Setting "shell" value to override + # the default behavior. + # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#custom-shell + shell: bash {0} + run: | + # Run each test category separately so that + # - crash on one would not affect others + # - multiple tests could be run in parallel + mkdir logs + find build-cts/bin/ -type f -print | \ + xargs -t -I % -P 8 sh -c 'log=logs/$(basename %).log ; echo % >$log ; date >>$log ; timeout 60m % >>$log 2>&1 ; ret=$? ; echo "exit code: $ret" >>$log ; date >>$log ; exit $ret' + ret=$? 
+ + for f in logs/* ; do + echo "::group::$f" + cat $f + echo "::endgroup::" + done + + echo "::group::Fails:" + grep 'exit code: [^0]' -r logs + echo "::endgroup::" + + grep 'exit code: [^0]' -r logs >> $GITHUB_STEP_SUMMARY + + exit $ret diff --git a/devops/actions/run-tests/e2e/action.yml b/devops/actions/run-tests/e2e/action.yml new file mode 100644 index 0000000000000..47fc75599ccdb --- /dev/null +++ b/devops/actions/run-tests/e2e/action.yml @@ -0,0 +1,83 @@ +name: 'Run SYCL E2E tests' + +inputs: + ref: + required: false + binaries_artifact: + required: false + testing_mode: + required: true + extra_cmake_args: + required: false + target_devices: + required: true + extra_lit_opts: + required: false + retention-days: + required: false + cxx_compiler: + required: false + + +runs: + using: "composite" + steps: + - name: Checkout E2E tests + uses: ./devops/actions/cached_checkout + with: + path: llvm + ref: ${{ inputs.ref || github.sha }} + cache_path: "/__w/repo_cache/" + + - name: Download E2E Binaries + if: inputs.testing_mode == 'run-only' + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.binaries_artifact }} + - name: Extract E2E Binaries + if: inputs.testing_mode == 'run-only' + shell: bash + run: | + mkdir build-e2e + tar -I 'zstd' -xf e2e_binaries.tar.zst -C build-e2e + + - name: Deduce E2E CMake options + if: inputs.testing_mode != 'run-only' + id: cmake_opts + shell: bash + env: + CMAKE_EXTRA_ARGS: ${{ inputs.extra_cmake_args }} + run: | + if [ -n "$CMAKE_EXTRA_ARGS" ]; then + echo "opts=$CMAKE_EXTRA_ARGS" >> $GITHUB_OUTPUT + fi + - name: Configure E2E tests + if: inputs.testing_mode != 'run-only' + shell: bash + run: | + cmake -GNinja -B./build-e2e -S./llvm/sycl/test-e2e -DCMAKE_CXX_COMPILER="${{ inputs.cxx_compiler || '$(which clang++)'}}" -DLLVM_LIT="$PWD/llvm/llvm/utils/lit/lit.py" ${{ steps.cmake_opts.outputs.opts }} + - name: SYCL End-to-end tests + shell: bash {0} + env: + LIT_OPTS: -v --no-progress-bar --show-unsupported --show-pass 
--show-xfail --max-time 3600 --time-tests --param print_features=True --param test-mode=${{ inputs.testing_mode }} --param sycl_devices=${{ inputs.target_devices }} ${{ inputs.extra_lit_opts }} + run: | + ninja -C build-e2e check-sycl-e2e > e2e.log 2>&1 + exit_code=$? + cat e2e.log + if [ $exit_code -ne 0 ]; then + awk '/^Failed Tests|Unexpectedly Passed Tests|Unresolved tests|Timed Out Tests|Testing Time/{flag=1}/FAILED: CMakeFiles/{flag=0}flag' e2e.log >> $GITHUB_STEP_SUMMARY + fi + exit $exit_code + + - name: Pack E2E binaries + if: ${{ always() && !cancelled() && inputs.binaries_artifact != '' && inputs.testing_mode != 'run-only'}} + shell: bash + run: | + tar -I 'zstd -9' -cf e2e_binaries.tar.zst -C ./build-e2e . + - name: Upload E2E binaries + if: ${{ always() && !cancelled() && inputs.binaries_artifact != '' && inputs.testing_mode != 'run-only'}} + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.binaries_artifact }} + path: e2e_binaries.tar.zst + retention-days: ${{ inputs.retention-days }} diff --git a/devops/actions/run-tests/windows/cts/action.yml b/devops/actions/run-tests/windows/cts/action.yml new file mode 100644 index 0000000000000..4d4d7aa7f9f46 --- /dev/null +++ b/devops/actions/run-tests/windows/cts/action.yml @@ -0,0 +1,178 @@ +name: 'Run SYCL CTS tests on Windows' + +inputs: + ref: + description: "Commit SHA or branch to checkout tests" + required: true + cts_exclude_ref: + description: "Commit SHA or branch to checkout the cts_exclude_filter dir" + required: true + extra_cmake_args: + required: false + cts_testing_mode: + required: true + sycl_cts_artifact: + required: false + target_devices: + required: true + retention-days: + required: false + +runs: + using: "composite" + steps: + - name: Checkout cts_exclude_filter folder + uses: actions/checkout@v4 + with: + ref: ${{ inputs.cts_exclude_ref }} + path: cts_exclude + sparse-checkout: | + sycl/cts_exclude_filter + - name: Move sycl to root + shell: bash + run: | + mv 
cts_exclude/sycl . + rm -rf cts_exclude + - name: Checkout SYCL CTS tests + if: inputs.cts_testing_mode != 'run-only' + uses: ./devops/actions/cached_checkout + with: + path: khronos_sycl_cts + repository: 'KhronosGroup/SYCL-CTS' + ref: ${{ inputs.ref }} + cache_path: "D:\\\\github\\\\_work\\\\repo_cache\\\\" + - name: SYCL CTS GIT submodules init + if: inputs.cts_testing_mode != 'run-only' + shell: bash + run: | + git -C khronos_sycl_cts submodule update --init + - name: Build SYCL CTS tests + if: inputs.cts_testing_mode != 'run-only' + shell: bash + env: + CMAKE_EXTRA_ARGS: ${{ inputs.extra_cmake_args }} + run: | + cts_exclude_filter="" + # If CTS_TESTS_TO_BUILD is null - use filter + if [ -z "$CTS_TESTS_TO_BUILD" ]; then + if [ "${{ contains(inputs.cts_testing_mode, 'build-only') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/compfails + elif [ "${{ contains(inputs.target_devices, 'opencl:cpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/OCL_CPU + elif [ "${{ contains(inputs.target_devices, 'level_zero:gpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/L0_GPU + fi + + # List excluded SYCL CTS categories: + # SYCL_CTS_EXCLUDE_TEST_CATEGORIES - Optional file specifying a list + # of test categories to be excluded from the build. + echo "::group::Excluded test categories" + cat $cts_exclude_filter + echo "::endgroup::" + fi + + cmake -GNinja -B./build-cts -S./khronos_sycl_cts \ + -DSYCL_IMPLEMENTATION=DPCPP \ + -DSYCL_CTS_EXCLUDE_TEST_CATEGORIES="$cts_exclude_filter" \ + -DSYCL_CTS_ENABLE_OPENCL_INTEROP_TESTS=OFF \ + -DDPCPP_INSTALL_DIR="$(dirname $(which clang++))/.." \ + -DCMAKE_CXX_COMPILER=cl \ + -DCMAKE_BUILD_TYPE=Release \ + $CMAKE_EXTRA_ARGS + # Ignore errors so that if one category build fails others still have a + # chance to finish and be executed at the run stage. Note that + # "test_conformance" target skips building "test_all" executable. 
+ ninja -C build-cts -k0 $( [ -n "$CTS_TESTS_TO_BUILD" ] && echo "$CTS_TESTS_TO_BUILD" || echo "test_conformance") + + - name: Pack SYCL-CTS binaries + if: always() && !cancelled() && inputs.cts_testing_mode == 'build-only' + shell: bash + run: tar -I 'zstd -9' -cf sycl_cts_bin.tar.zst -C ./build-cts/bin . + + - name: Upload SYCL-CTS binaries + if: always() && !cancelled() && inputs.cts_testing_mode == 'build-only' + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.sycl_cts_artifact }} + path: sycl_cts_bin.tar.zst + retention-days: ${{ inputs.retention-days }} + + - name: Download SYCL-CTS binaries + if: inputs.cts_testing_mode == 'run-only' + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.sycl_cts_artifact }} + + - name: Extract SYCL-CTS binaries + if: inputs.cts_testing_mode == 'run-only' + shell: bash + run: | + mkdir -p build-cts/bin + tar -I 'zstd' -xf sycl_cts_bin.tar.zst -C build-cts/bin + + - name: SYCL CTS List devices + # Proceed with execution even if the 'build' step did not succeed. + if: (always() && !cancelled()) && inputs.cts_testing_mode != 'build-only' + shell: bash + env: + ONEAPI_DEVICE_SELECTOR: ${{ inputs.target_devices }} + run: | + ./build-cts/bin/* --list-devices + + # If the suite was built on another machine then the build contains the full + # set of tests. We have special files to filter out some test categories, + # see "sycl/cts_exclude_filter/*". Each configuration has its own file, e.g. + # there is "cts_exclude_filter/OCL_CPU" for opencl:cpu device. Therefore, + # these files may differ from each other, so when there is a pre-built set of + # tests, we need to filter it according to the filter-file. 
+ - name: Filter SYCL CTS test categories + if: inputs.cts_testing_mode == 'run-only' + shell: bash + run: | + cts_exclude_filter="" + if [ "${{ contains(inputs.target_devices, 'opencl:cpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/OCL_CPU + elif [ "${{ contains(inputs.target_devices, 'level_zero:gpu') }}" = "true" ]; then + cts_exclude_filter=$PWD/sycl/cts_exclude_filter/L0_GPU + fi + + while IFS= read -r line; do + if [[ $line != \#* ]]; then + rm "./build-cts/bin/test_$line" + fi + done < "$cts_exclude_filter" + + - name: Run SYCL CTS tests + # Proceed with execution even if the previous two steps did not succeed. + if: (always() && !cancelled()) && inputs.cts_testing_mode != 'build-only' + env: + ONEAPI_DEVICE_SELECTOR: ${{ inputs.target_devices }} + # By-default GitHub actions execute the "run" shell script with -e option, + # so the execution terminates if any command returns a non-zero status. + # Since we're using a loop to run all test-binaries separately, some test + # may fail and terminate the execution. Setting "shell" value to override + # the default behavior. + # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#custom-shell + shell: bash {0} + run: | + # Run each test category separately so that + # - crash on one would not affect others + # - multiple tests could be run in parallel + mkdir logs + find build-cts/bin/ -type f -print | \ + xargs -t -I % -P 8 sh -c 'log=logs/$(basename %).log ; echo % >$log ; date >>$log ; timeout 60m % >>$log 2>&1 ; ret=$? ; echo "exit code: $ret" >>$log ; date >>$log ; exit $ret' + ret=$? 
+ + for f in logs/* ; do + echo "::group::$f" + cat $f + echo "::endgroup::" + done + + echo "::group::Fails:" + grep 'exit code: [^0]' -r logs + echo "::endgroup::" + + grep 'exit code: [^0]' -r logs >> $GITHUB_STEP_SUMMARY + + exit $ret diff --git a/devops/actions/setup_linux_oneapi_env/action.yml b/devops/actions/setup_linux_oneapi_env/action.yml new file mode 100644 index 0000000000000..e54bd13d99b5e --- /dev/null +++ b/devops/actions/setup_linux_oneapi_env/action.yml @@ -0,0 +1,22 @@ +name: Linux setup oneAPI env + +runs: + using: "composite" + steps: + - name: Setup oneAPI env + shell: bash + run: | + sudo apt-get --fix-broken -y install + wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor \ + | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \ + sudo echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" \ + | sudo tee /etc/apt/sources.list.d/oneAPI.list && \ + sudo apt update && sudo apt-get -y install intel-oneapi-compiler-dpcpp-cpp-2025.0 + + env_before=$(env | sort) + source /opt/intel/oneapi/setvars.sh + env_after=$(env | sort) + changed_envvars=$(comm -13 <(echo "$env_before") <(echo "$env_after")) + while IFS= read -r line; do + echo "$line" >> $GITHUB_ENV + done <<< "$changed_envvars" diff --git a/devops/actions/setup_windows_oneapi_env/action.yml b/devops/actions/setup_windows_oneapi_env/action.yml new file mode 100644 index 0000000000000..7b7463f697f23 --- /dev/null +++ b/devops/actions/setup_windows_oneapi_env/action.yml @@ -0,0 +1,28 @@ +name: Windows setup oneAPI env + +runs: + using: "composite" + steps: + - name: Setup oneAPI env + shell: powershell + run: | + $batchFilePath = "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" + + $githubEnvFilePath = $env:GITHUB_ENV + + $envBefore = Get-ChildItem Env: | ForEach-Object { "$($_.Name)=$($_.Value)" } + + $envVars = & cmd.exe /c "call `"$batchFilePath`" && set" | 
Out-String + + $envAfter = $envVars -split "`r`n" | Where-Object { $_ -match "^(.*?)=(.*)$" } + + foreach ($envVar in $envAfter) { + if ($envVar -match "^(.*?)=(.*)$") { + $name = $matches[1] + $value = $matches[2] + $envBeforeVar = $envBefore | Where-Object { $_ -like "$name=*" } + if (-not $envBeforeVar -or $envBeforeVar -ne "$name=$value") { + Add-Content -Path $githubEnvFilePath -Value "$name=$value" + } + } + } diff --git a/devops/bandit.config b/devops/bandit.config new file mode 100644 index 0000000000000..4e501feef37ef --- /dev/null +++ b/devops/bandit.config @@ -0,0 +1,402 @@ + +### Bandit config file generated from: +# './bandit/bandit/cli/config_generator.py --out bandit.config' + +### This config may optionally select a subset of tests to run or skip by +### filling out the 'tests' and 'skips' lists given below. If no tests are +### specified for inclusion then it is assumed all tests are desired. The skips +### set will remove specific tests from the include set. This can be controlled +### using the -t/-s CLI options. Note that the same test ID should not appear +### in both 'tests' and 'skips', this would be nonsensical and is detected by +### Bandit at runtime. 
+ +# Available tests: +# B101 : assert_used +# B102 : exec_used +# B103 : set_bad_file_permissions +# B104 : hardcoded_bind_all_interfaces +# B105 : hardcoded_password_string +# B106 : hardcoded_password_funcarg +# B107 : hardcoded_password_default +# B108 : hardcoded_tmp_directory +# B110 : try_except_pass +# B112 : try_except_continue +# B201 : flask_debug_true +# B301 : pickle +# B302 : marshal +# B303 : md5 +# B304 : ciphers +# B305 : cipher_modes +# B306 : mktemp_q +# B307 : eval +# B308 : mark_safe +# B310 : urllib_urlopen +# B311 : random +# B312 : telnetlib +# B313 : xml_bad_cElementTree +# B314 : xml_bad_ElementTree +# B315 : xml_bad_expatreader +# B316 : xml_bad_expatbuilder +# B317 : xml_bad_sax +# B318 : xml_bad_minidom +# B319 : xml_bad_pulldom +# B321 : ftplib +# B323 : unverified_context +# B324 : hashlib_new_insecure_functions +# B401 : import_telnetlib +# B402 : import_ftplib +# B403 : import_pickle +# B404 : import_subprocess +# B405 : import_xml_etree +# B406 : import_xml_sax +# B407 : import_xml_expat +# B408 : import_xml_minidom +# B409 : import_xml_pulldom +# B411 : import_xmlrpclib +# B412 : import_httpoxy +# B413 : import_pycrypto +# B501 : request_with_no_cert_validation +# B502 : ssl_with_bad_version +# B503 : ssl_with_bad_defaults +# B504 : ssl_with_no_version +# B505 : weak_cryptographic_key +# B506 : yaml_load +# B507 : ssh_no_host_key_verification +# B601 : paramiko_calls +# B602 : subprocess_popen_with_shell_equals_true +# B603 : subprocess_without_shell_equals_true +# B604 : any_other_function_with_shell_equals_true +# B605 : start_process_with_a_shell +# B606 : start_process_with_no_shell +# B607 : start_process_with_partial_path +# B608 : hardcoded_sql_expressions +# B609 : linux_commands_wildcard_injection +# B610 : django_extra_used +# B611 : django_rawsql_used +# B701 : jinja2_autoescape_false +# B702 : use_of_mako_templates +# B703 : django_mark_safe + +# (optional) list included test IDs here, eg '[B101, B406]': +# IPAS 
Required Checkers. Do not disable these +# Additional checkers may be added if desired +tests: + [ 'B301', 'B302', 'B303', 'B304', 'B305', 'B306', 'B308', 'B310', 'B311', 'B312', 'B313', 'B314', 'B315', 'B316', 'B317', 'B318', 'B319', 'B321', 'B323', 'B324', 'B401', 'B402', 'B403', 'B404', 'B405', 'B406', 'B407', 'B408', 'B409', 'B411', 'B412', 'B413'] + +# (optional) list skipped test IDs here, eg '[B101, B406]': +# The following checkers are not required but be added to tests list if desired +skips: + [ 'B101', 'B102', 'B103', 'B104', 'B105', 'B106', 'B107', 'B108', 'B110', 'B112', 'B201', 'B501', 'B502', 'B503', 'B504', 'B505', 'B506', 'B507', 'B601', 'B602', 'B603', 'B604', 'B605', 'B606', 'B607', 'B608', 'B609', 'B610', 'B611', 'B701', 'B702', 'B703'] + +# Exclude projects that are not part of SYCL (we don't build them / run any content) +exclude_dirs: ['bolt', 'cross-project-tests', 'libc/', 'lldb', 'mlir', 'openmp', 'polly', 'pstl', 'third-party'] + +### (optional) plugin settings - some test plugins require configuration data +### that may be given here, per-plugin. All bandit test plugins have a built in +### set of sensible defaults and these will be used if no configuration is +### provided. It is not necessary to provide settings for every (or any) plugin +### if the defaults are acceptable. 
+ +any_other_function_with_shell_equals_true: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +assert_used: + skips: [] +hardcoded_tmp_directory: + tmp_dirs: + - /tmp + - /var/tmp + - /dev/shm +linux_commands_wildcard_injection: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +ssl_with_bad_defaults: + bad_protocol_versions: + - PROTOCOL_SSLv2 + - SSLv2_METHOD + - SSLv23_METHOD + - PROTOCOL_SSLv3 + - PROTOCOL_TLSv1 + - SSLv3_METHOD + - TLSv1_METHOD +ssl_with_bad_version: + bad_protocol_versions: + - PROTOCOL_SSLv2 + - SSLv2_METHOD + - SSLv23_METHOD + - PROTOCOL_SSLv3 + - PROTOCOL_TLSv1 + - SSLv3_METHOD + - TLSv1_METHOD +start_process_with_a_shell: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - 
os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +start_process_with_no_shell: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +start_process_with_partial_path: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +subprocess_popen_with_shell_equals_true: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system 
+ - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +subprocess_without_shell_equals_true: + no_shell: + - os.execl + - os.execle + - os.execlp + - os.execlpe + - os.execv + - os.execve + - os.execvp + - os.execvpe + - os.spawnl + - os.spawnle + - os.spawnlp + - os.spawnlpe + - os.spawnv + - os.spawnve + - os.spawnvp + - os.spawnvpe + - os.startfile + shell: + - os.system + - os.popen + - os.popen2 + - os.popen3 + - os.popen4 + - popen2.popen2 + - popen2.popen3 + - popen2.popen4 + - popen2.Popen3 + - popen2.Popen4 + - commands.getoutput + - commands.getstatusoutput + subprocess: + - subprocess.Popen + - subprocess.call + - subprocess.check_call + - subprocess.check_output + - subprocess.run +try_except_continue: + check_typed_exception: false +try_except_pass: + check_typed_exception: false +weak_cryptographic_key: + weak_key_size_dsa_high: 1024 + weak_key_size_dsa_medium: 2048 + weak_key_size_ec_high: 160 + weak_key_size_ec_medium: 224 + weak_key_size_rsa_high: 1024 + weak_key_size_rsa_medium: 2048 + diff --git a/devops/benchmarking/config.ini b/devops/benchmarking/config.ini new file mode 100644 index 0000000000000..988d1d9f08af9 --- /dev/null +++ b/devops/benchmarking/config.ini @@ -0,0 +1,44 @@ +; +; This file contains configuration options to change the behaviour of the +; benchmarking workflow in sycl-linux-run-tests.yml. +; +; DO NOT USE THE CONTENTS OF THIS FILE DIRECTLY -- Due to security concerns, The +; contents of this file must be sanitized first before use. 
+; See: /devops/scripts/benchmarking/common.py +; + +; Compute-benchmark compile/run options +[compute_bench] +; Value for -j during compilation of compute-benchmarks +compile_jobs = 40 +; Number of iterations to run compute-benchmark tests +iterations = 5000 + +; Options for benchmark result metrics (to record/compare against) +[metrics] +; Sets the metrics to record/aggregate in the historical average. +; Format: comma-separated list of column names in compute-benchmark results +recorded = Median,StdDev +; Sets the tolerance for each recorded metric and their allowed deviation from +; the historical average. Metrics not included here are not compared against +; when passing/failing benchmark results. +; Format: comma-separated list of : +tolerances = Median:0.08 + +; Options for computing historical averages +[average] +; Number of days (from today) to look back for results when computing historical +; average +cutoff_range = 7 +; Minimum number of samples required to compute a historical average +min_threshold = 10 + +; ONEAPI_DEVICE_SELECTOR linting/options +[device_selector] +; Backends to allow in device_selector +enabled_backends = level_zero,opencl,cuda,hip +; native_cpu is disabled + +; Devices to allow in device_selector +enabled_devices = cpu,gpu +; fpga is disabled diff --git a/devops/benchmarking/constants.ini b/devops/benchmarking/constants.ini new file mode 100644 index 0000000000000..9281ece8f4950 --- /dev/null +++ b/devops/benchmarking/constants.ini @@ -0,0 +1,48 @@ +; +; This file defines constants used throughout the benchmarking workflow in +; sycl-linux-run-tests.yml. If you're trying to change the behavior of this +; workflow, you're likely looking for /devops/benchmarking/config.ini instead. +; +; DO NOT USE THE CONTENTS OF THIS FILE DIRECTLY -- Due to security concerns, The +; contents of this file must be sanitized first before use. 
+; See: /devops/scripts/benchmarking/common.py +; + +; Constants for compute-benchmarks +[compute_bench] +git_repo = intel/compute-benchmarks +git_branch = master +git_commit = 230a3db4d8d03c0e9a663988f7c3abbd1137a1e0 +; path = ./compute-benchmarks + +; Constants for git repo storing benchmark performance results +[perf_res] +git_repo = intel/llvm-ci-perf-results +git_branch = main +; Path to clone performance result repo +; path = ./llvm-ci-perf-results + +; It was decided that paths should be hardcoded throughout this workflow for +; security reasons and ease of readability. Do not use paths as constants. + +; ; Constants for artifacts +; [artifact] +; ; Path to root folder storing benchmark CI artifact +; path = ./artifact +; ; Path (relative to artifact.path) to cache compute-benchmark results +; ; +; ; If a test result does not get moved out of this catch-all cache path, it is +; ; considered to have failed +; output_cache = ./artifact/failed_tests +; ; Path (relative to artifact.path) to cache passing compute-benchmark results +; passing_cache = ./artifact/passing_tests + +; [timestamp] +; ; Timestamp format used for +; format = %%Y%%m%%d_%%H%%M%%S + +; [benchmark_log] +; ; Log file for test cases that perform over the allowed variance +; slow = ./artifact/benchmarks_failed.log +; ; Log file for test cases that errored / failed to build +; error = ./artifact/benchmarks_errored.log diff --git a/devops/benchmarking/enabled_tests.conf b/devops/benchmarking/enabled_tests.conf new file mode 100644 index 0000000000000..20659cbea636d --- /dev/null +++ b/devops/benchmarking/enabled_tests.conf @@ -0,0 +1,8 @@ +# Test cases to be enabled: +api_overhead_benchmark_sycl +memory_benchmark_sycl +miscellaneous_benchmark_sycl +ulls_benchmark_sycl + +# As of January 2025, these are every compute-benchmark tests with a SYCL +# implementation. 
diff --git a/devops/containers/nightly.Dockerfile b/devops/containers/nightly.Dockerfile new file mode 100644 index 0000000000000..7639f676c1c95 --- /dev/null +++ b/devops/containers/nightly.Dockerfile @@ -0,0 +1,18 @@ +ARG base_tag=alldeps +ARG base_image=ghcr.io/intel/llvm/ubuntu2404_intel_drivers + +FROM $base_image:$base_tag + +USER root + +COPY scripts/drivers_entrypoint.sh /drivers_entrypoint.sh +RUN mkdir -p /opt/sycl +ADD sycl_linux.tar.gz /opt/sycl/ + +ENV PATH /opt/sycl/bin:$PATH +ENV LD_LIBRARY_PATH /opt/sycl/lib:$LD_LIBRARY_PATH + +USER sycl + +ENTRYPOINT ["/bin/bash", "/drivers_entrypoint.sh"] + diff --git a/devops/containers/ubuntu2204_base.Dockerfile b/devops/containers/ubuntu2204_base.Dockerfile index 07bb343cae93c..d08b6025e9908 100644 --- a/devops/containers/ubuntu2204_base.Dockerfile +++ b/devops/containers/ubuntu2204_base.Dockerfile @@ -8,25 +8,14 @@ USER root COPY scripts/install_build_tools.sh /install.sh RUN /install.sh -# By default Ubuntu sets an arbitrary UID value, that is different from host -# system. When CI passes default UID value of 1001, some of LLVM tools fail to -# discover user home directory and fail a few LIT tests. Fixes UID and GID to -# 1001, that is used as default by GitHub Actions. -RUN groupadd -g 1001 sycl && useradd sycl -u 1001 -g 1001 -m -s /bin/bash -# Add sycl user to video/irc groups so that it can access GPU -RUN usermod -aG video sycl -RUN usermod -aG irc sycl - -# group 109 is required for sycl user to access PVC card. 
-RUN groupadd -g 109 render -RUN usermod -aG render sycl - -# Allow sycl user to run as sudo -RUN echo "sycl ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers +COPY scripts/create-sycl-user.sh /user-setup.sh +RUN /user-setup.sh COPY actions/cached_checkout /actions/cached_checkout COPY actions/cleanup /actions/cleanup COPY scripts/docker_entrypoint.sh /docker_entrypoint.sh COPY scripts/install_drivers.sh /opt/install_drivers.sh +USER sycl + ENTRYPOINT ["/docker_entrypoint.sh"] diff --git a/devops/containers/ubuntu2204_build.Dockerfile b/devops/containers/ubuntu2204_build.Dockerfile index 313b455dbc25b..1aa814aaa775c 100644 --- a/devops/containers/ubuntu2204_build.Dockerfile +++ b/devops/containers/ubuntu2204_build.Dockerfile @@ -24,23 +24,18 @@ gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null && \ # Add rocm repo echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/6.1.1 jammy main" \ | tee --append /etc/apt/sources.list.d/rocm.list && \ -printf 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | tee /etc/apt/preferences.d/rocm-pin-600 && \ -apt update +printf 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | tee /etc/apt/preferences.d/rocm-pin-600 # Install the kernel driver -RUN apt install -yqq rocm-dev && \ +RUN apt update && apt install -yqq rocm-dev && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -# By default Ubuntu sets an arbitrary UID value, that is different from host -# system. When CI passes default UID value of 1001, some of LLVM tools fail to -# discover user home directory and fail a few LIT tests. Fixes UID and GID to -# 1001, that is used as default by GitHub Actions. 
-RUN groupadd -g 1001 sycl && useradd sycl -u 1001 -g 1001 -m -s /bin/bash -# Add sycl user to video/irc groups so that it can access GPU -RUN usermod -aG video sycl -RUN usermod -aG irc sycl +COPY scripts/create-sycl-user.sh /user-setup.sh +RUN /user-setup.sh COPY scripts/docker_entrypoint.sh /docker_entrypoint.sh +USER sycl + ENTRYPOINT ["/docker_entrypoint.sh"] diff --git a/devops/containers/ubuntu2204_intel_drivers.Dockerfile b/devops/containers/ubuntu2204_intel_drivers.Dockerfile index fb018f195a48f..b27aee1b633b0 100644 --- a/devops/containers/ubuntu2204_intel_drivers.Dockerfile +++ b/devops/containers/ubuntu2204_intel_drivers.Dockerfile @@ -5,7 +5,9 @@ FROM $base_image:$base_tag ENV DEBIAN_FRONTEND=noninteractive -ARG use_latest=true +ARG use_unstable_driver=true + +USER root RUN apt update && apt install -yqq wget @@ -16,7 +18,7 @@ COPY dependencies.json / RUN mkdir /runtimes ENV INSTALL_LOCATION=/runtimes RUN --mount=type=secret,id=github_token \ - if [ "$use_latest" = "true" ]; then \ + if [ "$use_unstable_driver" = "true" ]; then \ install_driver_opt=" --use-latest"; \ else \ install_driver_opt=" dependencies.json"; \ @@ -25,5 +27,7 @@ RUN --mount=type=secret,id=github_token \ COPY scripts/drivers_entrypoint.sh /drivers_entrypoint.sh +USER sycl + ENTRYPOINT ["/bin/bash", "/drivers_entrypoint.sh"] diff --git a/devops/containers/ubuntu2404_base.Dockerfile b/devops/containers/ubuntu2404_base.Dockerfile new file mode 100644 index 0000000000000..3cdad5b74366e --- /dev/null +++ b/devops/containers/ubuntu2404_base.Dockerfile @@ -0,0 +1,28 @@ +FROM ubuntu:24.04 + +ENV DEBIAN_FRONTEND=noninteractive + +USER root + +# Install SYCL prerequisites +COPY scripts/install_build_tools.sh /install.sh +RUN /install.sh + +# libzstd-dev installed by default on Ubuntu 24.04 is not compiled with -fPIC flag. +# This causes linking errors when building SYCL runtime. +# Bug: https://github.com/intel/llvm/issues/15935 +# Workaround: build zstd from sources with -fPIC flag. 
+COPY scripts/build_zstd_1_5_6_ub24.sh /build_zstd_1_5_6_ub24.sh +RUN /build_zstd_1_5_6_ub24.sh + +COPY scripts/create-sycl-user.sh /user-setup.sh +RUN /user-setup.sh + +COPY actions/cached_checkout /actions/cached_checkout +COPY actions/cleanup /actions/cleanup +COPY scripts/docker_entrypoint.sh /docker_entrypoint.sh +COPY scripts/install_drivers.sh /opt/install_drivers.sh + +USER sycl + +ENTRYPOINT ["/docker_entrypoint.sh"] diff --git a/devops/containers/ubuntu2404_build.Dockerfile b/devops/containers/ubuntu2404_build.Dockerfile new file mode 100644 index 0000000000000..c659eabbced51 --- /dev/null +++ b/devops/containers/ubuntu2404_build.Dockerfile @@ -0,0 +1,49 @@ +FROM nvidia/cuda:12.6.3-devel-ubuntu24.04 + +ENV DEBIAN_FRONTEND=noninteractive + +USER root + +# Install SYCL prerequisites +COPY scripts/install_build_tools.sh /install.sh +RUN /install.sh + +# libzstd-dev installed by default on Ubuntu 24.04 is not compiled with -fPIC flag. +# This causes linking errors when building SYCL runtime. +# Bug: https://github.com/intel/llvm/issues/15935 +# Workaround: build zstd from sources with -fPIC flag. +COPY scripts/build_zstd_1_5_6_ub24.sh /build_zstd_1_5_6_ub24.sh +RUN /build_zstd_1_5_6_ub24.sh + +SHELL ["/bin/bash", "-ec"] + +# Make the directory if it doesn't exist yet. +# This location is recommended by the distribution maintainers. 
+RUN mkdir --parents --mode=0755 /etc/apt/keyrings +# Download the key, convert the signing-key to a full +# keyring required by apt and store in the keyring directory +RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | \ +gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null && \ +# Add rocm repo +echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/amdgpu/6.3/ubuntu noble main" \ + | tee /etc/apt/sources.list.d/amdgpu.list && \ +echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/6.3 noble main" \ + | tee --append /etc/apt/sources.list.d/rocm.list && \ +echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' \ + | tee /etc/apt/preferences.d/rocm-pin-600 && \ +echo -e 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' \ + | tee /etc/apt/preferences.d/rocm-pin-600 +# Install the ROCM kernel driver +RUN apt update && apt install -yqq rocm-dev && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +COPY scripts/create-sycl-user.sh /user-setup.sh +RUN /user-setup.sh + +COPY scripts/docker_entrypoint.sh /docker_entrypoint.sh + +USER sycl + +ENTRYPOINT ["/docker_entrypoint.sh"] + diff --git a/devops/containers/ubuntu2404_intel_drivers.Dockerfile b/devops/containers/ubuntu2404_intel_drivers.Dockerfile new file mode 100644 index 0000000000000..a0970f3900141 --- /dev/null +++ b/devops/containers/ubuntu2404_intel_drivers.Dockerfile @@ -0,0 +1,33 @@ +ARG base_tag=latest +ARG base_image=ghcr.io/intel/llvm/ubuntu2404_base + +FROM $base_image:$base_tag + +ENV DEBIAN_FRONTEND=noninteractive + +ARG use_unstable_driver=true + +USER root + +RUN apt update && apt install -yqq wget + +COPY scripts/get_release.py / +COPY scripts/install_drivers.sh / +COPY dependencies.json / + +RUN mkdir /runtimes +ENV INSTALL_LOCATION=/runtimes +RUN --mount=type=secret,id=github_token \ + if [ "$use_unstable_driver" = "true" ]; then \ + install_driver_opt=" --use-latest"; \ + else \ + 
install_driver_opt=" dependencies.json"; \ + fi && \ + GITHUB_TOKEN=$(cat /run/secrets/github_token) /install_drivers.sh $install_driver_opt --all + +COPY scripts/drivers_entrypoint.sh /drivers_entrypoint.sh + +USER sycl + +ENTRYPOINT ["/bin/bash", "/drivers_entrypoint.sh"] + diff --git a/devops/containers/ubuntu2404_intel_drivers_igc_dev.Dockerfile b/devops/containers/ubuntu2404_intel_drivers_igc_dev.Dockerfile new file mode 100644 index 0000000000000..25cb0ff9819ed --- /dev/null +++ b/devops/containers/ubuntu2404_intel_drivers_igc_dev.Dockerfile @@ -0,0 +1,28 @@ +ARG base_tag=latest +ARG base_image=ghcr.io/intel/llvm/ubuntu2404_base + +FROM $base_image:$base_tag + +ENV DEBIAN_FRONTEND=noninteractive + +USER root + +RUN apt update && apt install -yqq libllvm14 + +COPY scripts/get_release.py / +COPY scripts/install_drivers.sh / +COPY dependencies.json / +COPY dependencies-igc-dev.json / + +RUN mkdir /runtimes +ENV INSTALL_LOCATION=/runtimes +RUN --mount=type=secret,id=github_token \ + install_driver_opt="dependencies.json dependencies-igc-dev.json --use-dev-igc"; \ + GITHUB_TOKEN=$(cat /run/secrets/github_token) /install_drivers.sh $install_driver_opt --all + +COPY scripts/drivers_entrypoint.sh /drivers_entrypoint.sh + +USER sycl + +ENTRYPOINT ["/bin/bash", "/drivers_entrypoint.sh"] + diff --git a/devops/dependencies-igc-dev.json b/devops/dependencies-igc-dev.json index 37cb1ee1d97f7..0f1ce44811665 100644 --- a/devops/dependencies-igc-dev.json +++ b/devops/dependencies-igc-dev.json @@ -1,10 +1,10 @@ { "linux": { "igc_dev": { - "github_tag": "igc-dev-7dad678", - "version": "7dad678", - "updated_at": "2024-11-24T10:48:51Z", - "url": "https://api.github.com/repos/intel/intel-graphics-compiler/actions/artifacts/2229466354/zip", + "github_tag": "igc-dev-a9e1ef2", + "version": "a9e1ef2", + "updated_at": "2025-03-09T09:38:44Z", + "url": "https://api.github.com/repos/intel/intel-graphics-compiler/actions/artifacts/2717684926/zip", "root": 
"{DEPS_ROOT}/opencl/runtime/linux/oclgpu" } } diff --git a/devops/dependencies.json b/devops/dependencies.json index 755a1f10625fb..e2eaab57ae049 100644 --- a/devops/dependencies.json +++ b/devops/dependencies.json @@ -1,15 +1,15 @@ { "linux": { "compute_runtime": { - "github_tag": "24.39.31294.12", - "version": "24.39.31294.12", - "url": "https://github.com/intel/compute-runtime/releases/tag/24.39.31294.12", + "github_tag": "25.09.32961.5", + "version": "25.09.32961.5", + "url": "https://github.com/intel/compute-runtime/releases/tag/25.09.32961.5", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "igc": { - "github_tag": "igc-1.0.17791.9", - "version": "1.0.17791.9", - "url": "https://github.com/intel/intel-graphics-compiler/releases/tag/igc-1.0.17791.9", + "github_tag": "v2.8.3", + "version": "v2.8.3", + "url": "https://github.com/intel/intel-graphics-compiler/releases/tag/v2.8.3", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "cm": { @@ -19,9 +19,9 @@ "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "level_zero": { - "github_tag": "v1.18.5", - "version": "v1.18.5", - "url": "https://github.com/oneapi-src/level-zero/releases/tag/v1.18.5", + "github_tag": "v1.20.2", + "version": "v1.20.2", + "url": "https://github.com/oneapi-src/level-zero/releases/tag/v1.20.2", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "tbb": { diff --git a/devops/scripts/benchmarking/aggregate.py b/devops/scripts/benchmarking/aggregate.py new file mode 100644 index 0000000000000..f62a8ffed83c5 --- /dev/null +++ b/devops/scripts/benchmarking/aggregate.py @@ -0,0 +1,205 @@ +import csv +import sys +from pathlib import Path +import heapq +import statistics +from common import Validate, SanitizedConfig +from abc import ABC, abstractmethod +import os + + +class Aggregator(ABC): + """ + Aggregator classes used to "aggregate" a pool of elements, and produce an + "average" (precisely, some "measure of central tendency") from the elements. 
+ """ + + @staticmethod + @abstractmethod + def get_type() -> str: + """ + Return a string indicating the type of average this aggregator + produces. + """ + pass + + @abstractmethod + def add(self, n: float): + """ + Add/aggregate an element to the pool of elements used by this aggregator + to produce an average calculation. + """ + pass + + @abstractmethod + def get_avg(self) -> float: + """ + Produce an average from the pool of elements aggregated using add(). + """ + pass + + +class SimpleMedian(Aggregator): + """ + Simple median calculation: if the number of samples being generated are low, + this is the fastest median method. + """ + + def __init__(self): + self.elements = [] + + @staticmethod + def get_type() -> str: + return "median" + + def add(self, n: float): + self.elements.append(n) + + def get_avg(self) -> float: + return statistics.median(self.elements) + + +class StreamingMedian(Aggregator): + """ + Calculate medians incrementally using heaps: Theoretically the fastest way + to calculate a median from a stream of elements, but realistically is only + faster when dealing with huge numbers of samples that would be generated by + i.e. enabling this workflow in precommit and using longer periods of time. + """ + + def __init__(self): + # Gist: we keep a minheap and a maxheap, and store the median as the top + # of the minheap. When a new element comes it gets put into the heap + # based on if the element is bigger than the current median. Then, the + # heaps are heapified and the median is repopulated by heapify. 
+ self.minheap_larger = [] + self.maxheap_smaller = [] + + @staticmethod + def get_type() -> str: + return "median" + + # Note: numbers on maxheap should be negative, as heapq + # is minheap by default + + def add(self, n: float): + if len(self.maxheap_smaller) == 0 or -self.maxheap_smaller[0] >= n: + heapq.heappush(self.maxheap_smaller, -n) + else: + heapq.heappush(self.minheap_larger, n) + + # Ensure minheap has more elements than maxheap + if len(self.maxheap_smaller) > len(self.minheap_larger) + 1: + heapq.heappush(self.minheap_larger, -heapq.heappop(self.maxheap_smaller)) + elif len(self.maxheap_smaller) < len(self.minheap_larger): + heapq.heappush(self.maxheap_smaller, -heapq.heappop(self.minheap_larger)) + + def get_avg(self) -> float: + if len(self.maxheap_smaller) == len(self.minheap_larger): + # Equal number of elements smaller and larger than "median": + # thus, there are two median values. The median would then become + # the average of both median values. + return (-self.maxheap_smaller[0] + self.minheap_larger[0]) / 2.0 + else: + # Otherwise, median is always in minheap, as minheap is always + # bigger + return -self.maxheap_smaller[0] + + +class Aggregate: + """ + Static class providing methods for aggregating data + """ + + @staticmethod + def hist_avg( + benchmark_name: str, res_dir: str, cutoff: str, aggregator=SimpleMedian + ): + if not os.path.isdir(res_dir): + print(f"Not a directory: {res_dir}.", file=sys.stderr) + exit(1) + + def get_csv_samples() -> list[str]: + """Get all valid .csv samples from the results folder.""" + cache_dir = Path(f"{res_dir}") + # Filter all benchmark .csv files in the result directory: + return list( + filter( + # Make sure the .csv "file" is a file: + lambda f: f.is_file() + # Make sure timestamp of .csv file is good format: + # [-19:-4] corresponds to the timestamp in the filename. 
+ and Validate.timestamp(str(f)[-19:-4]) + # Make sure timestamp is bigger than cutoff timestamp: + and str(f)[-19:-4] > cutoff, + cache_dir.glob(f"{benchmark_name}-*_*.csv"), + ) + ) + + # Calculate median of every desired metric: + samples_aggregate = dict() + filtered_samples = get_csv_samples() + if len(filtered_samples) == 0: + print( + f"WARNING: No results for {benchmark_name} found from {cutoff} to now", + file=sys.stderr, + ) + for sample_path in filtered_samples: + with open(sample_path, "r") as sample_file: + for sample in csv.DictReader(sample_file): + test = sample["TestCase"] + # Construct entry in aggregator for test if it doesn't exist + # already: + if test not in samples_aggregate: + samples_aggregate[test] = { + metric: aggregator() + for metric in SanitizedConfig.METRICS_TOLERANCES + } + + # For each metric of concern, add to aggregator: + for metric in SanitizedConfig.METRICS_TOLERANCES: + sample_value = Validate.sanitize_stat(sample[metric]) + if not isinstance(sample_value, float): + print( + f"Malformatted statistic in {str(sample_path)}: " + + f"'{sample[metric]}' for {test}." 
+ ) + exit(1) + # Add metric from sample for current test to aggregate: + samples_aggregate[test][metric].add(sample_value) + + # Calculate + write new average (from samples_aggregate) in new .csv file: + with open( + f"{res_dir}/{benchmark_name}-{aggregator.get_type()}.csv", "w" + ) as output_csv: + writer = csv.DictWriter( + output_csv, + fieldnames=["TestCase", *SanitizedConfig.METRICS_TOLERANCES.keys()], + ) + writer.writeheader() + for test in samples_aggregate: + writer.writerow( + {"TestCase": test} + | { + metric: samples_aggregate[test][metric].get_avg() + for metric in SanitizedConfig.METRICS_TOLERANCES + } + ) + + +if __name__ == "__main__": + if len(sys.argv) != 5: + print( + f"Usage: {sys.argv[0]} " + ) + exit(1) + if not Validate.timestamp(sys.argv[4]): + print(f"Bad cutoff timestamp, please use YYYYMMDD_HHMMSS.", file=sys.stderr) + exit(1) + if not Validate.filepath(sys.argv[1]): + print(f"Not a valid filepath: {sys.argv[1]}", file=sys.stderr) + exit(1) + # If the filepath provided passed filepath validation, then it is clean + SanitizedConfig.load(sys.argv[1]) + + Aggregate.hist_avg(sys.argv[2], sys.argv[3], sys.argv[4]) diff --git a/devops/scripts/benchmarking/benchmark.sh b/devops/scripts/benchmarking/benchmark.sh new file mode 100755 index 0000000000000..bbfd669774f9a --- /dev/null +++ b/devops/scripts/benchmarking/benchmark.sh @@ -0,0 +1,300 @@ +#!/bin/sh + +# +# benchmark.sh: Benchmark dpcpp using compute-benchmarks +# + +usage () { + >&2 echo "Usage: $0 -t [-B ] + -n Github runner name -- Required + -c Clean up working directory + -C Clean up working directory and exit + -s Cache results + +This script builds and runs benchmarks from compute-benchmarks." + exit 1 +} + +# Ensures test cases read from enabled_tests.conf contains no malicious content +_validate_testname () { + if [ -n "$(printf "%s" "$1" | sed "s/[a-zA-Z_]*//g")" ]; then + echo "Illegal characters in $TEST_CONFIG. 
Permitted characters: a-zA-Z_"
+        exit 1
+    fi
+}
+
+clone_perf_res() {
+    echo "### Cloning llvm-ci-perf-results ($SANITIZED_PERF_RES_GIT_REPO:$SANITIZED_PERF_RES_GIT_BRANCH) ###"
+    git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results
+    [ "$?" -ne 0 ] && exit "$?"
+}
+
+clone_compute_bench() {
+    echo "### Cloning compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###"
+    git clone -b "$SANITIZED_COMPUTE_BENCH_GIT_BRANCH" \
+        --recurse-submodules "https://github.com/$SANITIZED_COMPUTE_BENCH_GIT_REPO" \
+        ./compute-benchmarks
+    if [ ! -d "./compute-benchmarks" ]; then
+        echo "Failed to clone compute-benchmarks."
+        exit 1
+    elif [ -n "$SANITIZED_COMPUTE_BENCH_GIT_COMMIT" ]; then
+        cd ./compute-benchmarks
+        git checkout "$SANITIZED_COMPUTE_BENCH_GIT_COMMIT"
+        if [ "$?" -ne 0 ]; then
+            echo "Failed to get compute-benchmarks commit '$SANITIZED_COMPUTE_BENCH_GIT_COMMIT'."
+            exit 1
+        fi
+        cd -
+    fi
+}
+
+build_compute_bench() {
+    echo "### Building compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###"
+    mkdir ./compute-benchmarks/build && cd ./compute-benchmarks/build &&
+    # No reason to turn on ccache, if this docker image will be disassembled later on
+    cmake .. -DBUILD_SYCL=ON -DBUILD_L0=OFF -DBUILD_OCL=OFF -DCCACHE_ALLOWED=FALSE
+    # TODO enable mechanism for opting into L0 and OCL -- the concept is to
+    # subtract OCL/L0 times from SYCL times in hopes of deriving SYCL runtime
+    # overhead, but this is mostly an idea that needs to be mulled upon.
+
+    if [ "$?"
-eq 0 ]; then + while IFS= read -r case; do + # Skip lines starting with '#' + [ "${case##\#*}" ] || continue + + _validate_testname "$case" + make "-j$SANITIZED_COMPUTE_BENCH_COMPILE_JOBS" "$case" + done < "$TESTS_CONFIG" + fi + cd - +} + +# Check if the number of samples for a given test case is less than a threshold +# set in benchmark-ci.conf +# +# Usage: +samples_under_threshold () { + # Directory doesn't exist, samples automatically under threshold + [ ! -d "./llvm-ci-perf-results/$1" ] && return 0 + file_count="$(find "./llvm-ci-perf-results/$1" -maxdepth 1 -type f | wc -l )" + [ "$file_count" -lt "$SANITIZED_AVERAGE_MIN_THRESHOLD" ] +} + +# Check for a regression via compare.py +# +# Usage: check_regression +check_regression() { + csv_relpath="$(dirname "$1")" + csv_name="$(basename "$1")" + if samples_under_threshold "$csv_relpath"; then + echo "Not enough samples to construct a good average, performance\ + check skipped!" + return 0 # Success status + fi + python "$DEVOPS_PATH/scripts/benchmarking/compare.py" \ + "$DEVOPS_PATH" "$csv_relpath" "$csv_name" + return $? +} + +# Move the results of our benchmark into the git repo, and save benchmark +# results to artifact archive +# +# Usage: cache +cache() { + mkdir -p "$(dirname ./artifact/passing_tests/$1)" "$(dirname ./artifact/failed_tests/$1)" + cp "./artifact/failed_tests/$1" "./artifact/passing_tests/$1" + mkdir -p "$(dirname ./llvm-ci-perf-results/$1)" + mv "./artifact/failed_tests/$1" "./llvm-ci-perf-results/$1" +} + +# Check for a regression + cache if no regression found +# +# Usage: check_and_cache +check_and_cache() { + echo "Checking $1..." + if check_regression $1; then + if [ "$CACHE_RESULTS" -eq "1" ]; then + echo "Caching $1..." + cache $1 + fi + else + [ "$CACHE_RESULTS" -eq "1" ] && echo "Regression found -- Not caching!" 
+ fi +} + +# Run and process the results of each enabled benchmark in enabled_tests.conf +process_benchmarks() { + echo "### Running and processing selected benchmarks ###" + if [ -z "$TESTS_CONFIG" ]; then + echo "Setting tests to run via cli is not currently supported." + exit 1 + else + rm ./artifact/benchmarks_errored.log ./artifact/benchmarks_failed.log 2> /dev/null + mkdir -p ./artifact + # Loop through each line of enabled_tests.conf, but ignore lines in the + # test config starting with #'s: + grep "^[^#]" "$TESTS_CONFIG" | while read -r testcase; do + _validate_testname "$testcase" + echo "# Running $testcase..." + + # The benchmark results git repo and this script's output both share + # the following directory structure: + # + # /// + # + # Instead of specifying 2 paths with a slightly different root + # folder name for every function we use, we can use a relative path + # to represent the file in both folders. + # + # Figure out the relative path of our testcase result: + test_dir_relpath="$DEVICE_SELECTOR_DIRNAME/$RUNNER/$testcase" + output_csv_relpath="$test_dir_relpath/$testcase-$TIMESTAMP.csv" + mkdir -p "./artifact/failed_tests/$test_dir_relpath" # Ensure directory exists + + # Tests are first placed in ./artifact/failed_tests, and are only + # moved to passing_tests or the performance results repo if the + # benchmark results are passing + output_csv="./artifact/failed_tests/$output_csv_relpath" + "./compute-benchmarks/build/bin/$testcase" --csv \ + --iterations="$SANITIZED_COMPUTE_BENCH_ITERATIONS" > "$output_csv" + + exit_status="$?" 
+ if [ "$exit_status" -eq 0 ] && [ -s "$output_csv" ]; then + # Filter out header lines not in csv format: + tail +8 "$output_csv" > .tmp_res + mv .tmp_res "$output_csv" + check_and_cache $output_csv_relpath + else + echo "[ERROR] $testcase returned exit status $exit_status" + echo "-- $testcase: error $exit_status" >> ./artifact/benchmarks_errored.log + fi + done + fi +} + +# Handle failures + produce a report on what failed +process_results() { + fail=0 + if [ -s ./artifact/benchmarks_failed.log ]; then + printf "\n### Tests performing over acceptable range of average: ###\n" + cat ./artifact/benchmarks_failed.log + echo "" + fail=2 + fi + if [ -s ./artifact/benchmarks_errored.log ]; then + printf "\n### Tests that failed to run: ###\n" + cat ./artifact/benchmarks_errored.log + echo "" + fail=1 + fi + exit $fail +} + +cleanup() { + echo "### Cleaning up compute-benchmark builds from prior runs ###" + rm -rf ./compute-benchmarks + rm -rf ./llvm-ci-perf-results + [ ! -z "$_exit_after_cleanup" ] && exit +} + +load_configs() { + # This script needs to know where the intel/llvm "/devops" directory is, + # containing all the configuration files and the compare script. + # + # If this is not provided, this function tries to guess where the files + # are based on how the script is called, and verifies that all necessary + # configs and scripts are reachable. + + # This benchmarking script is usually at: + # + # /devops/scripts/benchmarking/benchmark.sh + # + # Derive /devops based on location of this script: + [ -z "$DEVOPS_PATH" ] && DEVOPS_PATH="$(dirname "$0")/../.." + if [ -z "$(printf '%s' "$DEVOPS_PATH" | grep -oE '^[a-zA-Z0-9._\/-]+$')" ]; then + echo "Bad DEVOPS_PATH, please specify DEVOPS_PATH variable." 
+ exit 1 + fi + + TESTS_CONFIG="$(realpath "$DEVOPS_PATH/benchmarking/enabled_tests.conf")" + COMPARE_PATH="$(realpath "$DEVOPS_PATH/scripts/benchmarking/compare.py")" + LOAD_CONFIG_PY="$(realpath "$DEVOPS_PATH/scripts/benchmarking/load_config.py")" + + for file in \ + "$TESTS_CONFIG" "$COMPARE_PATH" "$LOAD_CONFIG_PY" + do + if [ ! -f "$file" ]; then + echo "Please provide path to /devops in DEVOPS_PATH." + exit -1 + fi + done + + $(python "$LOAD_CONFIG_PY" "$DEVOPS_PATH" config) + $(python "$LOAD_CONFIG_PY" "$DEVOPS_PATH" constants) +} + +##### + +load_configs + +COMPUTE_BENCH_COMPILE_FLAGS="" +CACHE_RESULTS="0" +# Timestamp format is YYYYMMDD_HHMMSS +TIMESTAMP="$(date +%Y%m%d_%H%M%S)" + +# CLI flags + overrides to configuration options: +while getopts "n:cCs" opt; do + case "$opt" in + n) + if [ -n "$(printf "%s" "$OPTARG" | sed "s/[a-zA-Z0-9_-]*//g")" ]; then + echo "Illegal characters in runner name." + exit 1 + fi + RUNNER="$OPTARG" + ;; + # Cleanup status is saved in a var to ensure all arguments are processed before + # performing cleanup + c) _cleanup=1 ;; + C) _cleanup=1 && _exit_after_cleanup=1 ;; + s) CACHE_RESULTS=1;; + \?) usage ;; + esac +done + +# Check all necessary variables exist: +if [ -z "$CMPLR_ROOT" ]; then + echo "Please set CMPLR_ROOT first; it is needed by compute-benchmarks to build." + exit 1 +elif [ -z "$ONEAPI_DEVICE_SELECTOR" ]; then + echo "Please set ONEAPI_DEVICE_SELECTOR first to specify which device to use." + exit 1 +elif [ -z "$RUNNER" ]; then + echo "Please specify runner name using -n first; it is needed for storing/comparing benchmark results." 
+ exit 1 +fi + +# Make sure ONEAPI_DEVICE_SELECTOR doesn't try to enable multiple devices at the +# same time, or use specific device id's +_dev_sel_backend_re="$(echo "$SANITIZED_DEVICE_SELECTOR_ENABLED_BACKENDS" | sed 's/,/|/g')" +_dev_sel_device_re="$(echo "$SANITIZED_DEVICE_SELECTOR_ENABLED_DEVICES" | sed 's/,/|/g')" +_dev_sel_re="s/($_dev_sel_backend_re):($_dev_sel_device_re)//" +if [ -n "$(echo "$ONEAPI_DEVICE_SELECTOR" | sed -E "$_dev_sel_re")" ]; then + echo "Unsupported ONEAPI_DEVICE_SELECTOR value: please ensure only one \ +device is selected, and devices are not selected by indices." + echo "Enabled backends: $SANITIZED_DEVICE_SELECTOR_ENABLED_BACKENDS" + echo "Enabled device types: $SANITIZED_DEVICE_SELECTOR_ENABLED_DEVICES" + exit 1 +fi +# ONEAPI_DEVICE_SELECTOR values are not valid directory names in unix: this +# value lets us use ONEAPI_DEVICE_SELECTOR as actual directory names +DEVICE_SELECTOR_DIRNAME="$(echo "$ONEAPI_DEVICE_SELECTOR" | sed 's/:/-/')" + +# Clean up and delete all cached files if specified: +[ ! -z "$_cleanup" ] && cleanup +# Clone and build only if they aren't already cached/deleted: +[ ! -d ./llvm-ci-perf-results ] && clone_perf_res +[ ! -d ./compute-benchmarks ] && clone_compute_bench +[ ! -d ./compute-benchmarks/build ] && build_compute_bench +# Process benchmarks: +process_benchmarks +process_results \ No newline at end of file diff --git a/devops/scripts/benchmarking/common.py b/devops/scripts/benchmarking/common.py new file mode 100644 index 0000000000000..c400b686db90f --- /dev/null +++ b/devops/scripts/benchmarking/common.py @@ -0,0 +1,196 @@ +import re +import os +import sys +import string +import configparser + + +class Validate: + """Static class containing methods for validating various fields""" + + @staticmethod + def filepath(path: str) -> bool: + """ + Returns True if path is clean (no illegal characters), otherwise False. 
+ """ + filepath_re = re.compile(r"[a-zA-Z0-9\/\._\-]+") + return filepath_re.match(path) is not None + + @staticmethod + def timestamp(t: str) -> bool: + """ + Returns True if t is in form YYYYMMDD_HHMMSS, otherwise False. + """ + timestamp_re = re.compile( + r"^\d{4}(0[1-9]|1[0-2])([0-2][0-9]|3[01])_([01][0-9]|2[0-3])[0-5][0-9][0-5][0-9]$" + ) + return timestamp_re.match(t) is not None + + @staticmethod + def sanitize_stat(stat: str) -> float: + """ + Sanitize statistics found in compute-benchmark output csv files. Returns + float if sanitized, None if not sanitizable. + """ + # Get rid of % + if stat[-1] == "%": + stat = stat[:-1] + + # Cast to float: If cast succeeds, the statistic is clean. + try: + return float(stat) + except ValueError: + return None + + +class SanitizedConfig: + """ + Static class for holding sanitized configuration values used within python. + + Configuration option names follow
_