From 976c0bcbc449db89588837b44805746e72d211ea Mon Sep 17 00:00:00 2001
From: Xuehai Pan
Date: Wed, 15 Oct 2025 22:19:06 +0800
Subject: [PATCH 1/2] [CI] fix ROCm CI

---
 .github/workflows/ci.yml | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1782cedf3..279898147 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -90,7 +90,7 @@ jobs:
           name: self-hosted-amd
           # Format: [Nightly-]ROCm-<major>.<minor>[.<patch>]. E.g., "ROCm-6.4" or "Nightly-ROCm-7.0".
           # Use "Nightly-" prefix to use torch nightly builds.
-          toolkit: Nightly-ROCm-7.0
+          toolkit: ROCm-6.3
         - tags: [macos-latest]
           name: macos-latest
           toolkit: Metal  # or Nightly-Metal
@@ -352,8 +352,6 @@ jobs:
       - name: Run ROCm tests with Python ${{ matrix.python-version }} (${{ matrix.runner.toolkit }})
         id: rocm-tests
         if: contains(matrix.runner.toolkit, 'ROCm')
-        # FIXME: ROCm test incorrectly skips tests
-        continue-on-error: true
         run: |
           cd testing
           PYTEST=(
@@ -362,7 +360,6 @@ jobs:
           )
           "${PYTEST[@]}" --maxfail=3 --numprocesses=4 \
             ./python/amd/test_tilelang_test_amd.py
-          echo "::error::ROCm tests are known to be skipped incorrectly due to ROCm TVM build issues." >&2
 
       # Apple Metal tests
       - name: Run Metal tests with Python ${{ matrix.python-version }} (${{ matrix.runner.toolkit }})

From abcde0dc49e8af9a597a1c5586f36d3b0f376347 Mon Sep 17 00:00:00 2001
From: Xuehai Pan
Date: Wed, 15 Oct 2025 23:08:10 +0800
Subject: [PATCH 2/2] feat: add a hook to error out on no test runs

---
 examples/conftest.py | 24 ++++++++++++++++++++++++
 testing/conftest.py  | 24 ++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/examples/conftest.py b/examples/conftest.py
index 13f3cbd2a..9f49d40a9 100644
--- a/examples/conftest.py
+++ b/examples/conftest.py
@@ -1,5 +1,6 @@
 import os
 import random
+import pytest
 
 os.environ["PYTHONHASHSEED"] = "0"
 
@@ -18,3 +19,26 @@
     pass
 else:
     np.random.seed(0)
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    """Ensure that at least one test is collected. Error out if all tests are skipped."""
+    known_types = {
+        "failed",
+        "passed",
+        "skipped",
+        "deselected",
+        "xfailed",
+        "xpassed",
+        "warnings",
+        "error",
+    }
+    if (sum(
+            len(terminalreporter.stats.get(k, []))
+            for k in known_types.difference({"skipped", "deselected"})) == 0):
+        terminalreporter.write_sep(
+            "!",
+            (f"Error: No tests were collected. "
+             f"{dict(sorted((k, len(v)) for k, v in terminalreporter.stats.items()))}"),
+        )
+        pytest.exit("No tests were collected.", returncode=5)
diff --git a/testing/conftest.py b/testing/conftest.py
index 13f3cbd2a..9f49d40a9 100644
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -1,5 +1,6 @@
 import os
 import random
+import pytest
 
 os.environ["PYTHONHASHSEED"] = "0"
 
@@ -18,3 +19,26 @@
     pass
 else:
     np.random.seed(0)
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    """Ensure that at least one test is collected. Error out if all tests are skipped."""
+    known_types = {
+        "failed",
+        "passed",
+        "skipped",
+        "deselected",
+        "xfailed",
+        "xpassed",
+        "warnings",
+        "error",
+    }
+    if (sum(
+            len(terminalreporter.stats.get(k, []))
+            for k in known_types.difference({"skipped", "deselected"})) == 0):
+        terminalreporter.write_sep(
+            "!",
+            (f"Error: No tests were collected. "
+             f"{dict(sorted((k, len(v)) for k, v in terminalreporter.stats.items()))}"),
+        )
+        pytest.exit("No tests were collected.", returncode=5)
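
A minimal sketch of how the new hook is expected to behave, assuming a hypothetical
module test_all_skipped.py collected alongside the updated testing/conftest.py (the
file name and skip reason below are illustrative only, not part of the patch):

    # test_all_skipped.py -- hypothetical module used only to exercise the hook
    import pytest

    @pytest.mark.skip(reason="simulate a run where every test is skipped")
    def test_placeholder():
        assert True

With every test skipped, the failed/passed/xfailed/xpassed/warnings/error counters in
terminalreporter.stats should all stay at zero, so pytest_terminal_summary prints the
"Error: No tests were collected." banner and pytest.exit(..., returncode=5) should make
the run finish with exit status 5 instead of 0, which lets the CI job fail instead of
silently passing an all-skipped ROCm run.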