diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml
index d82adda..8e536da 100644
--- a/.github/workflows/pr.yaml
+++ b/.github/workflows/pr.yaml
@@ -28,24 +28,24 @@ jobs:
     uses: rapidsai/shared-workflows/.github/workflows/conda-python-build.yaml@branch-24.10
     with:
       build_type: pull-request
-      # Package is pure Python and only ever requires one build.
-      matrix_filter: 'map(select(.ARCH == "amd64" and (.LINUX_VER | test("centos")|not))) | sort_by(.PY_VER | split(".") | map(tonumber)) | [.[-1]]'
+      # This selects "ARCH=amd64 + the latest supported Python + CUDA".
+      matrix_filter: map(select(.ARCH == "amd64")) | max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]) | [.]
   conda-python-tests:
     needs: conda-python-build
     secrets: inherit
     uses: rapidsai/shared-workflows/.github/workflows/conda-python-tests.yaml@branch-24.10
     with:
       build_type: pull-request
-      # Package is pure Python and only ever requires one build.
-      matrix_filter: 'map(select(.ARCH == "amd64" and (.LINUX_VER | test("centos")|not))) | sort_by(.PY_VER | split(".") | map(tonumber)) | [.[-1]]'
+      # This selects "ARCH=amd64 + the latest supported Python + CUDA".
+      matrix_filter: map(select(.ARCH == "amd64")) | max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]) | [.]
   wheel-build:
     needs: checks
     secrets: inherit
     uses: rapidsai/shared-workflows/.github/workflows/wheels-build.yaml@branch-24.10
     with:
       build_type: pull-request
-      # Package is pure Python and only ever requires one build.
-      matrix_filter: 'map(select((.LINUX_VER | test("centos")|not))) | sort_by((.PY_VER | split(".") | map(tonumber))) | [.[-1] + {ARCH: "amd64"}]'
+      # This selects the latest supported Python + CUDA
+      matrix_filter: max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]) | [.]
       script: "ci/build_wheel.sh"
   wheel-tests:
     needs: wheel-build
@@ -53,6 +53,6 @@ jobs:
     uses: rapidsai/shared-workflows/.github/workflows/wheels-test.yaml@branch-24.10
     with:
       build_type: pull-request
-      # Package is pure Python and only ever requires one build.
-      matrix_filter: 'map(select(.ARCH == "amd64" and (.LINUX_VER | test("centos")|not))) | sort_by(.PY_VER | split(".") | map(tonumber)) | [.[-1]]'
-      script: "ci/test_wheel.sh"
\ No newline at end of file
+      # This selects the latest supported Python + CUDA
+      matrix_filter: max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))]) | [.]
+      script: "ci/test_wheel.sh"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b37a358..45227a8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,20 +2,20 @@ repos:
   - repo: https://github.com/psf/black
-    rev: 23.11.0
+    rev: 24.8.0
     hooks:
       - id: black
         files: jupyterlab_nvdashboard/.*
         # Explicitly specify the pyproject.toml at the repo root, not per-project.
         args: ['--config', 'pyproject.toml']
   - repo: https://github.com/PyCQA/flake8
-    rev: 6.1.0
+    rev: 7.1.1
     hooks:
       - id: flake8
        args: ['--config=.flake8']
        files: jupyterlab_nvdashboard/.*$
   - repo: https://github.com/rapidsai/dependency-file-generator
-    rev: v1.13.11
+    rev: v1.14.0
     hooks:
       - id: rapids-dependency-file-generator
        args: ['--clean']
diff --git a/ci/build_wheel.sh b/ci/build_wheel.sh
index 5a765f1..773e6c3 100755
--- a/ci/build_wheel.sh
+++ b/ci/build_wheel.sh
@@ -40,4 +40,4 @@ python -m build -s -w
 
 rapids-logger "Uploading JupyterLab NVDashboard wheels to S3"
 # Upload Python wheels to S3
-RAPIDS_PY_WHEEL_NAME="${package_name}" rapids-upload-wheels-to-s3 dist
+RAPIDS_PY_WHEEL_NAME="${package_name}" RAPIDS_PY_WHEEL_PURE="1" rapids-upload-wheels-to-s3 dist
diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh
index 8497bd5..e9bc4fc 100755
--- a/ci/test_wheel.sh
+++ b/ci/test_wheel.sh
@@ -7,7 +7,7 @@ set -eou pipefail
 package_name="jupyterlab-nvdashboard"
 
 rapids-logger "Downloading artifacts from previous jobs"
-RAPIDS_PY_WHEEL_NAME="${package_name}" rapids-download-wheels-from-s3 ./dist
+RAPIDS_PY_WHEEL_NAME="${package_name}" RAPIDS_PY_WHEEL_PURE="1" rapids-download-wheels-from-s3 ./dist
 
 # echo to expand wildcard before adding `[extra]` required for pip
 python -m pip install $(echo ./dist/jupyterlab_nvdashboard*.whl)[test]
diff --git a/jupyterlab_nvdashboard/apps/gpu.py b/jupyterlab_nvdashboard/apps/gpu.py
index 6ba8eee..f8db278 100644
--- a/jupyterlab_nvdashboard/apps/gpu.py
+++ b/jupyterlab_nvdashboard/apps/gpu.py
@@ -26,9 +26,7 @@
         # Maximum bandwidth is bidirectional, divide by 2 for separate RX & TX
         max_bw = (
-            max(
-                sum(i.value.ullVal for i in bw) * 1024**2 for bw in bandwidth
-            )
+            max(sum(i.value.ullVal for i in bw) * 1024**2 for bw in bandwidth)
             / 2
         )
     except (IndexError, pynvml.nvml.NVMLError_NotSupported):
diff --git a/jupyterlab_nvdashboard/tests/test_cpu_handlers.py b/jupyterlab_nvdashboard/tests/test_cpu_handlers.py
index a67f068..f967453 100644
--- a/jupyterlab_nvdashboard/tests/test_cpu_handlers.py
+++ b/jupyterlab_nvdashboard/tests/test_cpu_handlers.py
@@ -17,9 +17,10 @@ def mock_handler(monkeypatch):
 
 @pytest.fixture
 def handler_args():
-    with patch("tornado.web.Application") as mock_application, patch(
-        "tornado.httputil.HTTPServerRequest"
-    ) as mock_request:
+    with (
+        patch("tornado.web.Application") as mock_application,
+        patch("tornado.httputil.HTTPServerRequest") as mock_request,
+    ):
         # Mock the settings to return appropriate values
         mock_settings = {
             "base_url": "/",
diff --git a/jupyterlab_nvdashboard/tests/test_gpu_handlers.py b/jupyterlab_nvdashboard/tests/test_gpu_handlers.py
index 97752a0..7e997ca 100644
--- a/jupyterlab_nvdashboard/tests/test_gpu_handlers.py
+++ b/jupyterlab_nvdashboard/tests/test_gpu_handlers.py
@@ -23,9 +23,10 @@ def mock_handler(monkeypatch):
 
 @pytest.fixture
 def handler_args():
-    with patch("tornado.web.Application") as mock_application, patch(
-        "tornado.httputil.HTTPServerRequest"
-    ) as mock_request:
+    with (
+        patch("tornado.web.Application") as mock_application,
+        patch("tornado.httputil.HTTPServerRequest") as mock_request,
+    ):
         # Mock the settings to return appropriate values
         mock_settings = {
             "base_url": "/",
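
Note on the new matrix_filter (illustrative only, not part of the patch above): the jq expression keeps only amd64 entries and then picks the single entry with the highest (PY_VER, CUDA_VER) pair. Below is a minimal sketch of that behavior; the sample matrix and its ARCH/PY_VER/CUDA_VER values are hypothetical, invented for illustration.

# Feed a made-up matrix through the same jq filter used in pr.yaml.
echo '[
  {"ARCH": "amd64", "PY_VER": "3.9",  "CUDA_VER": "11.8.0"},
  {"ARCH": "amd64", "PY_VER": "3.11", "CUDA_VER": "12.5.1"},
  {"ARCH": "arm64", "PY_VER": "3.11", "CUDA_VER": "12.5.1"}
]' | jq -c 'map(select(.ARCH == "amd64"))
  | max_by([(.PY_VER|split(".")|map(tonumber)), (.CUDA_VER|split(".")|map(tonumber))])
  | [.]'
# Prints a one-element matrix containing only the amd64 entry with the
# highest Python/CUDA version pair:
# [{"ARCH":"amd64","PY_VER":"3.11","CUDA_VER":"12.5.1"}]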