diff --git a/context/notebooks.sh b/context/notebooks.sh
index ca7462bb..dc982828 100755
--- a/context/notebooks.sh
+++ b/context/notebooks.sh
@@ -9,7 +9,13 @@ set -euo pipefail
-NOTEBOOK_REPOS=(cudf cuml cugraph)
+# TODO: restore cuml notebook testing on CUDA 13 once there are CUDA 13 xgboost packages and 'rapids' depends on them
+# ref: https://github.com/rapidsai/integration/issues/798
+if [[ "${CUDA_VER%%.*}" == "12" ]]; then
+  NOTEBOOK_REPOS=(cudf cuml cugraph)
+else
+  NOTEBOOK_REPOS=(cudf cugraph)
+fi
 mkdir -p /notebooks /dependencies
 for REPO in "${NOTEBOOK_REPOS[@]}"; do
diff --git a/cuvs-bench/README.md b/cuvs-bench/README.md
index 89986300..ec54495c 100644
--- a/cuvs-bench/README.md
+++ b/cuvs-bench/README.md
@@ -36,7 +36,7 @@ export DATA_FOLDER=path/to/store/results/and/data
 docker run --gpus all --rm -it \
     -v $DATA_FOLDER:/home/rapids/benchmarks \
     -u $(id -u) \
-    rapidsai/cuvs-bench:25.10a-cuda12.9-py3.13 \
+    rapidsai/cuvs-bench:25.10a-cuda13.0-py3.13 \
     "--dataset deep-image-96-angular" \
     "--normalize" \
     "--algorithms cuvs_cagra" \
@@ -47,7 +47,7 @@ Where:
 - `DATA_FOLDER=path/to/store/results/and/data`: Results and datasets will be written to this host folder.
 - `-u $(id -u)`: This flag allows the container to use the host user for permissions
-- `rapidsai/cuvs-bench:25.10a-cuda12.9-py3.13`: Image to use, either `cuvs-bench` or `cuvs-bench-datasets`, cuVS version, CUDA version, and Python version.
+- `rapidsai/cuvs-bench:25.10a-cuda13.0-py3.13`: Image to use, either `cuvs-bench` or `cuvs-bench-datasets`, cuVS version, CUDA version, and Python version.
 - "--dataset deep-image-96-angular": Dataset name(s). See https://docs.rapids.ai/api/cuvs/nightly/cuvs_bench for more details.
 - "--normalize": Whether to normalize the dataset, leave string empty ("") to not normalize.
 - "--algorithms cuvs_cagra": What algorithm(s) to use as a ; separated list, as well as any other argument to pass to `cuvs_bench.run`.
@@ -74,7 +74,7 @@ export DATA_FOLDER=path/to/store/results/and/data
 docker run --gpus all --rm -it \
     -v $DATA_FOLDER:/home/rapids/benchmarks \
     -u $(id -u) \
-    rapidsai/cuvs-bench:25.10a-cuda12.9-py3.13 \
+    rapidsai/cuvs-bench:25.10a-cuda13.0-py3.13 \
     --entrypoint /bin/bash
 ```
diff --git a/dockerhub-readme.md b/dockerhub-readme.md
index 64cc3bdb..7cac5d1e 100644
--- a/dockerhub-readme.md
+++ b/dockerhub-readme.md
@@ -38,7 +38,7 @@ There are two types:
 The tag naming scheme for RAPIDS images incorporates key platform details into the tag as shown below:
 ```text
-25.10-cuda12.9-py3.13
+25.10-cuda13.0-py3.13
  ^       ^       ^
  |       |       Python version
  |       |
@@ -47,7 +47,7 @@ The tag naming scheme for RAPIDS images incorporates key platform details into t
 RAPIDS version
 ```
-**Note: Nightly builds of the images have the RAPIDS version appended with an `a` (ie `25.10a-cuda12.9-py3.13`)**
+**Note: Nightly builds of the images have the RAPIDS version appended with an `a` (ie `25.10a-cuda13.0-py3.13`)**
 ## Usage
@@ -80,7 +80,7 @@ $ docker run \
     -e EXTRA_CONDA_PACKAGES="jq" \
     -e EXTRA_PIP_PACKAGES="beautifulsoup4" \
     -p 8888:8888 \
-    rapidsai/notebooks:25.10-cuda12.9-py3.13
+    rapidsai/notebooks:25.10-cuda13.0-py3.13
 ```
 ### Bind Mounts
@@ -105,7 +105,7 @@ $ docker run \
     --gpus all \
     --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 \
     -v $(pwd)/environment.yml:/home/rapids/environment.yml \
-    rapidsai/base:25.10-cuda12.9-py3.13
+    rapidsai/base:25.10-cuda13.0-py3.13
 ```
 ### Use JupyterLab to Explore the Notebooks
diff --git a/matrix-test.yaml b/matrix-test.yaml
index 241d6c76..d56f2c1c 100644
--- a/matrix-test.yaml
+++ b/matrix-test.yaml
@@ -1,14 +1,14 @@
 # Copyright (c) 2023-2025, NVIDIA CORPORATION.
-# CUDA_VER is `<major>.<minor>` (e.g. `12.0`)
+# CUDA_VER is `<major>.<minor>` (e.g. `13.0`)
 pull-request:
   - { CUDA_VER: '12.0', ARCH: 'amd64', PYTHON_VER: '3.10', GPU: 'l4', DRIVER: 'earliest' }
-  - { CUDA_VER: '12.9', ARCH: 'arm64', PYTHON_VER: '3.11', GPU: 'a100', DRIVER: 'latest' }
-  - { CUDA_VER: '12.9', ARCH: 'amd64', PYTHON_VER: '3.13', GPU: 'l4', DRIVER: 'latest' }
+  - { CUDA_VER: '12.9', ARCH: 'amd64', PYTHON_VER: '3.12', GPU: 'l4', DRIVER: 'latest' }
+  - { CUDA_VER: '13.0', ARCH: 'arm64', PYTHON_VER: '3.13', GPU: 'a100', DRIVER: 'latest' }
+  - { CUDA_VER: '13.0', ARCH: 'amd64', PYTHON_VER: '3.13', GPU: 'h100', DRIVER: 'latest' }
 branch:
-  - { CUDA_VER: '12.0', ARCH: 'amd64', PYTHON_VER: '3.10', GPU: 'l4', DRIVER: 'earliest' }
-  - { CUDA_VER: '12.0', ARCH: 'amd64', PYTHON_VER: '3.10', GPU: 'l4', DRIVER: 'latest' }
-  - { CUDA_VER: '12.0', ARCH: 'arm64', PYTHON_VER: '3.11', GPU: 'a100', DRIVER: 'latest' }
-  - { CUDA_VER: '12.0', ARCH: 'amd64', PYTHON_VER: '3.12', GPU: 'l4', DRIVER: 'latest' }
-  - { CUDA_VER: '12.9', ARCH: 'amd64', PYTHON_VER: '3.13', GPU: 'l4', DRIVER: 'latest' }
+  - { CUDA_VER: '12.0', ARCH: 'arm64', PYTHON_VER: '3.11', GPU: 'a100', DRIVER: 'earliest' }
+  - { CUDA_VER: '12.9', ARCH: 'amd64', PYTHON_VER: '3.11', GPU: 'l4', DRIVER: 'latest' }
   - { CUDA_VER: '12.9', ARCH: 'arm64', PYTHON_VER: '3.13', GPU: 'a100', DRIVER: 'latest' }
+  - { CUDA_VER: '13.0', ARCH: 'amd64', PYTHON_VER: '3.11', GPU: 'l4', DRIVER: 'latest' }
+  - { CUDA_VER: '13.0', ARCH: 'arm64', PYTHON_VER: '3.12', GPU: 'a100', DRIVER: 'latest' }
diff --git a/matrix.yaml b/matrix.yaml
index b63e05ab..fd33acf0 100644
--- a/matrix.yaml
+++ b/matrix.yaml
@@ -1,8 +1,9 @@
 # Copyright (c) 2023-2025, NVIDIA CORPORATION.
-CUDA_VER: # Should be `<major>.<minor>.<patch>` (e.g. `12.9.0`)
+CUDA_VER: # Should be `<major>.<minor>.<patch>` (e.g. `13.0.0`)
   - "12.0.1"
   - "12.9.1"
+  - "13.0.0"
 PYTHON_VER:
   - "3.10"
   - "3.11"
diff --git a/tests/container-canary/README.md b/tests/container-canary/README.md
index 94a5bd20..dc145da6 100644
--- a/tests/container-canary/README.md
+++ b/tests/container-canary/README.md
@@ -9,7 +9,7 @@ Install `container-canary` following the instructions in that project's repo.
 Run the tests against a built image, the same way they're run in CI.
 ```shell
-IMAGE_URI="rapidsai/notebooks:25.10a-cuda12.9-py3.13"
+IMAGE_URI="rapidsai/notebooks:25.10a-cuda13.0-py3.13"
 ci/run-validation-checks.sh \
     --dask-scheduler \
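
# Aside, not part of the patch above: a minimal shell sketch of how the new CUDA gate
# in context/notebooks.sh behaves. `${CUDA_VER%%.*}` strips everything from the first
# `.` onward, leaving only the major version, so cuml notebooks are skipped for any
# 13.x value of CUDA_VER. The CUDA_VER values below are illustrative only.
for CUDA_VER in 12.9.1 13.0.0; do
  if [[ "${CUDA_VER%%.*}" == "12" ]]; then
    NOTEBOOK_REPOS=(cudf cuml cugraph)   # CUDA 12.x: cuml notebooks still tested
  else
    NOTEBOOK_REPOS=(cudf cugraph)        # CUDA 13.x: cuml skipped until CUDA 13 xgboost packages exist
  fi
  echo "${CUDA_VER} -> ${NOTEBOOK_REPOS[*]}"
done
# Expected output:
#   12.9.1 -> cudf cuml cugraph
#   13.0.0 -> cudf cugraph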