diff --git a/_includes/selector.html b/_includes/selector.html
index 72c1e6eb23e..bf1ec150217 100644
--- a/_includes/selector.html
+++ b/_includes/selector.html
@@ -265,7 +265,7 @@
Image CUDA
-
+
@@ -273,7 +273,7 @@
Python
-
+
@@ -373,7 +373,7 @@
active_python_ver: "3.13",
active_conda_cuda_ver: "12",
active_pip_cuda_ver: "12",
- active_docker_cuda_ver: "12.8",
+ active_docker_cuda_ver: "12.9",
active_method: "Conda",
active_release: "Stable",
active_img_type: "Base",
@@ -382,17 +382,13 @@
active_additional_packages: [],
// all possible values
- python_vers: ["3.10", "3.11", "3.12", "3.13"],
python_vers_stable: ["3.10", "3.11", "3.12", "3.13"],
python_vers_nightly: ["3.10", "3.11", "3.12", "3.13"],
- conda_cuda_vers: ["11", "12"],
- conda_cuda_vers_stable: ["11", "12"],
+ conda_cuda_vers_stable: ["12"],
conda_cuda_vers_nightly: ["12"],
- pip_cuda_vers: ["11.4 - 11.8", "12"],
- pip_cuda_vers_stable: ["11.4 - 11.8", "12"],
+ pip_cuda_vers_stable: ["12"],
pip_cuda_vers_nightly: ["12"],
- docker_cuda_vers: ["11.8", "12.0", "12.8", "12.9"],
- docker_cuda_vers_stable: ["11.8", "12.0", "12.8"],
+ docker_cuda_vers_stable: ["12.0", "12.9"],
docker_cuda_vers_nightly: ["12.0", "12.9"],
methods: ["Conda", "pip", "Docker"],
releases: ["Stable", "Nightly"],
@@ -448,8 +444,7 @@
getCondaVersionSupport(version) {
var cuda_version_info = {
"Stable": {
- "11": ["11.4", "11.8"],
- "12": ["12.0", "12.8"]
+ "12": ["12.0", "12.9"]
},
"Nightly": {
"12": ["12.0", "12.9"]
@@ -533,13 +528,9 @@
},
getpipCmdHtml() {
var pip_install = `${this.highlightCmd("pip")} install`;
- var cuda_suffix = "-cu12";
+ var cuda_suffix = `-cu${this.active_pip_cuda_ver}`;
var indentation = " ";
- if (this.active_pip_cuda_ver.startsWith("11")) {
- cuda_suffix = "-cu11";
- }
-
// Change index depending on stable vs nightly for pip
// Also add versioning commands for nightly installs so that --pre is unnecessary
// This has duplicate code, but makes for easier edits in the future
@@ -654,10 +645,6 @@
},
getCondaNotes() {
var notes = [];
- if (this.active_conda_cuda_ver.startsWith("11")) {
- notes = [...notes, "RAPIDS on CUDA 11 doesn't support channel_priority: strict; use channel_priority: flexible instead"];
- }
-
var pkgs_to_show = this.rapids_meta_pkgs;
if (this.active_packages.length === 1 && this.active_packages[0] === "Standard") {
diff --git a/install/index.md b/install/index.md
index 37ad73f2232..befa353dd40 100644
--- a/install/index.md
+++ b/install/index.md
@@ -50,18 +50,16 @@ Use the selector tool below to select your preferred method, packages, and envir
### **Conda Issues**
A `conda create error` occurs:
To resolve this error please follow one of these steps:
-- If the Conda installation is older than `22.11`, please update to the latest version. This will include [libmamba](https://www.anaconda.com/blog/a-faster-conda-for-a-growing-community/){: target="_blank"}, a Mamba-powered Conda solver that is now included with all conda installations to significantly accelerate environment solving.
-- If the Conda installation is version `22.11` or newer, run: `conda install -n base conda-libmamba-solver` and run
-`conda create --solver=libmamba ...`
-- Use [Mamba directly](https://mamba.readthedocs.io/en/latest/installation.html){: target="_blank"} as `mamba create ...`
+- If the Conda installation is older than `23.10`, please update to the latest version. Newer versions include [libmamba](https://conda.org/blog/2023-11-06-conda-23-10-0-release/){: target="_blank"}, which significantly accelerates environment solving
+- Use [Mamba directly](https://mamba.readthedocs.io/en/latest/installation/mamba-installation.html){: target="_blank"} as `mamba create ...` (a short sketch of both options follows this list)
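+
+A minimal sketch of both options (the environment name and package list below are placeholders; copy the exact command from the [install selector](#selector)):
+```sh
+# Option 1: update Conda in the base environment, then retry the create command
+conda --version
+conda update -n base conda
+
+# Option 2: run the same create command with Mamba instead of Conda
+mamba create -n rapids-example -c rapidsai -c conda-forge -c nvidia cudf python=3.13
+```
+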
A `__cuda` constraint conflict occurs:
You may see something like:
```
LibMambaUnsatisfiableError: Encountered problems while solving:
- - package cuda-version-12.0-hffde075_0 has constraint __cuda >=12 conflicting with __cuda-11.4-0
+ - package cuda-version-12.0-hffde075_0 has constraint __cuda >=12 conflicting with __cuda-11.8-0
```
-This means the CUDA driver currently installed on your machine (e.g. `__cuda`: 11.4.0) is
+This means the CUDA driver currently installed on your machine (e.g. `__cuda`: 11.8.0) is
incompatible with the `cuda-version` (12.0) you are trying to install. You will have to ensure the [CUDA
driver on your machine supports the CUDA version](#system-req) you are trying to install with conda.
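+
+One way to check what the installed driver supports (a minimal sketch; output formats vary by driver and conda version):
+```sh
+# The "CUDA Version" field in the nvidia-smi header is the highest CUDA
+# runtime version the installed driver supports
+nvidia-smi
+
+# conda reports the same value as the __cuda virtual package
+conda info | grep __cuda
+```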
@@ -72,7 +70,7 @@ Check if any packages in your environment have been installed from the `defaults
The `defaults` channel is not supported by RAPIDS packages, which are built to be compatible with dependencies from the `conda-forge` channel.
If you installed conda with [the Miniconda or Anaconda distributions](https://www.anaconda.com/docs/getting-started/miniconda/main#should-i-install-miniconda-or-anaconda-distribution), the `defaults` channel will be included unless you modify your `.condarc` file or specify `-c nodefaults` in the install commands for RAPIDS packages.
If you find any packages from `defaults` in your environment, please make those changes and try recreating your environment from scratch.
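+
+A quick way to check which channel each package came from (a sketch; packages from `defaults` may also show up as `pkgs/main`):
+```sh
+# Show the channel of every installed package and flag anything from defaults
+conda list --show-channel-urls | grep -E 'defaults|pkgs/main'
+
+# Optionally drop the defaults channel from your configuration
+conda config --remove channels defaults
+```
+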
-Note that if you installed conda with [Miniforge](https://conda-forge.org/download/) ([our recommendation for best compatibility](https://docs.rapids.ai/install/#conda)) then the `defaults` channel is not included.
+Note that if you installed conda with [Miniforge](https://conda-forge.org/download/) ([our recommendation for best compatibility](#conda)) then the `defaults` channel is not included.
In general [mixing `conda-forge` and `defaults` channels is not supported](https://conda-forge.org/docs/user/transitioning_from_defaults/). RAPIDS packages are published to a separate `rapidsai` channel that is designed for compatibility with `conda-forge`, not `defaults`.
@@ -92,13 +90,9 @@ To learn more about these changes, please see the [RAPIDS Container README](http
### **pip Issues**
- pip installations require using the matching wheel to the system's installed CUDA toolkit. For CUDA 11 toolkits, install the `-cu11` wheels, and for CUDA 12 toolkits install the `-cu12` wheels. If your installation has a CUDA 12 driver but a CUDA 11 toolkit, use the `-cu11` wheels.
+ pip installations require a wheel that matches the system's installed CUDA toolkit. For example, if you have the CUDA 12 toolkit, install the `-cu12` wheels (see the example after this list).
Infiniband is not supported yet.
These packages are not compatible with Tensorflow pip packages. Please use the [NGC containers](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tensorflow){: target="_blank"} or conda packages instead.
- If you experience a "Failed to import CuPy" error, please uninstall any existing versions of cupy and install `cupy-cuda11x`. For example:
-```sh
-pip uninstall cupy-cuda115; pip install cupy-cuda11x
-```
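+
+A minimal sketch of matching the wheel to the toolkit (`cudf-cu12` is used as an example package; take the full command, including any index URL, from the [install selector](#selector)):
+```sh
+# Report the CUDA toolkit version installed on the system
+nvcc --version
+
+# For a CUDA 12 toolkit, install the -cu12 wheels
+pip install cudf-cu12 --extra-index-url=https://pypi.nvidia.com
+```
+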
The following error message indicates a problem with your environment:
@@ -108,23 +102,7 @@ ERROR: No matching distribution found for cudf-cu12
```
Check the suggestions below for possible resolutions:
-- The pip index has moved from the initial experimental release! Ensure the correct `--extra-index-url=https://pypi.nvidia.com`
- Ensure you're using a Python version that RAPIDS supports (compare the values in [the install selector](#selector) to the Python version reported by `python --version`).
-- RAPIDS pip packages require a recent version of pip that [supports PEP600](https://peps.python.org/pep-0600/){: target="_blank"}. Some users may need to update pip: `pip install -U pip`
-
-
-
- Dask / Jupyter / Tornado 6.2 dependency conflicts can occur. Install `jupyter-client` 7.3.4 if the error below occurs:
-```
-ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behavior is the source of the following dependency conflicts.
-jupyter-client 7.4.2 requires tornado>=6.2, but you have tornado 6.1 which is incompatible.
-```
-
- cuSpatial installation may yield the error below:
-```
-ERROR: GDAL >= 3.2 is required for fiona. Please upgrade GDAL.
-```
-To resolve, either GDAL needs to be updated, or fiona needs to be pinned to specific versions depending on the installation OS. please see the [cuSpatial README](https://github.com/rapidsai/cuspatial/tree/branch-23.06#install-with-pip){: target="_blank"} to resolve this error.
@@ -155,28 +133,18 @@ All provisioned systems need to be RAPIDS capable. Here's what is required:
**CUDA & NVIDIA Drivers:** One of the following supported versions:
{: .no-tb-margins }
-- [CUDA 11.2](https://developer.nvidia.com/cuda-11.2.0-download-archive){: target="_blank"} with Driver 470.42.01 or newer
-- [CUDA 11.4](https://developer.nvidia.com/cuda-11-4-0-download-archive){: target="_blank"} with Driver 470.42.01 or newer
-- [CUDA 11.5](https://developer.nvidia.com/cuda-11-5-0-download-archive){: target="_blank"} with Driver 495.29.05 or newer
-- [CUDA 11.8](https://developer.nvidia.com/cuda-11-8-0-download-archive){: target="_blank"} with Driver 520.61.05 or newer
-- [CUDA 12.0](https://developer.nvidia.com/cuda-12-0-1-download-archive){: target="_blank"} with Driver 525.60.13 or newer **see CUDA 12 section below for notes on usage**
-- [CUDA 12.2](https://developer.nvidia.com/cuda-12-2-2-download-archive){: target="_blank"} with Driver 535.86.10 or newer **see CUDA 12 section below for notes on usage**
-- [CUDA 12.5](https://developer.nvidia.com/cuda-12-5-1-download-archive){: target="_blank"} with Driver 555.42.06 or newer **see CUDA 12 section below for notes on usage**
-
- **Note**: RAPIDS is tested with and officially supports the versions listed above. Newer CUDA and driver versions may also work with RAPIDS. See [CUDA compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/index.html) for details.
+- CUDA 12 with Driver 525.60.13 or newer
+- Compatibility with CUDA 13 is coming soon
-## **CUDA Support**
+See [CUDA compatibility](https://docs.nvidia.com/deploy/cuda-compatibility/) for details.
-### **Docker and Conda**
-
-- conda packages and Docker images support CUDA 12 on systems with a CUDA 12 driver.
-- CUDA 11 conda packages and Docker images can be used on a system with a CUDA 12 driver because they include their own CUDA toolkit.
+## **CUDA Support Notes**
### **pip**
- pip installations require using a wheel matching the system's installed CUDA toolkit.
- RAPIDS pip packages require NVRTC for Numba to function properly. For Docker users, this means that RAPIDS wheels require the `devel` flavor of `nvidia/cuda` images for full functionality. The `base` and `runtime` flavors of `nvidia/cuda` Docker images are currently not sufficient.
-- For CUDA 11 toolkits, install the -cu11 wheels, and for CUDA 12 toolkits install the -cu12 wheels. If your installation has a CUDA 12 driver but a CUDA 11 toolkit, use the -cu11 wheels.
+- The wheel suffix must match the toolkit's major version: for example, if you have the CUDA 12 toolkit, install the `-cu12` wheels.
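+
+Because of the NVRTC note above, containerized wheel installs should start from a `devel` image (the tag below is illustrative; pick a current one from the `nvidia/cuda` tag list):
+```sh
+# The devel flavor ships NVRTC, which RAPIDS wheels need for Numba
+docker run --gpus all --rm -it nvidia/cuda:12.9.0-devel-ubuntu24.04 bash
+```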
@@ -224,11 +192,7 @@ bash Miniforge3-$(uname)-$(uname -m).sh
**3. Start Conda.** Open a new terminal window, which should now show Conda initialized.
-**4. Check Conda Configuration.** Installing RAPIDS may require you to use `channel_priority: flexible`.
-
-If you are installing RAPIDS with CUDA 12 or greater, then you can use either `strict` or `flexible` channel priority.
-
-If you are installing RAPIDS with CUDA 11, then you must set `channel_priority: flexible`.
+**4. Check Conda Configuration.** RAPIDS supports either `flexible` or `strict` channel priority.
You can check this and change it, if required, by doing:
```sh
@@ -240,21 +204,14 @@ conda config --set channel_priority flexible
## **Docker**
-RAPIDS requires both Docker CE v19.03+ and [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-docker#quickstart){: target="_blank"} installed.
-- Legacy Support: Docker CE v17-18 and [nvidia-docker2](https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(version-2.0)){: target="_blank"}
+RAPIDS requires Docker Engine and the [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html){: target="_blank"} to be installed.
-**1. Download and Install.** Copy command below to download and install the latest Docker CE Edition:
+**1. Download and Install.** Copy the command below to download and install the latest Docker Engine:
```sh
curl https://get.docker.com | sh
```
-**2. Install Latest NVIDIA Docker.** Select the [appropriate supported distribution](https://nvidia.github.io/nvidia-container-runtime/){: target="_blank"}:
-```sh
-curl -s -L https://nvidia.github.io/nvidia-container-runtime/gpgkey | sudo apt-key add - distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
-curl -s -L https://nvidia.github.io/nvidia-container-runtime/$distribution/nvidia-container-runtime.list | sudo tee /etc/apt/sources.list.d/nvidia-container-runtime.list
-sudo apt-get update
-sudo apt-get install nvidia-container-runtime
-```
+**2. Install Latest NVIDIA Container Toolkit.** Follow the instructions for your Linux distribution in the [nvidia-container-toolkit installation guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html){: target="_blank"}.
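+
+For reference, the apt-based steps from that guide currently look roughly like the following (the linked guide is authoritative and these commands may change):
+```sh
+# Add the NVIDIA Container Toolkit apt repository and its signing key
+curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \
+  sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
+curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
+  sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
+  sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+
+# Install the toolkit and register it with Docker
+sudo apt-get update
+sudo apt-get install -y nvidia-container-toolkit
+sudo nvidia-ctk runtime configure --runtime=docker
+sudo systemctl restart docker
+```
+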
**3. Start Docker.** In a new terminal window run:
```sh
@@ -262,14 +219,11 @@ sudo service docker stop
sudo service docker start
```
-**4a. Test NVIDIA Docker.** In a terminal window run:
+**4. Test Docker with GPU support.** In a terminal window run:
```sh
docker run --gpus all nvcr.io/nvidia/k8s/cuda-sample:nbody nbody -gpu -benchmark
```
-**4b. Legacy Docker Users.** Docker CE v18 & [nvidia-docker2](https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(version-2.0)){: target="_blank"} users will need to replace the following for compatibility:
-`docker run --gpus all` with `docker run --runtime=nvidia`
-
### **JupyterLab.**
@@ -280,22 +234,13 @@ The command provided from the selector for the `notebooks` Docker image will run
docker run -t -d --gpus all --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -v $PWD:/ws
```
-The standard docker command may be sufficient, but the additional arguments ensures more stability. See the [NCCL docs](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/troubleshooting.html#sharing-data){: target="_blank"} and [UCX docs](https://github.com/openucx/ucx/blob/master/docs/source/running.md#running-in-docker-containers){: target="_blank"} for more details on MNMG usage.
-
-
-**Custom Datasets.** See the [RAPIDS Container README](https://hub.docker.com/r/rapidsai/rapidsai){: target="_blank"} for more information about using custom datasets. [Docker Hub](https://hub.docker.com/r/rapidsai/rapidsai/){: target="_blank"} and [NVIDIA GPU Cloud](https://ngc.nvidia.com/catalog/containers/nvidia:rapidsai:rapidsai){: target="_blank"} host RAPIDS containers with a full list of [available tags](https://hub.docker.com/r/rapidsai/rapidsai/tags){: target="_blank"}.
-
+The standard docker command may be sufficient, but the additional arguments ensure more stability. See the [NCCL docs](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/troubleshooting.html#sharing-data){: target="_blank"} and [UCX docs](https://github.com/openucx/ucx/blob/master/docs/source/running.md#running-in-docker-containers){: target="_blank"} for more details on MNMG usage.
## **pip**
-RAPIDS pip packages are available for CUDA 11 and CUDA 12 on the NVIDIA Python Package Index.
-
-### **pip Additional Prerequisites**
- The CUDA toolkit version on your system must match the pip CUDA version you install (`-cu11` or `-cu12`).
- **glibc version:** x86_64 wheels require glibc >= 2.17.
- **glibc version:** ARM architecture (aarch64) wheels require glibc >= 2.32 (only ARM Server Base System Architecture is supported).
+RAPIDS pip packages are available on the NVIDIA Python Package Index.
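+
+A quick way to confirm the glibc requirement noted above is met:
+```sh
+# x86_64 wheels need glibc >= 2.17; aarch64 wheels need glibc >= 2.32
+ldd --version | head -n 1
+```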
@@ -319,7 +264,7 @@ Windows users can now tap into GPU accelerated data science on their local machi
### **WSL2 Additional Prerequisites**
- **OS:** Windows 11 with a WSL2 installation of Ubuntu (minimum version 20.04).
+ **OS:** Windows 11 with a WSL2 installation of Ubuntu.
**WSL Version:** WSL2 (WSL1 not supported).
**GPU:** GPUs with [Compute Capability](https://developer.nvidia.com/cuda-gpus){: target="_blank"} 7.0 or higher (16GB+ GPU RAM is recommended).
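+
+To confirm a distribution is running under WSL 2 (run from Windows PowerShell or CMD):
+```sh
+# Lists installed distributions and the WSL version each one uses
+wsl.exe --list --verbose
+```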
@@ -338,8 +283,6 @@ Windows users can now tap into GPU accelerated data science on their local machi
When installing with Conda or pip, if a `WSL2 Jitify fatal error: libcuda.so: cannot open shared object file` error occurs, follow the suggestions in [this WSL issue](https://github.com/microsoft/WSL/issues/8587) to resolve it.
- When installing with Docker Desktop, if the container pull command is successful, but the run command hangs indefinitely, [ensure you're on Docker Desktop >= 4.18](https://docs.docker.com/desktop/release-notes/){: target="_blank"}.
-
@@ -401,9 +344,8 @@ print(cudf.Series([1, 2, 3]))
1. Install WSL2 and the Ubuntu distribution [using Microsoft's instructions](https://docs.microsoft.com/en-us/windows/wsl/install){: target="_blank"}.
2. Install the [latest NVIDIA Drivers](https://www.nvidia.com/download/index.aspx){: target="_blank"} on the Windows host.
3. Log in to the WSL2 Linux instance.
-4. Follow [this helpful developer guide](https://docs.nvidia.com/cuda/wsl-user-guide/index.html#cuda-support-for-wsl2){: target="_blank"} and then install the WSL-specific [CUDA 11](https://developer.nvidia.com/cuda-11-8-0-download-archive?target_os=Linux&target_arch=x86_64&Distribution=WSL-Ubuntu&target_version=2.0&target_type=deb_local){: target="_blank"} or [CUDA 12](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=WSL-Ubuntu&target_version=2.0&target_type=deb_local){: target="_blank"} Toolkit without drivers into the WSL2 instance.
- - The installed CUDA Toolkit version must match the pip wheel version (`-cu11` or `-cu12`)
- - Any CUDA 12 CTK will work with RAPIDS `-cu12` pip packages
+4. Follow [this helpful developer guide](https://docs.nvidia.com/cuda/wsl-user-guide/index.html#cuda-support-for-wsl2){: target="_blank"} and then install the WSL-specific [CUDA 12](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=WSL-Ubuntu&target_version=2.0&target_type=deb_local){: target="_blank"} Toolkit without drivers into the WSL2 instance.
+ - The installed CUDA Toolkit major version must match the package suffix (e.g. `-cu12`)
5. Install RAPIDS pip packages on the WSL2 Linux Instance using the [release selector](#selector) commands.
6. Run this code to check that the RAPIDS installation is working:
```python
@@ -415,7 +357,7 @@ print(cudf.Series([1, 2, 3]))
## **Build from Source**
-To build from source, check each [RAPIDS GitHub](https://github.com/rapidsai){: target="_blank"} README, such as the [cuDF's](https://github.com/rapidsai/cudf#buildinstall-from-source){: target="_blank"} source environment set up and build instructions. Further links are provided in the [selector tool](#selector). If additional help is needed reach out on our [Slack Channel]({{ site.social.slack.url }}).
+To build from source, find the library in the [RAPIDS GitHub organization](https://github.com/rapidsai){: target="_blank"}. Each library provides guidance on building from source in its `README.md` or `CONTRIBUTING.md`. If additional help is needed, file an issue on GitHub or reach out on our [Slack Channel]({{ site.social.slack.url }}).
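+
+For example, to start from cuDF's source (other RAPIDS repositories follow the same pattern):
+```sh
+# Clone the library and read its build guidance
+git clone https://github.com/rapidsai/cudf.git
+cd cudf
+less CONTRIBUTING.md
+```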