diff --git a/.github/workflows/build_test_publish_images.yaml b/.github/workflows/build_test_publish_images.yaml index fcf85651d..af066e41e 100644 --- a/.github/workflows/build_test_publish_images.yaml +++ b/.github/workflows/build_test_publish_images.yaml @@ -32,7 +32,7 @@ on: description: 'JSON array of architectures to build for' cuda_ver: type: string - default: '["12.8.0"]' + default: '["12.9.0"]' description: 'JSON array of CUDA versions to build for' python_ver: type: string diff --git a/.github/workflows/self_hosted_service_test.yaml b/.github/workflows/self_hosted_service_test.yaml index 8d3f6215b..3b12dcecb 100644 --- a/.github/workflows/self_hosted_service_test.yaml +++ b/.github/workflows/self_hosted_service_test.yaml @@ -59,7 +59,7 @@ jobs: runs-on: linux-amd64-gpu-l4-latest-1 strategy: matrix: - ctk: ["12.8.0"] + ctk: ["12.9.0"] linux_ver: ["ubuntu24.04"] py_ver: ["3.12"] container: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3dd28dad3..133c86e28 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -70,7 +70,7 @@ library features. The following instructions are for building with a conda envir CUDA/GPU Runtime: -* CUDA 12.8 +* CUDA 12.9 * Volta architecture or better ([Compute Capability](https://docs.nvidia.com/deploy/cuda-compatibility/) >=7.0) Python: diff --git a/README.md b/README.md index fcab5ddc2..f7390b958 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,7 @@ on the major version of CUDA available in your environment: For CUDA 12.x: ```bash -pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.10.* cuopt-sh-client==25.10.* nvidia-cuda-runtime-cu12==12.8.* +pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.10.* cuopt-sh-client==25.10.* nvidia-cuda-runtime-cu12==12.9.* ``` Development wheels are available as nightlies, please update `--extra-index-url` to `https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/` to install latest nightly packages. 
@@ -85,10 +85,10 @@ of our latest development branch. Just replace `-c rapidsai` with `-c rapidsai-n Users can pull the cuOpt container from the NVIDIA container registry. ```bash -docker pull nvidia/cuopt:latest-cuda12.8-py312 +docker pull nvidia/cuopt:latest-cuda12.9-py312 ``` -Note: The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``-cuda12.8-py312`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.8-py312`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. +Note: The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``-cuda12.9-py312`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.9-py312`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. More information about the cuOpt container can be found [here](https://docs.nvidia.com/cuopt/user-guide/latest/cuopt-server/quick-start.html#container-from-docker-hub). diff --git a/ci/build-service.Dockerfile b/ci/build-service.Dockerfile deleted file mode 100644 index d1e282cee..000000000 --- a/ci/build-service.Dockerfile +++ /dev/null @@ -1,75 +0,0 @@ -# syntax=docker/dockerfile:1.2 -# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -ARG arch=amd -ARG cuda_ver=12.5.1 - -# To copy cuda files -FROM nvcr.io/nvidia/cuda:${cuda_ver}-runtime-ubuntu22.04 AS cuda-env - - -# To copy nvvm -FROM rapidsai/ci-wheel:cuda${cuda_ver}-rockylinux8-py3.11 AS cuopt_build - - -# Install cuOpt -FROM python:3.12.8-slim-bookworm AS install-env - -COPY --chown=nvs:nvs ./wheels /tmp/wheels/ -ARG cuda-suffix=cu12 - -RUN apt-get update && apt-get install -y git gcc - -ENV PIP_EXTRA_INDEX_URL="https://pypi.nvidia.com https://pypi.anaconda.org/rapidsai-wheels-nightly/simple" - -RUN python -m pip install nvidia-cuda-runtime-cu12==12.5.* -RUN python -m pip install /tmp/wheels/cuopt_mps_parser* /tmp/wheels/cuopt_${cuda_suffix}* /tmp/wheels/cuopt_server_${cuda_suffix}* -RUN python -m pip uninstall setuptools -y - - -# Build release container -FROM nvcr.io/nvidian/distroless/python:3.12-v3.4.4-${arch}64 - -ARG cuda_ver=12.5.1 -COPY --from=install-env --chown=nvs:nvs \ - /usr/local/lib/python3.12/site-packages \ - /usr/local/lib/python3.12/dist-packages - -COPY --from=cuda-env --chown=nvs:nvs \ - /usr/local/cuda-* \ - /usr/local/cuda - -COPY --from=cuopt_build --chown=nvs:nvs /usr/local/cuda/nvvm/ /usr/local/cuda/nvvm/ - -ARG nspect_id -ARG server_port=5000 - -ENV CUOPT_SERVER_PORT=${server_port} - -ENV CUOPT_SERVER_NSPECT_ID=${nspect_id} - -ENV RMM_DEBUG_LOG_FILE=/tmp/rmm_log.txt -ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64 -ENV CUPY_CACHE_DIR=/tmp/.cupy - -WORKDIR /cache - -COPY ./LICENSE /cache/LICENSE -COPY ./container-builder/README.md /cache/ -COPY ./container-builder/CHANGELOG.md /cache/ -COPY ./git_info.txt /cache/ - -CMD ["python3", "-m", "cuopt_server.cuopt_service"] diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index 3d5b1f241..9c5f21a8e 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -112,7 +112,7 @@ sed_runner "/^set(cuopt_version/ s/[0-9][0-9].[0-9][0-9].[0-9][0-9]/${NEXT_FULL_ sed_runner 's/'"cuopt_version: \"[0-9][0-9].[0-9][0-9]\""'/'"cuopt_version: 
\"${NEXT_SHORT_TAG}\""'/g' .github/workflows/nightly.yaml # Update Helm chart files -sed_runner 's/\(tag: "\)[0-9][0-9]\.[0-9]\+\.[0-9]\+\(-cuda12\.8-py3\.12"\)/\1'${DOCKER_TAG}'\2/g' helmchart/cuopt-server/values.yaml +sed_runner 's/\(tag: "\)[0-9][0-9]\.[0-9]\+\.[0-9]\+\(-cuda12\.9-py3\.12"\)/\1'${DOCKER_TAG}'\2/g' helmchart/cuopt-server/values.yaml sed_runner 's/\(appVersion: \)[0-9][0-9]\.[0-9]\+\.[0-9]\+/\1'${DOCKER_TAG}'/g' helmchart/cuopt-server/Chart.yaml sed_runner 's/\(version: \)[0-9][0-9]\.[0-9]\+\.[0-9]\+/\1'${DOCKER_TAG}'/g' helmchart/cuopt-server/Chart.yaml diff --git a/conda/environments/all_cuda-128_arch-aarch64.yaml b/conda/environments/all_cuda-129_arch-aarch64.yaml similarity index 96% rename from conda/environments/all_cuda-128_arch-aarch64.yaml rename to conda/environments/all_cuda-129_arch-aarch64.yaml index ba6b6945e..3565eae6a 100644 --- a/conda/environments/all_cuda-128_arch-aarch64.yaml +++ b/conda/environments/all_cuda-129_arch-aarch64.yaml @@ -18,7 +18,7 @@ dependencies: - cuda-nvtx-dev - cuda-nvvm - cuda-sanitizer-api -- cuda-version=12.8 +- cuda-version=12.9 - cudf==25.10.*,>=0.0.0a0 - cupy>=12.0.0 - cuvs==25.10.*,>=0.0.0a0 @@ -85,4 +85,4 @@ dependencies: - nvidia_sphinx_theme - swagger-plugin-for-sphinx - veroviz -name: all_cuda-128_arch-aarch64 +name: all_cuda-129_arch-aarch64 diff --git a/conda/environments/all_cuda-128_arch-x86_64.yaml b/conda/environments/all_cuda-129_arch-x86_64.yaml similarity index 96% rename from conda/environments/all_cuda-128_arch-x86_64.yaml rename to conda/environments/all_cuda-129_arch-x86_64.yaml index f91c22957..04132f8c7 100644 --- a/conda/environments/all_cuda-128_arch-x86_64.yaml +++ b/conda/environments/all_cuda-129_arch-x86_64.yaml @@ -18,7 +18,7 @@ dependencies: - cuda-nvtx-dev - cuda-nvvm - cuda-sanitizer-api -- cuda-version=12.8 +- cuda-version=12.9 - cudf==25.10.*,>=0.0.0a0 - cupy>=12.0.0 - cuvs==25.10.*,>=0.0.0a0 @@ -85,4 +85,4 @@ dependencies: - nvidia_sphinx_theme - 
swagger-plugin-for-sphinx - veroviz -name: all_cuda-128_arch-x86_64 +name: all_cuda-129_arch-x86_64 diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 53071afae..290016f90 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -104,7 +104,7 @@ message("-- Building for GPU_ARCHS = ${CMAKE_CUDA_ARCHITECTURES}") # make the flags global in order to propagate flags to test cmake files set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr --expt-extended-lambda") -if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8 AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 13.0) +if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.9 AND CMAKE_CUDA_COMPILER_VERSION VERSION_LESS 13.0) set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -static-global-template-stub=false") endif() list(APPEND CUOPT_CUDA_FLAGS -Werror=cross-execution-space-call -Wno-deprecated-declarations -Xcompiler=-Werror) diff --git a/dependencies.yaml b/dependencies.yaml index 07de52533..ae7c345a7 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -19,7 +19,7 @@ files: all: output: conda matrix: - cuda: ["12.8"] + cuda: ["12.9"] arch: [x86_64, aarch64] includes: - build_common diff --git a/docs/cuopt/source/cuopt-c/quick-start.rst b/docs/cuopt/source/cuopt-c/quick-start.rst index 7ed104018..624ca6a2a 100644 --- a/docs/cuopt/source/cuopt-c/quick-start.rst +++ b/docs/cuopt/source/cuopt-c/quick-start.rst @@ -20,7 +20,7 @@ This wheel is a Python wrapper around the C++ library and eases installation and # This is a deprecated module and no longer used, but it shares the same name for the CLI, so we need to uninstall it first if it exists. 
pip uninstall cuopt-thin-client - pip install --extra-index-url=https://pypi.nvidia.com libcuopt-cu12==25.10.* nvidia-cuda-runtime-cu12==12.8.* + pip install --extra-index-url=https://pypi.nvidia.com libcuopt-cu12==25.10.* nvidia-cuda-runtime-cu12==12.9.* Conda diff --git a/docs/cuopt/source/cuopt-python/quick-start.rst b/docs/cuopt/source/cuopt-python/quick-start.rst index f9f6f989a..bdb94f7d7 100644 --- a/docs/cuopt/source/cuopt-python/quick-start.rst +++ b/docs/cuopt/source/cuopt-python/quick-start.rst @@ -14,7 +14,7 @@ For CUDA 12.x: .. code-block:: bash - pip install --extra-index-url=https://pypi.nvidia.com cuopt-cu12==25.10.* nvidia-cuda-runtime-cu12==12.8.* + pip install --extra-index-url=https://pypi.nvidia.com cuopt-cu12==25.10.* nvidia-cuda-runtime-cu12==12.9.* .. note:: @@ -41,19 +41,19 @@ NVIDIA cuOpt is also available as a container from Docker Hub: .. code-block:: bash - docker pull nvidia/cuopt:latest-cuda12.8-py3.12 + docker pull nvidia/cuopt:latest-cuda12.9-py3.12 .. note:: - The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``-cuda12.8-py3.12`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.8-py3.12`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. + The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``-cuda12.9-py3.12`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.9-py3.12`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. .. note:: - The nightly version of cuOpt is available as ``[VERSION]a-cuda12.8-py3.12`` tag. For example, to use cuOpt 25.8.0a, you can use the ``25.8.0a-cuda12.8-py3.12`` tag. + The nightly version of cuOpt is available as ``[VERSION]a-cuda12.9-py3.12`` tag. For example, to use cuOpt 25.8.0a, you can use the ``25.8.0a-cuda12.9-py3.12`` tag. 
The container includes both the Python API and self-hosted server components. To run the container: .. code-block:: bash - docker run --gpus all -it --rm nvidia/cuopt:latest-cuda12.8-py3.12 /bin/bash + docker run --gpus all -it --rm nvidia/cuopt:latest-cuda12.9-py3.12 /bin/bash This will start an interactive session with cuOpt pre-installed and ready to use. diff --git a/docs/cuopt/source/cuopt-server/quick-start.rst b/docs/cuopt/source/cuopt-server/quick-start.rst index 617a2c2e4..6084b73f5 100644 --- a/docs/cuopt/source/cuopt-server/quick-start.rst +++ b/docs/cuopt/source/cuopt-server/quick-start.rst @@ -12,7 +12,7 @@ For CUDA 12.x: .. code-block:: bash - pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.10.* cuopt-sh-client==25.10.* nvidia-cuda-runtime-cu12==12.8.* + pip install --extra-index-url=https://pypi.nvidia.com cuopt-server-cu12==25.10.* cuopt-sh-client==25.10.* nvidia-cuda-runtime-cu12==12.9.* .. note:: For development wheels which are available as nightlies, please update `--extra-index-url` to `https://pypi.anaconda.org/rapidsai-wheels-nightly/simple/`. @@ -37,19 +37,19 @@ NVIDIA cuOpt is also available as a container from Docker Hub: .. code-block:: bash - docker pull nvidia/cuopt:latest-cuda12.8-py3.12 + docker pull nvidia/cuopt:latest-cuda12.9-py3.12 .. note:: - The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``-cuda12.8-py3.12`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.8-py3.12`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. + The ``latest`` tag is the latest stable release of cuOpt. If you want to use a specific version, you can use the ``-cuda12.9-py3.12`` tag. For example, to use cuOpt 25.5.0, you can use the ``25.5.0-cuda12.9-py3.12`` tag. Please refer to `cuOpt dockerhub page `_ for the list of available tags. The container includes both the Python API and self-hosted server components. 
To run the container: .. code-block:: bash - docker run --gpus all -it --rm -p 8000:8000 -e CUOPT_SERVER_PORT=8000 nvidia/cuopt:latest-cuda12.8-py3.12 + docker run --gpus all -it --rm -p 8000:8000 -e CUOPT_SERVER_PORT=8000 nvidia/cuopt:latest-cuda12.9-py3.12 .. note:: - The nightly version of cuOpt is available as ``[VERSION]a-cuda12.8-py3.12`` tag. For example, to use cuOpt 25.8.0a, you can use the ``25.8.0a-cuda12.8-py3.12`` tag. + The nightly version of cuOpt is available as ``[VERSION]a-cuda12.9-py3.12`` tag. For example, to use cuOpt 25.8.0a, you can use the ``25.8.0a-cuda12.9-py3.12`` tag. .. note:: Make sure you have the NVIDIA Container Toolkit installed on your system to enable GPU support in containers. See the `installation guide `_ for details. diff --git a/docs/cuopt/source/system-requirements.rst b/docs/cuopt/source/system-requirements.rst index 132e4bc47..e7d963ae5 100644 --- a/docs/cuopt/source/system-requirements.rst +++ b/docs/cuopt/source/system-requirements.rst @@ -46,7 +46,7 @@ Dependencies are installed automatically when using the pip and Conda installati - CUDA 12.0 with Driver 525.60.13+ - CUDA 12.2 with Driver 535.86.10+ - CUDA 12.5 with Driver 555.42.06+ - - CUDA 12.8 with Driver 570.42.01+ + - CUDA 12.9 with Driver 570.42.01+ .. dropdown:: Recommended Requirements for Best Performance @@ -67,7 +67,7 @@ Dependencies are installed automatically when using the pip and Conda installati - 100+ GB free space * CUDA: - - 12.8 + - 12.9 * Latest NVIDIA drivers (570.42.01+) diff --git a/helmchart/cuopt-server/values.yaml b/helmchart/cuopt-server/values.yaml index 0b6b9746f..79224fbc7 100644 --- a/helmchart/cuopt-server/values.yaml +++ b/helmchart/cuopt-server/values.yaml @@ -7,7 +7,7 @@ replicaCount: 1 image: repository: nvidia/cuopt pullPolicy: IfNotPresent - tag: "25.10.0-cuda12.8-py3.12" + tag: "25.10.0-cuda12.9-py3.12" imagePullSecrets: [] nameOverride: ""