From e9e50ad069bca65d8b3409b5afa997920ecec45f Mon Sep 17 00:00:00 2001
From: gs-olive <113141689+gs-olive@users.noreply.github.com>
Date: Tue, 13 Jun 2023 16:57:24 -0700
Subject: [PATCH 1/6] chore: Upgrade to CUDA 12.1 and CuDNN 8.9
- Upgrade the CUDA version across CI, dependencies, and build systems
- Upgrade the versions of Torch, CuDNN, TensorRT, and other dependencies in
  accordance with the new CUDA version 12.1
- Upgrade the Torch nightly date to the latest available
---
.circleci/config.yml | 89 ++++++++-----------
README.md | 6 +-
WORKSPACE | 20 ++---
docker/Dockerfile | 4 +-
docker/README.md | 4 +-
py/ci/build_whl.sh | 2 +-
py/requirements.txt | 6 +-
py/torch_tensorrt/__init__.py | 2 +-
py/torch_tensorrt/fx/README.md | 2 +-
py/versions.py | 4 +-
tests/modules/requirements.txt | 2 +-
toolchains/ci_workspaces/WORKSPACE.x86_64 | 2 +-
.../WORKSPACE.x86_64.release.rhel | 10 +--
.../WORKSPACE.x86_64.release.ubuntu | 10 +--
14 files changed, 73 insertions(+), 90 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index d286866fd1..50ab360b1c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -92,17 +92,9 @@ commands:
key: cmake-<< parameters.cache-key >>-<< parameters.version >>
paths:
- << parameters.install-dir >>
- uninstall-cuda:
- description: "Uninstall CUDA-11.4"
- steps:
- - run:
- name: "Uninstall CUDA-11.4"
- command: |
- sudo apt-get --purge remove "cuda*" "nsight*"
- sudo apt-get --purge remove "*nvidia*"
install-cudnn:
- description: "Install CUDNN 8.8.0"
+ description: "Install CUDNN 8.9.1"
parameters:
os:
type: string
@@ -112,10 +104,10 @@ commands:
default: "x86_64"
cudnn-version:
type: string
- default: "8.8.0.121"
+ default: "8.9.1.23"
cuda-version:
type: string
- default: "cuda11.8"
+ default: "cuda12.1"
steps:
- run:
name: Install CUDNN
@@ -149,13 +141,13 @@ commands:
default: "amd64"
cuda-pkg-name:
type: string
- default: "cuda-toolkit-11-8"
+ default: "cuda-toolkit-12-1"
cuda-pkg-version:
type: string
- default: "11-8"
+ default: "12-1"
cuda-version:
type: string
- default: "11.8.0"
+ default: "12.1.1"
steps:
- run:
name: Install CUDA
@@ -186,21 +178,12 @@ commands:
architecture:
type: string
default: "amd64"
- cuda-pkg-name:
- type: string
- default: "cuda-toolkit-11-8"
- cuda-pkg-version:
- type: string
- default: "11-8"
- cuda-version:
- type: string
- default: "11.8.0"
cuda-string-version:
type: string
- default: "cuda11.8"
+ default: "cuda12.0"
cudnn-version:
type: string
- default: "8.8.0.121"
+ default: "8.9.1.23"
trt-version-short:
type: string
default: "8.6.1"
@@ -252,7 +235,7 @@ commands:
default: "8.6.1"
cudnn-version-long:
type: string
- default: "8.8.0.121"
+ default: "8.9.1.23"
steps:
- run:
name: Set up python environment
@@ -261,7 +244,7 @@ commands:
pip3 install wheel setuptools
pip3 install nvidia-pyindex
pip3 install tabulate
- pip3 install tensorrt==<< parameters.trt-version-long >> nvidia-cudnn-cu11==<< parameters.cudnn-version-long >>
+ pip3 install tensorrt==<< parameters.trt-version-long >> nvidia-cudnn-cu12==<< parameters.cudnn-version-long >>
pip3 install pytest parameterized expecttest nox
install-torch-from-index:
@@ -269,13 +252,13 @@ commands:
parameters:
torch-build:
type: string
- default: "2.1.0.dev20230605+cu118"
+ default: "2.1.0.dev20230613+cu121"
torchvision-build:
type: string
- default: "0.16.0.dev20230605+cu118"
+ default: "0.16.0.dev20230613+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu118"
+ default: "https://download.pytorch.org/whl/nightly/cu121"
steps:
- run:
name: Install Torch
@@ -293,7 +276,7 @@ commands:
- run:
name: Build torch-tensorrt python release (pre-cxx11-abi)
command: |
- export CUDA_HOME=/usr/local/cuda-11.8/
+ export CUDA_HOME=/usr/local/cuda-12.1/
mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE
cd py
python3 -m pip install wheel setuptools
@@ -313,7 +296,7 @@ commands:
- run:
name: Build torch-tensorrt python legacy release (pre-cxx11-abi)
command: |
- export CUDA_HOME=/usr/local/cuda-11.8/
+ export CUDA_HOME=/usr/local/cuda-12.1/
mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE
cd py
python3 -m pip install wheel setuptools
@@ -345,7 +328,7 @@ commands:
- run:
name: Build torch-tensorrt python release package
command: |
- export CUDA_HOME=/usr/local/cuda-11.8/
+ export CUDA_HOME=/usr/local/cuda-12.1/
cd ~/project/py
python3 setup.py bdist_wheel --use-cxx11-abi --release
python3 setup.py install --use-cxx11-abi --release
@@ -357,7 +340,7 @@ commands:
- run:
name: Build torch-tensorrt python package
command: |
- export CUDA_HOME=/usr/local/cuda-11.8/
+ export CUDA_HOME=/usr/local/cuda-12.1/
cd ~/project/py
python3 setup.py bdist_wheel --use-cxx11-abi
python3 setup.py install --use-cxx11-abi
@@ -375,7 +358,7 @@ commands:
- run:
name: Build torch-tensorrt python release with only the fx backend
command: |
- export CUDA_HOME=/usr/local/cuda-11.8/
+ export CUDA_HOME=/usr/local/cuda-12.1/
mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE
cd py
python3 -m pip install wheel setuptools
@@ -447,7 +430,7 @@ commands:
name: Build torch-tensorrt library with CMake
command: |
mkdir build
- export PATH=$PATH:/usr/local/cuda-11.8/bin
+ export PATH=$PATH:/usr/local/cuda-12.1/bin
~/cmake/bin/cmake -S. -Bbuild \
-DCMAKE_MODULE_PATH=cmake/Module \
-DTorch_DIR=/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch/share/cmake/Torch \
@@ -499,7 +482,7 @@ commands:
name: Run core / C++ tests
no_output_timeout: 15m
environment:
- LD_LIBRARY_PATH: "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch_tensorrt.libs:/home/circleci/project/bazel-project/external/libtorch_pre_cxx11_abi/lib/:/home/circleci/project/bazel-project/external/tensorrt/lib/:/usr/local/cuda-11.8/lib64/:$LD_LIBRARY_PATH"
+ LD_LIBRARY_PATH: "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch_tensorrt.libs:/home/circleci/project/bazel-project/external/libtorch_pre_cxx11_abi/lib/:/home/circleci/project/bazel-project/external/tensorrt/lib/:/usr/local/cuda-12.1/lib64/:$LD_LIBRARY_PATH"
command: |
set -e
mv toolchains/ci_workspaces/WORKSPACE.<< parameters.platform >> WORKSPACE
@@ -529,7 +512,7 @@ commands:
environment:
USE_HOST_DEPS: "1"
PYT_PATH: "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/"
- LD_LIBRARY_PATH: "/usr/lib/x86_64-linux-gnu/:/usr/local/cuda-11.8/lib64/:$LD_LIBRARY_PATH"
+ LD_LIBRARY_PATH: "/usr/lib/x86_64-linux-gnu/:/usr/local/cuda-12.1/lib64/:$LD_LIBRARY_PATH"
command: |
set -e
mkdir -p /tmp/artifacts/test_results
@@ -819,7 +802,7 @@ jobs:
type: boolean
default: false
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.small
steps:
- checkout
@@ -881,7 +864,7 @@ jobs:
cudnn-version:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.large
parallelism: 4
steps:
@@ -922,7 +905,7 @@ jobs:
python-version:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.large
steps:
- checkout
@@ -956,7 +939,7 @@ jobs:
type: string
parallelism: 8
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.large
steps:
- checkout
@@ -992,7 +975,7 @@ jobs:
type: string
parallelism: 8
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.large
steps:
- checkout
@@ -1030,7 +1013,7 @@ jobs:
python-version:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.large
steps:
- checkout
@@ -1068,7 +1051,7 @@ jobs:
type: string
parallelism: 4
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.small
steps:
- when:
@@ -1116,7 +1099,7 @@ jobs:
torch-build-index:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.small
steps:
- when:
@@ -1252,7 +1235,7 @@ jobs:
python-version:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.small
steps:
- checkout
@@ -1288,7 +1271,7 @@ jobs:
torch-base-image:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.small
steps:
- checkout
@@ -1316,7 +1299,7 @@ jobs:
torch-base-image:
type: string
machine:
- image: linux-cuda-11:2023.02.1
+ image: linux-cuda-12:2023.05.1
resource_class: gpu.nvidia.small
steps:
- when:
@@ -1352,13 +1335,13 @@ parameters:
# Nightly platform config
torch-build:
type: string
- default: "2.1.0.dev20230605+cu118"
+ default: "2.1.0.dev20230613+cu121"
torchvision-build:
type: string
- default: "0.16.0.dev20230605+cu118"
+ default: "0.16.0.dev20230613+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu118"
+ default: "https://download.pytorch.org/whl/nightly/cu121"
torch-build-legacy:
type: string
default: "1.13.1+cu117"
@@ -1373,7 +1356,7 @@ parameters:
default: true
cudnn-version:
type: string
- default: "8.8.0.121"
+ default: "8.9.1.23"
trt-version-short:
type: string
default: "8.6.1"
diff --git a/README.md b/README.md
index 8db561e2e1..715660d173 100644
--- a/README.md
+++ b/README.md
@@ -116,9 +116,9 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedd
These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.
- Bazel 5.2.0
-- Libtorch 2.1.0.dev20230605 (built with CUDA 11.8)
-- CUDA 11.8
-- cuDNN 8.8.0
+- Libtorch 2.1.0.dev20230613 (built with CUDA 12.1)
+- CUDA 12.1
+- cuDNN 8.9.1
- TensorRT 8.6.1
## Prebuilt Binaries and Wheel files
diff --git a/WORKSPACE b/WORKSPACE
index 4df265c64c..dce977fffc 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -41,7 +41,7 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-11.8/",
+ path = "/usr/local/cuda-12.1/",
)
#############################################################################################################
@@ -51,17 +51,17 @@ new_local_repository(
http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "999becce82b73e566d0ffe010cd21fea8cf3a33f90f09dcc6b01150b820ae063",
+ sha256 = "1c3712b3b1de34e9989549f53675b557f6f0ca0b800ccbbc80c941af68abcc65",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230605%2Bcu118.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "786cc728c63ea69c40bd8fb535cf8e5e1dfff1d43eaad3eb5256b9ed89c1b268",
+ sha256 = "6bbb53f2f9533804175a0d2eeae5093ee1907158a9ec75c7fb9d10e2103d5df5",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230605%2Bcu118.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
)
# Download these tarballs manually from the NVIDIA website
@@ -71,20 +71,20 @@ http_archive(
http_archive(
name = "cudnn",
build_file = "@//third_party/cudnn/archive:BUILD",
- sha256 = "36fff137153ef73e6ee10bfb07f4381240a86fb9fb78ce372414b528cbab2293",
- strip_prefix = "cudnn-linux-x86_64-8.8.0.121_cuda11-archive",
+ sha256 = "35163c5c542be0c511738b27e25235193cbeedc5e0e006e44b1cdeaf1922e83e",
+ strip_prefix = "cudnn-linux-x86_64-8.9.1.23_cuda12-archive",
urls = [
- "https://developer.download.nvidia.com/compute/cudnn/secure/8.8.0/local_installers/11.8/cudnn-linux-x86_64-8.8.0.121_cuda11-archive.tar.xz",
+ "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.1/local_installers/12.x/cudnn-linux-x86_64-8.9.1.23_cuda12-archive.tar.xz",
],
)
http_archive(
name = "tensorrt",
build_file = "@//third_party/tensorrt/archive:BUILD",
- sha256 = "15bfe6053d45feec45ecc7123a9106076b0b43fa0435f242d89dca0778337759",
+ sha256 = "0f8157a5fc5329943b338b893591373350afa90ca81239cdadd7580cd1eba254",
strip_prefix = "TensorRT-8.6.1.6",
urls = [
- "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.6.1/tars/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-11.8.tar.gz",
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/secure/8.6.1/tars/TensorRT-8.6.1.6.Linux.x86_64-gnu.cuda-12.0.tar.gz",
],
)
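Note: the cuDNN and TensorRT tarballs pinned above must be downloaded manually from NVIDIA (the URLs sit behind a login), so it can be worth checking a local download against the sha256 recorded in the WORKSPACE before building. A minimal sketch, not part of the patch; the local file name is an assumption matching the cuDNN archive above:

```
import hashlib

# sha256 pinned for the cuDNN 8.9.1 archive in the WORKSPACE hunk above
PINNED = "35163c5c542be0c511738b27e25235193cbeedc5e0e006e44b1cdeaf1922e83e"

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file so large CUDA/cuDNN tarballs need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local path to the manually downloaded archive
archive = "cudnn-linux-x86_64-8.9.1.23_cuda12-archive.tar.xz"
assert sha256_of(archive) == PINNED, "archive does not match the WORKSPACE pin"
```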
diff --git a/docker/Dockerfile b/docker/Dockerfile
index b8e401735e..612fe042a4 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,7 +1,7 @@
# Base image starts with CUDA
-ARG BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04
+ARG BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
FROM ${BASE_IMG} as base
-ENV BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04
+ENV BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04
ARG TENSORRT_VERSION
ENV TENSORRT_VERSION=${TENSORRT_VERSION}
diff --git a/docker/README.md b/docker/README.md
index 527b7ae2b2..9f83f25134 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch
### Instructions
-- The example below uses CUDNN 8.8 and TensorRT 8.6
+- The example below uses CUDNN 8.9 and TensorRT 8.6
- See dependencies for a list of current default dependencies.
> From root of Torch-TensorRT repo
Build:
```
-DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.8 -f docker/Dockerfile -t torch_tensorrt:latest .
+DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.9 -f docker/Dockerfile -t torch_tensorrt:latest .
```
Run:
diff --git a/py/ci/build_whl.sh b/py/ci/build_whl.sh
index 347a66c562..3249900318 100755
--- a/py/ci/build_whl.sh
+++ b/py/ci/build_whl.sh
@@ -3,7 +3,7 @@
# Example usage: docker run -it -v$(pwd)/..:/workspace/TRTorch build_trtorch_wheel /bin/bash /workspace/TRTorch/py/build_whl.sh
export CXX=g++
-export CUDA_HOME=/usr/local/cuda-11.8
+export CUDA_HOME=/usr/local/cuda-12.1
export PROJECT_DIR=/workspace/project
cp -r $CUDA_HOME /usr/local/cuda
diff --git a/py/requirements.txt b/py/requirements.txt
index d49e95c9b6..55e732331c 100644
--- a/py/requirements.txt
+++ b/py/requirements.txt
@@ -1,8 +1,8 @@
numpy
packaging
pybind11==2.6.2
---extra-index-url https://download.pytorch.org/whl/nightly/cu118
-torch==2.1.0.dev20230605+cu118
-torchvision==0.16.0.dev20230605+cu118
+--extra-index-url https://download.pytorch.org/whl/nightly/cu121
+torch==2.1.0.dev20230613+cu121
+torchvision==0.16.0.dev20230613+cu121
--extra-index-url https://pypi.ngc.nvidia.com
tensorrt==8.6.1
diff --git a/py/torch_tensorrt/__init__.py b/py/torch_tensorrt/__init__.py
index bf7fb21546..f7bc963343 100644
--- a/py/torch_tensorrt/__init__.py
+++ b/py/torch_tensorrt/__init__.py
@@ -59,7 +59,7 @@ def _find_lib(name, paths):
elif sys.platform.startswith("linux"):
LINUX_PATHS = [
- "/usr/local/cuda-11.8/lib64",
+ "/usr/local/cuda-12.1/lib64",
]
if "LD_LIBRARY_PATH" in os.environ:
diff --git a/py/torch_tensorrt/fx/README.md b/py/torch_tensorrt/fx/README.md
index 06b86c0a98..0670918129 100644
--- a/py/torch_tensorrt/fx/README.md
+++ b/py/torch_tensorrt/fx/README.md
@@ -9,7 +9,7 @@ FX2TRT is merged as FX module in Torch-TensorRT
$ conda create --name python_env python=3.8
$ conda activate python_env
# Recommend to install PyTorch 2.0 and later
- $ conda install pytorch torchvision torchtext cudatoolkit=11.8 -c pytorch-nightly
+ $ conda install pytorch torchvision torchtext cudatoolkit=12.1 -c pytorch-nightly
# Install TensorRT python package
$ pip3 install nvidia-pyindex
$ pip3 install tensorrt==8.6.1
diff --git a/py/versions.py b/py/versions.py
index 54817acfcd..0decc34b28 100644
--- a/py/versions.py
+++ b/py/versions.py
@@ -1,4 +1,4 @@
__version__ = "1.5.0.dev0"
-__cuda_version__ = "11.8"
-__cudnn_version__ = "8.8"
+__cuda_version__ = "12.1"
+__cudnn_version__ = "8.9"
__tensorrt_version__ = "8.6"
diff --git a/tests/modules/requirements.txt b/tests/modules/requirements.txt
index aba4eb0327..51fcf95500 100644
--- a/tests/modules/requirements.txt
+++ b/tests/modules/requirements.txt
@@ -1,3 +1,3 @@
-timm==v0.4.12
+timm==v0.9.2
transformers==4.30.0
torchvision
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64 b/toolchains/ci_workspaces/WORKSPACE.x86_64
index 5103fd2f75..ff95a22b2f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64
@@ -41,7 +41,7 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-11.8/",
+ path = "/usr/local/cuda-12.1/",
)
new_local_repository(
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
index 23443f18a7..94f91e835f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
@@ -41,7 +41,7 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-11.8",
+ path = "/usr/local/cuda-12.1",
)
new_local_repository(
@@ -56,17 +56,17 @@ new_local_repository(
http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "999becce82b73e566d0ffe010cd21fea8cf3a33f90f09dcc6b01150b820ae063",
+ sha256 = "1c3712b3b1de34e9989549f53675b557f6f0ca0b800ccbbc80c941af68abcc65",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230605%2Bcu118.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "786cc728c63ea69c40bd8fb535cf8e5e1dfff1d43eaad3eb5256b9ed89c1b268",
+ sha256 = "6bbb53f2f9533804175a0d2eeae5093ee1907158a9ec75c7fb9d10e2103d5df5",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230605%2Bcu118.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
)
####################################################################################
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
index 23443f18a7..94f91e835f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
@@ -41,7 +41,7 @@ local_repository(
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-11.8",
+ path = "/usr/local/cuda-12.1",
)
new_local_repository(
@@ -56,17 +56,17 @@ new_local_repository(
http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "999becce82b73e566d0ffe010cd21fea8cf3a33f90f09dcc6b01150b820ae063",
+ sha256 = "1c3712b3b1de34e9989549f53675b557f6f0ca0b800ccbbc80c941af68abcc65",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230605%2Bcu118.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "786cc728c63ea69c40bd8fb535cf8e5e1dfff1d43eaad3eb5256b9ed89c1b268",
+ sha256 = "6bbb53f2f9533804175a0d2eeae5093ee1907158a9ec75c7fb9d10e2103d5df5",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu118/libtorch-shared-with-deps-2.1.0.dev20230605%2Bcu118.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
)
####################################################################################
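Taken together, the first patch moves every pin from CUDA 11.8 / cuDNN 8.8 to CUDA 12.1 / cuDNN 8.9 and onto the June 13 nightly wheels. A quick sanity check after installing the cu121 nightly and the `tensorrt` pip package — a sketch assuming both are importable, not part of the patch itself:

```
import torch
import tensorrt

print(torch.__version__)               # expected to start with 2.1.0.dev20230613+cu121
print(torch.version.cuda)              # expected "12.1"
print(torch.backends.cudnn.version())  # e.g. 8900+ for a cuDNN 8.9.x build
print(tensorrt.__version__)            # expected "8.6.1"
```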
From 27651e84d001ed00e2e1333395f738e4621e0bc2 Mon Sep 17 00:00:00 2001
From: gs-olive <113141689+gs-olive@users.noreply.github.com>
Date: Wed, 14 Jun 2023 22:06:25 -0700
Subject: [PATCH 2/6] Downgrade to CuDNN 8.9.0
---
.circleci/config.yml | 21 ++++++++++++---------
README.md | 2 +-
WORKSPACE | 6 +++---
py/requirements.txt | 3 ++-
4 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 50ab360b1c..b42c7fea81 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -94,7 +94,7 @@ commands:
- << parameters.install-dir >>
install-cudnn:
- description: "Install CUDNN 8.9.1"
+ description: "Install CUDNN 8.9.0"
parameters:
os:
type: string
@@ -104,7 +104,7 @@ commands:
default: "x86_64"
cudnn-version:
type: string
- default: "8.9.1.23"
+ default: "8.9.0.131"
cuda-version:
type: string
default: "cuda12.1"
@@ -183,7 +183,7 @@ commands:
default: "cuda12.0"
cudnn-version:
type: string
- default: "8.9.1.23"
+ default: "8.9.0.131"
trt-version-short:
type: string
default: "8.6.1"
@@ -235,7 +235,7 @@ commands:
default: "8.6.1"
cudnn-version-long:
type: string
- default: "8.9.1.23"
+ default: "8.9.0.131"
steps:
- run:
name: Set up python environment
@@ -252,11 +252,14 @@ commands:
parameters:
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121"
+ default: "2.1.0.dev20230613+cu121.with.pypi.cudnn.with.pypi.cudnn"
torchvision-build:
type: string
default: "0.16.0.dev20230613+cu121"
torch-build-index:
+ type: string
+ default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
+ torchvision-build-index:
type: string
default: "https://download.pytorch.org/whl/nightly/cu121"
steps:
@@ -264,7 +267,7 @@ commands:
name: Install Torch
command: |
pip3 install --upgrade pip
- pip3 install torch==<< parameters.torch-build >> torchvision==<< parameters.torchvision-build >> --extra-index-url << parameters.torch-build-index >>
+ pip3 install torch==<< parameters.torch-build >> torchvision==<< parameters.torchvision-build >> --extra-index-url << parameters.torch-build-index >> --extra-index-url << parameters.torchvision-build-index >>
build-py:
description: "Build the torch-tensorrt python release (pre-cxx11-abi)"
@@ -1335,13 +1338,13 @@ parameters:
# Nightly platform config
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121"
+ default: "2.1.0.dev20230613+cu121.with.pypi.cudnn"
torchvision-build:
type: string
default: "0.16.0.dev20230613+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121"
+ default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
torch-build-legacy:
type: string
default: "1.13.1+cu117"
@@ -1356,7 +1359,7 @@ parameters:
default: true
cudnn-version:
type: string
- default: "8.9.1.23"
+ default: "8.9.0.131"
trt-version-short:
type: string
default: "8.6.1"
diff --git a/README.md b/README.md
index 715660d173..abfb97712d 100644
--- a/README.md
+++ b/README.md
@@ -118,7 +118,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
- Bazel 5.2.0
- Libtorch 2.1.0.dev20230613 (built with CUDA 12.1)
- CUDA 12.1
-- cuDNN 8.9.1
+- cuDNN 8.9.0
- TensorRT 8.6.1
## Prebuilt Binaries and Wheel files
diff --git a/WORKSPACE b/WORKSPACE
index dce977fffc..6de4909833 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -71,10 +71,10 @@ http_archive(
http_archive(
name = "cudnn",
build_file = "@//third_party/cudnn/archive:BUILD",
- sha256 = "35163c5c542be0c511738b27e25235193cbeedc5e0e006e44b1cdeaf1922e83e",
- strip_prefix = "cudnn-linux-x86_64-8.9.1.23_cuda12-archive",
+ sha256 = "477631002be61022b60961cba0a501271507a93f81d6b08384bc320cb8706c98",
+ strip_prefix = "cudnn-linux-x86_64-8.9.0.131_cuda12-archive",
urls = [
- "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.1/local_installers/12.x/cudnn-linux-x86_64-8.9.1.23_cuda12-archive.tar.xz",
+ "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.0/local_installers/12.x/cudnn-linux-x86_64-8.9.0.131_cuda12-archive.tar.xz",
],
)
diff --git a/py/requirements.txt b/py/requirements.txt
index 55e732331c..c1a473ad85 100644
--- a/py/requirements.txt
+++ b/py/requirements.txt
@@ -2,7 +2,8 @@ numpy
packaging
pybind11==2.6.2
--extra-index-url https://download.pytorch.org/whl/nightly/cu121
-torch==2.1.0.dev20230613+cu121
+--extra-index-url https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn
+torch==2.1.0.dev20230613+cu121.with.pypi.cudnn
torchvision==0.16.0.dev20230613+cu121
--extra-index-url https://pypi.ngc.nvidia.com
tensorrt==8.6.1
From 9d33ee70d75de61f54675944c3495aa9b58ec173 Mon Sep 17 00:00:00 2001
From: gs-olive <113141689+gs-olive@users.noreply.github.com>
Date: Wed, 21 Jun 2023 20:50:23 -0700
Subject: [PATCH 3/6] Revert "Downgrade to CuDNN 8.9.0"
This reverts commit 5e5a6d9f9c015f99e25a2751b2d811b3733b1e56.
---
.circleci/config.yml | 12 ++++++------
README.md | 2 +-
WORKSPACE | 6 +++---
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index b42c7fea81..c27bb59379 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -94,7 +94,7 @@ commands:
- << parameters.install-dir >>
install-cudnn:
- description: "Install CUDNN 8.9.0"
+ description: "Install CUDNN 8.9.1"
parameters:
os:
type: string
@@ -104,7 +104,7 @@ commands:
default: "x86_64"
cudnn-version:
type: string
- default: "8.9.0.131"
+ default: "8.9.1.23"
cuda-version:
type: string
default: "cuda12.1"
@@ -183,7 +183,7 @@ commands:
default: "cuda12.0"
cudnn-version:
type: string
- default: "8.9.0.131"
+ default: "8.9.1.23"
trt-version-short:
type: string
default: "8.6.1"
@@ -235,7 +235,7 @@ commands:
default: "8.6.1"
cudnn-version-long:
type: string
- default: "8.9.0.131"
+ default: "8.9.1.23"
steps:
- run:
name: Set up python environment
@@ -252,7 +252,7 @@ commands:
parameters:
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121.with.pypi.cudnn.with.pypi.cudnn"
+ default: "2.1.0.dev20230613+cu121.with.pypi.cudnn"
torchvision-build:
type: string
default: "0.16.0.dev20230613+cu121"
@@ -1359,7 +1359,7 @@ parameters:
default: true
cudnn-version:
type: string
- default: "8.9.0.131"
+ default: "8.9.1.23"
trt-version-short:
type: string
default: "8.6.1"
diff --git a/README.md b/README.md
index abfb97712d..715660d173 100644
--- a/README.md
+++ b/README.md
@@ -118,7 +118,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
- Bazel 5.2.0
- Libtorch 2.1.0.dev20230613 (built with CUDA 12.1)
- CUDA 12.1
-- cuDNN 8.9.0
+- cuDNN 8.9.1
- TensorRT 8.6.1
## Prebuilt Binaries and Wheel files
diff --git a/WORKSPACE b/WORKSPACE
index 6de4909833..dce977fffc 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -71,10 +71,10 @@ http_archive(
http_archive(
name = "cudnn",
build_file = "@//third_party/cudnn/archive:BUILD",
- sha256 = "477631002be61022b60961cba0a501271507a93f81d6b08384bc320cb8706c98",
- strip_prefix = "cudnn-linux-x86_64-8.9.0.131_cuda12-archive",
+ sha256 = "35163c5c542be0c511738b27e25235193cbeedc5e0e006e44b1cdeaf1922e83e",
+ strip_prefix = "cudnn-linux-x86_64-8.9.1.23_cuda12-archive",
urls = [
- "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.0/local_installers/12.x/cudnn-linux-x86_64-8.9.0.131_cuda12-archive.tar.xz",
+ "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.1/local_installers/12.x/cudnn-linux-x86_64-8.9.1.23_cuda12-archive.tar.xz",
],
)
From 0e5e62e8df83f115a64aa66b911438779f33fb00 Mon Sep 17 00:00:00 2001
From: gs-olive <113141689+gs-olive@users.noreply.github.com>
Date: Mon, 26 Jun 2023 17:31:29 -0700
Subject: [PATCH 4/6] with_pypi_cudnn_experiment
---
.circleci/config.yml | 8 ++++----
py/requirements.txt | 4 ++--
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index c27bb59379..a7f092b865 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -252,13 +252,13 @@ commands:
parameters:
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121.with.pypi.cudnn"
+ default: "2.1.0.dev20230613+cu121"
torchvision-build:
type: string
default: "0.16.0.dev20230613+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
+ default: "https://download.pytorch.org/whl/nightly/cu121"
torchvision-build-index:
type: string
default: "https://download.pytorch.org/whl/nightly/cu121"
@@ -1338,13 +1338,13 @@ parameters:
# Nightly platform config
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121.with.pypi.cudnn"
+ default: "2.1.0.dev20230613+cu121"
torchvision-build:
type: string
default: "0.16.0.dev20230613+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
+ default: "https://download.pytorch.org/whl/nightly/cu121"
torch-build-legacy:
type: string
default: "1.13.1+cu117"
diff --git a/py/requirements.txt b/py/requirements.txt
index c1a473ad85..471682371b 100644
--- a/py/requirements.txt
+++ b/py/requirements.txt
@@ -2,8 +2,8 @@ numpy
packaging
pybind11==2.6.2
--extra-index-url https://download.pytorch.org/whl/nightly/cu121
---extra-index-url https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn
-torch==2.1.0.dev20230613+cu121.with.pypi.cudnn
+#--extra-index-url https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn
+torch==2.1.0.dev20230613+cu121#.with.pypi.cudnn
torchvision==0.16.0.dev20230613+cu121
--extra-index-url https://pypi.ngc.nvidia.com
tensorrt==8.6.1
From 16f5d0d47fe4ae8f5de18dd19007f7f142408a6b Mon Sep 17 00:00:00 2001
From: gs-olive <113141689+gs-olive@users.noreply.github.com>
Date: Mon, 26 Jun 2023 20:10:14 -0700
Subject: [PATCH 5/6] Revert "with_pypi_cudnn_experiment"
This reverts commit c9145a6de88830dc3ff5285dc7e546e9ee5937f8.
- Also upgrades to the June 19 nightly and updates the CI WORKSPACE
---
.circleci/config.yml | 12 ++++++------
README.md | 2 +-
WORKSPACE | 8 ++++----
py/requirements.txt | 5 ++---
.../dynamo/backend/lowering/_decompositions.py | 1 -
toolchains/ci_workspaces/WORKSPACE.x86_64 | 16 ++++++++++------
.../ci_workspaces/WORKSPACE.x86_64.release.rhel | 8 ++++----
.../WORKSPACE.x86_64.release.ubuntu | 8 ++++----
8 files changed, 31 insertions(+), 29 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index a7f092b865..24e39f1336 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -252,13 +252,13 @@ commands:
parameters:
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121"
+ default: "2.1.0.dev20230619+cu121.with.pypi.cudnn"
torchvision-build:
type: string
- default: "0.16.0.dev20230613+cu121"
+ default: "0.16.0.dev20230619+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121"
+ default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
torchvision-build-index:
type: string
default: "https://download.pytorch.org/whl/nightly/cu121"
@@ -1338,13 +1338,13 @@ parameters:
# Nightly platform config
torch-build:
type: string
- default: "2.1.0.dev20230613+cu121"
+ default: "2.1.0.dev20230619+cu121.with.pypi.cudnn"
torchvision-build:
type: string
- default: "0.16.0.dev20230613+cu121"
+ default: "0.16.0.dev20230619+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121"
+ default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
torch-build-legacy:
type: string
default: "1.13.1+cu117"
diff --git a/README.md b/README.md
index 715660d173..f7172e8e2e 100644
--- a/README.md
+++ b/README.md
@@ -116,7 +116,7 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedd
These are the following dependencies used to verify the testcases. Torch-TensorRT can work with other versions, but the tests are not guaranteed to pass.
- Bazel 5.2.0
-- Libtorch 2.1.0.dev20230613 (built with CUDA 12.1)
+- Libtorch 2.1.0.dev20230619 (built with CUDA 12.1)
- CUDA 12.1
- cuDNN 8.9.1
- TensorRT 8.6.1
diff --git a/WORKSPACE b/WORKSPACE
index dce977fffc..54344ba74a 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -51,17 +51,17 @@ new_local_repository(
http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "1c3712b3b1de34e9989549f53675b557f6f0ca0b800ccbbc80c941af68abcc65",
+ sha256 = "5ba55259b65e071346a2b547b8d1378595f1467a39aaa923fecb09f134f1bcba",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "6bbb53f2f9533804175a0d2eeae5093ee1907158a9ec75c7fb9d10e2103d5df5",
+ sha256 = "8f6661bfc11597e77400e9e36cc8dd8e5e385ba82361d630d9ccede8518d7c7e",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
# Download these tarballs manually from the NVIDIA website
diff --git a/py/requirements.txt b/py/requirements.txt
index 471682371b..013b2eeb44 100644
--- a/py/requirements.txt
+++ b/py/requirements.txt
@@ -2,8 +2,7 @@ numpy
packaging
pybind11==2.6.2
--extra-index-url https://download.pytorch.org/whl/nightly/cu121
-#--extra-index-url https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn
-torch==2.1.0.dev20230613+cu121#.with.pypi.cudnn
-torchvision==0.16.0.dev20230613+cu121
+torch==2.1.0.dev20230619+cu121
+torchvision==0.16.0.dev20230619+cu121
--extra-index-url https://pypi.ngc.nvidia.com
tensorrt==8.6.1
diff --git a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
index 1ccc010e3a..03e96fe312 100644
--- a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
+++ b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
@@ -34,7 +34,6 @@ def inplace_op(*args, **kwargs):
replace_inplace_op(aten.index_reduce_, aten.index_reduce)
replace_inplace_op(aten.logit_, aten.logit)
replace_inplace_op(aten.relu_, aten.relu)
-replace_inplace_op(aten.renorm_, aten.renorm)
replace_inplace_op(aten.round_, aten.round)
replace_inplace_op(aten.scatter_, aten.scatter)
replace_inplace_op(aten.scatter_add_, aten.scatter_add)
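The hunk above drops `aten.renorm_` from the list of in-place ops that are rerouted to their functional counterparts; the surrounding `replace_inplace_op` helper is only partially visible here. A minimal sketch of the general pattern, assuming a `torch._decomp.register_decomposition` registry — it may differ from the repository's actual helper:

```
import torch
from torch._decomp import register_decomposition

aten = torch.ops.aten
DECOMPOSITIONS = {}  # hypothetical local decomposition table

def replace_inplace_op(aten_op, outplace_op):
    """Decompose an in-place aten op into its functional form followed by a
    copy_ back into the first argument. Sketch only."""

    @register_decomposition(aten_op, registry=DECOMPOSITIONS)
    def inplace_op(*args, **kwargs):
        out = outplace_op(*args, **kwargs)
        return args[0].copy_(out)

    return inplace_op

replace_inplace_op(aten.relu_, aten.relu)
```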
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64 b/toolchains/ci_workspaces/WORKSPACE.x86_64
index ff95a22b2f..2587c1fa71 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64
@@ -61,16 +61,20 @@ new_local_repository(
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
# for both versions here and do not use --config=pre-cxx11-abi
-new_local_repository(
+http_archive(
name = "libtorch",
- path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
- build_file = "third_party/libtorch/BUILD"
+ build_file = "@//third_party/libtorch:BUILD",
+ sha256 = "5ba55259b65e071346a2b547b8d1378595f1467a39aaa923fecb09f134f1bcba",
+ strip_prefix = "libtorch",
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
-new_local_repository(
+http_archive(
name = "libtorch_pre_cxx11_abi",
- path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
- build_file = "third_party/libtorch/BUILD"
+ build_file = "@//third_party/libtorch:BUILD",
+ sha256 = "8f6661bfc11597e77400e9e36cc8dd8e5e385ba82361d630d9ccede8518d7c7e",
+ strip_prefix = "libtorch",
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
new_local_repository(
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
index 94f91e835f..66e552dc9f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.rhel
@@ -56,17 +56,17 @@ new_local_repository(
http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "1c3712b3b1de34e9989549f53675b557f6f0ca0b800ccbbc80c941af68abcc65",
+ sha256 = "5ba55259b65e071346a2b547b8d1378595f1467a39aaa923fecb09f134f1bcba",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "6bbb53f2f9533804175a0d2eeae5093ee1907158a9ec75c7fb9d10e2103d5df5",
+ sha256 = "8f6661bfc11597e77400e9e36cc8dd8e5e385ba82361d630d9ccede8518d7c7e",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
####################################################################################
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
index 94f91e835f..66e552dc9f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64.release.ubuntu
@@ -56,17 +56,17 @@ new_local_repository(
http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "1c3712b3b1de34e9989549f53675b557f6f0ca0b800ccbbc80c941af68abcc65",
+ sha256 = "5ba55259b65e071346a2b547b8d1378595f1467a39aaa923fecb09f134f1bcba",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
http_archive(
name = "libtorch_pre_cxx11_abi",
build_file = "@//third_party/libtorch:BUILD",
- sha256 = "6bbb53f2f9533804175a0d2eeae5093ee1907158a9ec75c7fb9d10e2103d5df5",
+ sha256 = "8f6661bfc11597e77400e9e36cc8dd8e5e385ba82361d630d9ccede8518d7c7e",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230613%2Bcu121.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
)
####################################################################################
From e6cc509164cf3ffbc177c2bcc662df96e7ed6fc9 Mon Sep 17 00:00:00 2001
From: gs-olive <113141689+gs-olive@users.noreply.github.com>
Date: Wed, 28 Jun 2023 12:29:32 -0700
Subject: [PATCH 6/6] Roll back to cuDNN 8.8
---
.circleci/config.yml | 20 ++++++++++----------
README.md | 2 +-
WORKSPACE | 6 +++---
docker/README.md | 4 ++--
py/versions.py | 2 +-
toolchains/ci_workspaces/WORKSPACE.x86_64 | 16 ++++++----------
6 files changed, 23 insertions(+), 27 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 24e39f1336..0332d25e4e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -94,7 +94,7 @@ commands:
- << parameters.install-dir >>
install-cudnn:
- description: "Install CUDNN 8.9.1"
+ description: "Install CUDNN 8.8.1"
parameters:
os:
type: string
@@ -104,10 +104,10 @@ commands:
default: "x86_64"
cudnn-version:
type: string
- default: "8.9.1.23"
+ default: "8.8.1.3"
cuda-version:
type: string
- default: "cuda12.1"
+ default: "cuda12.0"
steps:
- run:
name: Install CUDNN
@@ -183,7 +183,7 @@ commands:
default: "cuda12.0"
cudnn-version:
type: string
- default: "8.9.1.23"
+ default: "8.8.1.3"
trt-version-short:
type: string
default: "8.6.1"
@@ -235,7 +235,7 @@ commands:
default: "8.6.1"
cudnn-version-long:
type: string
- default: "8.9.1.23"
+ default: "8.8.1.3"
steps:
- run:
name: Set up python environment
@@ -252,13 +252,13 @@ commands:
parameters:
torch-build:
type: string
- default: "2.1.0.dev20230619+cu121.with.pypi.cudnn"
+ default: "2.1.0.dev20230619+cu121"
torchvision-build:
type: string
default: "0.16.0.dev20230619+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
+ default: "https://download.pytorch.org/whl/nightly/cu121"
torchvision-build-index:
type: string
default: "https://download.pytorch.org/whl/nightly/cu121"
@@ -1338,13 +1338,13 @@ parameters:
# Nightly platform config
torch-build:
type: string
- default: "2.1.0.dev20230619+cu121.with.pypi.cudnn"
+ default: "2.1.0.dev20230619+cu121"
torchvision-build:
type: string
default: "0.16.0.dev20230619+cu121"
torch-build-index:
type: string
- default: "https://download.pytorch.org/whl/nightly/cu121_pypi_cudnn"
+ default: "https://download.pytorch.org/whl/nightly/cu121"
torch-build-legacy:
type: string
default: "1.13.1+cu117"
@@ -1359,7 +1359,7 @@ parameters:
default: true
cudnn-version:
type: string
- default: "8.9.1.23"
+ default: "8.8.1.3"
trt-version-short:
type: string
default: "8.6.1"
diff --git a/README.md b/README.md
index f7172e8e2e..c43fd0d08e 100644
--- a/README.md
+++ b/README.md
@@ -118,7 +118,7 @@ These are the following dependencies used to verify the testcases. Torch-TensorR
- Bazel 5.2.0
- Libtorch 2.1.0.dev20230619 (built with CUDA 12.1)
- CUDA 12.1
-- cuDNN 8.9.1
+- cuDNN 8.8.1
- TensorRT 8.6.1
## Prebuilt Binaries and Wheel files
diff --git a/WORKSPACE b/WORKSPACE
index 54344ba74a..8bc298f72a 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -71,10 +71,10 @@ http_archive(
http_archive(
name = "cudnn",
build_file = "@//third_party/cudnn/archive:BUILD",
- sha256 = "35163c5c542be0c511738b27e25235193cbeedc5e0e006e44b1cdeaf1922e83e",
- strip_prefix = "cudnn-linux-x86_64-8.9.1.23_cuda12-archive",
+ sha256 = "79d77a769c7e7175abc7b5c2ed5c494148c0618a864138722c887f95c623777c",
+ strip_prefix = "cudnn-linux-x86_64-8.8.1.3_cuda12-archive",
urls = [
- "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.9.1/local_installers/12.x/cudnn-linux-x86_64-8.9.1.23_cuda12-archive.tar.xz",
+ "https://developer.nvidia.com/downloads/compute/cudnn/secure/8.8.1/local_installers/12.0/cudnn-linux-x86_64-8.8.1.3_cuda12-archive.tar.xz",
],
)
diff --git a/docker/README.md b/docker/README.md
index 9f83f25134..527b7ae2b2 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch
### Instructions
-- The example below uses CUDNN 8.9 and TensorRT 8.6
+- The example below uses CUDNN 8.8 and TensorRT 8.6
- See dependencies for a list of current default dependencies.
> From root of Torch-TensorRT repo
Build:
```
-DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.9 -f docker/Dockerfile -t torch_tensorrt:latest .
+DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=8.6 --build-arg CUDNN_VERSION=8.8 -f docker/Dockerfile -t torch_tensorrt:latest .
```
Run:
diff --git a/py/versions.py b/py/versions.py
index 0decc34b28..14f59ce4a0 100644
--- a/py/versions.py
+++ b/py/versions.py
@@ -1,4 +1,4 @@
__version__ = "1.5.0.dev0"
__cuda_version__ = "12.1"
-__cudnn_version__ = "8.9"
+__cudnn_version__ = "8.8"
__tensorrt_version__ = "8.6"
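After the rollback, `py/versions.py` advertises cuDNN 8.8 while the installed Torch wheel reports whatever cuDNN it was actually built against. A small consistency check — a sketch using the pin above, not shipped with the patch:

```
import torch

__cudnn_version__ = "8.8"  # pin from py/versions.py after this patch

runtime = torch.backends.cudnn.version()  # e.g. 8801 for cuDNN 8.8.1
runtime_major_minor = f"{runtime // 1000}.{(runtime % 1000) // 100}"

if runtime_major_minor != __cudnn_version__:
    print(f"warning: pinned cuDNN {__cudnn_version__}, "
          f"runtime cuDNN {runtime_major_minor}")
```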
diff --git a/toolchains/ci_workspaces/WORKSPACE.x86_64 b/toolchains/ci_workspaces/WORKSPACE.x86_64
index 2587c1fa71..ff95a22b2f 100644
--- a/toolchains/ci_workspaces/WORKSPACE.x86_64
+++ b/toolchains/ci_workspaces/WORKSPACE.x86_64
@@ -61,20 +61,16 @@ new_local_repository(
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
# for both versions here and do not use --config=pre-cxx11-abi
-http_archive(
+new_local_repository(
name = "libtorch",
- build_file = "@//third_party/libtorch:BUILD",
- sha256 = "5ba55259b65e071346a2b547b8d1378595f1467a39aaa923fecb09f134f1bcba",
- strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
+ path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
+ build_file = "third_party/libtorch/BUILD"
)
-http_archive(
+new_local_repository(
name = "libtorch_pre_cxx11_abi",
- build_file = "@//third_party/libtorch:BUILD",
- sha256 = "8f6661bfc11597e77400e9e36cc8dd8e5e385ba82361d630d9ccede8518d7c7e",
- strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu121/libtorch-shared-with-deps-2.1.0.dev20230619%2Bcu121.zip"],
+ path = "/opt/circleci/.pyenv/versions/3.9.4/lib/python3.9/site-packages/torch",
+ build_file = "third_party/libtorch/BUILD"
)
new_local_repository(