From aaa813ed4cba070e61023ec5f960cc016b5d3a4b Mon Sep 17 00:00:00 2001 From: Kibae Shin Date: Sun, 24 Nov 2024 19:06:05 +0900 Subject: [PATCH 1/2] ci: 1.20.1 release --- README.md | 20 +++++++++++++------- deploy/build-docker/README.md | 2 +- deploy/build-docker/VERSION | 2 +- deploy/build-docker/docker-compose.yaml | 4 ++-- docs/docker.md | 12 ++++++------ docs/swagger/openapi.yaml | 2 +- src/test/test_lib_version.cpp | 2 +- 7 files changed, 25 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index f22b71a..9c93510 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # ONNX Runtime Server -[![ONNX Runtime](https://img.shields.io/github/v/release/microsoft/onnxruntime?filter=v1.20.0&label=ONNX%20Runtime)](https://github.com/microsoft/onnxruntime) +[![ONNX Runtime](https://img.shields.io/github/v/release/microsoft/onnxruntime?filter=v1.20.1&label=ONNX%20Runtime)](https://github.com/microsoft/onnxruntime) [![CMake on Linux](https://github.com/kibae/onnxruntime-server/actions/workflows/cmake-linux.yml/badge.svg)](https://github.com/kibae/onnxruntime-server/actions/workflows/cmake-linux.yml) [![CMake on MacOS](https://github.com/kibae/onnxruntime-server/actions/workflows/cmake-macos.yml/badge.svg)](https://github.com/kibae/onnxruntime-server/actions/workflows/cmake-macos.yml) [![License](https://img.shields.io/github/license/kibae/onnxruntime-server)](https://github.com/kibae/onnxruntime-server/blob/main/LICENSE) @@ -68,9 +68,15 @@ brew install onnxruntime #### Ubuntu/Debian ```shell -sudo apt install cmake pkg-config libboost-all-dev libssl-dev -# optional, for Nvidia GPU support -sudo apt install nvidia-cuda-toolkit nvidia-cudnn +sudo apt install cmake pkg-config libboost-all-dev libssl-dev +``` + +##### (optional) CUDA support (CUDA 12.x, cuDNN 9.x) +- Follow the instructions below to install the CUDA Toolkit and cuDNN. 
+ - [CUDA Toolkit Installation Guide](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) + - [CUDA Download for Ubuntu](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04&target_type=deb_network) +```shell +sudo apt install cuda-toolkit-12 libcudnn9-dev-cuda-12 # optional, for Nvidia GPU support with Docker sudo apt install nvidia-container-toolkit ``` @@ -158,11 +164,11 @@ sudo cmake --install build --prefix /usr/local/onnxruntime-server # Docker - Docker hub: [kibaes/onnxruntime-server](https://hub.docker.com/r/kibaes/onnxruntime-server) - - [`1.20.0-linux-cuda12`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cuda12.dockerfile) amd64(CUDA 12.x, cuDNN 9.x) - - [`1.20.0-linux-cpu`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cpu.dockerfile) amd64, arm64 + - [`1.20.1-linux-cuda12`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cuda12.dockerfile) amd64(CUDA 12.x, cuDNN 9.x) + - [`1.20.1-linux-cpu`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cpu.dockerfile) amd64, arm64 ```shell -DOCKER_IMAGE=kibae/onnxruntime-server:1.20.0-linux-cuda12 # or kibae/onnxruntime-server:1.20.0-linux-cpu +DOCKER_IMAGE=kibaes/onnxruntime-server:1.20.1-linux-cuda12 # or kibaes/onnxruntime-server:1.20.1-linux-cpu docker pull ${DOCKER_IMAGE} diff --git a/deploy/build-docker/README.md b/deploy/build-docker/README.md index f161dca..17ac8d6 100644 --- a/deploy/build-docker/README.md +++ b/deploy/build-docker/README.md @@ -2,7 +2,7 @@ ## x64 with CUDA -- [ONNX Runtime Binary](https://github.com/microsoft/onnxruntime/releases) v1.20.0(latest) requires CUDA 11/12, cudnn 8/9. +- [ONNX Runtime Binary](https://github.com/microsoft/onnxruntime/releases) v1.20.1(latest) requires CUDA 11/12, cudnn 8/9. 
``` $ ldd libonnxruntime_providers_cuda.so linux-vdso.so.1 (0x00007fffa4bf8000) diff --git a/deploy/build-docker/VERSION b/deploy/build-docker/VERSION index df41292..269ea57 100644 --- a/deploy/build-docker/VERSION +++ b/deploy/build-docker/VERSION @@ -1,2 +1,2 @@ -export VERSION=1.20.0 +export VERSION=1.20.1 export IMAGE_PREFIX=kibaes/onnxruntime-server diff --git a/deploy/build-docker/docker-compose.yaml b/deploy/build-docker/docker-compose.yaml index 12df14a..5d4a748 100644 --- a/deploy/build-docker/docker-compose.yaml +++ b/deploy/build-docker/docker-compose.yaml @@ -5,7 +5,7 @@ services: onnxruntime_server_simple: # After the docker container is up, you can use the REST API (http://localhost:8080). # API documentation will be available at http://localhost:8080/api-docs. - image: kibaes/onnxruntime-server:1.20.0-linux-cuda12 + image: kibaes/onnxruntime-server:1.20.1-linux-cuda12 ports: - "8080:80" # for http backend volumes: @@ -29,7 +29,7 @@ services: onnxruntime_server_advanced: # After the docker container is up, you can use the REST API (http://localhost, https://localhost). # API documentation will be available at http://localhost/api-docs. 
- image: kibaes/onnxruntime-server:1.20.0-linux-cuda12 + image: kibaes/onnxruntime-server:1.20.1-linux-cuda12 ports: - "80:80" # for http backend - "443:443" # for https backend diff --git a/docs/docker.md b/docs/docker.md index f983414..690f1d1 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -5,8 +5,8 @@ # Supported tags and respective Dockerfile links -- [`1.20.0-linux-cuda12`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cuda12.dockerfile) amd64(CUDA 12.x, cuDNN 9.x) -- [`1.20.0-linux-cpu`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cpu.dockerfile) amd64, arm64 +- [`1.20.1-linux-cuda12`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cuda12.dockerfile) amd64(CUDA 12.x, cuDNN 9.x) +- [`1.20.1-linux-cpu`](https://github.com/kibae/onnxruntime-server/blob/main/deploy/build-docker/linux-cpu.dockerfile) amd64, arm64 # How to use this image @@ -28,7 +28,7 @@ - API documentation will be available at http://localhost/api-docs. ```shell -DOCKER_IMAGE=kibae/onnxruntime-server:1.20.0-linux-cuda12 # or kibae/onnxruntime-server:1.20.0-linux-cpu +DOCKER_IMAGE=kibaes/onnxruntime-server:1.20.1-linux-cuda12 # or kibaes/onnxruntime-server:1.20.1-linux-cpu docker pull ${DOCKER_IMAGE} @@ -69,7 +69,7 @@ services: onnxruntime_server_simple: # After the docker container is up, you can use the REST API (http://localhost:8080). # API documentation will be available at http://localhost:8080/api-docs. - image: kibaes/onnxruntime-server:1.20.0-linux-cuda12 + image: kibaes/onnxruntime-server:1.20.1-linux-cuda12 ports: - "8080:80" # for http backend volumes: @@ -100,8 +100,8 @@ services: onnxruntime_server_advanced: # After the docker container is up, you can use the REST API (http://localhost, https://localhost). - # API documentation will be available at http://localhost/api-docs. 
- image: kibaes/onnxruntime-server:1.20.0-linux-cuda12 + # API documentation will be available at http://localhost/api-docs. + image: kibaes/onnxruntime-server:1.20.1-linux-cuda12 ports: - "80:80" # for http backend - "443:443" # for https backend diff --git a/docs/swagger/openapi.yaml b/docs/swagger/openapi.yaml index 4e952ba..19e2bca 100644 --- a/docs/swagger/openapi.yaml +++ b/docs/swagger/openapi.yaml @@ -2,7 +2,7 @@ openapi: 3.0.3 info: title: ONNX Runtime Server description: |- - version: 1.20.0 + version: 1.20.1 externalDocs: description: ONNX Runtime Server url: https://github.com/kibae/onnxruntime-server diff --git a/src/test/test_lib_version.cpp b/src/test/test_lib_version.cpp index aa8a649..2f8b4a1 100644 --- a/src/test/test_lib_version.cpp +++ b/src/test/test_lib_version.cpp @@ -6,5 +6,5 @@ #include "./test_common.hpp" TEST(test_lib_version, LibVersion) { - EXPECT_EQ(onnxruntime_server::onnx::version(), "1.20.0"); + EXPECT_EQ(onnxruntime_server::onnx::version(), "1.20.1"); } From 670e1d681e30684215cd507d49e6b734353860b8 Mon Sep 17 00:00:00 2001 From: Kibae Shin Date: Sun, 24 Nov 2024 20:01:01 +0900 Subject: [PATCH 2/2] ci: 1.20.1 release --- deploy/build-docker/linux-cpu.dockerfile | 2 +- deploy/build-docker/linux-cuda12.dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/build-docker/linux-cpu.dockerfile b/deploy/build-docker/linux-cpu.dockerfile index 1da0ff7..eee8778 100644 --- a/deploy/build-docker/linux-cpu.dockerfile +++ b/deploy/build-docker/linux-cpu.dockerfile @@ -17,7 +17,7 @@ RUN case ${TARGETPLATFORM} in \ esac RUN cmake -DBoost_USE_STATIC_LIBS=ON -DOPENSSL_USE_STATIC_LIBS=ON -B build -S . 
-DCMAKE_BUILD_TYPE=Release -RUN cmake --build build --parallel 4 --target onnxruntime_server_standalone +RUN cmake --build build --parallel 8 --target onnxruntime_server_standalone RUN cmake --install build --prefix /app/onnxruntime-server # target diff --git a/deploy/build-docker/linux-cuda12.dockerfile b/deploy/build-docker/linux-cuda12.dockerfile index c78a89f..a795819 100644 --- a/deploy/build-docker/linux-cuda12.dockerfile +++ b/deploy/build-docker/linux-cuda12.dockerfile @@ -16,7 +16,7 @@ RUN case ${TARGETPLATFORM} in \ esac RUN cmake -DCUDA_SDK_ROOT_DIR=/usr/local/cuda-12 -DBoost_USE_STATIC_LIBS=ON -DOPENSSL_USE_STATIC_LIBS=ON -B build -S . -DCMAKE_BUILD_TYPE=Release -RUN cmake --build build --parallel 4 --target onnxruntime_server_standalone +RUN cmake --build build --parallel 8 --target onnxruntime_server_standalone RUN cmake --install build --prefix /app/onnxruntime-server # target