diff --git a/docker/CPU/Dockerfile b/docker/CPU/Dockerfile
new file mode 100644
index 0000000000..43de6b1fcd
--- /dev/null
+++ b/docker/CPU/Dockerfile
@@ -0,0 +1,105 @@
+FROM openvino/ubuntu18_dev:2021.4.2
+ARG PYTHON_VERSION=3.7
+ARG TORCH_VERSION=1.8.0
+ARG TORCHVISION_VERSION=0.9.0
+ARG ONNXRUNTIME_VERSION=1.8.1
+ARG MMCV_VERSION=1.4.0
+ARG CMAKE_VERSION=3.20.0
+USER root
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    ca-certificates \
+    libopencv-dev libspdlog-dev \
+    gnupg \
+    libssl-dev \
+    libprotobuf-dev protobuf-compiler \
+    build-essential \
+    libjpeg-dev \
+    libpng-dev \
+    ccache \
+    cmake \
+    gcc \
+    g++ \
+    git \
+    vim \
+    wget \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN curl -fsSL -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
+    chmod +x ~/miniconda.sh && \
+    ~/miniconda.sh -b -p /opt/conda && \
+    rm ~/miniconda.sh && \
+    /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \
+    /opt/conda/bin/conda clean -ya
+
+### pytorch
+RUN /opt/conda/bin/pip install torch==${TORCH_VERSION}+cpu torchvision==${TORCHVISION_VERSION}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html
+ENV PATH=/opt/conda/bin:$PATH
+
+### install mmcv-full
+RUN /opt/conda/bin/pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cpu/torch${TORCH_VERSION}/index.html
+
+WORKDIR /root/workspace
+
+### get onnxruntime
+RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \
+    && tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz
+
+ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
+
+### update cmake to 3.20
+RUN wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz &&\
+    tar -zxvf cmake-${CMAKE_VERSION}.tar.gz &&\
+    cd cmake-${CMAKE_VERSION} &&\
+    ./bootstrap &&\
+    make &&\
+    make install
+
+### install onnxruntime and openvino
+RUN /opt/conda/bin/pip install onnxruntime==${ONNXRUNTIME_VERSION} openvino-dev
+
+### build ncnn
+RUN git clone https://github.com/Tencent/ncnn.git &&\
+    cd ncnn &&\
+    export NCNN_DIR=$(pwd) &&\
+    git submodule update --init &&\
+    mkdir -p build && cd build &&\
+    cmake -DNCNN_VULKAN=OFF -DNCNN_SYSTEM_GLSLANG=ON -DNCNN_BUILD_EXAMPLES=ON -DNCNN_PYTHON=ON -DNCNN_BUILD_TOOLS=ON -DNCNN_BUILD_BENCHMARK=ON -DNCNN_BUILD_TESTS=ON .. &&\
+    make install &&\
+    cd /root/workspace/ncnn/python &&\
+    pip install -e .
+
+### install mmdeploy
+WORKDIR /root/workspace
+ARG VERSION
+RUN git clone https://github.com/open-mmlab/mmdeploy.git &&\
+    cd mmdeploy &&\
+    if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on master" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
+    git submodule update --init --recursive &&\
+    rm -rf build &&\
+    mkdir build &&\
+    cd build &&\
+    cmake -DMMDEPLOY_TARGET_BACKENDS=ncnn -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn .. &&\
+    make -j$(nproc) &&\
+    cmake -DMMDEPLOY_TARGET_BACKENDS=ort .. &&\
+    make -j$(nproc) &&\
+    cd .. &&\
+    pip install -e .
+
+### build SDK
+ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:/opt/intel/openvino/deployment_tools/ngraph/lib:/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64:${LD_LIBRARY_PATH}"
+RUN cd mmdeploy && rm -rf build/CM* && mkdir -p build && cd build && cmake .. \
+    -DMMDEPLOY_BUILD_SDK=ON \
+    -DCMAKE_CXX_COMPILER=g++-7 \
+    -DONNXRUNTIME_DIR=${ONNXRUNTIME_DIR} \
+    -Dncnn_DIR=/root/workspace/ncnn/build/install/lib/cmake/ncnn \
+    -DInferenceEngine_DIR=/opt/intel/openvino/deployment_tools/inference_engine/share \
+    -DMMDEPLOY_TARGET_DEVICES=cpu \
+    -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
+    -DMMDEPLOY_TARGET_BACKENDS="ort;ncnn;openvino" \
+    -DMMDEPLOY_CODEBASES=all &&\
+    cmake --build . -- -j$(nproc) && cmake --install . &&\
+    cd install/example && mkdir -p build && cd build &&\
+    cmake -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy .. &&\
+    cmake --build . && export SPDLOG_LEVEL=warn &&\
+    if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for CPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for CPU devices successfully!" ; fi
diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile
new file mode 100644
index 0000000000..60d9c75ea1
--- /dev/null
+++ b/docker/GPU/Dockerfile
@@ -0,0 +1,90 @@
+FROM nvcr.io/nvidia/tensorrt:21.04-py3
+
+ARG CUDA=10.2
+ARG PYTHON_VERSION=3.8
+ARG TORCH_VERSION=1.8.0
+ARG TORCHVISION_VERSION=0.9.0
+ARG ONNXRUNTIME_VERSION=1.8.1
+ARG MMCV_VERSION=1.4.0
+ARG CMAKE_VERSION=3.20.0
+ENV FORCE_CUDA="1"
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+### update apt and install libs
+RUN apt-get update &&\
+    apt-get install -y vim libsm6 libxext6 libxrender-dev libgl1-mesa-glx git wget libssl-dev libopencv-dev libspdlog-dev --no-install-recommends &&\
+    rm -rf /var/lib/apt/lists/*
+
+RUN curl -fsSL -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
+    chmod +x ~/miniconda.sh && \
+    ~/miniconda.sh -b -p /opt/conda && \
+    rm ~/miniconda.sh && \
+    /opt/conda/bin/conda install -y python=${PYTHON_VERSION} conda-build pyyaml numpy ipython cython typing typing_extensions mkl mkl-include ninja && \
+    /opt/conda/bin/conda clean -ya
+
+### pytorch
+RUN /opt/conda/bin/conda install pytorch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} cudatoolkit=${CUDA} -c pytorch
+ENV PATH=/opt/conda/bin:$PATH
+
+### install mmcv-full
+RUN /opt/conda/bin/pip install mmcv-full==${MMCV_VERSION} -f https://download.openmmlab.com/mmcv/dist/cu$(echo ${CUDA} | tr -d '.')/torch${TORCH_VERSION}/index.html
+
+WORKDIR /root/workspace
+### get onnxruntime
+RUN wget https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \
+    && tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz &&\
+    pip install onnxruntime-gpu==${ONNXRUNTIME_VERSION}
+
+### cp trt from pip to conda
+RUN cp -r /usr/local/lib/python${PYTHON_VERSION}/dist-packages/tensorrt* /opt/conda/lib/python${PYTHON_VERSION}/site-packages/
+
+### update cmake
+RUN wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz &&\
+    tar -zxvf cmake-${CMAKE_VERSION}.tar.gz &&\
+    cd cmake-${CMAKE_VERSION} &&\
+    ./bootstrap &&\
+    make &&\
+    make install
+
+### install mmdeploy
+ENV ONNXRUNTIME_DIR=/root/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
+ENV TENSORRT_DIR=/workspace/tensorrt
+ARG VERSION
+RUN git clone https://github.com/open-mmlab/mmdeploy &&\
+    cd mmdeploy &&\
+    if [ -z ${VERSION} ] ; then echo "No MMDeploy version passed in, building on master" ; else git checkout tags/v${VERSION} -b tag_v${VERSION} ; fi &&\
+    git submodule update --init --recursive &&\
+    rm -rf build &&\
+    mkdir build &&\
+    cd build &&\
+    cmake -DMMDEPLOY_TARGET_BACKENDS=ort .. &&\
+    make -j$(nproc) &&\
+    cmake -DMMDEPLOY_TARGET_BACKENDS=trt .. &&\
+    make -j$(nproc) &&\
+    cd .. &&\
+    pip install -e .
+
+### build sdk
+RUN git clone https://github.com/openppl-public/ppl.cv.git &&\
+    cd ppl.cv &&\
+    ./build.sh cuda
+RUN cd /root/workspace/mmdeploy &&\
+    rm -rf build/CM* &&\
+    mkdir -p build && cd build &&\
+    cmake .. \
+    -DMMDEPLOY_BUILD_SDK=ON \
+    -DCMAKE_CXX_COMPILER=g++ \
+    -Dpplcv_DIR=/root/workspace/ppl.cv/cuda-build/install/lib/cmake/ppl \
+    -DTENSORRT_DIR=${TENSORRT_DIR} \
+    -DMMDEPLOY_BUILD_SDK_PYTHON_API=ON \
+    -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
+    -DMMDEPLOY_TARGET_BACKENDS="trt" \
+    -DMMDEPLOY_CODEBASES=all &&\
+    cmake --build . -- -j$(nproc) && cmake --install . &&\
+    cd install/example && mkdir -p build && cd build &&\
+    cmake -DMMDeploy_DIR=/root/workspace/mmdeploy/build/install/lib/cmake/MMDeploy .. &&\
+    cmake --build . && export SPDLOG_LEVEL=warn &&\
+    if [ -z ${VERSION} ] ; then echo "Built MMDeploy master for GPU devices successfully!" ; else echo "Built MMDeploy version v${VERSION} for GPU devices successfully!" ; fi
+
+ENV LD_LIBRARY_PATH="/root/workspace/mmdeploy/build/lib:${LD_LIBRARY_PATH}"
diff --git a/docs/en/build.md b/docs/en/build.md
index 9ed5364a51..7c7fff77dc 100644
--- a/docs/en/build.md
+++ b/docs/en/build.md
@@ -1,5 +1,8 @@
 ## Build MMDeploy
 
+We provide building methods for both physical and virtual machines. For virtual machine building methods, please refer to
+[how to use docker](tutorials/how_to_use_docker.md). For physical machines, please follow the steps below.
+
 ### Preparation
 
 - Download MMDeploy
@@ -209,7 +212,7 @@ Each package's installation command is given based on Ubuntu 18.04.
   cmake .. \
       -DMMDEPLOY_BUILD_SDK=ON \
       -DCMAKE_CXX_COMPILER=g++-7 \
-      -Dpplcv_DIR=/path/to/ppl.cv/install/lib/cmake/ppl \
+      -Dpplcv_DIR=/path/to/ppl.cv/cuda-build/install/lib/cmake/ppl \
      -DTENSORRT_DIR=/path/to/tensorrt \
      -DCUDNN_DIR=/path/to/cudnn \
      -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
diff --git a/docs/en/index.rst b/docs/en/index.rst
index da96013832..bc7e10d91f 100644
--- a/docs/en/index.rst
+++ b/docs/en/index.rst
@@ -23,6 +23,8 @@ You can switch between Chinese and English documents in the lower-left corner of
    tutorials/how_to_support_new_backends.md
    tutorials/how_to_add_test_units_for_backend_ops.md
    tutorials/how_to_test_rewritten_models.md
+   tutorials/how_to_use_docker.md
+   tutorials/how_to_write_config.md
    tutorials/how_to_install_mmdeploy_on_jetsons.md
 
 .. toctree::
diff --git a/docs/en/tutorials/how_to_use_docker.md b/docs/en/tutorials/how_to_use_docker.md
new file mode 100644
index 0000000000..2294baf273
--- /dev/null
+++ b/docs/en/tutorials/how_to_use_docker.md
@@ -0,0 +1,45 @@
+## Docker usage
+
+We provide two dockerfiles for CPU and GPU respectively. For CPU users, we install MMDeploy with ONNXRuntime, ncnn and OpenVINO backends. For GPU users, we install MMDeploy with TensorRT backend. Besides, users can install mmdeploy with different versions when building the docker image.
+
+### Build docker image
+
+For CPU users, we can build the docker image with the latest MMDeploy through:
+```
+cd mmdeploy
+docker build docker/CPU/ -t mmdeploy:master-cpu
+```
+For GPU users, we can build the docker image with the latest MMDeploy through:
+```
+cd mmdeploy
+docker build docker/GPU/ -t mmdeploy:master-gpu
+```
+
+For installing MMDeploy with a specific version, we can append `--build-arg VERSION=${VERSION}` to the build command. GPU for example:
+```
+cd mmdeploy
+docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0
+```
+
+### Run docker container
+
+After the docker image is built successfully, we can use `docker run` to launch the docker service. GPU docker image for example:
+```
+docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu
+```
+
+### FAQs
+
+1. CUDA error: the provided PTX was compiled with an unsupported toolchain:
+
+    As described [here](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754), update the GPU driver to the latest one for your GPU.
+2. docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].
+    ```
+    # Add the package repositories
+    distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
+    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
+    curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
+
+    sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
+    sudo systemctl restart docker
+    ```
diff --git a/docs/zh_cn/build.md b/docs/zh_cn/build.md
index c0056e9a02..e83e369131 100644
--- a/docs/zh_cn/build.md
+++ b/docs/zh_cn/build.md
@@ -1,5 +1,7 @@
 ## 安装 MMdeploy
 
+我们提供物理机和虚拟机构建方法。虚拟机搭建方法请参考[如何使用docker](tutorials/how_to_use_docker.md)。对于物理机,请按照以下步骤操作。
+
 ### 准备工作
 
 - 下载代码仓库 MMDeploy
@@ -205,7 +207,7 @@ pip install -e .
   cmake .. \
       -DMMDEPLOY_BUILD_SDK=ON \
       -DCMAKE_CXX_COMPILER=g++-7 \
-      -Dpplcv_DIR=/path/to/ppl.cv/install/lib/cmake/ppl \
+      -Dpplcv_DIR=/path/to/ppl.cv/cuda-build/install/lib/cmake/ppl \
      -DTENSORRT_DIR=/path/to/tensorrt \
      -DCUDNN_DIR=/path/to/cudnn \
      -DMMDEPLOY_TARGET_DEVICES="cuda;cpu" \
diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst
index e5e065a04d..e06415131c 100644
--- a/docs/zh_cn/index.rst
+++ b/docs/zh_cn/index.rst
@@ -15,6 +15,7 @@
    :caption: 教程
 
    tutorials/how_to_convert_model.md
+   tutorials/how_to_use_docker.md
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/zh_cn/tutorials/how_to_use_docker.md b/docs/zh_cn/tutorials/how_to_use_docker.md
new file mode 100644
index 0000000000..6a7eb10670
--- /dev/null
+++ b/docs/zh_cn/tutorials/how_to_use_docker.md
@@ -0,0 +1,46 @@
+## Docker的使用
+
+我们分别为 CPU 和 GPU 提供了两个 dockerfile。对于 CPU 用户,我们对接 ONNXRuntime、ncnn 和 OpenVINO 后端安装 MMDeploy。对于 GPU 用户,我们安装带有 TensorRT 后端的 MMDeploy。此外,用户可以在构建 docker 镜像时安装不同版本的 mmdeploy。
+
+### 构建镜像
+
+对于 CPU 用户,我们可以通过以下方式使用最新的 MMDeploy 构建 docker 镜像:
+```
+cd mmdeploy
+docker build docker/CPU/ -t mmdeploy:master-cpu
+```
+对于 GPU 用户,我们可以通过以下方式使用最新的 MMDeploy 构建 docker 镜像:
+```
+cd mmdeploy
+docker build docker/GPU/ -t mmdeploy:master-gpu
+```
+
+要安装具有特定版本的 MMDeploy,我们可以将 `--build-arg VERSION=${VERSION}` 附加到构建命令中。以 GPU 为例:
+```
+cd mmdeploy
+docker build docker/GPU/ -t mmdeploy:0.1.0 --build-arg VERSION=0.1.0
+```
+
+### 运行 docker 容器
+
+构建 docker 镜像成功后,我们可以使用 `docker run` 启动 docker 服务。以 GPU 镜像为例:
+```
+docker run --gpus all -it -p 8080:8081 mmdeploy:master-gpu
+```
+
+### 常见问答
+
+1. CUDA error: the provided PTX was compiled with an unsupported toolchain:
+
+    如 [这里](https://forums.developer.nvidia.com/t/cuda-error-the-provided-ptx-was-compiled-with-an-unsupported-toolchain/185754)所说,更新 GPU 的驱动到你的GPU能使用的最新版本。
+
+2. docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].
+    ```
+    # Add the package repositories
+    distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
+    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
+    curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
+
+    sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
+    sudo systemctl restart docker
+    ```