Skip to content

Commit

Permalink
Support ONNX model caching (#13780)
Browse files · Browse the repository at this point in the history
* Support model caching

* Cleanup
  • Loading branch information
NickM-27 committed Sep 17, 2024
1 parent 4fc8d33 commit 36d7eb7
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 3 deletions.
5 changes: 4 additions & 1 deletion docker/tensorrt/Dockerfile.amd64
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
# https://askubuntu.com/questions/972516/debian-frontend-environment-variable
ARG DEBIAN_FRONTEND=noninteractive

ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3

# Make this a separate target so it can be built/cached optionally
FROM wheels as trt-wheels
ARG DEBIAN_FRONTEND
Expand All @@ -13,7 +15,7 @@ COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt

# Build CuDNN
FROM tensorrt-base AS cudnn-deps
FROM ${TRT_BASE} AS cudnn-deps

ARG COMPUTE_LEVEL

Expand All @@ -31,6 +33,7 @@ ENV TRT_VER=8.5.3
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
pip3 install -U /deps/trt-wheels/*.whl && \
ldconfig
COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda

ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
Expand Down
2 changes: 1 addition & 1 deletion docker/tensorrt/requirements-amd64.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ cython == 0.29.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.5.0.*; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
onnxruntime-gpu==1.17.*; platform_machine == 'x86_64'
Expand Down
23 changes: 22 additions & 1 deletion frigate/detectors/plugins/onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,28 @@ def __init__(self, detector_config: ONNXDetectorConfig):

path = detector_config.model.path
logger.info(f"ONNX: loading {detector_config.model.path}")
self.model = ort.InferenceSession(path, providers=ort.get_available_providers())

providers = ort.get_available_providers()
options = []

for provider in providers:
if provider == "TensorrtExecutionProvider":
options.append(
{
"trt_timing_cache_enable": True,
"trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
"trt_engine_cache_enable": True,
"trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
}
)
elif provider == "OpenVINOExecutionProvider":
options.append({"cache_dir": "/config/model_cache/openvino/ort"})
else:
options.append({})

self.model = ort.InferenceSession(
path, providers=providers, provider_options=options
)

self.h = detector_config.model.height
self.w = detector_config.model.width
Expand Down

0 comments on commit 36d7eb7

Please sign in to comment.