forked from triton-inference-server/server
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Dockerfile.QA
113 lines (101 loc) · 5.62 KB
/
Dockerfile.QA
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Multistage build.
#
# Base image for the final CI image and the already-built server image the
# QA artifacts are harvested from. Both default to locally-built tags.
ARG BASE_IMAGE=tensorrtserver
ARG BUILD_IMAGE=tensorrtserver_build

############################################################################
## Build necessary artifacts needed for CI and initialize the qa/ directory.
############################################################################
FROM ${BUILD_IMAGE} AS trtserver_qa
WORKDIR /workspace

# Stage everything CI needs under qa/: python client examples, compiled
# client binaries, the python wheel, and the model repositories used by the
# L0_* sanity tests. Every destination directory is created up front with a
# single 'mkdir -p' (including qa/common, which previously had to already
# exist in the build image for the caffe2plan copy to succeed).
RUN mkdir -p qa/clients qa/common qa/pkgs && \
    cp src/clients/python/grpc_image_client.py qa/clients/. && \
    cp src/clients/python/image_client.py qa/clients/. && \
    cp src/clients/python/simple_client.py qa/clients/. && \
    cp src/clients/python/simple_string_client.py qa/clients/. && \
    cp src/clients/python/simple_sequence_client.py qa/clients/. && \
    cp /opt/tensorrtserver/bin/image_client qa/clients/. && \
    cp /opt/tensorrtserver/bin/perf_client qa/clients/. && \
    cp /opt/tensorrtserver/bin/simple_client qa/clients/. && \
    cp /opt/tensorrtserver/bin/simple_string_client qa/clients/. && \
    cp /opt/tensorrtserver/bin/simple_sequence_client qa/clients/. && \
    cp /opt/tensorrtserver/bin/caffe2plan qa/common/. && \
    cp /opt/tensorrtserver/pip/tensorrtserver*.whl qa/pkgs/. && \
    # Example model repositories for the simple-client smoke tests.
    mkdir -p qa/L0_simple_example/models && \
    cp -r docs/examples/model_repository/simple qa/L0_simple_example/models/. && \
    mkdir -p qa/L0_simple_string_example/models && \
    cp -r docs/examples/model_repository/simple_string qa/L0_simple_string_example/models/. && \
    # Custom-backend shared libraries installed as model version 1.
    mkdir -p qa/L0_simple_custom_example/models/simple/1 && \
    cp /opt/tensorrtserver/custom/libaddsub.so qa/L0_simple_custom_example/models/simple/1/. && \
    mkdir -p qa/L0_simple_sequence_example/models/simple_sequence/1 && \
    cp /opt/tensorrtserver/custom/libsequence.so qa/L0_simple_sequence_example/models/simple_sequence/1/. && \
    # The addsub custom backend doubles as four differently-typed test models.
    mkdir -p qa/custom_models/custom_int32_int32_int32/1 && \
    cp /opt/tensorrtserver/custom/libaddsub.so qa/custom_models/custom_int32_int32_int32/1/. && \
    mkdir -p qa/custom_models/custom_nobatch_int32_int32_int32/1 && \
    cp /opt/tensorrtserver/custom/libaddsub.so qa/custom_models/custom_nobatch_int32_int32_int32/1/. && \
    mkdir -p qa/custom_models/custom_float32_float32_float32/1 && \
    cp /opt/tensorrtserver/custom/libaddsub.so qa/custom_models/custom_float32_float32_float32/1/. && \
    mkdir -p qa/custom_models/custom_nobatch_float32_float32_float32/1 && \
    cp /opt/tensorrtserver/custom/libaddsub.so qa/custom_models/custom_nobatch_float32_float32_float32/1/.
############################################################################
## Create CI enabled image
############################################################################
FROM ${BASE_IMAGE}

# Python minor version to install and make the system default (e.g. 3.5).
ARG PYVER=3.5

# Test-time OS dependencies. 'update' and 'install' share one layer so the
# package index can never be stale relative to the install, and the apt
# lists are removed in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
        jmeter \
        jmeter-http \
        libcurl3 \
        libopencv-dev \
        libopencv-core-dev \
        libpng12-dev \
        libzmq3-dev \
        python$PYVER \
        python$PYVER-dev \
        python$PYVER-numpy \
        python`echo $PYVER | cut -c1-1`-pil \
        python-protobuf \
        swig && \
    rm -rf /var/lib/apt/lists/*

# Make the PYVER interpreter answer to both 'python' and 'pythonN'
# (N = major version extracted from PYVER).
RUN rm -f /usr/bin/python && \
    rm -f /usr/bin/python`echo $PYVER | cut -c1-1` && \
    ln -s /usr/bin/python$PYVER /usr/bin/python && \
    ln -s /usr/bin/python$PYVER /usr/bin/python`echo $PYVER | cut -c1-1`

# Bootstrap pip for the selected interpreter. '-f' makes curl fail on an
# HTTP error instead of saving the error page as get-pip.py; '-L' follows
# redirects. NOTE(review): the download is not checksum-verified — consider
# pinning get-pip.py to a known sha256.
RUN curl -fsSL -O https://bootstrap.pypa.io/get-pip.py && \
    python$PYVER get-pip.py && \
    rm get-pip.py

# --no-cache-dir keeps pip's download cache out of the image layer.
RUN pip install --upgrade --no-cache-dir numpy future grpcio

# CI expects tests in /opt/tensorrtserver/qa
WORKDIR /opt/tensorrtserver
COPY --from=trtserver_qa /workspace/qa/ qa/

# Remove CI tests that are meant to run only on build image and
# install the tensorrtserver python client APIs.
RUN rm -fr qa/L0_copyrights qa/L0_unit_test qa/L1_tfs_unit_test && \
    pip install --upgrade --no-cache-dir qa/pkgs/tensorrtserver-*.whl

# Persist the python version for test scripts that read $PYVER at runtime.
ENV PYVER=${PYVER}