support for OpenVINO R4 release (#10)
Added support for DLDT API changes in the R4 release;
it is not compatible with R3 and earlier versions.
dtrawins authored Jan 18, 2019
1 parent e5d7ab9 commit 1d12f4a
Showing 24 changed files with 164 additions and 218 deletions.
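For context, the core of the incompatibility is a DLDT Python API change: in 2018 R3, `net.inputs` mapped input names to objects carrying a `.shape` attribute, while in 2018 R4 it maps input names directly to shape lists, and `net.outputs` is handled the same way. A minimal sketch of the difference, using placeholder model files (the `ie_serving/models/ir_engine.py` diff below makes the corresponding change):

```python
from openvino.inference_engine import IENetwork

# Placeholder IR files; any model converted with the Model Optimizer would do.
net = IENetwork.from_ir(model='model.xml', weights='model.bin')

# 2018 R3 and earlier: the shape lived on an info object.
# input_blob = next(iter(net.inputs))
# batch_size = net.inputs[input_blob].shape[0]

# 2018 R4: net.inputs maps names straight to shape lists.
batch_size = list(net.inputs.values())[0][0]
print(batch_size)
```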
33 changes: 9 additions & 24 deletions .circleci/config.yml
@@ -11,11 +11,11 @@ jobs:
     docker:
       - image: ubuntu:16.04
         environment:
-          PYTHONPATH: /opt/intel/computer_vision_sdk/python/python3.5
+          PYTHONPATH: /opt/intel/computer_vision_sdk/python/python3.5/ubuntu16
           LD_LIBRARY_PATH: >-
-            /opt/intel/computer_vision_sdk/deployment_tools/inference_engine/external/cldnn/lib
-            :/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/external/gna/lib
+            /opt/intel/computer_vision_sdk/deployment_tools/inference_engine/external/gna/lib
             :/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/external/mkltiny_lnx/lib
+            :/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/external/omp/lib
             :/opt/intel/computer_vision_sdk/deployment_tools/inference_engine/lib/ubuntu_16.04/intel64
     environment:
       TEMP_DIR: /tmp/openvino_installer
@@ -27,13 +27,13 @@ jobs:
             mkdir -p $TEMP_DIR &&
             cd $TEMP_DIR &&
             apt-get update &&
-            apt-get install -y --no-install-recommends cpio wget cpio cmake sudo python3-pip python3-venv python3-setuptools virtualenv build-essential &&
-            wget -c $OPENVINO_DOWNLOAD_LINK &&
+            apt-get install -y --no-install-recommends cpio wget cpio cmake sudo python3-pip python3-venv python3-dev python3-setuptools virtualenv build-essential &&
+            wget -c $OPENVINO_DOWNLOAD_LINK_R4 &&
             tar xf l_openvino_toolkit*.tgz &&
             cd l_openvino_toolkit* &&
             sed -i 's/decline/accept/g' silent.cfg &&
-            sed -i 's/COMPONENTS=DEFAULTS/COMPONENTS=;intel-ism__noarch;intel-cv-sdk-full-shared__noarch;intel-cv-sdk-full-l-setupvars__noarch;intel-cv-sdk-full-l-model-optimizer__noarch;intel-cv-sdk-full-l-inference-engine__noarch;intel-cv-sdk-full-gfx-install__noarch;intel-cv-sdk-full-shared-pset/g' silent.cfg &&
-            ./install.sh -s silent.cfg
+            sed -i 's/COMPONENTS=DEFAULTS/COMPONENTS=;intel-openvino_base__noarch;intel-dldt_base__noarch;intel-setupvars__noarch;intel-inference_engine_sdk__noarch;intel-inference_engine_rt__noarch;intel-inference_engine_cpu__noarch;intel-inference_engine_gna__noarch;intel-inference_engine_dlia__noarch;intel-openvino_base-pset/g' silent.cfg &&
+            ./install.sh -s silent.cfg --ignore-signature
       - run:
           name: Install ie-serving-py
           command: make install
@@ -51,7 +51,7 @@ jobs:
       - checkout
       - run:
           name: Build docker image
-          command: wget -c $OPENVINO_DOWNLOAD_LINK && make docker_build_bin
+          command: wget -c $OPENVINO_DOWNLOAD_LINK_R4 && make docker_build_bin
       - run:
          name: Get Google Cloud credentials
          command: echo "${GOOGLE_KEY}" | base64 -d > ${GOOGLE_APPLICATION_CREDENTIALS}
@@ -88,21 +88,6 @@ jobs:
       - run:
           name: Start functional tests
           command: pyenv global 3.5.2 && make test
-  functional_tests_clearlinux:
-    machine:
-      enabled: true
-      resource_class: medium
-    steps:
-      - checkout
-      - run:
-          name: Build docker image
-          command: make docker_build_clearlinux
-      - run:
-          name: Get Google Cloud credentials
-          command: echo "${GOOGLE_KEY}" | base64 -d > ${GOOGLE_APPLICATION_CREDENTIALS}
-      - run:
-          name: Start functional tests
-          command: pyenv global 3.5.2 && make test
 workflows:
   version: 2
   build-and-deploy:
@@ -112,4 +97,4 @@ workflows:
       - functional_tests_bin
       - functional_tests_src_ubuntu
       - functional_tests_src_intelpython
-      - functional_tests_clearlinux

37 changes: 21 additions & 16 deletions Dockerfile
@@ -34,15 +34,19 @@ RUN apt-get update && apt-get install -y \
     gstreamer1.0-plugins-base \
     libusb-1.0-0-dev \
     libopenblas-dev
-RUN curl -L -o 2018_R3.tar.gz https://github.com/opencv/dldt/archive/2018_R3.tar.gz && \
-    tar -zxf 2018_R3.tar.gz && \
-    rm 2018_R3.tar.gz && \
-    rm -Rf dldt-2018_R3/model-optimizer
-WORKDIR dldt-2018_R3/inference-engine
-RUN mkdir build && cd build && cmake -DGEMM=MKL -DENABLE_MKL_DNN=ON -DCMAKE_BUILD_TYPE=Release ..
+ARG DLDT_DIR=/dldt-2018_R4
+RUN git clone --depth=1 -b 2018_R4 https://github.com/opencv/dldt.git ${DLDT_DIR} && \
+    cd ${DLDT_DIR} && git submodule init && git submodule update --recursive && \
+    rm -Rf .git && rm -Rf model-optimizer
+
+WORKDIR ${DLDT_DIR}
+RUN curl -L -o ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz && \
+    tar -xzf ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz && rm ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz
+WORKDIR ${DLDT_DIR}/inference-engine
+RUN mkdir build && cd build && cmake -DGEMM=MKL -DMKLROOT=${DLDT_DIR}/mklml_lnx_2019.0.1.20180928 -DENABLE_MKL_DNN=ON -DCMAKE_BUILD_TYPE=Release ..
 RUN cd build && make -j4
 RUN pip3 install cython numpy && mkdir ie_bridges/python/build && cd ie_bridges/python/build && \
-    cmake -DInferenceEngine_DIR=/dldt-2018_R3/inference-engine/build -DPYTHON_EXECUTABLE=`which python3` -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.5m .. && \
+    cmake -DInferenceEngine_DIR=${DLDT_DIR}/inference-engine/build -DPYTHON_EXECUTABLE=`which python3` -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.5m .. && \
     make -j4
 
 FROM ubuntu:16.04 as PROD
@@ -54,19 +58,20 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     python3-dev \
     vim \
     virtualenv
 WORKDIR /ie-serving-py
 
-COPY --from=DEV /dldt-2018_R3/inference-engine/bin/intel64/Release/lib/*.so /usr/local/lib/
-COPY --from=DEV /dldt-2018_R3/inference-engine/ie_bridges/python/build/ /usr/local/lib/openvino/
-COPY --from=DEV /dldt-2018_R3/inference-engine/temp/mkltiny_lnx_20180511/lib/libiomp5.so /usr/local/lib/
-ENV LD_LIBRARY_PATH=/usr/local/lib
-ENV PYTHONPATH=/usr/local/lib
 COPY requirements.txt /ie-serving-py/
-RUN virtualenv -p python3 .venv && \
-    . .venv/bin/activate && pip3 --no-cache-dir install -r requirements.txt
 
 COPY start_server.sh setup.py requirements.txt version /ie-serving-py/
 COPY ie_serving /ie-serving-py/ie_serving
 
-WORKDIR /ie-serving-py
-RUN . .venv/bin/activate && pip3 install .
+
+RUN virtualenv -p python3 .venv && \
+    . .venv/bin/activate && pip3 --no-cache-dir install -r requirements.txt
+COPY --from=DEV /dldt-2018_R4/inference-engine/bin/intel64/Release/lib/*.so /usr/local/lib/
+COPY --from=DEV /dldt-2018_R4/inference-engine/ie_bridges/python/build/ /usr/local/lib/openvino/
+COPY --from=DEV /dldt-2018_R4/mklml_lnx_2019.0.1.20180928/lib/lib*.so /usr/local/lib/
+ENV LD_LIBRARY_PATH=/usr/local/lib
+ENV PYTHONPATH=/usr/local/lib
+
+RUN . .venv/bin/activate && pip3 install .
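With the DEV stage now building the R4 Python bridge and the PROD stage copying it into `/usr/local/lib`, a quick smoke test of the resulting image is to import the bindings. This is a hypothetical check, not part of the commit; it relies on the `ENV PYTHONPATH=/usr/local/lib` and `ENV LD_LIBRARY_PATH=/usr/local/lib` lines above:

```python
# Hypothetical smoke test; run inside the built container, e.g.:
#   docker run --rm ie-serving-py:latest .venv/bin/python3 -c "import openvino"
# The import fails fast if the copied ie_bridges build or its .so dependencies are missing.
from openvino.inference_engine import IENetwork, IEPlugin
print(IENetwork, IEPlugin)
```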
7 changes: 4 additions & 3 deletions Dockerfile_binary_openvino
@@ -18,14 +18,14 @@ RUN cd $TEMP_DIR && pwd && ls && \
     cd l_openvino_toolkit* && \
     sed -i 's/decline/accept/g' silent.cfg && \
     pwd | grep -q openvino_toolkit_p ; \
-    if [ $? = 0 ];then sed -i 's/COMPONENTS=DEFAULTS/COMPONENTS=;intel-ism__noarch;intel-cv-sdk-base-shared__noarch;intel-cv-sdk-base-l-setupvars__noarch;intel-cv-sdk-base-l-inference-engine__noarch;intel-cv-sdk-base-gfx-install__noarch;intel-cv-sdk-base-shared-pset/g' silent.cfg; fi && \
+    if [ $? = 0 ];then sed -i 's/COMPONENTS=DEFAULTS/COMPONENTS=;intel-openvino_base__noarch;intel-dldt_base__noarch;intel-setupvars__noarch;intel-inference_engine_sdk__noarch;intel-inference_engine_rt__noarch;intel-inference_engine_cpu__noarch;intel-inference_engine_gna__noarch;intel-inference_engine_dlia__noarch;intel-openvino_base-pset/g' silent.cfg; fi && \
     pwd | grep -q openvino_toolkit_fpga ; \
     if [ $? = 0 ];then sed -i 's/COMPONENTS=DEFAULTS/COMPONENTS=;intel-ism__noarch;intel-cv-sdk-full-shared__noarch;intel-cv-sdk-full-l-setupvars__noarch;intel-cv-sdk-full-l-inference-engine__noarch;intel-cv-sdk-full-gfx-install__noarch;intel-cv-sdk-full-shared-pset/g' silent.cfg; fi && \
-    ./install.sh -s silent.cfg && \
+    ./install.sh -s silent.cfg --ignore-signature && \
     rm -Rf $TEMP_DIR $INSTALL_DIR/install_dependencies $INSTALL_DIR/uninstall* /tmp/* $DL_INSTALL_DIR/documentation $DL_INSTALL_DIR/inference_engine/samples
 
 ENV PYTHONPATH="$INSTALL_DIR/python/python3.5/ubuntu16:$INSTALL_DIR/python/python3.5"
-ENV LD_LIBRARY_PATH="$DL_INSTALL_DIR/inference_engine/external/cldnn/lib:$DL_INSTALL_DIR/inference_engine/external/gna/lib:$DL_INSTALL_DIR/inference_engine/external/mkltiny_lnx/lib:$DL_INSTALL_DIR/inference_engine/lib/ubuntu_16.04/intel64"
+ENV LD_LIBRARY_PATH="$DL_INSTALL_DIR/inference_engine/external/gna/lib:$DL_INSTALL_DIR/inference_engine/external/mkltiny_lnx/lib:$DL_INSTALL_DIR/inference_engine/lib/ubuntu_16.04/intel64:$DL_INSTALL_DIR/inference_engine/external/omp/lib"
 
 ENV https_proxy $https_proxy
 ENV HTTPS_PROXY $https_proxy
@@ -39,3 +39,4 @@ RUN virtualenv -p python3 .venv && \
     . .venv/bin/activate && pip3 --no-cache-dir install -r requirements.txt
 
 RUN . .venv/bin/activate && pip3 install .
+
16 changes: 0 additions & 16 deletions Dockerfile_clearlinux

This file was deleted.

46 changes: 25 additions & 21 deletions Dockerfile_intelpython
@@ -32,20 +32,20 @@ RUN apt-get update && apt-get install -y \
     gstreamer1.0-plugins-base \
     libusb-1.0-0-dev \
     libopenblas-dev
-RUN curl -L -o 2018_R3.tar.gz https://github.com/opencv/dldt/archive/2018_R3.tar.gz && \
-    tar -zxf 2018_R3.tar.gz && \
-    rm 2018_R3.tar.gz && \
-    rm -Rf dldt-2018_R3/model-optimizer
-WORKDIR dldt-2018_R3/inference-engine
-RUN sed -i -e 's/get_linux_name(LINUX_OS_NAME)/#get_linux_name(LINUX_OS_NAME)/g' cmake/check_features.cmake && \
-    sed -i -e 's/(${LINUX_OS_NAME} STREQUAL "Ubuntu 16.04")/("${LINUX_OS_NAME}" STREQUAL "Ubuntu 16.04")/g' cmake/dependencies.cmake && \
-    sed -i -e 's/(${LINUX_OS_NAME} STREQUAL "CentOS 7")/("${LINUX_OS_NAME}" STREQUAL "CentOS 7")/g' cmake/dependencies.cmake && \
-    sed -i -e 's/(ENABLE_OPENCV "enables OpenCV" ON)/(ENABLE_OPENCV "enables OpenCV" OFF)/g' cmake/features.cmake
 
-RUN mkdir build && cd build && cmake -DGEMM=MKL -DENABLE_MKL_DNN=ON -DCMAKE_BUILD_TYPE=Release ..
+ARG DLDT_DIR=/dldt-2018_R4
+RUN git clone --depth=1 -b 2018_R4 https://github.com/opencv/dldt.git ${DLDT_DIR} && \
+    cd ${DLDT_DIR} && git submodule init && git submodule update --recursive && \
+    rm -Rf .git && rm -Rf model-optimizer
+
+WORKDIR ${DLDT_DIR}
+RUN curl -L -o ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz && \
+    tar -xzf ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz && rm ${DLDT_DIR}/mklml_lnx_2019.0.1.20180928.tgz
+WORKDIR ${DLDT_DIR}/inference-engine
+RUN mkdir build && cd build && cmake -DGEMM=MKL -DMKLROOT=${DLDT_DIR}/mklml_lnx_2019.0.1.20180928 -DENABLE_MKL_DNN=ON -DCMAKE_BUILD_TYPE=Release ..
 RUN cd build && make -j4
 RUN pip install cython numpy && mkdir ie_bridges/python/build && cd ie_bridges/python/build && \
-    cmake -DInferenceEngine_DIR=/dldt-2018_R3/inference-engine/build -DPYTHON_EXECUTABLE=`which python` -DPYTHON_LIBRARY=/opt/conda/lib/libpython3.6m.so -DPYTHON_INCLUDE_DIR=/opt/conda/include/python3.6m .. && \
+    cmake -DInferenceEngine_DIR=${DLDT_DIR}/inference-engine/build -DPYTHON_EXECUTABLE=`which python` -DPYTHON_LIBRARY=/opt/conda/lib/libpython3.6m.so -DPYTHON_INCLUDE_DIR=/opt/conda/include/python3.6m .. && \
     make -j4
 
 FROM intelpython/intelpython3_core as PROD
@@ -55,23 +55,27 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     ca-certificates \
     vim
 
-COPY --from=DEV /dldt-2018_R3/inference-engine/bin/intel64/Release/lib/*.so /usr/local/lib/
-COPY --from=DEV /dldt-2018_R3/inference-engine/ie_bridges/python/build/ /usr/local/lib/openvino/
-COPY --from=DEV /dldt-2018_R3/inference-engine/temp/mkltiny_lnx_20180511/lib/libiomp5.so /usr/local/lib/
+COPY --from=DEV /dldt-2018_R4/inference-engine/bin/intel64/Release/lib/*.so /usr/local/lib/
+COPY --from=DEV /dldt-2018_R4/inference-engine/ie_bridges/python/build/ /usr/local/lib/openvino/
+COPY --from=DEV /dldt-2018_R4/mklml_lnx_2019.0.1.20180928/lib/lib*.so /usr/local/lib/
 ENV LD_LIBRARY_PATH=/usr/local/lib
 ENV PYTHONPATH=/usr/local/lib
 
-COPY start_server.sh setup.py requirements.txt version /ie-serving-py/
+COPY requirements.txt /ie-serving-py/
+RUN conda create --name myenv -y
+ENV PATH /opt/conda/envs/myenv/bin:$PATH
+WORKDIR /ie-serving-py
+RUN pip --no-cache-dir install -r requirements.txt
+
+COPY start_server.sh setup.py version /ie-serving-py/
+RUN sed -i '/activate/d' start_server.sh
 COPY ie_serving /ie-serving-py/ie_serving
 
-WORKDIR /ie-serving-py
-RUN pip install .
-
-RUN conda create --name myenv -y
-
-ENV PATH /opt/conda/envs/myenv/bin:$PATH
-
-RUN pip --no-cache-dir install -r requirements.txt && \
-    pip install .
-
-RUN sed -i '/activate/d' start_server.sh
+
+RUN pip install .
14 changes: 3 additions & 11 deletions Makefile
@@ -27,7 +27,7 @@ CONFIG := "$(CONFIG)"
 ML_DIR := "$(MK_DIR)"
 HTTP_PROXY := "$(http_proxy)"
 HTTPS_PROXY := "$(https_proxy)"
-OVMS_VERSION := "0.2"
+OVMS_VERSION := "0.3"
 
 .PHONY: default install uninstall requirements \
 	venv test unit_test coverage style dist clean \
@@ -86,7 +86,7 @@ docker_build_src_ubuntu:
 	@echo "Building docker image"
 	@echo OpenVINO Model Server version: $(OVMS_VERSION) > version
 	@echo Git commit: `git rev-parse HEAD` >> version
-	@echo OpenVINO version: 2018_R3 src >> version
+	@echo OpenVINO version: 2018_R4 src >> version
 	@echo docker build -f Dockerfile --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy="$(HTTPS_PROXY)" -t ie-serving-py:latest .
 	@docker build -f Dockerfile --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy="$(HTTPS_PROXY)" -t ie-serving-py:latest .
 
@@ -102,18 +102,10 @@ docker_build_src_intelpython:
 	@echo "Building docker image"
 	@echo OpenVINO Model Server version: $(OVMS_VERSION) > version
 	@echo Git commit: `git rev-parse HEAD` >> version
-	@echo OpenVINO version: 2018_R3 src >> version
+	@echo OpenVINO version: 2018_R4 src >> version
 	@echo docker build -f Dockerfile_intelpython --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy="$(HTTPS_PROXY)" -t ie-serving-py:latest .
 	@docker build -f Dockerfile_intelpython --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy="$(HTTPS_PROXY)" -t ie-serving-py:latest .
 
-docker_build_clearlinux:
-	@echo "Building docker image"
-	@echo OpenVINO Model Server version: $(OVMS_VERSION) > version
-	@echo Git commit: `git rev-parse HEAD` >> version
-	@echo OpenVINO version: 2018_R3 clearlinux >> version
-	@echo docker build -f Dockerfile_clearlinux --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy="$(HTTPS_PROXY)" -t ie-serving-py:latest .
-	@docker build -f Dockerfile_clearlinux --build-arg http_proxy=$(HTTP_PROXY) --build-arg https_proxy="$(HTTPS_PROXY)" -t ie-serving-py:latest .
-
 docker_run:
 	@echo "Starting the docker container with serving model"
 	@docker run --rm -d --name ie-serving-py-test-multi -v /tmp/test_models/saved_models/:/opt/ml:ro -p 9001:9001 ie-serving-py:latest /ie-serving-py/start_server.sh ie_serving config --config_path /opt/ml/config.json --port 9001
1 change: 0 additions & 1 deletion README.md
@@ -133,7 +133,6 @@ You can parse the logs to analyze: volume of requests, processing statistics and
 
 * Currently, only *Predict* and *GetModelMetadata* calls are implemented using Tensorflow Serving API.
 *Classify*, *Regress* and *MultiInference* are planned to be added.
-* *GetModelMetadata* is reporting incorrect shape of model outputs. A fix is pending in OpenVINO™ API for reporting outputs shape.
 * Currently, model versions are detected at server start time. Adding new versions requires service restart or
 starting new docker container. There are plans to add online detection of new model versions and config file changes.
 * Output_filter is not effective in the Predict call. All outputs defined in the model are returned to the clients.
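For reference, the *Predict* call mentioned above follows the TensorFlow Serving gRPC API. A minimal client sketch, assuming the `tensorflow-serving-api` package, a server listening on `localhost:9001`, and a model named `resnet` with an input named `data` (all placeholder names, not part of this commit):

```python
import grpc
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

channel = grpc.insecure_channel('localhost:9001')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'resnet'  # placeholder model name
# 'data' and the NCHW shape are placeholders for the served model's real input.
request.inputs['data'].CopyFrom(
    tf.make_tensor_proto(np.zeros((1, 3, 224, 224), dtype=np.float32)))
response = stub.Predict(request, 10.0)  # 10 s timeout
print(response.outputs)
```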
7 changes: 2 additions & 5 deletions docs/docker_container.md
@@ -9,12 +9,13 @@ via [Dockerfile_binary_openvino](../Dockerfile_binary_openvino).
 
 The latter option requires the downloaded [OpenVINO™ toolkit](https://software.intel.com/en-us/openvino-toolkit/choose-download) placed in the repository root folder alongside the Dockerfile. A registration process is required to download the toolkit.
 It is recommended to use the online installation package because the resulting image will be smaller.
-An example file looks like: `l_openvino_toolkit_fpga_p_2018.2.300_online.tgz`.
+An example file looks like: `l_openvino_toolkit_p_2018.5.445_online.tgz`.
 
 
 From the root of the git repository, execute the command:
 
 ```bash
+cp (download path)/l_openvino_toolkit_p_2018.5.445_online.tgz .
 make docker_build_bin http_proxy=$http_proxy https_proxy=$https_proxy
 ```
 or
@@ -25,10 +25,6 @@ or
 ```bash
 make docker_build_src_intelpython http_proxy=$http_proxy https_proxy=$https_proxy
 ```
-or
-```bash
-make docker_build_clearlinux http_proxy=$http_proxy https_proxy=$https_proxy
-
 
 **Note:** You can also use the publicly available docker image from [dockerhub](https://hub.docker.com/r/intelaipg/openvino-model-server/)
 
10 changes: 7 additions & 3 deletions ie_serving/models/ir_engine.py
@@ -33,7 +33,8 @@ def __init__(self, model_xml, model_bin, mapping_config, exec_net,
         self.exec_net = exec_net
         self.input_tensor_names = list(inputs.keys())
         self.input_tensors = inputs
-        self.output_tensor_names = outputs
+        self.output_tensor_names = list(outputs.keys())
+        self.output_tensors = outputs
         self.model_keys = self.set_keys(mapping_config)
         self.input_key_names = list(self.model_keys['inputs'].keys())
         logger.info("Matched keys for model: {}".format(self.model_keys))
@@ -44,8 +45,9 @@ def build(cls, model_xml, model_bin, mapping_config):
         if CPU_EXTENSION and 'CPU' in DEVICE:
             plugin.add_cpu_extension(CPU_EXTENSION)
         net = IENetwork.from_ir(model=model_xml, weights=model_bin)
-        input_blob = next(iter(net.inputs))
-        batch_size = net.inputs[input_blob].shape[0]
+        inputs = net.inputs
+        batch_size = list(inputs.values())[0][0]
+        outputs = net.outputs
         exec_net = plugin.load(network=net, num_requests=batch_size)
         ir_engine = cls(model_xml=model_xml, model_bin=model_bin,
@@ -103,6 +105,8 @@ def set_keys(self, mapping_config):
         else:
             return self._set_names_in_config_as_keys(mapping_data)
 
-    def infer(self, data: dict):
+    def infer(self, data: dict, batch_size=None):
+        if batch_size is not None:
+            self.exec_net.requests[0].set_batch(batch_size)
         results = self.exec_net.infer(inputs=data)
         return results
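A hypothetical call site for the extended signature, where `engine` stands for an already-built `IrEngine` instance and `'data'` is a placeholder input name:

```python
import numpy as np

batch = np.zeros((4, 3, 224, 224), dtype=np.float32)  # placeholder input
# Passing batch_size lets a request carry fewer samples than the batch size
# the network was loaded with; it is applied via set_batch on request 0.
results = engine.infer({'data': batch}, batch_size=batch.shape[0])
```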
