Commit b235c73
Showing 3,639 changed files with 266,956 additions and 64,512 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -1,5 +1,5 @@
# [OpenVINO™ Toolkit](https://01.org/openvinotoolkit) - Deep Learning Deployment Toolkit repository
[![Stable release](https://img.shields.io/badge/version-2018.R5-green.svg)](https://github.com/opencv/dldt/releases/tag/2018_R5)
[![Stable release](https://img.shields.io/badge/version-2019.R1-green.svg)](https://github.com/opencv/dldt/releases/tag/2019_R1)
[![Apache License Version 2.0](https://img.shields.io/badge/license-Apache_2.0-green.svg)](LICENSE)

This toolkit allows developers to deploy pre-trained deep learning models through a high-level C++ Inference Engine API integrated with application logic.
37 changes: 21 additions & 16 deletions inference-engine/CMakeLists.txt
@@ -1,13 +1,15 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

cmake_minimum_required (VERSION 3.3)
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)

project(InferenceEngine)

set(DEV_BUILD TRUE)

include(CTest)

## WA for a problem with the gtest submodule: it cannot detect the uint32 type.
## Remove the gtest submodule and these two lines together.
include (CheckTypeSize)
@@ -133,25 +135,28 @@ set (CMAKE_POSITION_INDEPENDENT_CODE ON)
include (sanitizer)

include(CheckCXXCompilerFlag)
if(UNIX)
    CHECK_CXX_COMPILER_FLAG("-fvisibility=hidden" COMPILER_SUPPORTS_VISIBILITY)
    if (COMPILER_SUPPORTS_VISIBILITY)
        #add_definitions(-fvisibility=hidden) todo: should this be hidden? If so, define default visibility explicitly for each function
        add_definitions(-fvisibility=default)
    endif(COMPILER_SUPPORTS_VISIBILITY)
endif(UNIX)

include(cpplint)

add_subdirectory(src)
add_subdirectory(tests)
add_subdirectory(thirdparty)
if (ENABLE_SAMPLES_CORE)
    set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}")
set(InferenceEngine_DIR "${CMAKE_BINARY_DIR}")

    #to be able to link
    set (LIB_FOLDER ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)
    add_subdirectory(samples)
endif()
#to be able to link
set (LIB_FOLDER ${IE_MAIN_SOURCE_DIR}/${BIN_FOLDER}/${IE_BUILD_CONFIGURATION}/lib)

# gflags and format_reader targets are kept inside the samples directory and
# must be built even if the samples build is disabled (they are required for tests and tools).
add_subdirectory(samples)

file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h)
add_cpplint_target(sample_cpplint
                   FOR_SOURCES ${SAMPLES_SOURCES}
                   EXCLUDE_PATTERNS "thirdparty/*" "pugixml/*")

if (ENABLE_PYTHON)
    add_subdirectory(ie_bridges/python)
endif()
endif()

add_cpplint_report_target()
78 changes: 68 additions & 10 deletions inference-engine/README.md
@@ -16,8 +16,8 @@ Inference Engine plugins for Intel® FPGA and Intel® Movidius™ Neural Compute
## Build on Linux\* Systems

The software was validated on:
- Ubuntu\* 16.04 with default GCC\* 5.4.0
- CentOS\* 7.4 with default GCC\* 4.8.5
- Ubuntu\* 16.04 (64-bit) with default GCC\* 5.4.0
- CentOS\* 7.4 (64-bit) with default GCC\* 4.8.5
- [Intel® Graphics Compute Runtime for OpenCL™ Driver package 18.28.11080](https://github.com/intel/compute-runtime/releases/tag/18.28.11080).

### Software Requirements
@@ -45,11 +45,19 @@ The software was validated on:
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to the OpenBLAS\* implementation, use the `GEMM=OPENBLAS` option plus the `BLAS_INCLUDE_DIRS` and `BLAS_LIBRARIES` cmake options to specify the path to OpenBLAS headers and library; for example, use the following options on CentOS\*: `-DGEMM=OPENBLAS -DBLAS_INCLUDE_DIRS=/usr/include/openblas -DBLAS_LIBRARIES=/usr/lib64/libopenblas.so.0`
- To switch to optimized MKL-ML\* GEMM implementation, use `GEMM=MKL` and `MKLROOT` cmake options to specify path to unpacked MKL-ML with `include` and `lib` folders, for example use the following options: `-DGEMM=MKL -DMKLROOT=<path_to_MKL>`. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz)

- OpenMP threading is used by default. To build Inference Engine with TBB threading, set `-DTHREADING=TBB` option.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_lnx_2019.0.1.20180928.tgz)

- To build Python API wrapper, use -DENABLE_PYTHON=ON option. To specify exact Python version, use the following options: `-DPYTHON_EXECUTABLE=`which python3.6` -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.6`
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.

- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you already have TBB or OpenCV packages installed and configured in your environment, you may need to clear the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command; otherwise the packages won't be downloaded, and the build may fail if incompatible versions are installed (a combined example follows this list).
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=`which python3.7` \
-DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.7m.so \
-DPYTHON_INCLUDE_DIR=/usr/include/python3.7
```
- To switch on/off the CPU and GPU plugins, use `cmake` options `-DENABLE_MKL_DNN=ON/OFF` and `-DENABLE_CLDNN=ON/OFF`.
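
For reference, a minimal sketch of a combined `cmake` invocation using several of the options above; the OpenBLAS and Python paths repeat the illustrative CentOS values from this list and must be adjusted to your system:

```sh
# Clear preinstalled TBB/OpenCV locations so compatible packages are downloaded (see the note above)
unset TBBROOT OpenCV_DIR
cmake -DCMAKE_BUILD_TYPE=Release \
      -DGEMM=OPENBLAS \
      -DBLAS_INCLUDE_DIRS=/usr/include/openblas \
      -DBLAS_LIBRARIES=/usr/lib64/libopenblas.so.0 \
      -DTHREADING=OMP \
      -DENABLE_PYTHON=ON \
      -DPYTHON_EXECUTABLE=`which python3.7` \
      ..
```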
@@ -74,7 +82,7 @@ You can use the following additional build options:
## Build on Windows\* Systems:
The software was validated on:
- Microsoft\* Windows\* 10 with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
- Microsoft\* Windows\* 10 (64-bit) with Visual Studio 2017 and Intel® C++ Compiler 2018 Update 3
- [Intel® Graphics Driver for Windows* [24.20] driver package](https://downloadcenter.intel.com/download/27803/Graphics-Intel-Graphics-Driver-for-Windows-10?v=t).
### Software Requirements
@@ -107,25 +115,75 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
- Internal JIT GEMM implementation is used by default.
- To switch to the OpenBLAS GEMM implementation, use the `-DGEMM=OPENBLAS` cmake option and specify the path to OpenBLAS using the `-DBLAS_INCLUDE_DIRS=<OPENBLAS_DIR>\include` and `-DBLAS_LIBRARIES=<OPENBLAS_DIR>\lib\libopenblas.dll.a` options. A prebuilt OpenBLAS\* package can be downloaded [here](https://sourceforge.net/projects/openblas/files/v0.2.14/OpenBLAS-v0.2.14-Win64-int64.zip/download), and mingw64* runtime dependencies [here](https://sourceforge.net/projects/openblas/files/v0.2.14/mingw64_dll.zip/download)
- To switch to optimized MKL-ML GEMM implementation, use `GEMM=MKL` and `MKLROOT` cmake options to specify path to unpacked MKL-ML with `include` and `lib` folders, for example use the following options: `-DGEMM=MKL -DMKLROOT=<path_to_MKL>`. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_win_2019.0.1.20180928.zip)
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17/mklml_win_2019.0.1.20180928.zip)
- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.
- OpenMP threading is used by default. To build Inference Engine with TBB threading, set `-DTHREADING=TBB` option.
- Required versions of TBB and OpenCV packages are downloaded automatically by the CMake-based script. If you already have TBB or OpenCV packages installed and configured in your environment, you may need to clear the `TBBROOT` and `OpenCV_DIR` environment variables before running the `cmake` command; otherwise the packages won't be downloaded, and the build may fail if incompatible versions are installed (see the snippet after this list).

- To build Python API wrapper, use -DENABLE_PYTHON=ON option. To specify exact Python version, use the following options: `-DPYTHON_EXECUTABLE="C:\Program Files\Python36\python.exe" -DPYTHON_INCLUDE_DIR="C:\Program Files\Python36\include" -DPYTHON_LIBRARY="C:\Program Files\Python36\libs\python36.lib"`.
- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE="C:\Program Files\Python37\python.exe" ^
-DPYTHON_LIBRARY="C:\Program Files\Python37\libs\python37.lib" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python37\include"
```
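
If you hit the TBB/OpenCV note above, here is a minimal sketch of clearing those variables in the build shell before configuring; the generator and toolset strings repeat the ones used earlier in this section:

```sh
:: Clear preinstalled TBB/OpenCV locations so the cmake script downloads compatible packages
set TBBROOT=
set OpenCV_DIR=
cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ..
```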

6. Build generated solution in Visual Studio 2017 or run `cmake --build . --config Release` to build from the command line.

7. Before running the samples, add the paths to the TBB and OpenCV binaries used for the build to the %PATH% environment variable. By default, TBB binaries are downloaded by the CMake-based script to the `<dldt_repo>/inference-engine/temp/tbb/lib` folder, and OpenCV binaries to the `<dldt_repo>/inference-engine/temp/opencv_4.1.0/bin` folder, as in the sketch below.
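
A minimal sketch of extending %PATH% for the current shell session, assuming the default download locations named above (`<dldt_repo>` remains a placeholder for your checkout path):

```sh
:: Illustrative only: prepend the default TBB and OpenCV binary locations
set PATH=<dldt_repo>\inference-engine\temp\tbb\lib;%PATH%
set PATH=<dldt_repo>\inference-engine\temp\opencv_4.1.0\bin;%PATH%
```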

### Building Inference Engine with Ninja

```sh
call "C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\bin\ipsxe-comp-vars.bat" intel64 vs2017
set CXX=icl
set CC=icl
:: Clear the TBBROOT value set by ipsxe-comp-vars.bat; the required TBB package will be downloaded by the dldt cmake script
set TBBROOT=
cmake -G Ninja -Wno-dev -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --config Release
```

Before running the samples on Microsoft\* Windows\*, please add path to OpenMP library (<dldt_repo>/inference-engine/temp/omp/lib) and OpenCV libraries (<dldt_repo>/inference-engine/temp/opencv_4.0.0/bin) to the %PATH% environment variable.
## Build on macOS\* Systems

The software was validated on:
- macOS\* 10.14, 64-bit

### Software Requirements
- [CMake\*](https://cmake.org/download/) 3.9 or higher
- Clang\* compiler from Xcode\* 10.1
- Python\* 3.4 or higher for the Inference Engine Python API wrapper

### Build Steps
1. Clone submodules:
```sh
cd dldt/inference-engine
git submodule init
git submodule update --recursive
```
2. Install build dependencies using the `install_dependencies.sh` script in the project root folder.
3. Create a build folder:
```sh
mkdir build
```
4. Inference Engine uses a CMake-based build system. In the created `build` directory, run `cmake` to fetch project dependencies and create Unix makefiles, then run `make` to build the project:
```sh
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j16
```
You can use the following additional build options:
- Internal JIT GEMM implementation is used by default.
- To switch to the optimized MKL-ML\* GEMM implementation, use `-DGEMM=MKL` and `-DMKLROOT=<path_to_MKL>` cmake options to specify a path to unpacked MKL-ML with the `include` and `lib` folders. MKL-ML\* package can be downloaded [here](https://github.com/intel/mkl-dnn/releases/download/v0.17.1/mklml_mac_2019.0.1.20180928.tgz) (a combined example is shown after this list)

- Threading Building Blocks (TBB) is used by default. To build the Inference Engine with OpenMP* threading, set the `-DTHREADING=OMP` option.

- To build the Python API wrapper, use the `-DENABLE_PYTHON=ON` option. To specify an exact Python version, use the following options:
```sh
-DPYTHON_EXECUTABLE=/Library/Frameworks/Python.framework/Versions/3.7/bin/python3.7 \
-DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.7/lib/libpython3.7m.dylib \
-DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.7/include/python3.7m
```
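
As on Linux, a combined configure command might look like the following sketch; `<path_to_MKL>` stays a placeholder for the unpacked MKL-ML package:

```sh
cmake -DCMAKE_BUILD_TYPE=Release \
      -DGEMM=MKL -DMKLROOT=<path_to_MKL> \
      -DTHREADING=OMP \
      -DENABLE_PYTHON=ON \
      ..
```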


---
\* Other names and brands may be claimed as the property of others.
4 changes: 1 addition & 3 deletions inference-engine/cmake/FindlibGNA.cmake
@@ -1,12 +1,10 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

#module to locate GNA libraries

cmake_minimum_required(VERSION 2.8)

if (WIN32)
    set(GNA_PLATFORM_DIR win64)
    set(GNA_LIB_DIR x64)
23 changes: 5 additions & 18 deletions inference-engine/cmake/check_features.cmake
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
@@ -65,10 +65,6 @@ if (ENABLE_PROFILING_RAW)
    add_definitions(-DENABLE_PROFILING_RAW=1)
endif()

if (ENABLE_GTEST_PATCHES)
    add_definitions(-DENABLE_GTEST_PATCHES=1)
endif()

if (ENABLE_CLDNN)
    add_definitions(-DENABLE_CLDNN=1)
endif()
@@ -77,22 +73,14 @@ if (ENABLE_MKL_DNN)
    add_definitions(-DENABLE_MKL_DNN=1)
endif()

if (ENABLE_STRESS_UNIT_TESTS)
    add_definitions(-DENABLE_STRESS_UNIT_TESTS=1)
endif()

if (ENABLE_SEGMENTATION_TESTS)
    add_definitions(-DENABLE_SEGMENTATION_TESTS=1)
endif()

if (ENABLE_OBJECT_DETECTION_TESTS)
    add_definitions(-DENABLE_OBJECT_DETECTION_TESTS=1)
endif()

if (ENABLE_GNA)
    add_definitions(-DENABLE_GNA)
endif()

if (ENABLE_SAMPLES)
    set (ENABLE_SAMPLES_CORE ON)
endif()

if (DEVELOPMENT_PLUGIN_MODE)
    message (STATUS "Enabled development plugin mode")

@@ -112,5 +100,4 @@ if (VERBOSE_BUILD)
    set(CMAKE_VERBOSE_MAKEFILE ON)
endif()


print_enabled_features()
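
These guards translate cmake feature options into compile definitions consumed by the sources. A hypothetical configure command toggling a few of the options that appear in this file (values are illustrative):

```sh
cmake -DENABLE_MKL_DNN=ON -DENABLE_CLDNN=OFF -DENABLE_GNA=ON -DENABLE_SAMPLES=ON ..
```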
2 changes: 1 addition & 1 deletion inference-engine/cmake/config.cmake.in
@@ -1,4 +1,4 @@
# Copyright (C) 2018 Intel Corporation
# Copyright (C) 2018-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#