From 05bbb3065c40c7b0334fd9614f0cef262c057cd8 Mon Sep 17 00:00:00 2001
From: "S. Manohar Karlapalem"
Date: Mon, 5 Aug 2019 15:28:46 -0700
Subject: [PATCH] [OpenVINO-EP] Update hardware branding of VAD-R as VAD-M
 (#1552)

Replaces all occurrences of VAD-R/VAD_R with VAD-M/VAD_M. Aligns with the
official hardware branding.
---
 BUILD.md                                           |  2 +-
 cmake/CMakeLists.txt                               |  4 ++--
 dockerfiles/README.md                              |  6 +++---
 .../OpenVINO-ExecutionProvider.md                  |  4 ++--
 .../openvino/openvino_execution_provider.cc        |  2 +-
 .../core/providers/openvino/openvino_graph.cc      |  6 +++---
 .../providers/cpu/activation/activation_op_test.cc |  4 ++--
 .../providers/cpu/math/element_wise_ops_test.cc    | 14 +++++++-------
 tools/ci_build/build.py                            |  4 ++--
 9 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/BUILD.md b/BUILD.md
index 911389aa7867b..f4d1650ee03ad 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -191,7 +191,7 @@ The OpenVINO Execution Provider can be built using the following commands:
 | GPU_FP32 | Intel® Integrated Graphics |
 | GPU_FP16 | Intel® Integrated Graphics with FP16 quantization of models |
 | MYRIAD_FP16 | Intel® MovidiusTM USB sticks |
-| VAD-R_FP16 | Intel® Vision Accelerator Design based on 8 MovidiusTM MyriadX VPUs |
+| VAD-M_FP16 | Intel® Vision Accelerator Design based on 8 MovidiusTM MyriadX VPUs |

 For more information on OpenVINO Execution Provider's ONNX Layer support, Topology support, and Intel hardware enabled, please refer to the document OpenVINO-ExecutionProvider.md in $onnxruntime_root/docs/execution_providers

diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index f6252b90aa9e2..9ee7470b0b2b3 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -542,8 +542,8 @@ if(onnxruntime_USE_OPENVINO)
     add_definitions(-DOPENVINO_CONFIG_CPU_FP32=1)
   endif()

-  if(onnxruntime_USE_OPENVINO_VAD_R)
-    add_definitions(-DOPENVINO_CONFIG_VAD_R=1)
+  if(onnxruntime_USE_OPENVINO_VAD_M)
+    add_definitions(-DOPENVINO_CONFIG_VAD_M=1)
   endif()

 endif()

diff --git a/dockerfiles/README.md b/dockerfiles/README.md
index 7a7a2642b6fde..f395acc2ef6b5 100644
--- a/dockerfiles/README.md
+++ b/dockerfiles/README.md
@@ -102,7 +102,7 @@
 | GPU_FP32 |Intel Integrated Graphics |
 | GPU_FP16 | Intel Integrated Graphics |
 | MYRIAD_FP16 | Intel MovidiusTM USB sticks |
-| VAD-R_FP16 | Intel Vision Accelerator Design based on MovidiusTM MyriadX VPUs |
+| VAD-M_FP16 | Intel Vision Accelerator Design based on MovidiusTM MyriadX VPUs |

 ## CPU Version

@@ -155,12 +155,12 @@
 docker run -it --network host --privileged -v /dev:/dev onnxruntime-myriad:latest
 ```

-## VAD-R Accelerator Version
+## VAD-M Accelerator Version
 1. Retrieve your docker image in one of the following ways.
    - Build the docker image from the DockerFile in this repository.
     ```
-    docker build -t onnxruntime-vadr --build-arg DEVICE=VAD-R_FP16 --network host .
+    docker build -t onnxruntime-vadr --build-arg DEVICE=VAD-M_FP16 --network host .
     ```
    - Pull the official image from DockerHub.
    ```
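Before the next file's diff, a quick aside: once one of the images or wheels above is built for a given device, the simplest sanity check is to ask onnxruntime which providers a session actually uses. This is a minimal sketch, not part of the patch; the model path is hypothetical, and it assumes a Python wheel where InferenceSession.get_providers() is available.

```python
# Minimal sanity check (a sketch, not part of this patch): confirm the
# OpenVINO EP is active. "model.onnx" is a hypothetical model path.
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx")
# In builds of this era the OpenVINO device (e.g. VAD-M_FP16) is fixed at
# compile time, so a successful build should list the provider here.
print(sess.get_providers())  # expect 'OpenVINOExecutionProvider' in the list
```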
diff --git a/docs/execution_providers/OpenVINO-ExecutionProvider.md b/docs/execution_providers/OpenVINO-ExecutionProvider.md
index a3691f7db1a6c..1d5838268d3f6 100644
--- a/docs/execution_providers/OpenVINO-ExecutionProvider.md
+++ b/docs/execution_providers/OpenVINO-ExecutionProvider.md
@@ -83,9 +83,9 @@ Below topologies are supported from ONNX open model zoo using OpenVINO Execution
 |TinyYOLOv2 | Yes | Yes | Yes |
 | ResNet101\_DUC\_HDC | Yes | No | No |

-# Application code changes for VAD-R performance scaling
+# Application code changes for VAD-M performance scaling

-VAD-R has 8 VPUs and is suitable for applications that require multiple inferences to run in parallel. We use batching approach for performance scaling on VAD-R.
+VAD-M has 8 VPUs and is suitable for applications that require multiple inferences to run in parallel. We use a batching approach for performance scaling on VAD-M.

 Below python code snippets provide sample classification code to batch input images, load a model and process the output results.

diff --git a/onnxruntime/core/providers/openvino/openvino_execution_provider.cc b/onnxruntime/core/providers/openvino/openvino_execution_provider.cc
index d434c9e4e4f85..f2a15562bb0b0 100644
--- a/onnxruntime/core/providers/openvino/openvino_execution_provider.cc
+++ b/onnxruntime/core/providers/openvino/openvino_execution_provider.cc
@@ -453,7 +453,7 @@ std::vector<std::unique_ptr<ComputeCapability>> OpenVINOExecutionProvider::GetCa
   device_id = "MYRIAD";
 #endif

-#ifdef OPENVINO_CONFIG_VAD_R
+#ifdef OPENVINO_CONFIG_VAD_M
   precision_fp32 = false;
   device_id = "HDDL";
 #endif

diff --git a/onnxruntime/core/providers/openvino/openvino_graph.cc b/onnxruntime/core/providers/openvino/openvino_graph.cc
index 53e5e6388b613..b64dce7951d0d 100644
--- a/onnxruntime/core/providers/openvino/openvino_graph.cc
+++ b/onnxruntime/core/providers/openvino/openvino_graph.cc
@@ -50,7 +50,7 @@ OpenVINOGraph::OpenVINOGraph(const onnxruntime::Node* fused_node) {
   precision_ = InferenceEngine::Precision::FP16;
   precision_str = "FP16";
 #endif
-#ifdef OPENVINO_CONFIG_VAD_R
+#ifdef OPENVINO_CONFIG_VAD_M
   device_id_ = "HDDL";
   precision_ = InferenceEngine::Precision::FP16;
   precision_str = "FP16";
@@ -65,8 +65,8 @@ OpenVINOGraph::OpenVINOGraph(const onnxruntime::Node* fused_node) {
   // operations associated with the Infer Requests may be scheduled in parallel.
   // Infer Requests hold resources representing the entire network on their target hardware. So,
   // having more Infer Requests than needed would waste system resources.
-  // In VAD-R (HDDL) accelerator, there are 8 parallel execution units. So, creating 8 instances
-  // of Infer Requests only if the VAD-R accelerator is being used.
+  // In the VAD-M (HDDL) accelerator, there are 8 parallel execution units, so we create
+  // 8 instances of Infer Requests only if the VAD-M accelerator is being used.
   // sets number of maximum parallel inferences
   num_inf_reqs_ = (device_id_ == "HDDL") ? 8 : 1;
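The docs hunk above promises Python snippets for batching (not reproduced in this excerpt), and the openvino_graph.cc comment explains the rationale: VAD-M exposes 8 VPUs through 8 Infer Requests, so batched inputs keep all execution units busy. A minimal sketch of that batching approach, assuming a hypothetical classification model model.onnx with a float32 input named "data" of shape [N, 3, 224, 224] and a single score output:

```python
# Batching sketch for VAD-M (assumptions: "model.onnx", an input named
# "data" of shape [N, 3, 224, 224], one output; all hypothetical).
import numpy as np
import onnxruntime as ort

BATCH = 8  # one image per VAD-M VPU / Infer Request

# Stand-ins for 8 preprocessed images.
images = [np.random.rand(3, 224, 224).astype(np.float32) for _ in range(BATCH)]
batch = np.stack(images)  # shape: (8, 3, 224, 224)

sess = ort.InferenceSession("model.onnx")
(scores,) = sess.run(None, {"data": batch})

# Unbatch the results: one prediction per input image.
for i, s in enumerate(scores):
    print("image", i, "-> class", int(np.argmax(s)))
```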
diff --git a/onnxruntime/test/providers/cpu/activation/activation_op_test.cc b/onnxruntime/test/providers/cpu/activation/activation_op_test.cc
index 9064e3791ef62..cd8b284056205 100644
--- a/onnxruntime/test/providers/cpu/activation/activation_op_test.cc
+++ b/onnxruntime/test/providers/cpu/activation/activation_op_test.cc
@@ -34,8 +34,8 @@ void TestUnaryElementwiseOp(const char* szOp, std::vector<float>& input_vals,
     excluded_providers.insert(kTensorrtExecutionProvider);
   }

-//Disabled because of accuracy issues for MYRIAD FP16 and VAD_R
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+//Disabled because of accuracy issues for MYRIAD FP16 and VAD_M
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
   int relu = strcmp(szOp, "Relu");
   int leaky = strcmp(szOp, "LeakyRelu");
   if(relu == 0 || leaky == 0){

diff --git a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
index bfff81c5be876..80770af33a06b 100644
--- a/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
+++ b/onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
@@ -42,7 +42,7 @@ TEST(MathOpTest, Add_float) {
                          0.0f, 5.0f, -36.0f,
                          -10.8f, 18.6f, 0.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_M)
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider}); //OpenVINO: Disabled due to accuracy mismatch for FP16
 #else
   test.Run();
@@ -159,7 +159,7 @@ TEST(MathOpTest, Add_Broadcast_2x1x4_1x3x1) {
                         221.0f, 222.0f, 223.0f, 224.0f,
                         231.0f, 232.0f, 233.0f, 234.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
   //OpenVINO: Disabled due to software limitation for VPU Plugin.
   //This test runs fine on CPU and GPU Plugins
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
@@ -185,7 +185,7 @@ TEST(MathOpTest, Add_Broadcast_2x1x1_3x4) {
                         211.0f, 212.0f, 213.0f, 214.0f,
                         221.0f, 222.0f, 223.0f, 224.0f,
                         231.0f, 232.0f, 233.0f, 234.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
   //OpenVINO: Disabled due to software limitation for VPU Plugin.
   //This test runs fine on CPU and GPU Plugins
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
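The two disabled Add_Broadcast tests above exercise multidirectional broadcasting, which the VPU plugin of this era could not handle even though the CPU and GPU plugins could. Numpy reproduces the intended semantics; the sketch below reuses the 2x1x1 + 3x4 shapes and the value pattern from Add_Broadcast_2x1x1_3x4:

```python
# What the disabled broadcast test computes: (2,1,1) + (3,4) -> (2,3,4).
import numpy as np

a = np.array([100.0, 200.0], np.float32).reshape(2, 1, 1)
b = np.array([[11, 12, 13, 14],
              [21, 22, 23, 24],
              [31, 32, 33, 34]], np.float32)

out = a + b       # broadcasts to shape (2, 3, 4)
print(out.shape)  # (2, 3, 4)
print(out[0, 0])  # [111. 112. 113. 114.], matching the test's
print(out[1, 2])  # [231. 232. 233. 234.]  expected values
```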
@@ -275,7 +275,7 @@ TEST(MathOpTest, Mul) {
                          0.0f, 5.25f, -6'400.0f,
                          29.16f, 86.49f, -100'000'000.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_M)
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider}); //OpenVINO: Disabled due to accuracy issues for MYRIAD FP16
 #else
   test.Run();
@@ -537,7 +537,7 @@ TEST(MathOpTest, Sum_6) {
                         -6.0f, 6.6f, 28.0f,
                         -1.0f, 0.06f, 0.25f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_VAD_M)
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider}); //OpenVINO: Disabled due to accuracy mismatch for FP16
 #else
   test.Run();
@@ -561,7 +561,7 @@ TEST(MathOpTest, Sum_8_Test1) {
                         311.0f, 312.0f, 313.0f,
                         321.0f, 322.0f, 323.0f,
                         331.0f, 332.0f, 333.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
   //OpenVINO: Disabled due to software limitation for VPU Plugin.
   //This test runs fine on CPU and GPU Plugins
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
@@ -596,7 +596,7 @@ TEST(MathOpTest, Sum_8_Test2) {
                         3.3f, 4.4f, -94.7f,
                         59.6f, 64.01f, -8.0f});
-#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_R)
+#if defined(OPENVINO_CONFIG_MYRIAD) || defined(OPENVINO_CONFIG_VAD_M)
   //OpenVINO: Disabled due to software limitation for VPU Plugin.
   //This test runs fine on CPU and GPU Plugins
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider,kOpenVINOExecutionProvider});
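The FP16 exclusions above all cite an "accuracy mismatch": float16 carries roughly three decimal digits of precision, so an exact-match comparison against float32 expectations fails even when the device computes correctly. A short illustration using one of the Mul test's expected values:

```python
# Why exact-match tests are disabled on FP16 devices: float16 cannot
# represent 86.49 (an expected value from the Mul test above) exactly.
import numpy as np

x = np.float32(86.49)
x16 = np.float16(x)            # what an FP16 device would hold
print(float(x16))              # 86.5: the nearest representable float16
print(float(x16) == float(x))  # False, so an exact comparison fails
```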
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index dd1fab19a2edb..bbaa891ef282b 100755
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -129,7 +129,7 @@ def parse_arguments():
     parser.add_argument("--use_mklml", action='store_true', help="Build with MKLML.")
     parser.add_argument("--use_ngraph", action='store_true', help="Build with nGraph.")
     parser.add_argument("--use_openvino", nargs="?", const="CPU_FP32",
-                        choices=["CPU_FP32","GPU_FP32","GPU_FP16","VAD-R_FP16","MYRIAD_FP16"], help="Build with OpenVINO for specific hardware.")
+                        choices=["CPU_FP32","GPU_FP32","GPU_FP16","VAD-M_FP16","MYRIAD_FP16"], help="Build with OpenVINO for specific hardware.")
     parser.add_argument("--use_dnnlibrary", action='store_true', help="Build with DNNLibrary.")
     parser.add_argument("--use_nsync", action='store_true', help="Build with NSYNC.")
     parser.add_argument("--use_preinstalled_eigen", action='store_true', help="Use pre-installed eigen.")
@@ -340,7 +340,7 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
                  "-Donnxruntime_USE_OPENVINO_GPU_FP32=" + ("ON" if args.use_openvino == "GPU_FP32" else "OFF"),
                  "-Donnxruntime_USE_OPENVINO_GPU_FP16=" + ("ON" if args.use_openvino == "GPU_FP16" else "OFF"),
                  "-Donnxruntime_USE_OPENVINO_CPU_FP32=" + ("ON" if args.use_openvino == "CPU_FP32" else "OFF"),
-                 "-Donnxruntime_USE_OPENVINO_VAD_R=" + ("ON" if args.use_openvino == "VAD-R_FP16" else "OFF"),
+                 "-Donnxruntime_USE_OPENVINO_VAD_M=" + ("ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
                  "-Donnxruntime_USE_NNAPI=" + ("ON" if args.use_dnnlibrary else "OFF"),
                  "-Donnxruntime_USE_OPENMP=" + ("ON" if args.use_openmp and not args.use_dnnlibrary and not args.use_mklml and not args.use_ngraph else "OFF"),
                  "-Donnxruntime_USE_TVM=" + ("ON" if args.use_tvm else "OFF"),
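The --use_openvino change above relies on argparse's nargs="?" with const: passing the bare flag selects CPU_FP32, while an explicit value such as VAD-M_FP16 turns on the corresponding CMake define. A standalone sketch of that pattern, mirroring the patched choices list and the VAD_M define mapping:

```python
# Standalone sketch of build.py's --use_openvino argument pattern.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use_openvino", nargs="?", const="CPU_FP32",
                    choices=["CPU_FP32", "GPU_FP32", "GPU_FP16",
                             "VAD-M_FP16", "MYRIAD_FP16"],
                    help="Build with OpenVINO for specific hardware.")

for argv in ([], ["--use_openvino"], ["--use_openvino", "VAD-M_FP16"]):
    args = parser.parse_args(argv)
    # Same ON/OFF mapping as generate_build_tree() in the hunk above.
    define = "-Donnxruntime_USE_OPENVINO_VAD_M=" + (
        "ON" if args.use_openvino == "VAD-M_FP16" else "OFF")
    print(argv, "->", args.use_openvino, define)
```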