From 081dff1d0960a7e73337fcf371f6fd778ea5b9ca Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Fri, 8 Sep 2023 19:11:19 +0400
Subject: [PATCH] [openvino] new recipe

---
 recipes/openvino/all/conandata.yml            |  31 ++
 recipes/openvino/all/conanfile.py             | 402 ++++++++++++++++++
 .../0001-cpu-plugin-compilation-c++17.patch   |  16 +
 .../0002-core-compilation-c++17.patch         |  46 ++
 ...plugin-compilation-with-conan-opencl.patch |  44 ++
 .../openvino/all/test_package/CMakeLists.txt  |  48 +++
 .../openvino/all/test_package/conanfile.py    |  49 +++
 .../all/test_package/test_package.cpp         | 116 +++++
 recipes/openvino/config.yml                   |   3 +
 9 files changed, 755 insertions(+)
 create mode 100644 recipes/openvino/all/conandata.yml
 create mode 100644 recipes/openvino/all/conanfile.py
 create mode 100644 recipes/openvino/all/patches/2023.1.0/0001-cpu-plugin-compilation-c++17.patch
 create mode 100644 recipes/openvino/all/patches/2023.1.0/0002-core-compilation-c++17.patch
 create mode 100644 recipes/openvino/all/patches/2023.1.0/0003-gpu-plugin-compilation-with-conan-opencl.patch
 create mode 100644 recipes/openvino/all/test_package/CMakeLists.txt
 create mode 100644 recipes/openvino/all/test_package/conanfile.py
 create mode 100644 recipes/openvino/all/test_package/test_package.cpp
 create mode 100644 recipes/openvino/config.yml

diff --git a/recipes/openvino/all/conandata.yml b/recipes/openvino/all/conandata.yml
new file mode 100644
index 00000000000000..f425071b114c91
--- /dev/null
+++ b/recipes/openvino/all/conandata.yml
@@ -0,0 +1,31 @@
+sources:
+  "2023.1.0":
+    "openvino":
+      url: "https://github.com/openvinotoolkit/openvino/archive/47b736f63edda256d66e2bbb572f42a9d6549f6e.tar.gz"
+      sha256: "8a1dd5b35fe4b60440e97ee61e12e01aa4dd78b5be30550aa08fcd96c416e1d7"
+    "arm_compute":
+      url: "https://github.com/ARM-software/ComputeLibrary/archive/refs/tags/v23.02.1.tar.gz"
+      sha256: "c3a443e26539f866969242e690cf0651ef629149741ee18732f954c734da6763"
+    "onednn_cpu":
+      url: "https://github.com/openvinotoolkit/oneDNN/archive/ae825539bd850d1ad5c83d4bb0d56c65d46d5842.tar.gz"
+      sha256: "1204df17785c8603f9dfa1f4f91e91e5ffd4391bf7680d2b256de2513490ebee"
+    "mlas":
+      url: "https://github.com/openvinotoolkit/mlas/archive/c7c8a631315000f17c650af34431009d2f22129c.tar.gz"
+      sha256: "7b790dfeef8e1dd612f920c85186c52ad3a3e2245e2a2afd6cc91ce4b1dc64a9"
+    "onednn_gpu":
+      url: "https://github.com/oneapi-src/oneDNN/archive/4b82a66ed38ecaa993352e5cc6ed7753656b8a26.tar.gz"
+      sha256: "cb17c003fe51bc9b4e20189573956b4446468162adf0fc4cea2ee0820cff0cd0"
+patches:
+  "2023.1.0":
+    - patch_file: "patches/2023.1.0/0001-cpu-plugin-compilation-c++17.patch"
+      patch_description: "Fixed CPU plugin compilation with C++17"
+      patch_type: "portability"
+      patch_source: "https://github.com/openvinotoolkit/openvino/pull/19628"
+    - patch_file: "patches/2023.1.0/0002-core-compilation-c++17.patch"
+      patch_description: "Fixed OpenVINO Core compilation with C++17"
+      patch_type: "portability"
+      patch_source: "https://github.com/openvinotoolkit/openvino/pull/19707"
+    - patch_file: "patches/2023.1.0/0003-gpu-plugin-compilation-with-conan-opencl.patch"
+      patch_description: "Fixed OpenVINO Intel GPU plugin compilation with OpenCL from Conan"
+      patch_type: "portability"
+      patch_source: "https://github.com/openvinotoolkit/openvino/pull/19668"
diff --git a/recipes/openvino/all/conanfile.py b/recipes/openvino/all/conanfile.py
new file mode 100644
index 00000000000000..d55e563cdb68f7
--- /dev/null
+++ b/recipes/openvino/all/conanfile.py
@@ -0,0 +1,402 @@
+from conan import ConanFile, conan_version
+from conan.errors import ConanInvalidConfiguration
+from conan.tools.build import check_min_cppstd, cross_building, stdcpp_library
+from conan.tools.scm import Version
+from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
+from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, rmdir
+import os
+
+required_conan_version = ">=1.60.0 <2.0 || >=2.0.8"
+
+class OpenvinoConan(ConanFile):
+    name = "openvino"
+
+    # Optional metadata
+    license = "Apache-2.0"
+    homepage = "https://github.com/openvinotoolkit/openvino"
+    url = "https://github.com/conan-io/conan-center-index"
+    description = "Open Visual Inference And Optimization toolkit for AI inference"
+    topics = ("nlp", "natural-language-processing", "ai", "computer-vision", "deep-learning", "transformers", "inference",
+              "speech-recognition", "yolo", "performance-boost", "diffusion-models", "recommendation-system", "stable-diffusion",
+              "generative-ai", "llm-inference", "optimize-ai", "deploy-ai")
+    package_id_non_embed_mode = "patch_mode"
+    package_type = "library"
+    short_paths = True
+    no_copy_source = True
+
+    # Binary configuration
+    settings = "os", "arch", "compiler", "build_type"
+    options = {
+        "shared": [True, False],
+        "fPIC": [True, False],
+        # HW plugins
+        "enable_cpu": [True, False],
+        "enable_gpu": [True, False],
+        # SW plugins
+        "enable_auto": [True, False],
+        "enable_hetero": [True, False],
+        "enable_auto_batch": [True, False],
+        # Frontends
+        "enable_ir_frontend": [True, False],
+        "enable_onnx_frontend": [True, False],
+        "enable_tf_frontend": [True, False],
+        "enable_tf_lite_frontend": [True, False],
+        "enable_paddle_frontend": [True, False],
+        "enable_pytorch_frontend": [True, False]
+    }
+    default_options = {
+        "shared": False,
+        "fPIC": True,
+        # HW plugins
+        "enable_cpu": True,
+        "enable_gpu": True,
+        # SW plugins
+        "enable_auto": True,
+        "enable_hetero": True,
+        "enable_auto_batch": True,
+        # Frontends
+        "enable_ir_frontend": True,
+        "enable_onnx_frontend": True,
+        "enable_tf_frontend": True,
+        "enable_tf_lite_frontend": True,
+        "enable_paddle_frontend": True,
+        "enable_pytorch_frontend": True
+    }
+    options_description = {
+        "shared": "Builds OpenVINO as shared libraries",
+        "fPIC": "Enables / Disables the -fPIC option. Only valid when shared=False",
+        # HW plugins
+        "enable_cpu": "Builds CPU plugin",
+        "enable_gpu": "Builds GPU plugin",
+        # SW plugins
+        "enable_auto": "Builds AUTO plugin",
+        "enable_hetero": "Builds HETERO plugin",
+        "enable_auto_batch": "Builds BATCH plugin",
+        # Frontends
+        "enable_ir_frontend": "Builds IR frontend",
+        "enable_onnx_frontend": "Builds ONNX frontend",
+        "enable_tf_frontend": "Builds TensorFlow frontend",
+        "enable_tf_lite_frontend": "Builds TensorFlow Lite frontend",
+        "enable_paddle_frontend": "Builds PaddlePaddle frontend",
+        "enable_pytorch_frontend": "Builds PyTorch frontend"
+    }
+
+    @property
+    def _protobuf_required(self):
+        return self.options.enable_tf_frontend or self.options.enable_onnx_frontend or self.options.enable_paddle_frontend
+
+    @property
+    def _target_arm(self):
+        return "arm" in self.settings.arch
+
+    @property
+    def _target_x86_64(self):
+        return self.settings.arch == "x86_64"
+
+    @property
+    def _gna_option_available(self):
+        return self.settings.os in ["Linux", "Windows"] and self._target_x86_64 and Version(self.version) < "2024.0.0"
+
+    @property
+    def _gpu_option_available(self):
+        return self.settings.os != "Macos" and self._target_x86_64
+
+    @property
+    def _preprocessing_available(self):
+        return Version(self.version) < "2023.2.0"
+
+    @property
+    def _onnx_version(self):
+        if Version(self.version) == "2023.1.0":
+            return "1.13.1"
+
+    @property
+    def _compilers_minimum_version(self):
+        return {
+            "gcc": "7",
+            "clang": "9",
+            "apple-clang": "11",
+            "Visual Studio": "16",
+            "msvc": "192",
+        }
+
+    def source(self):
+        get(self, **self.conan_data["sources"][self.version]["openvino"], strip_root=True)
+        get(self, **self.conan_data["sources"][self.version]["onednn_cpu"], strip_root=True,
+            destination=f"{self.source_folder}/src/plugins/intel_cpu/thirdparty/onednn")
+        get(self, **self.conan_data["sources"][self.version]["mlas"], strip_root=True,
+            destination=f"{self.source_folder}/src/plugins/intel_cpu/thirdparty/mlas")
+        get(self, **self.conan_data["sources"][self.version]["arm_compute"], strip_root=True,
+            destination=f"{self.source_folder}/src/plugins/intel_cpu/thirdparty/ComputeLibrary")
+        get(self, **self.conan_data["sources"][self.version]["onednn_gpu"], strip_root=True,
+            destination=f"{self.source_folder}/src/plugins/intel_gpu/thirdparty/onednn_gpu")
+        apply_conandata_patches(self)
+
+    def export_sources(self):
+        export_conandata_patches(self)
+
+    def config_options(self):
+        if self.settings.os == "Windows":
+            del self.options.fPIC
+        if not self._gpu_option_available:
+            del self.options.enable_gpu
+
+    def configure(self):
+        suffix = "" if Version(conan_version).major < "2" else "/*"
+        if self.options.shared:
+            self.options.rm_safe("fPIC")
+        if self._protobuf_required:
+            if self.options.shared:
+                # we need to use static protobuf to overcome potential issues with multiple registrations inside
+                # protobuf when frontends (implemented as plugins) are loaded multiple times in runtime
+                self.options[f"protobuf{suffix}"].shared = False
+        # disable GPU plugin when clang is used; the plugin has issues with static variable initialization
+        if self.settings.compiler == "clang" and self.options.get_safe("enable_gpu"):
+            self.options.enable_gpu = False
+
+    def build_requirements(self):
+        if self._target_arm:
+            self.tool_requires("scons/[>=4.2.0]")
+        if cross_building(self):
+            if self._protobuf_required:
+                self.tool_requires("protobuf/<host_version>")
+            if self.options.enable_tf_lite_frontend:
+                self.tool_requires("flatbuffers/<host_version>")
+        if not self.options.shared:
self.tool_requires("cmake/[>=3.18]") + + def requirements(self): + self.requires("onetbb/[>=2021.3.0]") + self.requires("pugixml/[>=1.10]") + if self._target_x86_64: + self.requires("xbyak/[>=6.62]") + if self.options.get_safe("enable_gpu"): + self.requires("opencl-icd-loader/2023.04.17") + if self._protobuf_required: + self.requires("protobuf/3.21.12") + if self.options.enable_tf_frontend: + self.requires("snappy/[>=1.1.7]") + if self.options.enable_onnx_frontend: + self.requires(f"onnx/{self._onnx_version}") + if self.options.enable_tf_lite_frontend: + self.requires("flatbuffers/22.9.24") + if self._preprocessing_available: + self.requires("ade/0.1.2a") + + def layout(self): + cmake_layout(self, src_folder="src") + + def generate(self): + deps = CMakeDeps(self) + deps.generate() + + toolchain = CMakeToolchain(self) + # HW plugins + toolchain.cache_variables["ENABLE_INTEL_CPU"] = self.options.enable_cpu + if self._gpu_option_available: + toolchain.cache_variables["ENABLE_INTEL_GPU"] = self.options.enable_gpu + toolchain.cache_variables["ENABLE_ONEDNN_FOR_GPU"] = self.options.shared + if self._gna_option_available: + toolchain.cache_variables["ENABLE_INTEL_GNA"] = False + # SW plugins + toolchain.cache_variables["ENABLE_AUTO"] = self.options.enable_auto + toolchain.cache_variables["ENABLE_MULTI"] = self.options.enable_auto + toolchain.cache_variables["ENABLE_AUTO_BATCH"] = self.options.enable_auto_batch + toolchain.cache_variables["ENABLE_HETERO"] = self.options.enable_hetero + # Frontends + toolchain.cache_variables["ENABLE_OV_IR_FRONTEND"] = self.options.enable_ir_frontend + toolchain.cache_variables["ENABLE_OV_PADDLE_FRONTEND"] = self.options.enable_paddle_frontend + toolchain.cache_variables["ENABLE_OV_TF_FRONTEND"] = self.options.enable_tf_frontend + toolchain.cache_variables["ENABLE_OV_TF_LITE_FRONTEND"] = self.options.enable_tf_lite_frontend + toolchain.cache_variables["ENABLE_OV_ONNX_FRONTEND"] = self.options.enable_onnx_frontend + toolchain.cache_variables["ENABLE_OV_PYTORCH_FRONTEND"] = self.options.enable_pytorch_frontend + # Dependencies + toolchain.cache_variables["ENABLE_SYSTEM_TBB"] = True + toolchain.cache_variables["ENABLE_TBBBIND_2_5"] = False + toolchain.cache_variables["ENABLE_SYSTEM_PUGIXML"] = True + if self._protobuf_required: + toolchain.cache_variables["ENABLE_SYSTEM_PROTOBUF"] = True + if self.options.enable_tf_frontend: + toolchain.cache_variables["ENABLE_SYSTEM_SNAPPY"] = True + if self.options.enable_tf_lite_frontend: + toolchain.cache_variables["ENABLE_SYSTEM_FLATBUFFERS"] = True + if self.options.get_safe("enable_gpu"): + toolchain.cache_variables["ENABLE_SYSTEM_OPENCL"] = True + # misc + if self._preprocessing_available: + toolchain.cache_variables["ENABLE_GAPI_PREPROCESSING"] = True + toolchain.cache_variables["BUILD_SHARED_LIBS"] = self.options.shared + toolchain.cache_variables["CPACK_GENERATOR"] = "CONAN" + toolchain.cache_variables["ENABLE_PROFILING_ITT"] = False + toolchain.cache_variables["ENABLE_PYTHON"] = False + toolchain.cache_variables["ENABLE_PROXY"] = False + toolchain.cache_variables["ENABLE_WHEEL"] = False + toolchain.cache_variables["ENABLE_CPPLINT"] = False + toolchain.cache_variables["ENABLE_NCC_STYLE"] = False + toolchain.cache_variables["ENABLE_SAMPLES"] = False + toolchain.cache_variables["ENABLE_TEMPLATE"] = False + toolchain.cache_variables["CMAKE_VERBOSE_MAKEFILE"] = True + toolchain.generate() + + def validate_build(self): + if self.settings.compiler.get_safe("cppstd"): + check_min_cppstd(self, "11") + + minimum_version = 
+        compiler_version = Version(self.settings.compiler.version)
+        if minimum_version and compiler_version < minimum_version:
+            raise ConanInvalidConfiguration(
+                f"{self.ref} requires {self.settings.compiler} ver. {minimum_version}, provided ver. {compiler_version}.",
+            )
+
+        # OpenVINO has unresolved symbols when clang is used with libc++
+        if self.settings.compiler == "clang" and stdcpp_library(self) == "libc++":
+            raise ConanInvalidConfiguration(
+                f"{self.ref} cannot be built with clang and libc++ due to unresolved symbols. "
+                f"Please, use libstdc++ instead."
+            )
+
+        if self.settings.os == "Emscripten":
+            raise ConanInvalidConfiguration(f"{self.ref} does not support Emscripten")
+
+        # TODO: resolve it later, since it is not critical for now
+        # Conan Center CI fails with an out of memory error when building OpenVINO
+        if self.settings.build_type == "Debug":
+            raise ConanInvalidConfiguration(f"{self.ref} does not support Debug build type")
+
+    def validate(self):
+        # dynamically compiled frontends cannot be run against dynamic protobuf
+        if self._protobuf_required and self.options.shared and self.dependencies["protobuf"].options.shared:
+            raise ConanInvalidConfiguration(f"{self.ref}:shared=True requires protobuf:shared=False to work correctly.")
+
+        if self.options.get_safe("enable_gpu"):
+            # GPU does not support oneDNN in static build configuration, warn about it
+            if not self.options.shared:
+                self.output.warning(f"{self.name} recipe builds GPU plugin without oneDNN (dGPU) support during static build.")
+
+            # GPU plugin is currently not stable when compiled with clang compiler
+            if self.settings.compiler == "clang":
+                raise ConanInvalidConfiguration(
+                    "GPU plugin cannot be built with clang compiler. "
+ "Please, disable GPU plugin (openvino/*:enable_gpu=False) or change the compiler" + ) + + def build(self): + cmake = CMake(self) + cmake.configure() + for target in ["ov_frontends", "ov_plugins", "openvino_c"]: + cmake.build(target=target) + + def package(self): + cmake = CMake(self) + cmake.install() + # remove cmake and .pc files, since they will be generated later by Conan itself in package_info() + rmdir(self, os.path.join(self.package_folder, "share")) + rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) + + def package_info(self): + self.cpp_info.set_property("cmake_find_mode", "config") + self.cpp_info.set_property("cmake_file_name", "OpenVINO") + self.cpp_info.set_property("pkg_config_name", "openvino") + + openvino_runtime = self.cpp_info.components["Runtime"] + openvino_runtime.set_property("cmake_target_name", "openvino::runtime") + openvino_runtime.requires = ["onetbb::libtbb", "pugixml::pugixml"] + openvino_runtime.libs = ["openvino_c", "openvino"] + if self._preprocessing_available: + openvino_runtime.requires.append("ade::ade") + if self._target_x86_64: + openvino_runtime.requires.append("xbyak::xbyak") + if self.settings.os in ["Linux", "Android", "FreeBSD", "SunOS", "AIX"]: + openvino_runtime.system_libs = ["m", "dl", "pthread"] + if self.settings.os == "Windows": + openvino_runtime.system_libs.append("shlwapi") + if self._preprocessing_available: + openvino_runtime.system_libs.extend(["wsock32", "ws2_32"]) + + # Have to expose all internal libraries for static libraries case + if not self.options.shared: + # HW plugins + if self.options.enable_cpu: + openvino_runtime.libs.append("openvino_arm_cpu_plugin" if self._target_arm else \ + "openvino_intel_cpu_plugin") + openvino_runtime.libs.extend(["openvino_onednn_cpu", "openvino_snippets", "mlas"]) + if self._target_arm: + openvino_runtime.libs.append("arm_compute-static") + if self.options.get_safe("enable_gpu"): + openvino_runtime.libs.extend(["openvino_intel_gpu_plugin", "openvino_intel_gpu_graph", + "openvino_intel_gpu_runtime", "openvino_intel_gpu_kernels"]) + # SW plugins + if self.options.enable_auto: + openvino_runtime.libs.append("openvino_auto_plugin") + if self.options.enable_hetero: + openvino_runtime.libs.append("openvino_hetero_plugin") + if self.options.enable_auto_batch: + openvino_runtime.libs.append("openvino_auto_batch_plugin") + # Preprocessing should come after plugins, because plugins depend on it + if self._preprocessing_available: + openvino_runtime.libs.extend(["openvino_gapi_preproc", "fluid"]) + # Frontends + if self.options.enable_ir_frontend: + openvino_runtime.libs.append("openvino_ir_frontend") + if self.options.enable_onnx_frontend: + openvino_runtime.libs.extend(["openvino_onnx_frontend", "openvino_onnx_common"]) + openvino_runtime.requires.extend(["protobuf::libprotobuf", "onnx::onnx"]) + if self.options.enable_tf_frontend: + openvino_runtime.libs.extend(["openvino_tensorflow_frontend", "openvino_tensorflow_common"]) + openvino_runtime.requires.extend(["protobuf::libprotobuf", "snappy::snappy"]) + if self.options.enable_tf_lite_frontend: + openvino_runtime.libs.extend(["openvino_tensorflow_lite_frontend", "openvino_tensorflow_common"]) + openvino_runtime.requires.extend(["flatbuffers::flatbuffers"]) + if self.options.enable_paddle_frontend: + openvino_runtime.libs.append("openvino_paddle_frontend") + openvino_runtime.requires.append("protobuf::libprotobuf") + if self.options.enable_pytorch_frontend: + openvino_runtime.libs.append("openvino_pytorch_frontend") + # Common 
+            # Common private dependencies should go last, because they satisfy dependencies for all other libraries
+            openvino_runtime.libs.extend(["openvino_reference", "openvino_builders",
+                                          "openvino_shape_inference", "openvino_itt",
+                                          # utils goes last since all others depend on it
+                                          "openvino_util"])
+            # set 'openvino' once again for transformations object files (cyclic dependency)
+            # openvino_runtime.libs.append("openvino")
+            full_openvino_lib_path = os.path.join(self.package_folder, "lib", "openvino.lib").replace("\\", "/") if self.settings.os == "Windows" else \
+                                     os.path.join(self.package_folder, "lib", "libopenvino.a")
+            openvino_runtime.system_libs.insert(0, full_openvino_lib_path)
+            # Add definition to prevent symbol imports
+            openvino_runtime.defines = ["OPENVINO_STATIC_LIBRARY"]
+
+        if self.options.get_safe("enable_gpu"):
+            openvino_runtime.requires.append("opencl-icd-loader::opencl-icd-loader")
+            if self.settings.os == "Windows":
+                openvino_runtime.system_libs.append("setupapi")
+
+        if self.options.enable_onnx_frontend:
+            openvino_onnx = self.cpp_info.components["ONNX"]
+            openvino_onnx.set_property("cmake_target_name", "openvino::frontend::onnx")
+            openvino_onnx.libs = ["openvino_onnx_frontend"]
+            openvino_onnx.requires = ["Runtime", "onnx::onnx", "protobuf::libprotobuf"]
+
+        if self.options.enable_paddle_frontend:
+            openvino_paddle = self.cpp_info.components["Paddle"]
+            openvino_paddle.set_property("cmake_target_name", "openvino::frontend::paddle")
+            openvino_paddle.libs = ["openvino_paddle_frontend"]
+            openvino_paddle.requires = ["Runtime", "protobuf::libprotobuf"]
+
+        if self.options.enable_tf_frontend:
+            openvino_tensorflow = self.cpp_info.components["TensorFlow"]
+            openvino_tensorflow.set_property("cmake_target_name", "openvino::frontend::tensorflow")
+            openvino_tensorflow.libs = ["openvino_tensorflow_frontend"]
+            openvino_tensorflow.requires = ["Runtime", "protobuf::libprotobuf", "snappy::snappy"]
+
+        if self.options.enable_pytorch_frontend:
+            openvino_pytorch = self.cpp_info.components["PyTorch"]
+            openvino_pytorch.set_property("cmake_target_name", "openvino::frontend::pytorch")
+            openvino_pytorch.libs = ["openvino_pytorch_frontend"]
+            openvino_pytorch.requires = ["Runtime"]
+
+        if self.options.enable_tf_lite_frontend:
+            openvino_tensorflow_lite = self.cpp_info.components["TensorFlowLite"]
+            openvino_tensorflow_lite.set_property("cmake_target_name", "openvino::frontend::tensorflow_lite")
+            openvino_tensorflow_lite.libs = ["openvino_tensorflow_lite_frontend"]
+            openvino_tensorflow_lite.requires = ["Runtime", "flatbuffers::flatbuffers"]
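
Note: for downstream projects, the components declared above surface as CMake targets (openvino::runtime, plus openvino::frontend::onnx and the other frontends). A minimal consumer conanfile.py could look like the following sketch; the class name and the option tweak are illustrative, only the openvino reference and target names come from the recipe itself:

    # Hypothetical consumer conanfile.py; assumes openvino/2023.1.0 built from
    # the recipe above is available in the local Conan cache.
    from conan import ConanFile
    from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout

    class InferenceAppConan(ConanFile):
        settings = "os", "arch", "compiler", "build_type"
        requires = "openvino/2023.1.0"
        # dependency options can be tuned per consumer, e.g. drop an unused frontend
        default_options = {"openvino/*:enable_paddle_frontend": False}

        def layout(self):
            cmake_layout(self)

        def generate(self):
            CMakeDeps(self).generate()       # emits OpenVINOConfig.cmake per "cmake_file_name" above
            CMakeToolchain(self).generate()

        def build(self):
            cmake = CMake(self)
            cmake.configure()
            cmake.build()

On the CMake side this pairs with find_package(OpenVINO REQUIRED) and target_link_libraries(app PRIVATE openvino::runtime), mirroring the test_package below.
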
diff --git a/recipes/openvino/all/patches/2023.1.0/0001-cpu-plugin-compilation-c++17.patch b/recipes/openvino/all/patches/2023.1.0/0001-cpu-plugin-compilation-c++17.patch
new file mode 100644
index 00000000000000..da6548b7c91159
--- /dev/null
+++ b/recipes/openvino/all/patches/2023.1.0/0001-cpu-plugin-compilation-c++17.patch
@@ -0,0 +1,16 @@
+diff --git a/src/plugins/intel_cpu/src/cpu_tensor.cpp b/src/plugins/intel_cpu/src/cpu_tensor.cpp
+index 48d8fdd4be..815edd9309 100644
+--- a/src/plugins/intel_cpu/src/cpu_tensor.cpp
++++ b/src/plugins/intel_cpu/src/cpu_tensor.cpp
+@@ -68,8 +68,9 @@ void Tensor::update_strides() const {
+     OPENVINO_ASSERT(blocked_desc, "not a valid blocked memory descriptor.");
+     auto& strides = blocked_desc->getStrides();
+     m_strides.resize(strides.size());
+-    std::transform(strides.cbegin(), strides.cend(), m_strides.begin(),
+-                   std::bind1st(std::multiplies<size_t>(), m_element_type.size()));
++    std::transform(strides.cbegin(), strides.cend(), m_strides.begin(), [this] (const size_t stride) {
++        return stride * m_element_type.size();
++    });
+ }
+ 
+ void* Tensor::data(const element::Type& element_type) const {
diff --git a/recipes/openvino/all/patches/2023.1.0/0002-core-compilation-c++17.patch b/recipes/openvino/all/patches/2023.1.0/0002-core-compilation-c++17.patch
new file mode 100644
index 00000000000000..2df86d8ed0ec68
--- /dev/null
+++ b/recipes/openvino/all/patches/2023.1.0/0002-core-compilation-c++17.patch
@@ -0,0 +1,46 @@
+diff --git a/src/frontends/common/src/utils.cpp b/src/frontends/common/src/utils.cpp
+index 8ef7481551..3a0db585fd 100644
+--- a/src/frontends/common/src/utils.cpp
++++ b/src/frontends/common/src/utils.cpp
+@@ -12,10 +12,6 @@
+ #    include <dlfcn.h>
+ #    include <limits.h>
+ #    include <unistd.h>
+-#    ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
+-#        include <codecvt>
+-#        include <locale>
+-#    endif
+ #else
+ #    if defined(WINAPI_FAMILY) && !WINAPI_PARTITION_DESKTOP
+ #        error "Only WINAPI_PARTITION_DESKTOP is supported, because of GetModuleHandleEx[A|W]"
+diff --git a/src/frontends/paddle/src/input_model.cpp b/src/frontends/paddle/src/input_model.cpp
+index 6987b3285e..287fa5e54a 100644
+--- a/src/frontends/paddle/src/input_model.cpp
++++ b/src/frontends/paddle/src/input_model.cpp
+@@ -13,14 +13,10 @@
+ #include "openvino/frontend/paddle/node_context.hpp"
+ #include "openvino/opsets/opset7.hpp"
+ #include "openvino/util/common_util.hpp"
++#include "openvino/util/file_util.hpp"
+ #include "paddle_utils.hpp"
+ #include "place.hpp"
+ 
+-#if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
+-#    include <codecvt>
+-#    include <locale>
+-#endif
+-
+ namespace ov {
+ namespace frontend {
+ namespace paddle {
+@@ -169,9 +165,7 @@ std::basic_string<T> get_const_path(const std::basic_string<T>& folder_with_weig
+ #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
+ template <>
+ std::basic_string<wchar_t> get_const_path(const std::basic_string<wchar_t>& folder, const std::string& name) {
+-    std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
+-    std::wstring _name = converter.from_bytes(name);
+-    return folder + paddle::get_path_sep<wchar_t>() + _name;
++    return folder + paddle::get_path_sep<wchar_t>() + ov::util::string_to_wstring(name);
+ }
+ #endif
+ 
diff --git a/recipes/openvino/all/patches/2023.1.0/0003-gpu-plugin-compilation-with-conan-opencl.patch b/recipes/openvino/all/patches/2023.1.0/0003-gpu-plugin-compilation-with-conan-opencl.patch
new file mode 100644
index 00000000000000..993f7469c81c39
--- /dev/null
+++ b/recipes/openvino/all/patches/2023.1.0/0003-gpu-plugin-compilation-with-conan-opencl.patch
@@ -0,0 +1,44 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 461f1a209c..0b552b3da3 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -47,6 +47,7 @@ message (STATUS "CMAKE_GENERATOR ....................... " ${CMAKE_GENERATOR})
+ message (STATUS "CPACK_GENERATOR ....................... " ${CPACK_GENERATOR})
+ message (STATUS "CMAKE_C_COMPILER_ID ................... " ${CMAKE_C_COMPILER_ID})
+ message (STATUS "CMAKE_CXX_COMPILER_ID ................. " ${CMAKE_CXX_COMPILER_ID})
++message (STATUS "CMAKE_CXX_STANDARD .................... " ${CMAKE_CXX_STANDARD})
+ if(OV_GENERATOR_MULTI_CONFIG)
+     string(REPLACE ";" " " config_types "${CMAKE_CONFIGURATION_TYPES}")
+     message (STATUS "CMAKE_CONFIGURATION_TYPES ............. " ${config_types})
" ${config_types}) +diff --git a/src/plugins/intel_gpu/thirdparty/CMakeLists.txt b/src/plugins/intel_gpu/thirdparty/CMakeLists.txt +index c7c616d0df..7a3132d8b1 100644 +--- a/src/plugins/intel_gpu/thirdparty/CMakeLists.txt ++++ b/src/plugins/intel_gpu/thirdparty/CMakeLists.txt +@@ -62,6 +62,9 @@ if(ENABLE_ONEDNN_FOR_GPU) + list(APPEND cmake_extra_args "-DCMAKE_CONFIGURATION_TYPES=${CMAKE_DEFAULT_BUILD_TYPE}") + list(APPEND cmake_extra_args "-DCMAKE_DEFAULT_BUILD_TYPE=${CMAKE_DEFAULT_BUILD_TYPE}") + endif() ++ # sometimes $ is evaluated as real build type even for non-multi-config generators ++ # so, have to put under OV_GENERATOR_MULTI_CONFIG (example: docker pull conanio/gcc11-ubuntu16.04:latest) ++ list(APPEND cmake_config "$") + else() + list(APPEND cmake_extra_args "-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}") + endif() +@@ -114,7 +117,7 @@ if(ENABLE_ONEDNN_FOR_GPU) + "-DDNNL_GPU_RUNTIME=OCL" + "-DDNNL_LIBRARY_NAME=openvino_onednn_gpu" + "-DCMAKE_INSTALL_PREFIX=${ONEDNN_INSTALL_DIR}" +- "-DCMAKE_INSTALL_LIBDIR=lib/$" ++ "-DCMAKE_INSTALL_LIBDIR=lib/${cmake_config}" + "-DDNNL_ENABLE_CONCURRENT_EXEC=ON" + "-DDNNL_ENABLE_PRIMITIVE_CACHE=OFF" + "-DDNNL_ENABLE_WORKLOAD=INFERENCE" +@@ -132,7 +135,7 @@ if(ENABLE_ONEDNN_FOR_GPU) + ) + add_library(onednn_gpu_tgt INTERFACE) + set_target_properties(onednn_gpu_tgt PROPERTIES +- INTERFACE_LINK_DIRECTORIES "${ONEDNN_INSTALL_DIR}/lib/$" ++ INTERFACE_LINK_DIRECTORIES "${ONEDNN_INSTALL_DIR}/lib/${cmake_config}" + INTERFACE_LINK_LIBRARIES "openvino_onednn_gpu" + INTERFACE_INCLUDE_DIRECTORIES "${ONEDNN_INSTALL_DIR}/include" + INTERFACE_COMPILE_DEFINITIONS ENABLE_ONEDNN_FOR_GPU diff --git a/recipes/openvino/all/test_package/CMakeLists.txt b/recipes/openvino/all/test_package/CMakeLists.txt new file mode 100644 index 00000000000000..032463e0d1754a --- /dev/null +++ b/recipes/openvino/all/test_package/CMakeLists.txt @@ -0,0 +1,48 @@ +cmake_minimum_required(VERSION 3.15) +project(test_package LANGUAGES CXX) + +macro(add_component def comp) + if(${def}) + list(APPEND OpenVINO_COMPONENTS ${comp}) + endif() +endmacro() + +add_component(ENABLE_ONNX_FRONTEND ONNX) +add_component(ENABLE_PADDLE_FRONTEND Paddle) +add_component(ENABLE_TF_FRONTEND TensorFlow) +add_component(ENABLE_TF_LITE_FRONTEND TensorFlowLite) +add_component(ENABLE_PYTORCH_FRONTEND PyTroch) + +find_package(OpenVINO REQUIRED COMPONENTS Runtime ${OpenVINO_COMPONENTS}) + +add_executable(${PROJECT_NAME} test_package.cpp) +target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime) +target_compile_features(${PROJECT_NAME} PRIVATE cxx_std_11) + +function(add_plugin def) + if(${def}) + target_compile_definitions(${PROJECT_NAME} PRIVATE ${def}) + endif() +endfunction() + +add_plugin(ENABLE_INTEL_CPU) +add_plugin(ENABLE_INTEL_GPU) +add_plugin(ENABLE_AUTO) +add_plugin(ENABLE_HETERO) +add_plugin(ENABLE_AUTO_BATCH) + +function(add_frontend def frontend) + if(${def}) + target_compile_definitions(${PROJECT_NAME} PRIVATE ${def}) + if(NOT TARGET openvino::frontend::${frontend}) + message(FATAL_ERROR "Target for ${frontend} has not been created") + endif() + endif() +endfunction() + +add_plugin(ENABLE_IR_FRONTEND) +add_frontend(ENABLE_ONNX_FRONTEND onnx) +add_frontend(ENABLE_PADDLE_FRONTEND paddle) +add_frontend(ENABLE_TF_FRONTEND tensorflow) +add_frontend(ENABLE_TF_LITE_FRONTEND tensorflow_lite) +add_frontend(ENABLE_PYTORCH_FRONTEND pytorch) diff --git a/recipes/openvino/all/test_package/conanfile.py b/recipes/openvino/all/test_package/conanfile.py new file mode 100644 index 00000000000000..2cbd00abf5b8b9 --- 
+++ b/recipes/openvino/all/test_package/conanfile.py
@@ -0,0 +1,49 @@
+from conan import ConanFile
+from conan.tools.build import can_run
+from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
+import os
+
+
+class TestPackageConan(ConanFile):
+    settings = "os", "arch", "compiler", "build_type"
+    generators = "VirtualRunEnv"
+    test_type = "explicit"
+
+    def layout(self):
+        cmake_layout(self)
+
+    def requirements(self):
+        self.requires(self.tested_reference_str)
+
+    def generate(self):
+        deps = CMakeDeps(self)
+        # deps.check_components_exist = True
+        deps.generate()
+
+        tc = CMakeToolchain(self)
+        tc.variables["CMAKE_VERBOSE_MAKEFILE"] = True
+        # HW plugins
+        tc.variables["ENABLE_INTEL_CPU"] = self.dependencies[self.tested_reference_str].options.enable_cpu
+        tc.variables["ENABLE_INTEL_GPU"] = self.dependencies[self.tested_reference_str].options.get_safe("enable_gpu", False)
+        # SW plugins
+        tc.variables["ENABLE_AUTO"] = self.dependencies[self.tested_reference_str].options.enable_auto
+        tc.variables["ENABLE_HETERO"] = self.dependencies[self.tested_reference_str].options.enable_hetero
+        tc.variables["ENABLE_AUTO_BATCH"] = self.dependencies[self.tested_reference_str].options.enable_auto_batch
+        # Frontends
+        tc.variables["ENABLE_IR_FRONTEND"] = self.dependencies[self.tested_reference_str].options.enable_ir_frontend
+        tc.variables["ENABLE_ONNX_FRONTEND"] = self.dependencies[self.tested_reference_str].options.enable_onnx_frontend
+        tc.variables["ENABLE_TF_FRONTEND"] = self.dependencies[self.tested_reference_str].options.enable_tf_frontend
+        tc.variables["ENABLE_TF_LITE_FRONTEND"] = self.dependencies[self.tested_reference_str].options.enable_tf_lite_frontend
+        tc.variables["ENABLE_PADDLE_FRONTEND"] = self.dependencies[self.tested_reference_str].options.enable_paddle_frontend
+        tc.variables["ENABLE_PYTORCH_FRONTEND"] = self.dependencies[self.tested_reference_str].options.enable_pytorch_frontend
+        tc.generate()
+
+    def build(self):
+        cmake = CMake(self)
+        cmake.configure()
+        cmake.build()
+
+    def test(self):
+        if can_run(self):
+            bin_path = os.path.join(self.cpp.build.bindirs[0], "test_package")
+            self.run(bin_path, env="conanrun")
diff --git a/recipes/openvino/all/test_package/test_package.cpp b/recipes/openvino/all/test_package/test_package.cpp
new file mode 100644
index 00000000000000..b711dc4ef2be49
--- /dev/null
+++ b/recipes/openvino/all/test_package/test_package.cpp
@@ -0,0 +1,116 @@
+#include <openvino/c/openvino.h>
+#include <openvino/frontend/manager.hpp>
+#include <openvino/openvino.hpp>
+
+#include <iostream>
+
+#define OV_SUCCESS(statement) \
+    if ((statement) != 0)     \
+        return 1;
+
+#define OV_FAIL(statement) \
+    if ((statement) == 0)  \
+        return 1;
+
+int test_available_devices() {
+    ov_core_t* core = NULL;
+    char* ret = NULL;
+    OV_SUCCESS(ov_core_create(&core));
+#ifdef ENABLE_INTEL_CPU
+    OV_SUCCESS(ov_core_get_property(core, "CPU", "AVAILABLE_DEVICES", &ret));
+    std::cout << "CPU has passed with SUCCESS" << std::endl;
+#else
+    OV_FAIL(ov_core_get_property(core, "CPU", "AVAILABLE_DEVICES", &ret));
+    std::cout << "CPU has passed with FAIL" << std::endl;
+#endif
+#ifdef ENABLE_INTEL_GPU
+    OV_SUCCESS(ov_core_get_property(core, "GPU", "AVAILABLE_DEVICES", &ret));
+    std::cout << "GPU has passed with SUCCESS" << std::endl;
+#else
+    OV_FAIL(ov_core_get_property(core, "GPU", "AVAILABLE_DEVICES", &ret));
+    std::cout << "GPU has passed with FAIL" << std::endl;
+#endif
+#ifdef ENABLE_AUTO
+    OV_SUCCESS(ov_core_get_property(core, "AUTO", "SUPPORTED_METRICS", &ret));
+    OV_SUCCESS(ov_core_get_property(core, "MULTI", "SUPPORTED_METRICS", &ret));
+    std::cout << "AUTO / MULTI has passed with SUCCESS" << std::endl;
<< "AUTO / MULTI has passed with SUCCESS" << std::endl; +#else + OV_FAIL(ov_core_get_property(core, "AUTO", "SUPPORTED_METRICS", &ret)); + OV_FAIL(ov_core_get_property(core, "MULTI", "SUPPORTED_METRICS", &ret)); + std::cout << "AUTO / MULTI has passed with FAIL" << std::endl; +#endif +#ifdef ENABLE_HETERO + OV_SUCCESS(ov_core_get_property(core, "HETERO", "SUPPORTED_METRICS", &ret)); + std::cout << "HETERO has passed with SUCCESS" << std::endl; +#else + OV_FAIL(ov_core_get_property(core, "HETERO", "SUPPORTED_METRICS", &ret)); + std::cout << "HETERO has passed with FAIL" << std::endl; +#endif +#ifdef ENABLE_AUTO_BATCH + OV_SUCCESS(ov_core_get_property(core, "BATCH", "SUPPORTED_METRICS", &ret)); + std::cout << "BATCH has passed with SUCCESS" << std::endl; +#else + OV_FAIL(ov_core_get_property(core, "BATCH", "SUPPORTED_METRICS", &ret)); + std::cout << "BATCH has passed with FAIL" << std::endl; +#endif + ov_core_free(core); + return 0; +} + +int test_available_frontends() { + ov::frontend::FrontEndManager manager; + auto frontend_found = [&] (const std::string & name) -> int { + try { + manager.load_by_framework(name); + std::cout << name << " has passed with SUCCESS" << std::endl; + } catch (const std::exception & e) { + std::cout << name << " has passed with FAIL" << std::endl; + return 1; + } + return 0; + }; + +#ifdef ENABLE_IR_FRONTEND + OV_SUCCESS(frontend_found("ir")); +#else + OV_FAIL(frontend_found("ir")); +#endif +#ifdef ENABLE_TF_LITE_FRONTEND + OV_SUCCESS(frontend_found("tflite")); +#else + OV_FAIL(frontend_found("tflite")); +#endif +#ifdef ENABLE_PYTORCH_FRONTEND + OV_SUCCESS(frontend_found("pytorch")); +#else + OV_FAIL(frontend_found("pytorch")); +#endif +#ifdef ENABLE_ONNX_FRONTEND + OV_SUCCESS(frontend_found("onnx")); +#else + OV_FAIL(frontend_found("onnx")); +#endif +#ifdef ENABLE_TF_FRONTEND + OV_SUCCESS(frontend_found("tf")); +#else + OV_FAIL(frontend_found("tf")); +#endif +#ifdef ENABLE_PADDLE_FRONTEND + OV_SUCCESS(frontend_found("paddle")); +#else + OV_FAIL(frontend_found("paddle")); +#endif + return 0; +} + +int main() { + OV_SUCCESS(test_available_devices()); + OV_SUCCESS(test_available_frontends()); + + // Deinitialize OpenVINO. Important for old systems like Ubuntu 16.04 with obsolete glibc, + // where application deinit can lead to the following issue on exit: + // Inconsistency detected by ld.so: dl-close.c: 811: _dl_close: Assertion `map->l_init_called' failed! + ov::shutdown(); + + return 0; +} diff --git a/recipes/openvino/config.yml b/recipes/openvino/config.yml new file mode 100644 index 00000000000000..ab4b77f9729467 --- /dev/null +++ b/recipes/openvino/config.yml @@ -0,0 +1,3 @@ +versions: + "2023.1.0": + folder: "all"