Skip to content

Commit

Permalink
Fix
Browse files — browse the repository at this point in the history
  • Loading branch information
ilya-lavrenov committed Oct 24, 2024
1 parent 810a636 commit 5d48d13
Show file tree
Hide file tree
Showing 12 changed files with 14 additions and 88 deletions.
2 changes: 1 addition & 1 deletion conan.lock
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"opencl-icd-loader/2023.04.17#5f73dd9f0c023d416a7f162e320b9c77%1692732261.088",
"opencl-headers/2023.04.17#3d98f2d12a67c2400de6f11d5335b5a6%1683936272.16",
"opencl-clhpp-headers/2023.04.17#7c62fcc7ac2559d4839150d2ebaac5c8%1685450803.672",
"onnx/1.16.0#4d2d4f24d6f73b8a7551e001839631f0%1712404811.278",
"onnx/1.16.2#b5e8d35b10d454b26751762922465eb8%1712404811.278",
"onetbb/2021.10.0#cbb2fc43088070b48f6e4339bc8fa0e1%1693812561.235",
"ittapi/3.24.0#9246125f13e7686dee2b0c992b71db94%1682969872.743",
"hwloc/2.9.2#1c63e2eccac57048ae226e6c946ebf0e%1688677682.002",
Expand Down
2 changes: 1 addition & 1 deletion conanfile.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ opencl-icd-loader/[>=2023.04.17]
rapidjson/[>=1.1.0]
xbyak/[>=6.62]
snappy/[>=1.1.7]
onnx/1.16.0
onnx/1.16.2
pybind11/[>=2.12.0]
flatbuffers/[>=22.9.24]

Expand Down
14 changes: 0 additions & 14 deletions src/frontends/onnx/frontend/src/core/tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,18 +82,11 @@ std::vector<int8_t> Tensor::get_data() const {
if (m_tensor_proto->has_raw_data()) {
return detail::__get_raw_data<int8_t>(m_tensor_proto->raw_data(), m_tensor_proto->data_type());
}
#ifdef ONNX_VERSION_116
if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT8 ||
m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT4) {
return detail::__get_data<int8_t>(m_tensor_proto->int32_data());
}
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT4, INT8, raw data");
#else
if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT8) {
return detail::__get_data<int8_t>(m_tensor_proto->int32_data());
}
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT8, raw data");
#endif
}

template <>
Expand Down Expand Up @@ -146,18 +139,11 @@ std::vector<uint8_t> Tensor::get_data() const {
if (m_tensor_proto->has_raw_data()) {
return detail::__get_raw_data<uint8_t>(m_tensor_proto->raw_data(), m_tensor_proto->data_type());
}
#ifdef ONNX_VERSION_116
if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT8 ||
m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT4) {
return detail::__get_data<uint8_t>(m_tensor_proto->int32_data());
}
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT4, UINT8, raw data");
#else
if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT8) {
return detail::__get_data<uint8_t>(m_tensor_proto->int32_data());
}
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT8, raw data");
#endif
}

template <>
Expand Down
17 changes: 0 additions & 17 deletions src/frontends/onnx/frontend/src/core/tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,10 +65,8 @@ class Tensor {
enum class Type {
undefined = TensorProto_DataType::TensorProto_DataType_UNDEFINED,
float32 = TensorProto_DataType::TensorProto_DataType_FLOAT,
#ifdef ONNX_VERSION_116
uint4 = TensorProto_DataType::TensorProto_DataType_UINT4,
int4 = TensorProto_DataType::TensorProto_DataType_INT4,
#endif
uint8 = TensorProto_DataType::TensorProto_DataType_UINT8,
int8 = TensorProto_DataType::TensorProto_DataType_INT8,
uint16 = TensorProto_DataType::TensorProto_DataType_UINT16,
Expand Down Expand Up @@ -146,10 +144,8 @@ class Tensor {
return ov::element::f16;
case TensorProto_DataType::TensorProto_DataType_DOUBLE:
return ov::element::f64;
#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_INT4:
return ov::element::i4;
#endif
case TensorProto_DataType::TensorProto_DataType_INT8:
return ov::element::i8;
case TensorProto_DataType::TensorProto_DataType_INT16:
Expand All @@ -158,10 +154,8 @@ class Tensor {
return ov::element::i32;
case TensorProto_DataType::TensorProto_DataType_INT64:
return ov::element::i64;
#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_UINT4:
return ov::element::u4;
#endif
case TensorProto_DataType::TensorProto_DataType_UINT8:
return ov::element::u8;
case TensorProto_DataType::TensorProto_DataType_UINT16:
Expand Down Expand Up @@ -205,10 +199,8 @@ class Tensor {
return make_ov_constant<ov::float16>(ov::element::f16);
case TensorProto_DataType::TensorProto_DataType_DOUBLE:
return make_ov_constant<double>(ov::element::f64);
#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_INT4:
return make_ov_constant<int8_t>(ov::element::i4);
#endif
case TensorProto_DataType::TensorProto_DataType_INT8:
return make_ov_constant<int8_t>(ov::element::i8);
case TensorProto_DataType::TensorProto_DataType_INT16:
Expand All @@ -217,10 +209,8 @@ class Tensor {
return make_ov_constant<int32_t>(ov::element::i32);
case TensorProto_DataType::TensorProto_DataType_INT64:
return make_ov_constant<int64_t>(ov::element::i64);
#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_UINT4:
return make_ov_constant<uint8_t>(ov::element::u4);
#endif
case TensorProto_DataType::TensorProto_DataType_UINT8:
return make_ov_constant<uint8_t>(ov::element::u8);
case TensorProto_DataType::TensorProto_DataType_UINT16:
Expand All @@ -238,17 +228,10 @@ class Tensor {
case TensorProto_DataType::TensorProto_DataType_STRING:
return make_ov_constant<std::string>(ov::element::string);
default:
#ifdef ONNX_VERSION_116
ONNX_UNSUPPORTED_DATA_TYPE(
m_tensor_proto->data_type(),
"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, "
"UINT4, UINT8, UINT16, UINT32, UINT64, STRING");
#else
ONNX_UNSUPPORTED_DATA_TYPE(
m_tensor_proto->data_type(),
"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT8, INT16, INT32, INT64, "
"UINT8, UINT16, UINT32, UINT64, STRING");
#endif
}
}

Expand Down
4 changes: 2 additions & 2 deletions src/frontends/onnx/frontend/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
// SPDX-License-Identifier: Apache-2.0
//

#include <google/protobuf/runtime_version.h>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION >= 4022000 // protobuf 4.22
# define OV_PROTOBUF_ABSL_IS_USED
#endif
#include <google/protobuf/port_undef.inc>

#ifdef OV_PROTOBUF_ABSL_IS_USED
# include <absl/log/globals.h>
Expand Down
10 changes: 0 additions & 10 deletions src/frontends/onnx/frontend/src/utils/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,8 @@ const ov::element::Type& get_ov_element_type(int64_t onnx_type) {
return ov::element::f16;
case TensorProto_DataType::TensorProto_DataType_FLOAT:
return ov::element::f32;
#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_INT4:
return ov::element::i4;
#endif
case TensorProto_DataType::TensorProto_DataType_INT8:
return ov::element::i8;
case TensorProto_DataType::TensorProto_DataType_INT16:
Expand All @@ -54,10 +52,8 @@ const ov::element::Type& get_ov_element_type(int64_t onnx_type) {
return ov::element::i32;
case TensorProto_DataType::TensorProto_DataType_INT64:
return ov::element::i64;
#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_UINT4:
return ov::element::u4;
#endif
case TensorProto_DataType::TensorProto_DataType_UINT8:
return ov::element::u8;
case TensorProto_DataType::TensorProto_DataType_UINT16:
Expand All @@ -77,15 +73,9 @@ const ov::element::Type& get_ov_element_type(int64_t onnx_type) {
case TensorProto_DataType::TensorProto_DataType_STRING:
return ov::element::string;
}
#ifdef ONNX_VERSION_116
ONNX_UNSUPPORTED_DATA_TYPE(onnx_type,
"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, "
"INT32, INT64, UINT4, UINT8, UINT16, UINT32, UINT64, STRING, UNDEFINED");
#else
ONNX_UNSUPPORTED_DATA_TYPE(onnx_type,
"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT8, INT16, "
"INT32, INT64, UINT8, UINT16, UINT32, UINT64, STRING, UNDEFINED");
#endif
}

void default_op_checks(const Node& node, size_t min_inputs_size) {
Expand Down
15 changes: 0 additions & 15 deletions src/frontends/onnx/onnx_common/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -35,18 +35,3 @@ ov_link_system_libraries(${TARGET_NAME} PUBLIC onnx_proto onnx)
ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE})

# Temporary workaround until vcpkg ships a recent enough ONNX:
# detect the ONNX version in use so modern functionality can be enabled.
find_package(ONNX 1.16.0 QUIET COMPONENTS onnx onnx_proto NO_MODULE)
if(ONNX_FOUND)
    # A package-manager-provided ONNX >= 1.16.0 was found.
    target_compile_definitions(${TARGET_NAME} PUBLIC ONNX_VERSION_116)
else()
    # Fall back to the vendored copy: read its VERSION_NUMBER file.
    if(EXISTS "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER")
        file(READ "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER" ONNX_VERSION)
        string(STRIP "${ONNX_VERSION}" ONNX_VERSION)
        # VERSION_GREATER_EQUAL does a component-wise version comparison;
        # plain GREATER/EQUAL are numeric-only and silently evaluate false
        # on dotted version strings such as "1.16.0".
        if(ONNX_VERSION VERSION_GREATER_EQUAL "1.16.0")
            target_compile_definitions(${TARGET_NAME} PUBLIC ONNX_VERSION_116)
        endif()
    endif()
endif()
8 changes: 0 additions & 8 deletions src/frontends/onnx/onnx_common/src/utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,8 @@ size_t get_onnx_data_size(int32_t onnx_type) {
return sizeof(ov::float8_e4m3);
case TensorProto_DataType_FLOAT8E5M2:
return sizeof(ov::float8_e5m2);
#ifdef ONNX_VERSION_116
case TensorProto_DataType_INT4:
return sizeof(int8_t);
#endif
case TensorProto_DataType_INT8:
return sizeof(int8_t);
case TensorProto_DataType_INT16:
Expand All @@ -42,10 +40,8 @@ size_t get_onnx_data_size(int32_t onnx_type) {
return sizeof(int32_t);
case TensorProto_DataType_INT64:
return sizeof(int64_t);
#ifdef ONNX_VERSION_116
case TensorProto_DataType_UINT4:
return sizeof(uint8_t);
#endif
case TensorProto_DataType_UINT8:
return sizeof(uint8_t);
case TensorProto_DataType_UINT16:
Expand All @@ -66,16 +62,12 @@ const std::map<ov::element::Type_t, TensorProto_DataType> OV_2_ONNX_TYPES = {
{ov::element::Type_t::f16, TensorProto_DataType::TensorProto_DataType_FLOAT16},
{ov::element::Type_t::f32, TensorProto_DataType::TensorProto_DataType_FLOAT},
{ov::element::Type_t::f64, TensorProto_DataType::TensorProto_DataType_DOUBLE},
#ifdef ONNX_VERSION_116
{ov::element::Type_t::i4, TensorProto_DataType::TensorProto_DataType_INT4},
#endif
{ov::element::Type_t::i8, TensorProto_DataType::TensorProto_DataType_INT8},
{ov::element::Type_t::i16, TensorProto_DataType::TensorProto_DataType_INT16},
{ov::element::Type_t::i32, TensorProto_DataType::TensorProto_DataType_INT32},
{ov::element::Type_t::i64, TensorProto_DataType::TensorProto_DataType_INT64},
#ifdef ONNX_VERSION_116
{ov::element::Type_t::u4, TensorProto_DataType::TensorProto_DataType_UINT4},
#endif
{ov::element::Type_t::u8, TensorProto_DataType::TensorProto_DataType_UINT8},
{ov::element::Type_t::u16, TensorProto_DataType::TensorProto_DataType_UINT16},
{ov::element::Type_t::u32, TensorProto_DataType::TensorProto_DataType_UINT32},
Expand Down
15 changes: 0 additions & 15 deletions src/frontends/onnx/tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -134,21 +134,6 @@ target_compile_definitions(ov_onnx_frontend_tests
set(ONNX_OPSET_VERSION 17 CACHE INTERNAL "Supported version of ONNX operator set")
target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION})

# Temporary workaround until vcpkg ships a recent enough ONNX:
# detect the ONNX version in use so modern functionality can be enabled.
find_package(ONNX 1.16.0 QUIET COMPONENTS onnx onnx_proto NO_MODULE)
if(ONNX_FOUND)
    # A package-manager-provided ONNX >= 1.16.0 was found.
    target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_VERSION_116)
else()
    # Fall back to the vendored copy: read its VERSION_NUMBER file.
    if(EXISTS "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER")
        file(READ "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER" ONNX_VERSION)
        string(STRIP "${ONNX_VERSION}" ONNX_VERSION)
        # VERSION_GREATER_EQUAL does a component-wise version comparison;
        # plain GREATER/EQUAL are numeric-only and silently evaluate false
        # on dotted version strings such as "1.16.0".
        if(ONNX_VERSION VERSION_GREATER_EQUAL "1.16.0")
            target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_VERSION_116)
        endif()
    endif()
endif()

if(ONNX_TESTS_DEPENDENCIES)
add_dependencies(ov_onnx_frontend_tests ${ONNX_TESTS_DEPENDENCIES})
endif()
Expand Down
2 changes: 0 additions & 2 deletions src/frontends/onnx/tests/onnx_import.in.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,6 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_bool_init_raw) {
test_case.run();
}

#ifdef ONNX_VERSION_116
OPENVINO_TEST(${BACKEND_NAME}, onnx_int4_const) {
auto model = convert_model("int4_const.onnx");

Expand Down Expand Up @@ -195,7 +194,6 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_uint4_input) {

test_case.run();
}
#endif

OPENVINO_TEST(${BACKEND_NAME}, onnx_model_add_abc_initializers) {
auto model = convert_model("add_abc_initializers.onnx");
Expand Down
4 changes: 2 additions & 2 deletions src/frontends/paddle/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,11 @@

#include "openvino/frontend/paddle/frontend.hpp"

#include <google/protobuf/runtime_version.h>

#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION >= 4022000 // protobuf 4.22
# define OV_PROTOBUF_ABSL_IS_USED
#endif
#include <google/protobuf/port_undef.inc>

#ifdef OV_PROTOBUF_ABSL_IS_USED
# include <absl/log/globals.h>
Expand Down
9 changes: 8 additions & 1 deletion thirdparty/dependencies.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -503,10 +503,17 @@ endif()
#

if(ENABLE_OV_ONNX_FRONTEND)
find_package(ONNX 1.15.0 QUIET COMPONENTS onnx onnx_proto NO_MODULE)
find_package(ONNX 1.16.2 QUIET COMPONENTS onnx onnx_proto NO_MODULE)

if(ONNX_FOUND)
# conan and vcpkg create imported targets 'onnx' and 'onnx_proto'
# newer versions of ONNX in vcpkg has ONNX:: prefix, let's create aliases
if(TARGET ONNX::onnx)
add_library(onnx ALIAS ONNX::onnx)
endif()
if(TARGET ONNX::onnx_proto)
add_library(onnx_proto ALIAS ONNX::onnx_proto)
endif()
else()
add_subdirectory(thirdparty/onnx)
endif()
Expand Down

0 comments on commit 5d48d13

Please sign in to comment.