From 29ad3e8c92dc4144dbc0d2fcbd0227f2d586b7d2 Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Mon, 28 Aug 2023 10:22:14 +0400 Subject: [PATCH 1/3] Moved eval tests to new API (#19364) * Moved eval tests to new API * Fixed build * Fixed eval tests --- src/core/tests/eval.cpp | 2622 ++++++++++++++------------- src/core/tests/utils/eval_utils.hpp | 80 +- 2 files changed, 1449 insertions(+), 1253 deletions(-) diff --git a/src/core/tests/eval.cpp b/src/core/tests/eval.cpp index bfea21b6dfaa45..3950546f5c96db 100644 --- a/src/core/tests/eval.cpp +++ b/src/core/tests/eval.cpp @@ -14,114 +14,107 @@ #include "common_test_utils/type_prop.hpp" #include "gmock/gmock.h" #include "gtest/gtest.h" -#include "ngraph/node.hpp" -#include "ngraph/node_output.hpp" -#include "ngraph/op/abs.hpp" -#include "ngraph/op/acos.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/asin.hpp" -#include "ngraph/op/atan.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/ceiling.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/cos.hpp" -#include "ngraph/op/cosh.hpp" -#include "ngraph/op/cum_sum.hpp" -#include "ngraph/op/erf.hpp" -#include "ngraph/op/exp.hpp" -#include "ngraph/op/fake_quantize.hpp" -#include "ngraph/op/floor.hpp" -#include "ngraph/op/gather.hpp" -#include "ngraph/op/log.hpp" -#include "ngraph/op/max_pool.hpp" -#include "ngraph/op/min.hpp" -#include "ngraph/op/minimum.hpp" -#include "ngraph/op/negative.hpp" -#include "ngraph/op/non_zero.hpp" -#include "ngraph/op/not.hpp" -#include "ngraph/op/parameter.hpp" -#include "ngraph/op/range.hpp" -#include "ngraph/op/reduce_logical_and.hpp" -#include "ngraph/op/relu.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/round.hpp" -#include "ngraph/op/scatter_elements_update.hpp" -#include "ngraph/op/scatter_update.hpp" -#include "ngraph/op/shape_of.hpp" -#include "ngraph/op/sigmoid.hpp" -#include "ngraph/op/sign.hpp" -#include "ngraph/op/sin.hpp" -#include "ngraph/op/sinh.hpp" -#include "ngraph/op/softmax.hpp" -#include "ngraph/op/softsign.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/op/squeeze.hpp" -#include "ngraph/op/tan.hpp" -#include "ngraph/op/tanh.hpp" -#include "ngraph/op/topk.hpp" -#include "ngraph/op/unsqueeze.hpp" -#include "ngraph/runtime/host_tensor.hpp" #include "ngraph/validation_util.hpp" +#include "openvino/core/model.hpp" +#include "openvino/core/shape.hpp" +#include "openvino/core/type/element_type.hpp" +#include "openvino/op/abs.hpp" +#include "openvino/op/acos.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/asin.hpp" +#include "openvino/op/atan.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/ceiling.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/cos.hpp" +#include "openvino/op/cosh.hpp" +#include "openvino/op/cum_sum.hpp" +#include "openvino/op/erf.hpp" +#include "openvino/op/exp.hpp" +#include "openvino/op/fake_quantize.hpp" +#include "openvino/op/floor.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/log.hpp" +#include "openvino/op/logical_not.hpp" +#include "openvino/op/max_pool.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/negative.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_min.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/round.hpp" +#include 
"openvino/op/scatter_elements_update.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/sigmoid.hpp" +#include "openvino/op/sign.hpp" +#include "openvino/op/sin.hpp" +#include "openvino/op/sinh.hpp" +#include "openvino/op/softmax.hpp" +#include "openvino/op/softsign.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/tan.hpp" +#include "openvino/op/tanh.hpp" +#include "openvino/op/topk.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "openvino/runtime/tensor.hpp" #include "sequnce_generator.hpp" #include "utils/eval_utils.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START - using namespace std; -using namespace ngraph; +using namespace ov; using namespace testing; +namespace { +template +std::vector read_vector(const ov::Tensor& tv) { + if (ov::element::from() != tv.get_element_type()) { + OPENVINO_THROW("read_vector type must match Tensor type"); + } + size_t element_count = tv.get_size(); + size_t size = tv.get_byte_size(); + std::vector rc(element_count); + memcpy(rc.data(), tv.data(), size); + return rc; +} +} // namespace + #define ASSERT_FLOAT_VECTORS_EQ(expected, result) \ ASSERT_EQ(expected.size(), result.size()) << "Array sizes differ."; \ for (size_t i = 0; i < expected.size(); ++i) { \ ASSERT_FLOAT_EQ(expected[i], result[i]) << "at index: " << i; \ } -TEST(eval, bad_get_data_ptr) { - HostTensor c(element::f32, Shape{}); - *c.get_data_ptr() = 1.0; - EXPECT_EQ(*c.get_data_ptr(), 1.0); - try { - c.get_data_ptr(); - FAIL() << "Bad type not detected."; - } catch (const CheckFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("get_data_ptr")); - } - try { - c.get_data_ptr(); - FAIL() << "Bad type not detected."; - } catch (const CheckFailure& error) { - EXPECT_HAS_SUBSTRING(error.what(), std::string("get_data_ptr")); - } -} - TEST(eval, max_eval_parameter) { - auto p = make_shared(element::i64, Shape{}); + auto p = make_shared(element::i64, Shape{}); OPENVINO_SUPPRESS_DEPRECATED_START - auto result = maximum_value(p); + auto result = ngraph::maximum_value(p); OPENVINO_SUPPRESS_DEPRECATED_END EXPECT_FALSE(result.first); EXPECT_EQ(result.second, numeric_limits::max()); } TEST(eval, max_eval_constant) { - auto c = op::Constant::create(element::i64, Shape{}, {27}); + auto c = ov::op::v0::Constant::create(element::i64, Shape{}, {27}); OPENVINO_SUPPRESS_DEPRECATED_START - auto result = maximum_value(c); + auto result = ngraph::maximum_value(c); OPENVINO_SUPPRESS_DEPRECATED_END ASSERT_TRUE(result.first); EXPECT_EQ(result.second, 27); } TEST(eval, max_eval_minimum_constant) { - auto c = op::Constant::create(element::i64, Shape{}, {27}); - auto p = make_shared(element::i64, Shape{}); + auto c = ov::op::v0::Constant::create(element::i64, Shape{}, {27}); + auto p = make_shared(element::i64, Shape{}); auto m = make_shared(c, p); OPENVINO_SUPPRESS_DEPRECATED_START - auto result = maximum_value(m); + auto result = ngraph::maximum_value(m); OPENVINO_SUPPRESS_DEPRECATED_END ASSERT_TRUE(result.first); EXPECT_EQ(result.second, 27); @@ -140,41 +133,45 @@ TEST(eval, max_eval_reduce_min) { make_shared(reduce, make_shared(element::i32, Shape{1}, 0)), make_shared(element::i64, Shape{1}, 0)); OPENVINO_SUPPRESS_DEPRECATED_START - EXPECT_EQ(maximum_value(squeezes).second, 37); + EXPECT_EQ(ngraph::maximum_value(squeezes).second, 37); OPENVINO_SUPPRESS_DEPRECATED_END } TEST(eval, evaluate_shape_of) { - auto p = make_shared(element::f32, PartialShape{-1, -1}); + auto p = make_shared(element::f32, 
PartialShape{-1, -1}); auto so = make_shared(p); - auto fun = make_shared(OutputVector{so}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 3}, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::i64); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2})); + auto model = make_shared(OutputVector{so}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3}, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::i64); + EXPECT_EQ(result.get_shape(), (Shape{2})); auto result_shape = read_vector(result); vector arg_shape{2, 3}; ASSERT_EQ(result_shape, arg_shape); } TEST(eval, evaluate_dynamic_range_sum) { - auto p_start = make_shared(element::f32, PartialShape{}); - auto p_stop = make_shared(element::f32, PartialShape{}); - auto p_step = make_shared(element::f32, PartialShape{}); - auto p1 = make_shared(element::f32, PartialShape{}); + auto p_start = make_shared(element::f32, PartialShape{}); + auto p_stop = make_shared(element::f32, PartialShape{}); + auto p_step = make_shared(element::f32, PartialShape{}); + auto p1 = make_shared(element::f32, PartialShape{}); auto range = make_shared(p_start, p_stop, p_step); auto add = make_shared(range, p1); - auto fun = make_shared(OutputVector{add}, ParameterVector{p_start, p_stop, p_step, p1}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor({}, {1.0f}), - make_host_tensor({}, {10.0f}), - make_host_tensor({}, {3.0f}), - make_host_tensor({}, {7.0f})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3})); + auto model = make_shared(OutputVector{add}, ParameterVector{p_start, p_stop, p_step, p1}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({}, {1.0f}), + make_tensor({}, {10.0f}), + make_tensor({}, {3.0f}), + make_tensor({}, {7.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3})); auto cval = read_vector(result_tensor); vector seq{8.0f, 11.0f, 14.0f}; ASSERT_EQ(cval, seq); @@ -182,16 +179,18 @@ TEST(eval, evaluate_dynamic_range_sum) { TEST(eval, evaluate_broadcast_v3_bidirectional) { Shape shape_a{4, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i32, Shape{3}, {2, 1, 4}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i32, Shape{3}, {2, 1, 4}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, {make_host_tensor(Shape{4, 1}, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 4, 4})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = 
ov::TensorVector{make_tensor(Shape{4, 1}, {1.0f, 2.0f, 3.0f, 4.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (ov::Shape{2, 4, 4})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; ASSERT_EQ(result_val, expec); @@ -199,15 +198,18 @@ TEST(eval, evaluate_broadcast_v3_bidirectional) { TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input) { Shape shape_a{1, 1, 1, 1, 1, 1, 1, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 3, 1, 1}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{4}, {1, 3, 1, 1}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(shape_a, {1.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 1, 1, 1, 1, 3, 1, 1})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(shape_a, {1.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{1, 1, 1, 1, 1, 3, 1, 1})); auto result_val = read_vector(result); vector expec{1.0f, 1.0f, 1.0f}; ASSERT_EQ(result_val, expec); @@ -215,15 +217,18 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input) { TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2) { Shape shape_a{1, 3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i32, Shape{2}, {3, 1}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i32, Shape{2}, {3, 1}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{1, 3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 3, 1})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{1, 3, 1}, {1.0f, 2.0f, 3.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{1, 3, 1})); auto result_val = read_vector(result); vector expec{1.0f, 2.0f, 3.0f}; ASSERT_EQ(result_val, expec); @@ -231,17 +236,19 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2) TEST(eval, evaluate_broadcast_v3_bidirectional_dyn) { Shape shape_a{4, 1}; - auto A = make_shared(element::i32, shape_a); - auto target_shape = make_shared(element::i32, Shape{3}); + auto A = make_shared(element::i32, shape_a); + auto 
target_shape = make_shared(element::i32, Shape{3}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{4, 1}, {1, 2, 3, 4}), - make_host_tensor(Shape{3}, {2, 1, 4})})); - EXPECT_EQ(result->get_element_type(), element::i32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 4, 4})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{4, 1}, {1, 2, 3, 4}), + make_tensor(Shape{3}, {2, 1, 4})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::i32); + EXPECT_EQ(result.get_shape(), (Shape{2, 4, 4})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; @@ -250,15 +257,18 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_dyn) { TEST(eval, evaluate_broadcast_v3_numpy) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared(A, target_shape); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -268,17 +278,19 @@ TEST(eval, evaluate_broadcast_v3_numpy) { TEST(eval, evaluate_broadcast_v3_numpy_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i32, Shape{3}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = make_shared(element::i32, Shape{3}); auto bcast_v3 = make_shared(A, target_shape); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), - make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), + make_tensor(Shape{3}, {2, 3, 6})}; + ASSERT_TRUE(model->evaluate(out_vector, 
in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -289,29 +301,33 @@ TEST(eval, evaluate_broadcast_v3_numpy_dyn) { TEST(eval, evaluate_broadcast_v3_numpy_vs_bidi) { Shape in_shape{1, 4, 1}; - auto A = make_shared(element::f32, in_shape); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 4, 4}); + auto A = make_shared(element::f32, in_shape); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {1, 4, 4}); auto bcast_v3_num = make_shared(A, target_shape, op::BroadcastType::NUMPY); - auto fun_num = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE( - fun_num->evaluate({result}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 4})); + auto model_num = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}; + ASSERT_TRUE(model_num->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{1, 4, 4})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; ASSERT_EQ(expec, result_val); - auto target_shape2 = op::Constant::create(element::i64, Shape{2}, {1, 4}); + auto target_shape2 = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, 4}); auto bcast_v3 = make_shared(A, target_shape2, op::BroadcastType::BIDIRECTIONAL); - auto fun_bidi = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); - - auto result2 = make_shared(); - ASSERT_TRUE( - fun_bidi->evaluate({result2}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result2->get_element_type(), element::f32); - EXPECT_EQ(result2->get_partial_shape(), (PartialShape{1, 4, 4})); + auto model_bidi = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); + + auto result2 = ov::Tensor(); + auto out_vector2 = ov::TensorVector{result2}; + auto in_vector2 = ov::TensorVector{make_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}; + ASSERT_TRUE(model_bidi->evaluate(out_vector2, in_vector2)); + result2 = out_vector.at(0); + EXPECT_EQ(result2.get_element_type(), element::f32); + EXPECT_EQ(result2.get_shape(), (Shape{1, 4, 4})); auto result_val2 = read_vector(result2); vector expec2{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; ASSERT_EQ(expec2, result_val2); @@ -320,16 +336,18 @@ TEST(eval, evaluate_broadcast_v3_numpy_vs_bidi) { TEST(eval, evaluate_broadcast_v3_bidi_3d) { Shape in_shape{1, 4, 1}; - auto A = make_shared(element::f32, in_shape); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 1, 3}); + auto A = make_shared(element::f32, in_shape); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {1, 1, 3}); auto bcast_v3_num = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); - auto fun_num = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE( - fun_num->evaluate({result}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 
4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 3})); + auto model = make_shared(OutputVector{bcast_v3_num}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{1, 4, 3})); auto result_val = read_vector(result); vector expec{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f}; ASSERT_EQ(expec, result_val); @@ -339,15 +357,18 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d) { Shape in_shape{4, 1, 1}; Shape expec_shape{1, 4, 2, 2}; - auto A = make_shared(element::f32, in_shape); - auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}); + auto A = make_shared(element::f32, in_shape); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastType::BIDIRECTIONAL); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 2, 2})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{1, 4, 2, 2})); auto result_val = read_vector(result); vector expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4}; ASSERT_EQ(result_val, expec); @@ -355,15 +376,18 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d) { TEST(eval, evaluate_broadcast_v3_pdpd) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -373,17 +397,19 @@ TEST(eval, evaluate_broadcast_v3_pdpd) { TEST(eval, 
evaluate_broadcast_v3_pdpd_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i32, Shape{3}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = make_shared(element::i32, Shape{3}); auto bcast_v3 = make_shared(A, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), - make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), + make_tensor(Shape{3}, {2, 3, 6})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -393,15 +419,18 @@ TEST(eval, evaluate_broadcast_v3_pdpd_dyn) { TEST(eval, evaluate_broadcast_v1_numpy) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared(A, target_shape); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -411,17 +440,19 @@ TEST(eval, evaluate_broadcast_v1_numpy) { TEST(eval, evaluate_broadcast_v1_numpy_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = make_shared(element::i64, Shape{3}); auto bcast_v3 = make_shared(A, target_shape); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), - make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A, 
target_shape}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), + make_tensor(Shape{3}, {2, 3, 6})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -431,16 +462,19 @@ TEST(eval, evaluate_broadcast_v1_numpy_dyn) { TEST(eval, evaluate_broadcast_v1_pdpd) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 6}); auto bcast_v3 = make_shared(A, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -450,18 +484,20 @@ TEST(eval, evaluate_broadcast_v1_pdpd) { TEST(eval, evaluate_broadcast_v1_pdpd_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = make_shared(element::i64, Shape{3}); auto bcast_v3 = make_shared(A, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), - make_host_tensor(Shape{3}, {2, 3, 6})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), + make_tensor(Shape{3}, {2, 3, 6})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 6})); auto result_val = read_vector(result); vector expec{ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, @@ -471,19 +507,22 @@ TEST(eval, evaluate_broadcast_v1_pdpd_dyn) { TEST(eval, evaluate_broadcast_v1_explicit) { Shape shape_a{3, 1}; - auto A = 
make_shared(element::f32, shape_a); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i32, Shape{2}, {1, 2}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = ov::op::v0::Constant::create(element::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = ov::op::v0::Constant::create(element::i32, Shape{2}, {1, 2}); auto bcast_v3 = make_shared(A, target_shape, axes_mapping, op::AutoBroadcastSpec(op::AutoBroadcastType::EXPLICIT)); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 1})); auto result_val = read_vector(result); vector expec{1, 2, 3, 1, 2, 3}; ASSERT_EQ(result_val, expec); @@ -491,23 +530,25 @@ TEST(eval, evaluate_broadcast_v1_explicit) { TEST(eval, evaluate_broadcast_v1_explicit_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); - auto axes_mapping = make_shared(element::i32, Shape{2}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = make_shared(element::i64, Shape{3}); + auto axes_mapping = make_shared(element::i32, Shape{2}); auto bcast_v1 = make_shared(A, target_shape, axes_mapping, op::AutoBroadcastSpec(op::AutoBroadcastType::EXPLICIT)); - auto fun = make_shared(OutputVector{bcast_v1}, ParameterVector{A, target_shape, axes_mapping}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), - make_host_tensor(Shape{3}, {2, 3, 1}), - make_host_tensor(Shape{2}, {1, 2})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1})); + auto model = make_shared(OutputVector{bcast_v1}, ParameterVector{A, target_shape, axes_mapping}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), + make_tensor(Shape{3}, {2, 3, 1}), + make_tensor(Shape{2}, {1, 2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 1})); auto result_val = read_vector(result); vector expec{1, 2, 3, 1, 2, 3}; ASSERT_EQ(result_val, expec); @@ -515,23 +556,25 @@ TEST(eval, evaluate_broadcast_v1_explicit_dyn) { TEST(eval, evaluate_broadcast_v3_explicit_dyn) { Shape shape_a{3, 1}; - auto A = make_shared(element::f32, shape_a); - auto target_shape = make_shared(element::i64, Shape{3}); - auto axes_mapping = make_shared(element::i32, Shape{2}); + auto A = make_shared(element::f32, shape_a); + auto target_shape = make_shared(element::i64, Shape{3}); + auto axes_mapping = make_shared(element::i32, Shape{2}); auto bcast_v3 = make_shared(A, target_shape, axes_mapping, 
op::BroadcastModeSpec(op::BroadcastType::EXPLICIT)); - auto fun = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape, axes_mapping}); - - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), - make_host_tensor(Shape{3}, {2, 3, 1}), - make_host_tensor(Shape{2}, {1, 2})})); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1})); + auto model = make_shared(OutputVector{bcast_v3}, ParameterVector{A, target_shape, axes_mapping}); + + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{3, 1}, {1.0f, 2.0f, 3.0f}), + make_tensor(Shape{3}, {2, 3, 1}), + make_tensor(Shape{2}, {1, 2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3, 1})); auto result_val = read_vector(result); vector expec{1, 2, 3, 1, 2, 3}; ASSERT_EQ(result_val, expec); @@ -556,98 +599,106 @@ class TestOpMultiOut : public op::Op { return std::make_shared(new_args.at(0), new_args.at(1)); } - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override { - inputs[0]->read(outputs[0]->get_data_ptr(), inputs[0]->get_size_in_bytes()); - inputs[1]->read(outputs[1]->get_data_ptr(), inputs[1]->get_size_in_bytes()); + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override { + memcpy(outputs[0].data(), inputs[0].data(), inputs[0].get_byte_size()); + memcpy(outputs[1].data(), inputs[1].data(), inputs[1].get_byte_size()); return true; } - OPENVINO_SUPPRESS_DEPRECATED_END }; TEST(eval, test_op_multi_out) { - auto p = make_shared(element::f32, PartialShape{2, 3}); - auto p2 = make_shared(element::f64, PartialShape{2, 2}); + auto p = make_shared(element::f32, PartialShape{2, 3}); + auto p2 = make_shared(element::f64, PartialShape{2, 2}); auto so = make_shared(p, p2); - auto fun = make_shared(OutputVector{so->output(0), so->output(1)}, ParameterVector{p, p2}); - auto result = make_shared(element::Type_t::f32, Shape{2, 3}); - auto result2 = make_shared(element::Type_t::f64, Shape{2, 2}); - HostTensorVector ins{make_host_tensor(Shape{2, 3}), - make_host_tensor(Shape{2, 2})}; - ASSERT_TRUE(fun->evaluate({result, result2}, ins)); - EXPECT_EQ(result->get_element_type(), element::f32); - EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3})); + auto model = make_shared(OutputVector{so->output(0), so->output(1)}, ParameterVector{p, p2}); + auto result = ov::Tensor(element::Type_t::f32, Shape{2, 3}); + auto result2 = ov::Tensor(element::Type_t::f64, Shape{2, 2}); + ov::TensorVector outs{result, result2}; + ov::TensorVector ins{make_tensor(Shape{2, 3}), + make_tensor(Shape{2, 2})}; + ASSERT_TRUE(model->evaluate(outs, ins)); + result = outs.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); + EXPECT_EQ(result.get_shape(), (Shape{2, 3})); auto result_val = read_vector(result); auto arg_val = read_vector(ins[0]); ASSERT_EQ(result_val, arg_val); - EXPECT_EQ(result2->get_element_type(), element::f64); - EXPECT_EQ(result2->get_partial_shape(), (PartialShape{2, 2})); + EXPECT_EQ(result2.get_element_type(), element::f64); + EXPECT_EQ(result2.get_shape(), (Shape{2, 2})); auto result_val2 = read_vector(result2); auto arg_val2 = read_vector(ins[1]); ASSERT_EQ(result_val2, arg_val2); } TEST(eval, 
evaluate_reshape_v1) { - auto data = make_shared(element::f32, Shape{2, 5}); - auto pattern = make_shared(element::i64, Shape{2}); - auto dyn_reshape = make_shared(data, pattern, false); - auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); - auto result_tensor = make_shared(); - ASSERT_TRUE(func->evaluate({result_tensor}, - {make_host_tensor({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), - make_host_tensor({2}, {5, 2})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{5, 2})); + auto data = make_shared(element::f32, Shape{2, 5}); + auto pattern = make_shared(element::i64, Shape{2}); + auto dyn_reshape = make_shared(data, pattern, false); + auto model = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), + make_tensor({2}, {5, 2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{5, 2})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; ASSERT_EQ(computed_val, expected_val); } TEST(eval, evaluate_reshape_v1_negative_index) { - auto data = make_shared(element::f32, Shape{2, 5}); - auto pattern = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::f32, Shape{2, 5}); + auto pattern = make_shared(element::i64, Shape{2}); auto dyn_reshape = make_shared(data, pattern, false); - auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); - auto result_tensor = make_shared(); - ASSERT_TRUE(func->evaluate({result_tensor}, - {make_host_tensor({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), - make_host_tensor({2}, {2, -1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 5})); + auto model = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}), + make_tensor({2}, {2, -1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{2, 5})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; ASSERT_EQ(computed_val, expected_val); } TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag) { - auto data = make_shared(element::f32, Shape{2, 2, 2, 2}); - auto pattern = make_shared(element::i64, Shape{6}); + auto data = make_shared(element::f32, Shape{2, 2, 2, 2}); + auto pattern = make_shared(element::i64, Shape{6}); auto dyn_reshape = make_shared(data, pattern, true); - auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); - auto result_tensor = make_shared(); - ASSERT_TRUE(func->evaluate( - {result_tensor}, - {make_host_tensor({2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), - make_host_tensor({6}, {2, 0, 1, -1, 1, 2})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2, 1, 2, 
1, 2})); + auto model = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor({2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), + make_tensor({6}, {2, 0, 1, -1, 1, 2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{2, 2, 1, 2, 1, 2})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; ASSERT_EQ(computed_val, expected_val); } TEST(eval, evaluate_reshape_v1_pattern_int16) { - auto data = make_shared(element::f32, Shape{2, 2, 2, 2}); - auto pattern = make_shared(element::i16, Shape{6}); + auto data = make_shared(element::f32, Shape{2, 2, 2, 2}); + auto pattern = make_shared(element::i16, Shape{6}); auto dyn_reshape = make_shared(data, pattern, true); - auto func = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); - auto result_tensor = make_shared(); - ASSERT_TRUE(func->evaluate( - {result_tensor}, - {make_host_tensor({2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), - make_host_tensor({6}, {2, 0, 1, -1, 1, 2})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2, 1, 2, 1, 2})); + auto model = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor({2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), + make_tensor({6}, {2, 0, 1, -1, 1, 2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{2, 2, 1, 2, 1, 2})); auto computed_val = read_vector(result_tensor); vector expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}; ASSERT_EQ(computed_val, expected_val); @@ -656,76 +707,85 @@ TEST(eval, evaluate_reshape_v1_pattern_int16) { TEST(eval, evaluate_reshape_v1_data_dynamic_shape) { constexpr auto exp_dtype = element::i32; - auto data = make_shared(exp_dtype, PartialShape::dynamic()); - auto pattern = make_shared(element::i64, Shape{6}); + auto data = make_shared(exp_dtype, PartialShape::dynamic()); + auto pattern = make_shared(element::i64, Shape{6}); auto dyn_reshape = make_shared(data, pattern, true); - auto f = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); - auto result_tensor = make_shared(); - - ASSERT_TRUE(f->evaluate({result_tensor}, - {make_host_tensor(Shape{2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7}), - make_host_tensor(pattern->get_shape(), {2, 0, 1, -1, 1, 1})})); - - EXPECT_EQ(result_tensor->get_element_type(), exp_dtype); - EXPECT_EQ(result_tensor->get_partial_shape(), PartialShape({2, 2, 1, 2, 1, 1})); + auto model = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(Shape{2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7}), + make_tensor(pattern->get_shape(), {2, 0, 1, -1, 1, 1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + 
EXPECT_EQ(result_tensor.get_element_type(), exp_dtype); + EXPECT_EQ(result_tensor.get_shape(), Shape({2, 2, 1, 2, 1, 1})); EXPECT_THAT(read_vector(result_tensor), ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); } TEST(eval, evaluate_reshape_v1_not_backward_compatible_and_in_out_size_not_eq) { constexpr auto exp_dtype = element::i32; - auto data = make_shared(exp_dtype, PartialShape::dynamic()); - auto pattern = make_shared(element::i16, Shape{5}); + auto data = make_shared(exp_dtype, PartialShape::dynamic()); + auto pattern = make_shared(element::i16, Shape{5}); auto dyn_reshape = make_shared(data, pattern, true); - auto f = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); - auto result_tensor = make_shared(); + auto model = make_shared(OutputVector{dyn_reshape}, ParameterVector{data, pattern}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(Shape{2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7}), + make_tensor(pattern->get_shape(), {2, 1, 1, 1, 1})}; - OV_EXPECT_THROW(f->evaluate({result_tensor}, - {make_host_tensor(Shape{2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7}), - make_host_tensor(pattern->get_shape(), {2, 1, 1, 1, 1})}), + OV_EXPECT_THROW(model->evaluate(out_vector, in_vector), NodeValidationFailure, HasSubstr("Requested output shape [2,1,1,1,1] is incompatible with input shape [2,2,2]")); } TEST(eval, evaluate_convert) { - auto p = make_shared(element::f32, PartialShape{-1, -1}); + auto p = make_shared(element::f32, PartialShape{-1, -1}); auto convert = make_shared(p, element::i64); - auto fun = make_shared(OutputVector{convert}, ParameterVector{p}); + auto model = make_shared(OutputVector{convert}, ParameterVector{p}); std::vector> inputs{{-1, 1}}; std::vector> expected_result{{-1, 1}}; for (size_t i = 0; i < inputs.size(); i++) { - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{1, 2}, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); - EXPECT_EQ(result->get_shape(), (Shape{1, 2})); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{1, 2}, inputs[i])}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::i64); + EXPECT_EQ(result.get_shape(), (Shape{1, 2})); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); } } TEST(eval, evaluate_abs) { - auto p = make_shared(element::f32, Shape{2, 3}); - auto abs = make_shared(p); - auto fun = make_shared(OutputVector{abs}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 3}); + auto abs = make_shared(p); + auto model = make_shared(OutputVector{abs}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_erf) { - auto p = make_shared(element::f32, Shape{2, 3}); - auto erf = 
make_shared(p); - auto fun = make_shared(OutputVector{erf}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 3}); + auto erf = make_shared(p); + auto model = make_shared(OutputVector{erf}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{std::erf(0.0f), std::erf(-1.0f), @@ -737,14 +797,16 @@ TEST(eval, evaluate_erf) { } TEST(eval, evaluate_exp) { - auto p = make_shared(element::f32, Shape{2, 3}); - auto exp = make_shared(p); - auto fun = make_shared(OutputVector{exp}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 3}); + auto exp = make_shared(p); + auto model = make_shared(OutputVector{exp}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{std::exp(0.0f), std::exp(-1.0f), @@ -756,41 +818,47 @@ TEST(eval, evaluate_exp) { } TEST(eval, evaluate_floor) { - auto p = make_shared(element::f32, Shape{2, 2}); - auto floor = make_shared(p); - auto fun = make_shared(OutputVector{floor}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, {make_host_tensor(Shape{2, 2}, {-2.5f, -2.0f, 0.3f, 4.8f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 2}); + auto floor = make_shared(p); + auto model = make_shared(OutputVector{floor}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{2, 2}, {-2.5f, -2.0f, 0.3f, 4.8f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{-3.0f, -2.0f, 0.0f, 4.0f}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_floor_int32) { - auto p = make_shared(element::i32, Shape{2, 2}); - auto floor = make_shared(p); - auto fun = make_shared(OutputVector{floor}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 2}, {-2, -136314888, 0x40000010, 0x40000001})})); - EXPECT_EQ(result->get_element_type(), element::i32); + auto p = make_shared(element::i32, Shape{2, 2}); + auto floor = make_shared(p); + auto model = make_shared(OutputVector{floor}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 2}, {-2, -136314888, 0x40000010, 0x40000001})}; + 
ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::i32); auto result_val = read_vector(result); vector expec{-2, -136314888, 0x40000010, 0x40000001}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_log) { - auto p = make_shared(element::f32, Shape{2, 2, 2}); - auto log = make_shared(p); - auto fun = make_shared(OutputVector{log}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result}, - {make_host_tensor(Shape{2, 2, 2}, {0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 2, 2}); + auto log = make_shared(p); + auto model = make_shared(OutputVector{log}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{ + make_tensor(Shape{2, 2, 2}, {0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{std::log(0.125f), std::log(0.25f), @@ -804,133 +872,151 @@ TEST(eval, evaluate_log) { } TEST(eval, evaluate_negative_f32) { - auto p = make_shared(element::f32, Shape{2, 5}); - auto negate = make_shared(p); - auto fun = make_shared(OutputVector{negate}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate({result}, - {make_host_tensor( - Shape{2, 5}, - {1.35f, 8.76f, -8.0f, 17.234f, -2.121f, 1.0f, 8.7f, -8.92f, 17.0f, -1.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 5}); + auto negate = make_shared(p); + auto model = make_shared(OutputVector{negate}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{ + make_tensor(Shape{2, 5}, + {1.35f, 8.76f, -8.0f, 17.234f, -2.121f, 1.0f, 8.7f, -8.92f, 17.0f, -1.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{-1.35f, -8.76f, 8.0f, -17.234f, 2.121f, -1.0f, -8.7f, 8.92f, -17.0f, 1.0f}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_negative_i32) { - auto p = make_shared(element::i32, Shape{2, 5}); - auto negate = make_shared(p); - auto fun = make_shared(OutputVector{negate}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, 0})})); - EXPECT_EQ(result->get_element_type(), element::i32); + auto p = make_shared(element::i32, Shape{2, 5}); + auto negate = make_shared(p); + auto model = make_shared(OutputVector{negate}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, 0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::i32); auto result_val = read_vector(result); vector expec{-1, -8, 8, -17, 2, -1, -8, 8, -17, 0}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_relu_2Ffprop_f32) { - auto p = make_shared(element::f32, Shape{2, 5}); - auto relu = make_shared(p); - auto fun = make_shared(OutputVector{relu}, ParameterVector{p}); - auto result = 
make_shared(); - ASSERT_TRUE(fun->evaluate( - {result}, - {make_host_tensor(Shape{2, 5}, {1, 8, -8, 17, -0.5f, 0.1f, 8.5f, -8, 17, -0.5f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 5}); + auto relu = make_shared(p); + auto model = make_shared(OutputVector{relu}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{ + make_tensor(Shape{2, 5}, {1, 8, -8, 17, -0.5f, 0.1f, 8.5f, -8, 17, -0.5f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{1, 8, 0, 17, 0, 0.1f, 8.5f, 0, 17, 0}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_relu_2Ffprop_i32) { - auto p = make_shared(element::i32, Shape{2, 5}); - auto relu = make_shared(p); - auto fun = make_shared(OutputVector{relu}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, - {make_host_tensor(Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, -1})})); - EXPECT_EQ(result->get_element_type(), element::i32); + auto p = make_shared(element::i32, Shape{2, 5}); + auto relu = make_shared(p); + auto model = make_shared(OutputVector{relu}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, -1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::i32); auto result_val = read_vector(result); vector expec{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_round) { - auto p = make_shared(element::f32, Shape{5}); + auto p = make_shared(element::f32, Shape{5}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); - auto fun = make_shared(OutputVector{round}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE( - fun->evaluate({result}, {make_host_tensor(Shape{5}, {0.9f, 2.5f, 2.3f, 1.5f, -4.5f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto model = make_shared(OutputVector{round}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{5}, {0.9f, 2.5f, 2.3f, 1.5f, -4.5f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{1.0f, 2.0f, 2.0f, 2.0f, -4.0f}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_round_2D) { - auto p = make_shared(element::f32, Shape{3, 5}); + auto p = make_shared(element::f32, Shape{3, 5}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); - auto fun = make_shared(OutputVector{round}, ParameterVector{p}); - auto result = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result}, - {make_host_tensor( - Shape{3, 5}, - {0.1f, 0.5f, 0.9f, 1.2f, 1.5f, 1.8f, 2.3f, 2.5f, 2.7f, -1.1f, -1.5f, -1.9f, -2.2f, -2.5f, -2.8f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto model = make_shared(OutputVector{round}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor( + Shape{3, 5}, + {0.1f, 0.5f, 0.9f, 1.2f, 1.5f, 1.8f, 2.3f, 2.5f, 2.7f, -1.1f, -1.5f, -1.9f, -2.2f, -2.5f, 
-2.8f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{0.f, 0.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 3.f, -1.f, -2.f, -2.f, -2.f, -2.f, -3.f}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_sigmoid) { - auto p = make_shared(element::f32, Shape{1, 1, 2, 2}); - auto sigmoid = make_shared(p); - auto fun = make_shared(OutputVector{sigmoid}, ParameterVector{p}); - auto result = make_shared(); - + auto p = make_shared(element::f32, Shape{1, 1, 2, 2}); + auto sigmoid = make_shared(p); + auto model = make_shared(OutputVector{sigmoid}, ParameterVector{p}); float x1 = 1.0f; float x2 = 4.0f; float sigma1 = 1.0f / (1.0f + std::exp(-x1)); float sigma2 = 1.0f / (1.0f + std::exp(-x2)); - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{1, 1, 2, 2}, {x1, x2, x1, x2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{1, 1, 2, 2}, {x1, x2, x1, x2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{sigma1, sigma2, sigma1, sigma2}; EXPECT_EQ(result_val.size(), expec.size()); } TEST(eval, evaluate_sign) { - auto p = make_shared(element::f32, Shape{2, 3}); - auto sign = make_shared(p); - auto fun = make_shared(OutputVector{sign}, ParameterVector{p}); - auto result = make_shared(); - - ASSERT_TRUE( - fun->evaluate({result}, {make_host_tensor(Shape{2, 3}, {1, -2, 0, -4.8f, 4.8f, -0.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{2, 3}); + auto sign = make_shared(p); + auto model = make_shared(OutputVector{sign}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{2, 3}, {1, -2, 0, -4.8f, 4.8f, -0.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{1, -1, 0, -1, 1, 0}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_sin) { - auto p = make_shared(element::f32, Shape{11}); - auto sin = make_shared(p); - auto fun = make_shared(OutputVector{sin}, ParameterVector{p}); - auto result = make_shared(); - - ASSERT_TRUE(fun->evaluate( - {result}, - {make_host_tensor(Shape{11}, - {0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto p = make_shared(element::f32, Shape{11}); + auto sin = make_shared(p); + auto model = make_shared(OutputVector{sin}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{ + make_tensor(Shape{11}, + {0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{0.00000000f, 0.24740396f, @@ -947,14 +1033,17 @@ TEST(eval, evaluate_sin) { } TEST(eval, evaluate_sinh) { - auto p = make_shared(element::f32, Shape{6}); - auto sinh = make_shared(p); - auto fun = make_shared(OutputVector{sinh}, ParameterVector{p}); - auto 
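All of the migrated tests in this file follow the same new-API flow: build an ov::Model, put an empty ov::Tensor placeholder into the output TensorVector, call Model::evaluate(), and read the filled tensor back out. As a point of reference only (not part of the patch), a minimal self-contained version of that flow could look roughly like the sketch below; it uses only public ov:: classes, deliberately avoids the make_tensor/read_vector helpers from utils/eval_utils.hpp (whose definitions are not shown in this hunk), and the eval_relu name is purely illustrative.

    #include <algorithm>
    #include <memory>
    #include <vector>

    #include "openvino/core/model.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/relu.hpp"
    #include "openvino/runtime/tensor.hpp"

    // Evaluate a 1-D Relu through ov::Model::evaluate(), mirroring the test pattern above.
    std::vector<float> eval_relu(const std::vector<float>& in) {
        using namespace ov;
        auto p = std::make_shared<op::v0::Parameter>(element::f32, Shape{in.size()});
        auto relu = std::make_shared<op::v0::Relu>(p);
        auto model = std::make_shared<Model>(OutputVector{relu}, ParameterVector{p});

        Tensor input(element::f32, Shape{in.size()});
        std::copy(in.begin(), in.end(), input.data<float>());

        TensorVector outputs{Tensor()};  // empty placeholder; evaluate() allocates and fills it
        TensorVector inputs{input};
        model->evaluate(outputs, inputs);

        const float* out = outputs[0].data<float>();
        return {out, out + outputs[0].get_size()};
    }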
result = make_shared(); - + auto p = make_shared(element::f32, Shape{6}); + auto sinh = make_shared(p); + auto model = make_shared(OutputVector{sinh}, ParameterVector{p}); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{6}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); @@ -963,28 +1052,34 @@ TEST(eval, evaluate_sinh) { } TEST(eval, evaluate_sqrt) { - auto p = make_shared(element::f32, Shape{6}); - auto sqrt = make_shared(p); - auto fun = make_shared(OutputVector{sqrt}, ParameterVector{p}); - auto result = make_shared(); - + auto p = make_shared(element::f32, Shape{6}); + auto sqrt = make_shared(p); + auto model = make_shared(OutputVector{sqrt}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; vector input{16, 4, 81, 100, 10000, 0}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto in_vector = ov::TensorVector{make_tensor(Shape{6}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{4, 2, 9, 10, 100, 0}; ASSERT_FLOAT_VECTORS_EQ(expec, result_val); } TEST(eval, evaluate_acos) { - auto p = make_shared(element::f32, Shape{11}); - auto acos = make_shared(p); - auto fun = make_shared(OutputVector{acos}, ParameterVector{p}); - auto result = make_shared(); - + auto p = make_shared(element::f32, Shape{11}); + auto acos = make_shared(p); + auto model = make_shared(OutputVector{acos}, ParameterVector{p}); vector input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{11}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::acos(x); @@ -993,14 +1088,17 @@ TEST(eval, evaluate_acos) { } TEST(eval, evaluate_asin) { - auto p = make_shared(element::f32, Shape{11}); - auto asin = make_shared(p); - auto fun = make_shared(OutputVector{asin}, ParameterVector{p}); - auto result = make_shared(); + auto p = make_shared(element::f32, Shape{11}); + auto asin = make_shared(p); + auto model = make_shared(OutputVector{asin}, ParameterVector{p}); vector input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{11}, input)}; + 
ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::asin(x); @@ -1010,14 +1108,17 @@ TEST(eval, evaluate_asin) { } TEST(eval, evaluate_atan) { - auto p = make_shared(element::f32, Shape{11}); - auto atan = make_shared(p); - auto fun = make_shared(OutputVector{atan}, ParameterVector{p}); - auto result = make_shared(); + auto p = make_shared(element::f32, Shape{11}); + auto atan = make_shared(p); + auto model = make_shared(OutputVector{atan}, ParameterVector{p}); vector input{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{11}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::atan(x); @@ -1027,28 +1128,34 @@ TEST(eval, evaluate_atan) { } TEST(eval, evaluate_ceiling) { - auto p = make_shared(element::f32, Shape{2, 2}); - auto ceil = make_shared(p); - auto fun = make_shared(OutputVector{ceil}, ParameterVector{p}); - auto result = make_shared(); + auto p = make_shared(element::f32, Shape{2, 2}); + auto ceil = make_shared(p); + auto model = make_shared(OutputVector{ceil}, ParameterVector{p}); vector input{-2.5f, -2.0f, 0.3f, 4.8f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{2, 2}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{2, 2}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); vector expec{-2.0f, -2.0f, 1.0f, 5.0f}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_cos) { - auto p = make_shared(element::f32, Shape{11}); - auto cos = make_shared(p); - auto fun = make_shared(OutputVector{cos}, ParameterVector{p}); - auto result = make_shared(); + auto p = make_shared(element::f32, Shape{11}); + auto cos = make_shared(p); + auto model = make_shared(OutputVector{cos}, ParameterVector{p}); vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{11}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::cos(x); @@ -1058,14 +1165,17 @@ TEST(eval, evaluate_cos) { } TEST(eval, evaluate_cosh) { - auto p = make_shared(element::f32, Shape{6}); - auto cosh = make_shared(p); - auto fun = make_shared(OutputVector{cosh}, ParameterVector{p}); - auto result = make_shared(); + auto p = 
make_shared(element::f32, Shape{6}); + auto cosh = make_shared(p); + auto model = make_shared(OutputVector{cosh}, ParameterVector{p}); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{6}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::cosh(x); @@ -1075,14 +1185,17 @@ TEST(eval, evaluate_cosh) { } TEST(eval, evaluate_tan) { - auto p = make_shared(element::f32, Shape{11}); - auto tan = make_shared(p); - auto fun = make_shared(OutputVector{tan}, ParameterVector{p}); - auto result = make_shared(); + auto p = make_shared(element::f32, Shape{11}); + auto tan = make_shared(p); + auto model = make_shared(OutputVector{tan}, ParameterVector{p}); vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{11}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::tan(x); @@ -1092,14 +1205,17 @@ TEST(eval, evaluate_tan) { } TEST(eval, evaluate_tanh) { - auto p = make_shared(element::f32, Shape{6}); - auto tanh = make_shared(p); - auto fun = make_shared(OutputVector{tanh}, ParameterVector{p}); - auto result = make_shared(); + auto p = make_shared(element::f32, Shape{6}); + auto tanh = make_shared(p); + auto model = make_shared(OutputVector{tanh}, ParameterVector{p}); vector input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f}; - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{6}, input)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + EXPECT_EQ(result.get_element_type(), element::f32); auto result_val = read_vector(result); std::transform(input.begin(), input.end(), input.begin(), [](float x) -> float { return std::tanh(x); @@ -1109,119 +1225,135 @@ TEST(eval, evaluate_tanh) { } TEST(eval, evaluate_logical_not_dynamic_input_shape) { - const auto a = make_shared(element::boolean, PartialShape::dynamic()); + const auto a = make_shared(element::boolean, PartialShape::dynamic()); const auto op = make_shared(a); - const auto f = make_shared(OutputVector{op}, ParameterVector{a}); - const auto result = make_shared(); - - ASSERT_TRUE(f->evaluate({result}, {make_host_tensor(Shape{2, 1, 2}, {0, 0, 1, 1})})); - EXPECT_EQ(result->get_element_type(), element::boolean); - EXPECT_EQ(result->get_shape(), Shape({2, 1, 2})); + const auto model = make_shared(OutputVector{op}, ParameterVector{a}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = 
ov::TensorVector{make_tensor(Shape{2, 1, 2}, {0, 0, 1, 1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::boolean); + EXPECT_EQ(result.get_shape(), Shape({2, 1, 2})); EXPECT_THAT(read_vector(result), ElementsAre(1, 1, 0, 0)); } TEST(eval, evaluate_logical_not) { - auto p = make_shared(element::boolean, Shape{2, 2}); + auto p = make_shared(element::boolean, Shape{2, 2}); auto logical_not = make_shared(p); - auto fun = make_shared(OutputVector{logical_not}, ParameterVector{p}); - auto result = make_shared(); - - ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{2, 2}, {1, 0, 1, 0})})); - EXPECT_EQ(result->get_element_type(), element::boolean); + auto model = make_shared(OutputVector{logical_not}, ParameterVector{p}); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(Shape{2, 2}, {1, 0, 1, 0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); + + EXPECT_EQ(result.get_element_type(), element::boolean); auto result_val = read_vector(result); vector expec{0, 1, 0, 1}; ASSERT_EQ(result_val, expec); } TEST(eval, evaluate_dynamic_gather_v1) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::i32, PartialShape::dynamic()); auto gather = make_shared(arg1, arg2, arg3); - auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor({3}, {1.0f, 2.0f, 3.0f}), - make_host_tensor({2}, {1, 0}), - make_host_tensor({1}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2})); + auto model = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({3}, {1.0f, 2.0f, 3.0f}), + make_tensor({2}, {1, 0}), + make_tensor({1}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{2})); auto cval = read_vector(result_tensor); vector out{2.0f, 1.0f}; ASSERT_EQ(cval, out); } TEST(eval, evaluate_dynamic_gather_v1_scalar_axis) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::i64, PartialShape::dynamic()); auto gather = make_shared(arg1, arg2, arg3); - auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor({3, 3}, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f, 3.0f, 3.1f, 3.2f}), - make_host_tensor({1, 2}, {0, 2}), - make_host_tensor({}, {1})})); - 
EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 1, 2})); + auto model = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor({3, 3}, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f, 3.0f, 3.1f, 3.2f}), + make_tensor({1, 2}, {0, 2}), + make_tensor({}, {1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 1, 2})); auto cval = read_vector(result_tensor); vector out{1.0f, 1.2f, 2.0f, 2.2f, 3.0f, 3.2f}; ASSERT_EQ(cval, out); } TEST(eval, evaluate_dynamic_gather_v7) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::i32, PartialShape::dynamic()); int64_t batch_dims = 1; int32_t axis = 1; auto gather = make_shared(arg1, arg2, arg3, batch_dims); - auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor({2, 3}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}), - make_host_tensor({2, 2}, {1, 0, 1, 0}), - make_host_tensor({1}, {axis})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2})); + auto model = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({2, 3}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}), + make_tensor({2, 2}, {1, 0, 1, 0}), + make_tensor({1}, {axis})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{2, 2})); auto cval = read_vector(result_tensor); vector out{2.0f, 1.0f, 5.0f, 4.0f}; ASSERT_EQ(cval, out); } TEST(eval, evaluate_dynamic_gather_v7_axis_scalar) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::i64, PartialShape::dynamic()); int64_t batch_dims = 0; int64_t axis = 1; auto gather = make_shared(arg1, arg2, arg3, batch_dims); - auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor({3, 3}, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f, 3.0f, 3.1f, 3.2f}), - make_host_tensor({1, 2}, {0, 2}), - make_host_tensor({}, {axis})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 1, 2})); + auto model = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); + auto result_tensor = 
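As a quick sanity check on the Gather expectations above (a worked derivation, not new test code): with batch_dims = 0 the output shape is data_shape[:axis] + indices_shape + data_shape[axis+1:], so a {3, 3} table gathered with {1, 2} indices along axis 1 gives {3} + {1, 2} + {} = {3, 1, 2}; with batch_dims = 1 (the v7 case) the shared batch dimension of the indices is not repeated, so {2, 3} data with {2, 2} indices along axis 1 gives {2, 2}. These are exactly the shapes the migrated tests now assert through ov::Tensor::get_shape() instead of the old get_partial_shape() on a HostTensor.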
ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor({3, 3}, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f, 3.0f, 3.1f, 3.2f}), + make_tensor({1, 2}, {0, 2}), + make_tensor({}, {axis})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 1, 2})); auto cval = read_vector(result_tensor); vector out{1.0f, 1.2f, 2.0f, 2.2f, 3.0f, 3.2f}; ASSERT_EQ(cval, out); } TEST(eval, evaluate_dynamic_concat) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::f32, PartialShape::dynamic()); auto concat = make_shared(NodeVector{arg1, arg2}, 1); - auto fun = make_shared(OutputVector{concat}, ParameterVector{arg1, arg2}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor({1, 1}, {1.0f}), - make_host_tensor({1, 2}, {8.0f, 10.0f})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 3})); + auto model = make_shared(OutputVector{concat}, ParameterVector{arg1, arg2}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor({1, 1}, {1.0f}), + make_tensor({1, 2}, {8.0f, 10.0f})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{1, 3})); auto cval = read_vector(result_tensor); vector out{1.0f, 8.0f, 10.0f}; ASSERT_EQ(cval, out); @@ -1229,18 +1361,19 @@ TEST(eval, evaluate_dynamic_concat) { TEST(eval, max_pool_v1_dynamic) { Shape window_shape{3}; - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto f = make_shared( + auto A = make_shared(element::f32, PartialShape::dynamic()); + auto model = make_shared( make_shared(A, Strides(), Shape(), Shape(), window_shape, op::RoundingType::FLOOR), ParameterVector{A}); - auto result_tensor = make_shared(); - - ASSERT_TRUE( - f->evaluate({result_tensor}, - {make_host_tensor({1, 1, 14}, {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 1, 12})); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor({1, 1, 14}, {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{1, 1, 12})); auto cval = read_vector(result_tensor); vector out{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}; } @@ -1252,21 +1385,23 @@ TYPED_TEST_SUITE_P(ScatterElementsUpdateEvalTest); TYPED_TEST_P(ScatterElementsUpdateEvalTest, evaluate_static_scatter_elements_update_basic) { const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = 
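Two small arithmetic checks for the cases above: concatenating a {1, 1} and a {1, 2} tensor along axis 1 yields {1, 3}, and a length-3 max-pool window slid with unit stride and no padding over a 14-element row produces 14 - 3 + 1 = 12 outputs, i.e. the asserted Shape{1, 1, 12}.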
make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), - make_host_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), - make_host_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), + make_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), + make_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; ASSERT_EQ(cval, out); @@ -1276,23 +1411,25 @@ TYPED_TEST_P(ScatterElementsUpdateEvalTest, evaluate_dynamic_scatter_elements_up const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), - make_host_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), - make_host_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({}, {0})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), + make_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), + make_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + 
EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; ASSERT_EQ(cval, out); @@ -1303,23 +1440,25 @@ TYPED_TEST_P(ScatterElementsUpdateEvalTest, evaluate_dynamic_scatter_elements_up const Shape indices_shape{2, 3}; const Shape axis_shape{}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), - make_host_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), - make_host_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor(axis_shape, {-1})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), + make_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), + make_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor(axis_shape, {-1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{1.1f, 1.0f, 1.2f, 2.0f, 2.2f, 2.1f, 0.0f, 0.0f, 0.0f}; ASSERT_EQ(cval, out); @@ -1329,78 +1468,54 @@ TYPED_TEST_P(ScatterElementsUpdateEvalTest, evaluate_dynamic_scatter_elements_up const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), - make_host_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), - make_host_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({1}, {0})})); - - 
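In the negative-axis case above, the scalar -1 resolves to the last axis of the {3, 3} data (axis 1), so each row of updates is scattered into the columns named by the matching row of indices: row 0 places {1.0, 1.1, 1.2} at columns {1, 0, 2}, giving {1.1, 1.0, 1.2}. The case being migrated here differs only in passing the axis as a one-element tensor {0}, which the op treats the same as a plain scalar 0.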
EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}), + make_tensor(indices_shape, {1, 0, 2, 0, 2, 1}), + make_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({1}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; ASSERT_EQ(cval, out); } -// Disabled test for disabled reference implementation -TYPED_TEST_P(ScatterElementsUpdateEvalTest, DISABLED_evaluate_dynamic_scatter_elements_update_3d_i16) { - const Shape data_shape{3, 3, 3}; - const Shape indices_shape{2, 2, 3}; - - auto arg1 = make_shared(element::i16, PartialShape::dynamic()); - auto arg2 = make_shared(element::i16, PartialShape::dynamic()); - auto arg3 = make_shared(element::i16, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); - - auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), - make_host_tensor(indices_shape, {1, 0, 2, 0, 2, 1, 2, 2, 2, 0, 1, 0}), - make_host_tensor(indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}), - make_host_tensor({}, {1})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::i16); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 3})); - auto cval = read_vector(result_tensor); - vector out{4, 2, 0, 1, 0, 6, 0, 5, 3, 10, 0, 12, 0, 11, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - ASSERT_EQ(cval, out); -} - TYPED_TEST_P(ScatterElementsUpdateEvalTest, evaluate_dynamic_scatter_elements_update_one_elem_i32) { const Shape data_shape{3, 3, 3}; const Shape indices_shape{1, 1, 1}; - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::i32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::i32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), - make_host_tensor(indices_shape, {1}), - make_host_tensor(indices_shape, {2}), - make_host_tensor({}, {0})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::i32); - 
EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 3})); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), + make_tensor(indices_shape, {1}), + make_tensor(indices_shape, {2}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::i32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3, 3})); auto cval = read_vector(result_tensor); vector out{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; ASSERT_EQ(cval, out); @@ -1408,7 +1523,6 @@ TYPED_TEST_P(ScatterElementsUpdateEvalTest, evaluate_dynamic_scatter_elements_up REGISTER_TYPED_TEST_SUITE_P(ScatterElementsUpdateEvalTest, evaluate_dynamic_scatter_elements_update_one_elem_i32, - DISABLED_evaluate_dynamic_scatter_elements_update_3d_i16, evaluate_dynamic_scatter_elements_update_1d_axis, evaluate_dynamic_scatter_elements_update_negative_axis, evaluate_dynamic_scatter_elements_update_basic, @@ -1420,27 +1534,28 @@ INSTANTIATE_TYPED_TEST_SUITE_P(eval, ScatterElementsUpdateEvalTest, OpVersions); TEST(eval, evaluate_static_scatter_elements_update_reduction_sum) { const Shape data_shape{10}; const Shape indices_shape{4}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4, ov::op::v12::ScatterElementsUpdate::Reduction::SUM); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, - {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), - make_host_tensor(indices_shape, {5, 0, 7, 5}), - make_host_tensor(indices_shape, {5.0f, 6.0f, 1.5f, -5.0f}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor(data_shape, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), + make_tensor(indices_shape, {5, 0, 7, 5}), + make_tensor(indices_shape, {5.0f, 6.0f, 1.5f, -5.0f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 8.5f, 8.0f, 9.0f}; ASSERT_EQ(cval, out); @@ -1449,10 +1564,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_sum) { TEST(eval, 
evaluate_static_scatter_elements_update_reduction_prod_exclusive) { const Shape data_shape{10}; const Shape indices_shape{4}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1460,17 +1575,18 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_prod_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::PROD, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, - {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), - make_host_tensor(indices_shape, {1, 9, 4, 9}), - make_host_tensor(indices_shape, {5.0f, 6.0f, 1.5f, -2.0f}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor(data_shape, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), + make_tensor(indices_shape, {1, 9, 4, 9}), + make_tensor(indices_shape, {5.0f, 6.0f, 1.5f, -2.0f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{0.0f, 5.0f, 2.0f, 3.0f, 1.5f, 5.0f, 6.0f, 7.0f, 8.0f, -12.0f}; ASSERT_EQ(cval, out); @@ -1479,10 +1595,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_prod_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_reduction_mean) { const Shape data_shape{3, 3}; const Shape indices_shape{2, 2}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1490,16 +1606,18 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_mean) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MEAN, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), - make_host_tensor(indices_shape, {2, 2, 0, 1}), - make_host_tensor(indices_shape, {10.f, 21.f, 25.f, 38.f}), - make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto 
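A worked check of the two reduction cases above, using the literal values from the tests: SUM with use_init_val = true accumulates onto the original data, so index 0 becomes 0 + 6 = 6, index 5 becomes 5 + 5 + (-5) = 5 and index 7 becomes 7 + 1.5 = 8.5, matching {6, 1, 2, 3, 4, 5, 6, 8.5, 8, 9}; exclusive PROD drops the original value at every touched index, so index 1 becomes 5, index 4 becomes 1.5 and index 9 becomes 6 * (-2) = -12.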
model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor(data_shape, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), + make_tensor(indices_shape, {2, 2, 0, 1}), + make_tensor(indices_shape, {10.f, 21.f, 25.f, 38.f}), + make_tensor({}, {1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{1.0f, 2.0f, 11.33333f, 14.5f, 21.5f, 6.0f, 7.0f, 8.0f, 9.0f}; for (size_t i = 0; i < cval.size(); ++i) @@ -1509,10 +1627,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_mean) { TEST(eval, evaluate_static_scatter_elements_update_reduction_mean_exclusive) { const Shape data_shape{3, 3}; const Shape indices_shape{2, 2}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1520,16 +1638,18 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_mean_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MEAN, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), - make_host_tensor(indices_shape, {2, 2, 0, 1}), - make_host_tensor(indices_shape, {10.f, 21.f, 25.f, 38.f}), - make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor(data_shape, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), + make_tensor(indices_shape, {2, 2, 0, 1}), + make_tensor(indices_shape, {10.f, 21.f, 25.f, 38.f}), + make_tensor({}, {1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{1.0f, 2.0f, 15.5f, 25.f, 38.f, 6.0f, 7.0f, 8.0f, 9.0f}; for (size_t i = 0; i < cval.size(); ++i) @@ -1539,10 +1659,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_mean_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_reduction_mean_ints) { const Shape data_shape{3, 3}; const Shape indices_shape{2, 2}; - auto arg1 = make_shared(element::i32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::i32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::i32, data_shape); + 
auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::i32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1550,15 +1670,17 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_mean_ints) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MEAN, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 2, 3, 4, -5, 6, 7, 8, 9}), - make_host_tensor(indices_shape, {0, 1, 2, 1}), - make_host_tensor(indices_shape, {-6, -2, 600, -120}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 2, 3, 4, -5, 6, 7, 8, 9}), + make_tensor(indices_shape, {0, 1, 2, 1}), + make_tensor(indices_shape, {-6, -2, 600, -120}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::i32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{-3, 2, 3, 4, -43, 6, 303, 8, 9}; ASSERT_EQ(cval, out); @@ -1567,10 +1689,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_mean_ints) { TEST(eval, evaluate_static_scatter_elements_update_reduction_min) { const Shape data_shape{9}; const Shape indices_shape{9}; - auto arg1 = make_shared(element::i32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::i32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::i32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::i32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1578,16 +1700,18 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_min) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MIN, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {-1000, 2, 3, 4, -5, 6, 7, -2, 8}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 5, 6, 7, 0}), - make_host_tensor(indices_shape, {-999, 1, 3, 5, -4, 6, 8, 9, -1001}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {-1000, 2, 3, 4, -5, 6, 7, -2, 8}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 5, 6, 7, 0}), + make_tensor(indices_shape, {-999, 1, 3, 5, -4, 6, 8, 9, -1001}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = 
out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::i32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{-1001, 1, 3, 4, -5, 6, 7, -2, 8}; ASSERT_EQ(cval, out); @@ -1596,10 +1720,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_min) { TEST(eval, evaluate_static_scatter_elements_update_reduction_max) { const Shape data_shape{9}; const Shape indices_shape{9}; - auto arg1 = make_shared(element::i32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::i32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::i32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::i32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1607,16 +1731,18 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_max) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MAX, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {-1000, 2, 3, 4, -5, 6, 7, -2, 8}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 5, 6, 7, 0}), - make_host_tensor(indices_shape, {-999, 1, 3, 5, -4, 6, 8, 9, -1001}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {-1000, 2, 3, 4, -5, 6, 7, -2, 8}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 5, 6, 7, 0}), + make_tensor(indices_shape, {-999, 1, 3, 5, -4, 6, 8, 9, -1001}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::i32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{-999, 2, 3, 5, -4, 6, 8, 9, 8}; ASSERT_EQ(cval, out); @@ -1625,10 +1751,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_max) { TEST(eval, evaluate_static_scatter_elements_update_reduction_max_exclusive) { const Shape data_shape{9}; const Shape indices_shape{9}; - auto arg1 = make_shared(element::i32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::i32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::i32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::i32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1636,16 +1762,18 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_max_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MAX, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1000, 2, 3, 4, -5, 6, 7, -2, 
8}), - make_host_tensor(indices_shape, {0, 2, 1, 3, 7, 5, 6, 7, 0}), - make_host_tensor(indices_shape, {999, 10, 20, 30, -40, 6, 8, 9, 555}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, {1000, 2, 3, 4, -5, 6, 7, -2, 8}), + make_tensor(indices_shape, {0, 2, 1, 3, 7, 5, 6, 7, 0}), + make_tensor(indices_shape, {999, 10, 20, 30, -40, 6, 8, 9, 555}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::i32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{999, 20, 10, 30, -5, 6, 8, 9, 8}; ASSERT_EQ(cval, out); @@ -1654,10 +1782,10 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_max_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_boolean_sum) { const Shape data_shape{5}; const Shape indices_shape{6}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1665,15 +1793,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_sum) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::SUM, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 0, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 1}), - make_host_tensor(indices_shape, {0, 0, 0, 1, 1, 1}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 0, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 1}), + make_tensor(indices_shape, {0, 0, 0, 1, 1, 1}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{1, 1, 0, 1, 1}; ASSERT_EQ(cval, out); @@ -1682,10 +1812,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_sum) { TEST(eval, evaluate_static_scatter_elements_update_boolean_sum_exclusive) { const Shape data_shape{5}; const Shape indices_shape{6}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto 
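The MIN/MAX variants above follow the same scheme with min()/max() as the combiner: with use_init_val = true the original element participates, so out[0] = min(-1000, -999, -1001) = -1001, whereas the exclusive MAX case ignores it and out[0] = max(999, 555) = 999 even though the data held 1000; positions never referenced by the indices keep their original values in every variant.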
arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1693,15 +1823,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_sum_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::SUM, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 1, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 4, 4, 0}), - make_host_tensor(indices_shape, {0, 1, 0, 1, 1, 1}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 1, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 4, 4, 0}), + make_tensor(indices_shape, {0, 1, 0, 1, 1, 1}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{1, 1, 0, 1, 1}; ASSERT_EQ(cval, out); @@ -1710,10 +1842,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_sum_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_boolean_prod) { const Shape data_shape{5}; const Shape indices_shape{6}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1721,15 +1853,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_prod) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::PROD, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 0, 1, 1}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 1}), - make_host_tensor(indices_shape, {0, 0, 1, 1, 0, 1}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 0, 1, 1}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 1}), + make_tensor(indices_shape, {0, 0, 1, 1, 0, 1}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = 
out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{0, 0, 0, 1, 0}; ASSERT_EQ(cval, out); @@ -1738,10 +1872,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_prod) { TEST(eval, evaluate_static_scatter_elements_update_boolean_prod_exclusive) { const Shape data_shape{5}; const Shape indices_shape{6}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1749,15 +1883,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_prod_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::PROD, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 1, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 4, 4, 0}), - make_host_tensor(indices_shape, {0, 0, 1, 1, 1, 1}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 1, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 4, 4, 0}), + make_tensor(indices_shape, {0, 0, 1, 1, 1, 1}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{0, 0, 1, 1, 1}; ASSERT_EQ(cval, out); @@ -1766,10 +1902,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_prod_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_boolean_min) { const Shape data_shape{6}; const Shape indices_shape{8}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1777,15 +1913,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_min) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MIN, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 0, 1, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), - 
make_host_tensor(indices_shape, {0, 0, 0, 1, 0, 1, 1, 0}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 0, 1, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), + make_tensor(indices_shape, {0, 0, 0, 1, 0, 1, 1, 0}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{0, 0, 0, 1, 0, 0}; ASSERT_EQ(cval, out); @@ -1794,10 +1932,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_min) { TEST(eval, evaluate_static_scatter_elements_update_boolean_min_exclusive) { const Shape data_shape{6}; const Shape indices_shape{8}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1805,15 +1943,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_min_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MIN, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 1, 0, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), - make_host_tensor(indices_shape, {0, 0, 1, 1, 0, 1, 1, 1}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 1, 0, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), + make_tensor(indices_shape, {0, 0, 1, 1, 0, 1, 1, 1}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{0, 0, 1, 1, 0, 1}; ASSERT_EQ(cval, out); @@ -1822,10 +1962,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_min_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_boolean_max) { const Shape data_shape{6}; const Shape indices_shape{8}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = 
make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1833,15 +1973,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_max) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MAX, true); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 0, 1, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), - make_host_tensor(indices_shape, {0, 1, 0, 1, 0, 1, 0, 0}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 0, 1, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), + make_tensor(indices_shape, {0, 1, 0, 1, 0, 1, 0, 0}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{1, 1, 0, 1, 1, 0}; ASSERT_EQ(cval, out); @@ -1850,10 +1992,10 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_max) { TEST(eval, evaluate_static_scatter_elements_update_boolean_max_exclusive) { const Shape data_shape{6}; const Shape indices_shape{8}; - auto arg1 = make_shared(element::boolean, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::boolean, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::boolean, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::boolean, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, @@ -1861,15 +2003,17 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_max_exclusive) { arg4, ov::op::v12::ScatterElementsUpdate::Reduction::MAX, false); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, {1, 0, 1, 0, 1, 0}), - make_host_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), - make_host_tensor(indices_shape, {0, 1, 1, 0, 0, 1, 0, 0}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::boolean); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{make_tensor(data_shape, {1, 0, 1, 0, 1, 0}), + make_tensor(indices_shape, {0, 1, 2, 3, 4, 4, 5, 5}), + make_tensor(indices_shape, {0, 1, 1, 0, 0, 1, 0, 0}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor 
= out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::boolean); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{0, 1, 1, 0, 1, 0}; ASSERT_EQ(cval, out); @@ -1878,27 +2022,28 @@ TEST(eval, evaluate_static_scatter_elements_update_boolean_max_exclusive) { TEST(eval, evaluate_static_scatter_elements_update_reduction_sum_negative_idx) { const Shape data_shape{10}; const Shape indices_shape{4}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4, ov::op::v12::ScatterElementsUpdate::Reduction::SUM); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, - {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), - make_host_tensor(indices_shape, {-5, 0, -3, -5}), - make_host_tensor(indices_shape, {5.0f, 6.0f, 1.5f, -5.0f}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor(data_shape, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), + make_tensor(indices_shape, {-5, 0, -3, -5}), + make_tensor(indices_shape, {5.0f, 6.0f, 1.5f, -5.0f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 8.5f, 8.0f, 9.0f}; ASSERT_EQ(cval, out); @@ -1907,27 +2052,28 @@ TEST(eval, evaluate_static_scatter_elements_update_reduction_sum_negative_idx) { TEST(eval, evaluate_static_scatter_elements_update_reduction_none_negative_idx) { const Shape data_shape{2, 5}; const Shape indices_shape{2, 2}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, indices_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4, ov::op::v12::ScatterElementsUpdate::Reduction::NONE); - auto fun = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result_tensor}, - {make_host_tensor(data_shape, - {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), - make_host_tensor(indices_shape, {-5, -4, -3, -1}), - 
make_host_tensor(indices_shape, {11.5f, 12.5f, 13.5f, 14.5f}), - make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), data_shape); + auto model = make_shared(OutputVector{scatter_elements_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = ov::TensorVector{ + make_tensor(data_shape, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f}), + make_tensor(indices_shape, {-5, -4, -3, -1}), + make_tensor(indices_shape, {11.5f, 12.5f, 13.5f, 14.5f}), + make_tensor({}, {1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), data_shape); const auto cval = read_vector(result_tensor); const vector out{11.5f, 12.5f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 13.5f, 8.0f, 14.5f}; ASSERT_EQ(cval, out); @@ -1937,21 +2083,24 @@ TEST(eval, topk_v1) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - const auto k = op::Constant::create(element::i32, Shape{}, {2}); + auto A = make_shared(element::f32, shape); + const auto k = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); auto B = make_shared(A, k, 1, "max", "index", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE(fun->evaluate( - {result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 2, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1966,22 +2115,25 @@ TEST(eval, topk_v1) { TEST(eval, topk_v1_dyn) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::i32, Shape{}); + auto A = make_shared(element::f32, shape); + auto k = make_shared(element::i32, Shape{}); auto B = make_shared(A, k, 1, "max", "index", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), 
(PartialShape{2, 2, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 2, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); vector expec0{12, 9, 10, 4, 6, 3, 11, 7}; @@ -1994,22 +2146,25 @@ TEST(eval, topk_v1_dyn) { TEST(eval, topk_v3_dyn) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); + auto A = make_shared(element::f32, shape); + auto k = make_shared(element::u32, Shape{}); auto B = make_shared(A, k, 1, "max", "index", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 2, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); vector expec0{12, 9, 10, 4, 6, 3, 11, 7}; @@ -2022,22 +2177,25 @@ TEST(eval, topk_v3_dyn) { TEST(eval, topk_v3_dyn_values) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); + auto A = make_shared(element::f32, shape); + auto k = make_shared(element::u32, Shape{}); auto B = make_shared(A, k, 1, "max", "value", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, 
ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 2, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); vector expec0{12, 9, 10, 4, 11, 7, 6, 3}; @@ -2050,22 +2208,25 @@ TEST(eval, topk_v3_dyn_values) { TEST(eval, topk_v3_dyn_values_k0) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); + auto A = make_shared(element::f32, shape); + auto k = make_shared(element::u32, Shape{}); auto B = make_shared(A, k, 1, "max", "value", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 3, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); vector expec0{12, 9, 10, 4, 8, 2, 11, 7, 6, 3, 5, 1}; @@ -2078,25 +2239,28 @@ TEST(eval, topk_v3_dyn_values_k0) { TEST(eval, topk_v1_dyn_k0) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::f32, shape); + auto k = make_shared(element::i64, Shape{}); element::Type result_et{element::i32}; auto B = make_shared(A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); + auto model = make_shared(OutputVector{B->output(0), 
B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 3, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -2108,22 +2272,25 @@ TEST(eval, topk_v1_dyn_k0) { } TEST(eval, topk_v3_param_dyn_values_k0) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::u32, Shape{}); + auto A = make_shared(element::f32, PartialShape::dynamic()); + auto k = make_shared(element::u32, Shape{}); auto B = make_shared(A, k, 1, "max", "value", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 3, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); vector expec0{12, 9, 10, 4, 8, 2, 11, 7, 6, 3, 5, 1}; @@ -2134,22 +2301,25 @@ TEST(eval, topk_v3_param_dyn_values_k0) { } TEST(eval, topk_v3_param_dyn_values_k2) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::u32, Shape{}); + auto A = make_shared(element::f32, PartialShape::dynamic()); + auto k = make_shared(element::u32, Shape{}); auto B = make_shared(A, k, 1, "max", "value", element::i32); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = 
ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 2, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); vector expec0{12, 9, 10, 4, 11, 7, 6, 3}; @@ -2160,26 +2330,29 @@ TEST(eval, topk_v3_param_dyn_values_k2) { } TEST(eval, topk_v1_param_dyn_k2) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::f32, PartialShape::dynamic()); + auto k = make_shared(element::i64, Shape{}); auto axis = 1; element::Type result_et{element::i32}; auto B = make_shared(A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); + auto model = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {2})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 2, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -2191,26 +2364,29 @@ TEST(eval, topk_v1_param_dyn_k2) { } TEST(eval, topk_v1_param_dyn_k0) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::f32, PartialShape::dynamic()); + auto k = make_shared(element::i64, Shape{}); element::Type result_et{element::i32}; auto B = make_shared(A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); - auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); - - auto result0 = make_shared(); - auto result1 = make_shared(); - ASSERT_TRUE( - fun->evaluate({result0, result1}, - {make_host_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), - make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); - EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); - EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); + auto model = 
make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); + + auto result0 = ov::Tensor(); + auto result1 = ov::Tensor(); + auto out_vector = ov::TensorVector{result0, result1}; + auto in_vector = + ov::TensorVector{make_tensor(Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), + make_tensor(Shape{}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result0 = out_vector.at(0); + result1 = out_vector.at(1); + EXPECT_EQ(result0.get_element_type(), element::f32); + EXPECT_EQ(result0.get_shape(), (Shape{2, 3, 2})); + EXPECT_EQ(result1.get_element_type(), element::i32); + EXPECT_EQ(result1.get_shape(), (Shape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -2226,21 +2402,23 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32) { const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, updates_shape); - auto arg4 = make_shared(element::i32, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i32, indices_shape); + auto arg3 = make_shared(element::f32, updates_shape); + auto arg4 = make_shared(element::i32, Shape{}); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, std::vector(shape_size(data_shape))), - make_host_tensor(indices_shape, {1, 2}), - make_host_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); + auto model = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, std::vector(shape_size(data_shape))), + make_tensor(indices_shape, {1, 2}), + make_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; ASSERT_EQ(cval, out); @@ -2251,21 +2429,23 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i64) { const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i64, indices_shape); - auto arg3 = make_shared(element::f32, updates_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::f32, data_shape); + auto arg2 = make_shared(element::i64, indices_shape); + auto arg3 = make_shared(element::f32, updates_shape); + auto arg4 = make_shared(element::i64, Shape{}); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, 
std::vector(shape_size(data_shape))), - make_host_tensor(indices_shape, {1, 2}), - make_host_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); + auto model = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, std::vector(shape_size(data_shape))), + make_tensor(indices_shape, {1, 2}), + make_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; ASSERT_EQ(cval, out); @@ -2276,23 +2456,25 @@ TEST(eval, evaluate_dynamic_scatter_update_basic) { const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, std::vector(shape_size(data_shape))), - make_host_tensor(indices_shape, {1, 2}), - make_host_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({}, {0})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); + auto model = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, std::vector(shape_size(data_shape))), + make_tensor(indices_shape, {1, 2}), + make_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({}, {0})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; ASSERT_EQ(cval, out); @@ -2304,23 +2486,25 @@ TEST(eval, evaluate_dynamic_scatter_update_negative_axis) { const Shape updates_shape{3, 1, 2}; const Shape axis_shape{}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); 
+ auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, std::vector(shape_size(data_shape))), - make_host_tensor(indices_shape, {1, 2}), - make_host_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor(axis_shape, {-1})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); + auto model = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, std::vector(shape_size(data_shape))), + make_tensor(indices_shape, {1, 2}), + make_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor(axis_shape, {-1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 1.0f, 1.1f, 0.0f, 1.2f, 2.0f, 0.0f, 2.1f, 2.2f}; ASSERT_EQ(cval, out); @@ -2331,23 +2515,25 @@ TEST(eval, evaluate_dynamic_scatter_update_1d_axis) { const Shape indices_shape{1, 2}; const Shape updates_shape{3, 1, 2}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); - auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); - auto result_tensor = make_shared(); - ASSERT_TRUE( - fun->evaluate({result_tensor}, - {make_host_tensor(data_shape, std::vector(shape_size(data_shape))), - make_host_tensor(indices_shape, {1, 2}), - make_host_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), - make_host_tensor({1}, {1})})); - - EXPECT_EQ(result_tensor->get_element_type(), element::f32); - EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); + auto model = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); + auto result_tensor = ov::Tensor(); + auto out_vector = ov::TensorVector{result_tensor}; + auto in_vector = + ov::TensorVector{make_tensor(data_shape, std::vector(shape_size(data_shape))), + make_tensor(indices_shape, {1, 2}), + make_tensor(updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), + make_tensor({1}, {1})}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result_tensor = out_vector.at(0); + + EXPECT_EQ(result_tensor.get_element_type(), element::f32); + EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 1.0f, 1.1f, 0.0f, 1.2f, 2.0f, 
0.0f, 2.1f, 2.2f};
     ASSERT_EQ(cval, out);
@@ -2358,23 +2544,25 @@ TEST(eval, evaluate_dynamic_scatter_update_one_elem_i32) {
     const Shape indices_shape{1, 1};
     const Shape updates_shape{1, 1, 3, 2};
-    auto arg1 = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
-    auto arg2 = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
-    auto arg3 = make_shared<op::Parameter>(element::i32, PartialShape::dynamic());
-    auto arg4 = make_shared<op::Parameter>(element::i64, PartialShape::dynamic());
+    auto arg1 = make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
+    auto arg2 = make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
+    auto arg3 = make_shared<ov::op::v0::Parameter>(element::i32, PartialShape::dynamic());
+    auto arg4 = make_shared<ov::op::v0::Parameter>(element::i64, PartialShape::dynamic());
     auto scatter_update = make_shared<op::v3::ScatterUpdate>(arg1, arg2, arg3, arg4);
-    auto fun = make_shared<Function>(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4});
-    auto result_tensor = make_shared<HostTensor>();
-    ASSERT_TRUE(
-        fun->evaluate({result_tensor},
-                      {make_host_tensor<element::Type_t::i32>(data_shape, std::vector<int32_t>(shape_size(data_shape))),
-                       make_host_tensor<element::Type_t::i32>(indices_shape, {1}),
-                       make_host_tensor<element::Type_t::i32>(updates_shape, {1, 2, 3, 4, 5, 6}),
-                       make_host_tensor<element::Type_t::i64>({}, {0})}));
-
-    EXPECT_EQ(result_tensor->get_element_type(), element::i32);
-    EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 2}));
+    auto model = make_shared<Model>(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4});
+    auto result_tensor = ov::Tensor();
+    auto out_vector = ov::TensorVector{result_tensor};
+    auto in_vector =
+        ov::TensorVector{make_tensor<element::Type_t::i32>(data_shape, std::vector<int32_t>(shape_size(data_shape))),
+                         make_tensor<element::Type_t::i32>(indices_shape, {1}),
+                         make_tensor<element::Type_t::i32>(updates_shape, {1, 2, 3, 4, 5, 6}),
+                         make_tensor<element::Type_t::i64>({}, {0})};
+    ASSERT_TRUE(model->evaluate(out_vector, in_vector));
+    result_tensor = out_vector.at(0);
+
+    EXPECT_EQ(result_tensor.get_element_type(), element::i32);
+    EXPECT_EQ(result_tensor.get_shape(), (Shape{3, 3, 2}));
     auto cval = read_vector<int32_t>(result_tensor);
     vector<int32_t> out{0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0};
     ASSERT_EQ(cval, out);
@@ -2382,28 +2570,31 @@ TEST(eval, evaluate_dynamic_scatter_update_one_elem_i32) {
 TEST(eval, evaluate_softmax_8) {
     const Shape data_shape{1, 2};
-    auto arg = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto softmax = std::make_shared<op::v8::Softmax>(arg, -1);
-    auto fun = std::make_shared<Function>(OutputVector{softmax}, ParameterVector{arg});
-    auto result_tensor = std::make_shared<HostTensor>();
-
-    ASSERT_TRUE(fun->evaluate({result_tensor}, {make_host_tensor<element::Type_t::f32>(data_shape, {1, 1})}));
-    EXPECT_EQ(result_tensor->get_element_type(), element::f32);
-    EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 2}));
+    auto arg = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape::dynamic());
+    auto softmax = std::make_shared<ov::op::v8::Softmax>(arg, -1);
+    auto model = std::make_shared<Model>(OutputVector{softmax}, ParameterVector{arg});
+    auto result_tensor = ov::Tensor();
+    auto out_vector = ov::TensorVector{result_tensor};
+    auto in_vector = ov::TensorVector{make_tensor<element::Type_t::f32>(data_shape, {1, 1})};
+    ASSERT_TRUE(model->evaluate(out_vector, in_vector));
+    result_tensor = out_vector.at(0);
+
+    EXPECT_EQ(result_tensor.get_element_type(), element::f32);
+    EXPECT_EQ(result_tensor.get_shape(), (Shape{1, 2}));
     auto val = read_vector<float>(result_tensor);
     vector<float> out{0.5, 0.5};
     ASSERT_EQ(val, out);
 }
 TEST(eval, evaluate_softsign_9) {
-    auto arg = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto softsign = std::make_shared<op::v9::SoftSign>(arg);
-    auto fun = std::make_shared<Function>(OutputVector{softsign}, ParameterVector{arg});
+    auto arg = std::make_shared<ov::op::v0::Parameter>(element::f32,
PartialShape::dynamic()); + auto softsign = std::make_shared(arg); + auto model = std::make_shared(OutputVector{softsign}, ParameterVector{arg}); ov::TensorVector result_tensor(1); float input_vector[] = {1, -1, 2.5, -3.5}; ov::Tensor input{ov::element::f32, ov::Shape{4}, input_vector}; - ASSERT_TRUE(fun->evaluate(result_tensor, ov::TensorVector{input})); + ASSERT_TRUE(model->evaluate(result_tensor, ov::TensorVector{input})); EXPECT_EQ(result_tensor[0].get_element_type(), ov::element::f32); EXPECT_EQ(result_tensor[0].get_shape(), ov::Shape{4}); @@ -2417,31 +2608,34 @@ TEST(eval, evaluate_fake_quantize_dynamic_input) { using namespace testing; constexpr auto et = element::f32; - auto param = make_shared(et, PartialShape::dynamic()); + auto param = make_shared(et, PartialShape::dynamic()); auto in_low = op::v0::Constant::create(et, Shape{}, {0.f}); auto in_high = op::v0::Constant::create(et, Shape{}, {5.f}); auto out_low = op::v0::Constant::create(et, Shape{}, {2.f}); auto out_high = op::v0::Constant::create(et, Shape{}, {4.f}); auto op = make_shared(param, in_low, in_high, out_low, out_high, 4); - auto f = make_shared(OutputVector{op}, ParameterVector{param}); + auto model = make_shared(OutputVector{op}, ParameterVector{param}); const auto exp_shape = Shape{1, 3, 2}; std::vector input_data; std::generate_n(std::back_inserter(input_data), shape_size(exp_shape), ov::SeqGen(0.f)); - auto result = make_shared(); - ASSERT_TRUE(f->evaluate({result}, {make_host_tensor(exp_shape, input_data)})); + auto result = ov::Tensor(); + auto out_vector = ov::TensorVector{result}; + auto in_vector = ov::TensorVector{make_tensor(exp_shape, input_data)}; + ASSERT_TRUE(model->evaluate(out_vector, in_vector)); + result = out_vector.at(0); - EXPECT_EQ(result->get_element_type(), et); - EXPECT_EQ(result->get_shape(), exp_shape); + EXPECT_EQ(result.get_element_type(), et); + EXPECT_EQ(result.get_shape(), exp_shape); EXPECT_THAT(read_vector(result), Pointwise(FloatEq(), std::vector{2.f, 2.6666667f, 2.6666667f, 3.3333333f, 3.3333333f, 4.f})); } TEST(eval, evaluate_cum_sum_v0) { - auto data = make_shared(element::f32, Shape{2, 3}); - auto axis = op::Constant::create(element::i32, Shape{1}, {1}); + auto data = make_shared(element::f32, Shape{2, 3}); + auto axis = ov::op::v0::Constant::create(element::i32, Shape{1}, {1}); auto cs = make_shared(data, axis); auto m = make_shared(OutputVector{cs}, ParameterVector{data}); @@ -2456,8 +2650,8 @@ TEST(eval, evaluate_cum_sum_v0) { } TEST(eval, evaluate_cum_sum_v0_exclusive_reversed) { - auto data = make_shared(element::f32, Shape{5}); - auto axis = op::Constant::create(element::i32, Shape{1}, {0}); + auto data = make_shared(element::f32, Shape{5}); + auto axis = ov::op::v0::Constant::create(element::i32, Shape{1}, {0}); auto cs = make_shared(data, axis, true, true); auto m = make_shared(OutputVector{cs}, ParameterVector{data}); diff --git a/src/core/tests/utils/eval_utils.hpp b/src/core/tests/utils/eval_utils.hpp index 036754e8b76142..579b6b925c5f22 100644 --- a/src/core/tests/utils/eval_utils.hpp +++ b/src/core/tests/utils/eval_utils.hpp @@ -6,90 +6,93 @@ #include -#include "gtest/gtest.h" -#include "ngraph/runtime/host_tensor.hpp" #include "openvino/core/node.hpp" #include "openvino/core/shape.hpp" -OPENVINO_SUPPRESS_DEPRECATED_START namespace { template -void copy_data(const std::shared_ptr& tv, const std::vector& data) { +void copy_data(const ov::Tensor& tv, const std::vector& data) { size_t data_size = data.size() * sizeof(T); if (data_size > 0) { - tv->write(data.data(), 
data_size); + OPENVINO_ASSERT(tv.get_byte_size() >= data_size); + + memcpy(tv.data(), data.data(), data_size); } } template <> -inline void copy_data(const std::shared_ptr& tv, const std::vector& data) { +inline void copy_data(const ov::Tensor& tv, const std::vector& data) { std::vector data_char(data.begin(), data.end()); copy_data(tv, data_char); } template -void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) { - size_t size = tv->get_element_count(); +void init_int_tv(const ov::Tensor& tv, std::default_random_engine& engine, T min, T max) { + size_t size = tv.get_size(); std::uniform_int_distribution dist(min, max); std::vector vec(size); for (T& element : vec) { element = dist(engine); } - tv->write(vec.data(), vec.size() * sizeof(T)); + size_t data_size = vec.size() * sizeof(T); + OPENVINO_ASSERT(tv.get_byte_size() >= data_size); + memcpy(tv.data(), vec.data(), data_size); } template <> -inline void init_int_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, char min, char max) { - size_t size = tv->get_element_count(); +inline void init_int_tv(const ov::Tensor& tv, std::default_random_engine& engine, char min, char max) { + size_t size = tv.get_size(); std::uniform_int_distribution dist(static_cast(min), static_cast(max)); std::vector vec(size); for (char& element : vec) { element = static_cast(dist(engine)); } - tv->write(vec.data(), vec.size() * sizeof(char)); + size_t data_size = vec.size() * sizeof(char); + OPENVINO_ASSERT(tv.get_byte_size() >= data_size); + memcpy(tv.data(), vec.data(), data_size); } template <> -inline void init_int_tv(ngraph::runtime::Tensor* tv, - std::default_random_engine& engine, - int8_t min, - int8_t max) { - size_t size = tv->get_element_count(); +inline void init_int_tv(const ov::Tensor& tv, std::default_random_engine& engine, int8_t min, int8_t max) { + size_t size = tv.get_size(); std::uniform_int_distribution dist(static_cast(min), static_cast(max)); std::vector vec(size); for (int8_t& element : vec) { element = static_cast(dist(engine)); } - tv->write(vec.data(), vec.size() * sizeof(int8_t)); + size_t data_size = vec.size() * sizeof(int8_t); + OPENVINO_ASSERT(tv.get_byte_size() >= data_size); + memcpy(tv.data(), vec.data(), data_size); } template <> -inline void init_int_tv(ngraph::runtime::Tensor* tv, - std::default_random_engine& engine, - uint8_t min, - uint8_t max) { - size_t size = tv->get_element_count(); +inline void init_int_tv(const ov::Tensor& tv, std::default_random_engine& engine, uint8_t min, uint8_t max) { + size_t size = tv.get_size(); std::uniform_int_distribution dist(static_cast(min), static_cast(max)); std::vector vec(size); for (uint8_t& element : vec) { element = static_cast(dist(engine)); } - tv->write(vec.data(), vec.size() * sizeof(uint8_t)); + size_t data_size = vec.size() * sizeof(uint8_t); + OPENVINO_ASSERT(tv.get_byte_size() >= data_size); + memcpy(tv.data(), vec.data(), data_size); } template -void init_real_tv(ngraph::runtime::Tensor* tv, std::default_random_engine& engine, T min, T max) { - size_t size = tv->get_element_count(); +void init_real_tv(const ov::Tensor& tv, std::default_random_engine& engine, T min, T max) { + size_t size = tv.get_size(); std::uniform_real_distribution dist(min, max); std::vector vec(size); for (T& element : vec) { element = dist(engine); } - tv->write(vec.data(), vec.size() * sizeof(T)); + size_t data_size = vec.size() * sizeof(T); + OPENVINO_ASSERT(tv.get_byte_size() >= data_size); + memcpy(tv.data(), vec.data(), data_size); } 
-inline void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine) { - ov::element::Type et = tv->get_element_type(); +inline void random_init(const ov::Tensor& tv, std::default_random_engine& engine) { + ov::element::Type et = tv.get_element_type(); if (et == ov::element::boolean) { init_int_tv(tv, engine, 0, 1); } else if (et == ov::element::f32) { @@ -119,19 +122,18 @@ inline void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& } // namespace template -ngraph::HostTensorPtr make_host_tensor(const ov::Shape& shape, - const std::vector::value_type>& data) { +ov::Tensor make_tensor(const ov::Shape& shape, + const std::vector::value_type>& data) { OPENVINO_ASSERT(shape_size(shape) == data.size(), "Incorrect number of initialization elements"); - auto host_tensor = std::make_shared(ET, shape); - copy_data(host_tensor, data); - return host_tensor; + auto tensor = ov::Tensor(ET, shape); + copy_data(tensor, data); + return tensor; } template -ngraph::HostTensorPtr make_host_tensor(const ov::Shape& shape) { - auto host_tensor = std::make_shared(ET, shape); +ov::Tensor make_tensor(const ov::Shape& shape) { + auto tensor = ov::Tensor(ET, shape); static std::default_random_engine engine(2112); - random_init(host_tensor.get(), engine); - return host_tensor; + random_init(tensor, engine); + return tensor; } -OPENVINO_SUPPRESS_DEPRECATED_END From 2e78eec502da1cb5d73607988f9a21b1d9b21a20 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 28 Aug 2023 11:27:09 +0200 Subject: [PATCH 2/3] Fix boxes dim calculation when scores dynamic rank (#19097) * Fix boxes dim calculation when scores dynamic rank * NMS shape infer improve upper bound calculation * Calculate boxes if required shapes has static rank * Optimize shape_infer for NMS v4 * Reorder checks in nms v4 for selected boxes --- .../include/nms_shape_inference.hpp | 63 +++++++------------ .../tests/type_prop/non_max_suppression.cpp | 46 +++++++++++++- .../subgraph_tests/src/convert_range.cpp | 5 +- 3 files changed, 72 insertions(+), 42 deletions(-) diff --git a/src/core/shape_inference/include/nms_shape_inference.hpp b/src/core/shape_inference/include/nms_shape_inference.hpp index 6885a991b9a06e..a4ab52e43bbe31 100644 --- a/src/core/shape_inference/include/nms_shape_inference.hpp +++ b/src/core/shape_inference/include/nms_shape_inference.hpp @@ -115,7 +115,7 @@ std::vector shape_infer(const Node* op, const auto& boxes_shape = input_shapes[0]; const auto& scores_shape = input_shapes[1]; - auto output_shapes = std::vector{TRShape{TDim(-1), 3}}; + auto output_shapes = std::vector{TRShape{TDim(dim::inf_bound), 3}}; if (boxes_shape.rank().is_static()) { const auto max_out_boxes_per_class = get_input_const_data_as(op, 2, ta); auto max_out_class_boxes = @@ -147,6 +147,7 @@ std::vector shape_infer(const Node* op, NODE_VALIDATION_CHECK(op, cmp::Between(1, 7)(inputs_size)); using TDim = typename TRShape::value_type; using V = typename TDim::value_type; + using namespace ov::util; nms::validate::boxes_shape(op, input_shapes); nms::validate::scores_shape(op, input_shapes); @@ -180,44 +181,31 @@ std::vector shape_infer(const Node* op, } const auto& boxes_shape = input_shapes[0]; - const auto& scores_shape = input_shapes[1]; - const auto boxes_rank = boxes_shape.rank(); - const auto scores_rank = scores_shape.rank(); - - auto out_shape = TRShape{TDim(-1), 3}; - if (boxes_rank.is_static()) { - int64_t max_out_boxes_per_class_val; - if (const auto max_out_boxes_per_class = get_input_const_data_as(op, 2, ta)) { - 
max_out_boxes_per_class_val = max_out_boxes_per_class->front(); - } else { - max_out_boxes_per_class_val = -1; - } - const auto& num_boxes = boxes_shape[1]; - auto& selected_boxes = out_shape[0]; - if (num_boxes.is_static()) { - const auto min_selected_boxes = - std::min(num_boxes.get_length(), static_cast(max_out_boxes_per_class_val)); - selected_boxes = static_output ? TDim{min_selected_boxes} : TDim{0, min_selected_boxes}; - } else if (scores_rank.is_static() && num_boxes.get_max_length() != -1 && - scores_shape[0].get_max_length() != -1 && scores_shape[1].get_max_length() != -1) { - const auto min_selected_boxes = - std::min(num_boxes.get_max_length(), static_cast(max_out_boxes_per_class_val)); - selected_boxes = static_output ? TDim{min_selected_boxes} : TDim{0, min_selected_boxes}; - } - if (scores_rank.is_static()) { + auto out_shape = TRShape{TDim(dim::inf_bound), 3}; + if (boxes_shape.rank().is_static()) { + const auto& scores_shape = input_shapes[1]; + + if (scores_shape.rank().is_static()) { nms::validate::num_batches(op, input_shapes); nms::validate::num_boxes(op, input_shapes); + auto& selected_boxes = out_shape[0]; + if (const auto max_out_boxes_per_class = get_input_const_data_as(op, 2, ta)) { + const auto& num_boxes = boxes_shape[1]; + const auto min_selected_boxes = + std::min(num_boxes.get_max_length(), static_cast(max_out_boxes_per_class->front())); + selected_boxes = static_output ? TDim{min_selected_boxes} : TDim{0, min_selected_boxes}; + } + selected_boxes *= scores_shape[0].get_max_length(); selected_boxes *= scores_shape[1].get_max_length(); } - nms::validate::boxes_last_dim(op, input_shapes); } auto output_shapes = std::vector(2, out_shape); - output_shapes.emplace_back(std::initializer_list{1}); + output_shapes.emplace_back(std::initializer_list{1}); return output_shapes; } } // namespace nms @@ -257,20 +245,17 @@ std::vector shape_infer(const NonMaxSuppression* op, const auto& boxes_shape = input_shapes[0]; const auto& scores_shape = input_shapes[1]; - auto output_shapes = std::vector{TRShape{TDim(-1), 3}}; - if (boxes_shape.rank().is_static()) { - const auto max_out_boxes_per_class = get_input_const_data_as(op, 2, ta); - const auto max_out_class_boxes = max_out_boxes_per_class ? 
max_out_boxes_per_class->front() : dim::inf_bound; + auto output_shapes = std::vector{TRShape{TDim(dim::inf_bound), 3}}; + if (boxes_shape.rank().is_static() && scores_shape.rank().is_static()) { const auto& num_boxes = boxes_shape[1]; - auto& selected_boxes = output_shapes[0][0]; if (num_boxes.is_static()) { - selected_boxes = std::min(num_boxes.get_length(), static_cast(max_out_class_boxes)); - } - - if (scores_shape.rank().is_static()) { - selected_boxes *= scores_shape[0].get_max_length(); - selected_boxes *= scores_shape[1].get_max_length(); + if (const auto max_out_boxes_per_class = get_input_const_data_as(op, 2, ta)) { + auto& selected_boxes = output_shapes[0][0]; + selected_boxes = std::min(num_boxes.get_length(), static_cast(max_out_boxes_per_class->front())); + selected_boxes *= scores_shape[0].get_max_length(); + selected_boxes *= scores_shape[1].get_max_length(); + } } } diff --git a/src/core/tests/type_prop/non_max_suppression.cpp b/src/core/tests/type_prop/non_max_suppression.cpp index 140a03d2f0b7e1..4d2e4c2c3c2706 100644 --- a/src/core/tests/type_prop/non_max_suppression.cpp +++ b/src/core/tests/type_prop/non_max_suppression.cpp @@ -368,6 +368,29 @@ TYPED_TEST_P(NMSDynamicOutputTest, interval_shapes_labels) { EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(2)), Each(no_label)); } +TYPED_TEST_P(NMSDynamicOutputTest, num_box_dynamic_dim_max_boxes_per_class_as_const) { + auto boxes_shape = PartialShape{2, -1, 4}; + auto scores_shape = PartialShape{2, {0, 5}, {1, 7}}; + set_shape_labels(boxes_shape, 10); + set_shape_labels(scores_shape, 20); + + const auto boxes = make_shared(element::f32, boxes_shape); + const auto scores = make_shared(element::f32, scores_shape); + const auto max_output_boxes_per_class = op::v0::Constant::create(element::i16, Shape{}, {5}); + const auto iou_threshold = make_shared(element::f32, Shape{}); + const auto score_threshold = make_shared(element::f32, Shape{}); + + const auto op = this->make_op(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); + + EXPECT_THAT(op->outputs(), + ElementsAre(Property("Indicies shape", &Output::get_partial_shape, PartialShape({-1, 3})), + Property("Scores shape", &Output::get_partial_shape, PartialShape({-1, 3})), + Property("Outputs shape", &Output::get_partial_shape, PartialShape({1})))); + EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), Each(no_label)); + EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(1)), Each(no_label)); + EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(2)), Each(no_label)); +} + TYPED_TEST_P(NMSDynamicOutputTest, output_shape_i32) { const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); const auto scores = make_shared(element::f32, Shape{2, 5, 7}); @@ -429,6 +452,25 @@ TYPED_TEST_P(NMSDynamicOutputTest, dynamic_types) { Property("Outputs shape", &Output::get_partial_shape, PartialShape({1})))); } +TYPED_TEST_P(NMSDynamicOutputTest, scores_shape_is_dynamic_rank) { + const auto boxes = make_shared(element::dynamic, Shape{5, 2, 4}); + const auto scores = make_shared(element::dynamic, PartialShape::dynamic()); + const auto max_output_boxes_per_class = op::v0::Constant::create(element::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::f32, Shape{}); + const auto score_threshold = make_shared(element::f32, Shape{}); + + const auto op = this->make_op(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); + + EXPECT_THAT(op->outputs(), + ElementsAre(Property("Indicies type", 
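An illustrative sketch, separate from the patch: the same bound expressed with ov::Dimension interval arithmetic, which is what the new type_prop expectations exercise; the concrete numbers are hypothetical.

    #include <iostream>
    #include "openvino/core/dimension.hpp"

    int main() {
        // Bounded case: per-class cap of 5, num_batches = 2, num_classes at most 7,
        // so the first output dim becomes the interval {0, 5 * 2 * 7} = {0, 70}.
        ov::Dimension selected(0, 5);
        selected *= 2;
        selected *= 7;
        std::cout << selected.get_min_length() << ".." << selected.get_max_length() << "\n";  // 0..70

        // Unbounded case: with boxes like {2, -1, 4} the num_boxes dim reports no
        // upper bound, so no finite cap can be derived and the output stays {-1, 3},
        // which is what num_box_dynamic_dim_max_boxes_per_class_as_const asserts.
        std::cout << ov::Dimension::dynamic().get_max_length() << "\n";  // -1
        return 0;
    }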
&Output::get_element_type, element::i64), + Property("Scores type", &Output::get_element_type, element::f32), + Property("Outputs type", &Output::get_element_type, element::i64))); + EXPECT_THAT(op->outputs(), + ElementsAre(Property("Indicies shape", &Output::get_partial_shape, PartialShape({-1, 3})), + Property("Scores shape", &Output::get_partial_shape, PartialShape({-1, 3})), + Property("Outputs shape", &Output::get_partial_shape, PartialShape({1})))); +} + REGISTER_TYPED_TEST_SUITE_P(NMSDynamicOutputTest, scalar_inputs_check, boxes_scores_static_other_defaults, @@ -436,8 +478,10 @@ REGISTER_TYPED_TEST_SUITE_P(NMSDynamicOutputTest, num_boxes_lt_max_out_boxes, max_out_boxes_is_zero, interval_shapes_labels, + num_box_dynamic_dim_max_boxes_per_class_as_const, output_shape_i32, dynamic_boxes_and_scores, - dynamic_types); + dynamic_types, + scores_shape_is_dynamic_rank); using NMSDynamicOutputTypes = testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, NMSDynamicOutputTest, NMSDynamicOutputTypes); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp index c4e2255b8bcce4..76728e7b1f2fdf 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp @@ -35,11 +35,12 @@ namespace SubgraphTestsDefinitions { \ / \ / \ / - MatMul + MatMul | Result - This test is needed to cover logic that allows to avoid computational error for subgraph: "[I32] -> Convert -> [F32] -> Range" due to precision lowering for floating point path inside "EnforceInferencePrecision" pass". + This test is needed to cover logic that allows to avoid computational error for subgraph: "[I32] -> Convert -> [F32] + -> Range" due to precision lowering for floating point path inside "EnforceInferencePrecision" pass". TODO: Incorrect subgraph is generated by ONNX FE + ticket 117861. */ From 94c21b53b34a7a4fd2b15b132b0008967a65de55 Mon Sep 17 00:00:00 2001 From: Wilson Seok Date: Mon, 28 Aug 2023 19:10:05 +0900 Subject: [PATCH 3/3] fix build error by removing makeDynamicParam (#19431) --- .../dynamic_smoke_test_reduce_deconvolution_concat.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp index 176d2a2ebd1f2c..fce1c81bcca3a2 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp @@ -79,9 +79,11 @@ class ReduceDeconvConcatDynamicGPUTest : public testing::WithParamInterface(netType, shape)); + } auto paramOuts = helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); auto deconvOp = ngraph::builder::makeConvolutionBackpropData(paramOuts[0], netType, {2, 2, 2}, {2, 2, 2}, {0, 0, 0},
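An illustrative sketch, separate from the patch: the parameter-building pattern PATCH 3/3 switches to, constructing each ov::op::v0::Parameter directly instead of going through the removed makeDynamicParams helper; netType and inputDynamicShapes stand in for the test fixture's own members.

    #include <memory>
    #include <vector>
    #include "openvino/core/partial_shape.hpp"
    #include "openvino/core/type/element_type.hpp"
    #include "openvino/op/parameter.hpp"

    ov::ParameterVector make_params_sketch(const ov::element::Type& netType,
                                           const std::vector<ov::PartialShape>& inputDynamicShapes) {
        ov::ParameterVector params;
        for (const auto& shape : inputDynamicShapes) {
            // One Parameter per dynamic input shape, replacing the old helper call.
            params.push_back(std::make_shared<ov::op::v0::Parameter>(netType, shape));
        }
        return params;
    }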