diff --git a/src/core/tests/frontend/paddle/op_fuzzy.cpp b/src/core/tests/frontend/paddle/op_fuzzy.cpp
index 137131e8ddabfb..de317149a7aa8e 100644
--- a/src/core/tests/frontend/paddle/op_fuzzy.cpp
+++ b/src/core/tests/frontend/paddle/op_fuzzy.cpp
@@ -100,6 +100,7 @@ static const std::vector<std::string> models{
     std::string("dropout_upscale_in_train"),
     std::string("elementwise_add1"),
     std::string("elementwise_div1"),
+    std::string("elementwise_floordiv1"),
     std::string("elementwise_max1"),
     std::string("elementwise_min1"),
     std::string("elementwise_mul1"),
@@ -107,6 +108,7 @@ static const std::vector<std::string> models{
     std::string("elementwise_sub1"),
     std::string("elementwise_add2"),
     std::string("elementwise_div2"),
+    std::string("elementwise_floordiv2"),
     std::string("elementwise_max2"),
     std::string("elementwise_min2"),
     std::string("elementwise_mul2"),
@@ -114,6 +116,7 @@ static const std::vector<std::string> models{
     std::string("elementwise_sub2"),
     std::string("elementwise_add3"),
     std::string("elementwise_div3"),
+    std::string("elementwise_floordiv3"),
     std::string("elementwise_max3"),
     std::string("elementwise_min3"),
     std::string("elementwise_mul3"),
@@ -121,6 +124,7 @@ static const std::vector<std::string> models{
     std::string("elementwise_sub3"),
     std::string("elementwise_add4"),
     std::string("elementwise_div4"),
+    std::string("elementwise_floordiv4"),
     std::string("elementwise_max4"),
     std::string("elementwise_min4"),
     std::string("elementwise_mul4"),
diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_elementwise_floordiv.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_elementwise_floordiv.py
new file mode 100644
index 00000000000000..dfec520424fcd5
--- /dev/null
+++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_elementwise_floordiv.py
@@ -0,0 +1,51 @@
+import sys
+import numpy as np
+import paddle
+
+from save_model import saveModel
+
+
+def elementwise_floordiv(name: str, x, y):
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
+        node_y = paddle.static.data(name='y', shape=y.shape, dtype=y.dtype)
+        out = paddle.floor_divide(node_x, node_y)
+
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+
+        # the startup program runs the initializers for the parameters
+        exe.run(paddle.static.default_startup_program())
+        outs = exe.run(
+            feed={'x': x, 'y': y},
+            fetch_list=[out])
+        saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]],
+                  target_dir=sys.argv[1])
+
+    return outs[0]
+
+
+def main():
+    in_dtype = 'int64'
+    data_x = np.array([2, 3, 4]).astype(in_dtype)
+    data_y = np.array([1, 5, 2]).astype(in_dtype)
+    elementwise_floordiv("elementwise_floordiv1", data_x, data_y)
+
+    # negative inputs
+    data_x = np.array([-2, -3, -4]).astype(in_dtype)
+    data_y = np.array([-1, -5, -2]).astype(in_dtype)
+    elementwise_floordiv("elementwise_floordiv2", data_x, data_y)
+
+    # data_y's shape is a contiguous trailing subsequence of data_x's shape
+    data_x = np.random.randint(1, 5, size=[2, 3, 4, 5]).astype(in_dtype)
+    data_y = np.random.randint(1, 5, size=[4, 5]).astype(in_dtype)
+    elementwise_floordiv("elementwise_floordiv3", data_x, data_y)
+
+    data_y = np.random.randint(1, 5, size=[5]).astype(in_dtype)
+    elementwise_floordiv("elementwise_floordiv4", data_x, data_y)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/frontends/paddle/src/op/elementwise_ops.cpp b/src/frontends/paddle/src/op/elementwise_ops.cpp
index b833f1a19d4a1f..b750e3bd6cc257 100644
--- a/src/frontends/paddle/src/op/elementwise_ops.cpp
+++ b/src/frontends/paddle/src/op/elementwise_ops.cpp
@@ -46,6 +46,32 @@ NamedOutputs elementwise_greater_equal(const NodeContext& node_context) {
     return elementwise_ops<default_opset::GreaterEqual>(node_context);
 }
 
+NamedOutputs elementwise_floordiv(const NodeContext& node_context) {
+    auto x = node_context.get_input("X");
+    auto y = node_context.get_input("Y");
+    const auto axis = node_context.get_attribute<int>("axis", -1);
+
+    PADDLE_OP_CHECK(node_context, x.get_partial_shape().rank().is_static(), "elementwise_ops: X rank must be static!");
+    PADDLE_OP_CHECK(node_context, y.get_partial_shape().rank().is_static(), "elementwise_ops: Y rank must be static!");
+    int64_t x_rank = x.get_partial_shape().rank().get_length();
+    int64_t y_rank = y.get_partial_shape().rank().get_length();
+
+    if ((axis == -1) || (axis == x_rank - 1) || (x_rank == y_rank)) {
+        return node_context.default_single_output_mapping({std::make_shared<default_opset::Divide>(x, y, true)},
+                                                           {"Out"});
+    } else {
+        std::vector<int64_t> indices;
+        for (int64_t i = 0; i < axis; i++)
+            indices.push_back(i);
+        for (int64_t i = y_rank + axis; i < x_rank; i++)
+            indices.push_back(i);
+
+        auto indices_node = default_opset::Constant::create(ov::element::i64, ov::Shape{indices.size()}, indices);
+        auto y_node = std::make_shared<default_opset::Unsqueeze>(y, indices_node);
+        return node_context.default_single_output_mapping({std::make_shared<default_opset::Divide>(x, y_node, true)},
+                                                           {"Out"});
+    }
+}
+
 }  // namespace op
 }  // namespace paddle
 }  // namespace frontend
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index 5af21316595145..7972ec0b5fa812 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -25,6 +25,7 @@ OP_CONVERTER(dropout);
 OP_CONVERTER(elementwise_add);
 OP_CONVERTER(elementwise_div);
 OP_CONVERTER(elementwise_equal);
+OP_CONVERTER(elementwise_floordiv);
 OP_CONVERTER(elementwise_greater_equal);
 OP_CONVERTER(elementwise_max);
 OP_CONVERTER(elementwise_min);
@@ -121,6 +122,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
     {"dropout", op::dropout},
     {"elementwise_add", op::elementwise_add},
     {"elementwise_div", op::elementwise_div},
+    {"elementwise_floordiv", op::elementwise_floordiv},
     {"elementwise_max", op::elementwise_max},
     {"elementwise_min", op::elementwise_min},
     {"elementwise_mul", op::elementwise_mul},
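
A minimal numpy sketch of the axis-broadcast rule the converter implements (floordiv_with_axis and the shapes below are illustrative assumptions, not code from this patch): when axis is neither -1 nor x_rank - 1 and the ranks differ, Y is unsqueezed on the leading [0, axis) and trailing [axis + y_rank, x_rank) dimensions, so ordinary right-aligned broadcasting applies before the division.

import numpy as np


def floordiv_with_axis(x, y, axis=-1):
    # Default axis, trailing axis, or equal ranks: plain right-aligned
    # broadcasting, matching the converter's first branch.
    if axis == -1 or axis == x.ndim - 1 or x.ndim == y.ndim:
        return x // y
    # Otherwise mirror the Unsqueeze: insert size-1 dims at the leading
    # [0, axis) and trailing [axis + y.ndim, x.ndim) positions of y.
    shape = [1] * axis + list(y.shape) + [1] * (x.ndim - y.ndim - axis)
    return x // y.reshape(shape)


x = np.random.randint(1, 5, size=(2, 3, 4, 5)).astype('int64')
y = np.random.randint(1, 5, size=(3, 4)).astype('int64')
print(floordiv_with_axis(x, y, axis=1).shape)  # (2, 3, 4, 5)

Note that numpy's // rounds toward negative infinity (Python-style division); the generated test inputs use same-sign operands, so floor and truncating division would produce the same results for these models.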