diff --git a/src/core/tests/frontend/paddle/op_fuzzy.cpp b/src/core/tests/frontend/paddle/op_fuzzy.cpp
index b8a226e2cc21a9..ebc9e8f42334a2 100644
--- a/src/core/tests/frontend/paddle/op_fuzzy.cpp
+++ b/src/core/tests/frontend/paddle/op_fuzzy.cpp
@@ -60,6 +60,7 @@ static const std::vector<std::string> models{
     std::string("bilinear_upsample_scales2"),
     std::string("bilinear_upsample_true_0"),
     std::string("bmm"),
+    std::string("ceil"),
     std::string("clip"),
     std::string("conv2d_dilation_assymetric_pads_strides"),
     std::string("conv2d_SAME_padding"),
diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_ceil.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_ceil.py
new file mode 100644
index 00000000000000..efe99380cfa7af
--- /dev/null
+++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_ceil.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2018-2022 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# ceil paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import sys
+
+
+def ceil(name: str, x, dtype="float32"):
+    import paddle
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name='x', shape=x.shape, dtype=dtype)
+        out = paddle.ceil(node_x)
+
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+
+def main():
+    dtype = "float32"
+    data = np.random.random([2, 3]).astype("float32")
+    ceil("ceil", data, dtype)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/frontends/paddle/src/op/ceil.cpp b/src/frontends/paddle/src/op/ceil.cpp
new file mode 100644
index 00000000000000..cee1e975787d68
--- /dev/null
+++ b/src/frontends/paddle/src/op/ceil.cpp
@@ -0,0 +1,19 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "default_opset.hpp"
+#include "openvino/frontend/paddle/node_context.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+NamedOutputs ceil(const NodeContext& node) {
+    return node.default_single_output_mapping({std::make_shared<default_opset::Ceiling>(node.get_input("X"))}, {"Out"});
+}
+
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index 97603c1ddcf91c..0ac131b5ec24e5 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -15,6 +15,7 @@ OP_CONVERTER(batch_norm);
 OP_CONVERTER(bicubic_interp_v2);
 OP_CONVERTER(bilinear_interp_v2);
 OP_CONVERTER(cast);
+OP_CONVERTER(ceil);
 OP_CONVERTER(clip);
 OP_CONVERTER(concat);
 OP_CONVERTER(conv2d);
@@ -111,6 +112,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
     {"bilinear_interp", op::bilinear_interp_v2},
     {"bmm", op::matmul},
     {"cast", op::cast},
+    {"ceil", op::ceil},
     {"clip", op::clip},
     {"concat", op::concat},
     {"conv2d", op::conv2d},