diff --git a/src/frontends/paddle/src/op/silu.cpp b/src/frontends/paddle/src/op/silu.cpp
new file mode 100644
index 00000000000000..d3267efb14f190
--- /dev/null
+++ b/src/frontends/paddle/src/op/silu.cpp
@@ -0,0 +1,20 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "default_opset.hpp"
+#include "openvino/frontend/paddle/node_context.hpp"
+
+namespace ov {
+namespace frontend {
+namespace paddle {
+namespace op {
+NamedOutputs silu(const NodeContext& node) {
+    const auto x = node.get_input("X");
+    const auto beta_node = default_opset::Constant::create(element::f32, Shape{}, {1.0f});
+    return node.default_single_output_mapping({std::make_shared<default_opset::Swish>(x, beta_node)}, {"Out"});
+}
+}  // namespace op
+}  // namespace paddle
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp
index 0a21af065b98ad..8d2293ff2f94a3 100644
--- a/src/frontends/paddle/src/op_table.cpp
+++ b/src/frontends/paddle/src/op_table.cpp
@@ -90,6 +90,7 @@ OP_CONVERTER(roi_align);
 OP_CONVERTER(scale);
 OP_CONVERTER(select_input);
 OP_CONVERTER(shape);
+OP_CONVERTER(silu);
 OP_CONVERTER(slice);
 OP_CONVERTER(softmax);
 OP_CONVERTER(softplus);
@@ -206,6 +207,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
             {"scale", op::scale},
             {"select_input", op::select_input},
             {"shape", op::shape},
+            {"silu", op::silu},
             {"slice", op::slice},
             {"softmax", op::softmax},
             {"softplus", op::softplus},
diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp
index 598b49dc41a548..0ac7fd4d8ab6dc 100644
--- a/src/frontends/paddle/tests/op_fuzzy.cpp
+++ b/src/frontends/paddle/tests/op_fuzzy.cpp
@@ -448,6 +448,7 @@ static const std::vector<std::string> models{
     std::string("scale_tensor_bias_before"),
     std::string("shape"),
     std::string("sigmoid"),
+    std::string("silu"),
     std::string("slice"),
     std::string("slice_1d"),
     std::string("slice_decrease_axis/slice_decrease_axis.pdmodel"),
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_silu.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_silu.py
new file mode 100644
index 00000000000000..321c784ae102d0
--- /dev/null
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_silu.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# silu paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import sys
+
+def silu(name: str, x):
+    import paddle
+    paddle.enable_static()
+
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
+        silu_f = paddle.nn.Silu()
+        out = silu_f(node_x)
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(
+            feed={'x': x},
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+
+def main():
+    data = np.random.random([2, 3, 4]).astype('float32')
+    silu("silu", data)
+
+
+if __name__ == "__main__":
+    main()
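
Note on the mapping: the converter expresses Paddle's silu through the Swish operation with a scalar beta constant of 1.0, relying on the identity SiLU(x) = x * sigmoid(x) = x * sigmoid(beta * x) for beta = 1. Below is a minimal NumPy sketch of that equivalence for reference only; the helper names silu_ref and swish_ref are ad-hoc illustrations, not part of the patch or of any library API.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def silu_ref(x):
    # SiLU: x * sigmoid(x)
    return x * sigmoid(x)

def swish_ref(x, beta=1.0):
    # Swish: x * sigmoid(beta * x); beta = 1 reduces to SiLU
    return x * sigmoid(beta * x)

x = np.random.random([2, 3, 4]).astype('float32')
assert np.allclose(silu_ref(x), swish_ref(x, beta=1.0))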