【PaddlePaddle Hackathon 4】add paddle softshrink op #15845

Merged (14 commits) on May 4, 2023
54 changes: 54 additions & 0 deletions src/frontends/paddle/src/op/softshrink.cpp
@@ -0,0 +1,54 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs softshrink(const NodeContext& node) {
    auto data = node.get_input("X");
    const float lambda = node.get_attribute<float>("lambda", 0.5f);

    const auto input_element_type = data.get_element_type();
    std::shared_ptr<ngraph::Node> output;
    const auto positive_lambda = default_opset::Constant::create(input_element_type, Shape{}, {lambda});
    std::shared_ptr<ngraph::Node> negative_node = std::make_shared<default_opset::Subtract>(data, positive_lambda);
    std::shared_ptr<ngraph::Node> zero_node = default_opset::Constant::create(input_element_type, Shape{}, {0});
    if (input_element_type.is_signed()) {
        std::shared_ptr<default_opset::Constant> negative_lambda =
            default_opset::Constant::create(input_element_type, Shape{}, {-lambda});

        // Create masks for values below negative lambda and above positive lambda
        std::shared_ptr<ngraph::Node> values_below_neg_lambda =
            std::make_shared<default_opset::Less>(data, negative_lambda);
        std::shared_ptr<ngraph::Node> values_above_pos_lambda =
            std::make_shared<default_opset::Greater>(data, positive_lambda);

        std::shared_ptr<ngraph::Node> positive_node = std::make_shared<default_opset::Add>(data, positive_lambda);

        // Shift by lambda where the masks apply: x - lambda above, x + lambda below
        output = std::make_shared<default_opset::Select>(values_above_pos_lambda, negative_node, data);
        output = std::make_shared<default_opset::Select>(values_below_neg_lambda, positive_node, output);

        // Everything between -lambda and lambda maps to zero
        std::shared_ptr<ngraph::Node> zero_mask =
            std::make_shared<default_opset::LogicalOr>(values_below_neg_lambda, values_above_pos_lambda);

        output = std::make_shared<default_opset::Select>(zero_mask, output, zero_node);
    } else {
        // Passing -lambda to an unsigned-type constant would cause an overflow.
Contributor:
Referring to the Paddle spec, input x can only be float32 or float64. How could a test case trigger this else branch?

Contributor:
@ceciliapeng2011: the other Paddle 'softshrink' API, defined here: https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/Softshrink_cn.html#softshrink
does not appear to have such a limitation on the input x type. To cover that API, I think we may still need the branch that handles the '-lambda to unsigned type' case.

Contributor:
Hi @AndPuQing: regarding @ceciliapeng2011's concern, could you please try adding such an unsigned-type test case using the paddle.nn.Softshrink() API?
If such a test case is difficult to generate from the Paddle side, let's drop the unsigned input support this time (delete the else branch) and add an input type check for this op: PADDLE_OP_CHECK(node, input_element_type.is_signed(), "softshrink only supports signed input data type");

Contributor Author:
I tested unsigned tensor input on the PaddlePaddle develop version; here is the result:

>>> import paddle
>>> a = paddle.to_tensor([1], dtype='uint8')
>>> a
Out[3]:
Tensor(shape=[1], dtype=uint8, place=Place(gpu:0), stop_gradient=True,
       [1])
>>> paddle.nn.functional.softshrink(a)
RuntimeError: (NotFound) The kernel with key (GPU, Undefined(AnyLayout), uint8) of kernel `softshrink` is not registered and fail to fallback to CPU one. Selected wrong DataType `uint8`. Paddle support following DataTypes: float64, float16, float32, bfloat16.
  [Hint: Expected kernel_iter != iter->second.end(), but received kernel_iter == iter->second.end().] (at /paddle/paddle/phi/core/kernel_factory.cc:259)

So I think we should just add the dtype check for now.
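For illustration, the "-lambda to unsigned type" overflow the code comment refers to is easy to reproduce in plain NumPy (an editorial sketch, not part of the PR):

import numpy as np

# An unsigned integer type cannot represent a negative value such as -lambda;
# arithmetic that would go below zero wraps around instead.
a = np.array([1], dtype=np.uint8)
print(a - np.uint8(2))  # prints [255], not [-1]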

        // For unsigned types the lowest possible value is 0. So we just need to compare with lambda.
        std::shared_ptr<ngraph::Node> values_above_pos_lambda =
            std::make_shared<default_opset::Greater>(data, positive_lambda);

        output = std::make_shared<default_opset::Select>(values_above_pos_lambda, negative_node, zero_node);
    }

    return node.default_single_output_mapping({output}, {"Out"});
}
}  // namespace op
}  // namespace paddle
}  // namespace frontend
}  // namespace ov
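For readers cross-checking the Select decomposition above, a minimal NumPy reference of the piecewise softshrink definition it implements may help (an editorial sketch; the helper name softshrink_ref is ours, not part of the PR):

import numpy as np

def softshrink_ref(x, lambd=0.5):
    # softshrink(x) = x - lambd  if x >  lambd
    #                 x + lambd  if x < -lambd
    #                 0          otherwise
    return np.where(x > lambd, x - lambd, np.where(x < -lambd, x + lambd, 0.0))

print(softshrink_ref(np.array([-1.0, -0.3, 0.0, 0.3, 1.0], dtype=np.float32)))
# [-0.5  0.   0.   0.   0.5]

The three Select nodes in the converter mirror the three branches: shift down above lambda, shift up below -lambda, and zero out the band in between.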
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/op_table.cpp
@@ -93,6 +93,7 @@ OP_CONVERTER(shape);
OP_CONVERTER(slice);
OP_CONVERTER(softmax);
OP_CONVERTER(softplus);
OP_CONVERTER(softshrink);
OP_CONVERTER(sigmoid);
OP_CONVERTER(split);
OP_CONVERTER(sqrt);
@@ -209,6 +210,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"slice", op::slice},
{"softmax", op::softmax},
{"softplus", op::softplus},
{"softshrink", op::softshrink},
{"sigmoid", op::sigmoid},
{"split", op::split},
{"sqrt", op::sqrt},
2 changes: 2 additions & 0 deletions src/frontends/paddle/tests/op_fuzzy.cpp
@@ -454,6 +454,8 @@ static const std::vector<std::string> models{
std::string("softmax"),
std::string("softmax_minus"),
std::string("softplus_default_params"),
std::string("softshrink_default_params"),
std::string("softshrink_threshold_0.6"),
std::string("split_test1"),
std::string("split_test2"),
std::string("split_test3"),
@@ -0,0 +1,56 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

#
# softshrink paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def softshrink(name: str, x, threshold):
    import paddle

    paddle.enable_static()

    node_x = paddle.static.data(name="x", shape=x.shape, dtype="float32")
    if threshold is None:
        out = paddle.nn.functional.softshrink(node_x)
    else:
        out = paddle.nn.functional.softshrink(node_x, threshold)

    cpu = paddle.static.cpu_places(1)
    exe = paddle.static.Executor(cpu[0])
    # startup program will call initializer to initialize the parameters.
    exe.run(paddle.static.default_startup_program())

    outs = exe.run(feed={"x": x}, fetch_list=[out])

    saveModel(
        name,
        exe,
        feedkeys=["x"],
        fetchlist=[out],
        inputs=[x],
        outputs=[outs[0]],
        target_dir=sys.argv[1],
    )

    return outs[0]


def main():
    data = np.array(
        [
            [[2.0, 3.0, 4.0, 5.0], [0.0, 4.0, -5.0, 6.0], [7.0, -8.0, 8.0, 9.0]],
            [[-1.0, 2.0, 3.0, 4.0], [-5.0, 6.0, 7.0, 8.0], [6.0, 7.0, 8.0, 9.0]],
        ]
    ).astype(np.float32)

    softshrink("softshrink_default_params", data, threshold=None)
    softshrink("softshrink_threshold_0.6", data, threshold=0.6)


if __name__ == "__main__":
    main()
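The script takes the output directory for the generated test models as its single command-line argument (consumed via sys.argv[1] in saveModel). As a quick sanity check on the expected values, the generated outputs can be compared against a plain NumPy reference (an editorial sketch; softshrink_ref repeats the hypothetical helper above and is not part of the PR):

import numpy as np

def softshrink_ref(x, lambd=0.5):
    # piecewise softshrink, matching the op converter's Select decomposition
    return np.where(x > lambd, x - lambd, np.where(x < -lambd, x + lambd, 0.0))

# threshold=0.6 mirrors the softshrink_threshold_0.6 test model above
data = np.array([2.0, 0.0, -5.0, -0.3], dtype=np.float32)
assert np.allclose(softshrink_ref(data, 0.6), [1.4, 0.0, -4.4, 0.0])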