Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【Hackathon 5th No.113】Support paddle 2.5.1 #20161

Merged
merged 86 commits into from
Dec 6, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
86 commits
Select commit Hold shift + click to select a range
fc626c8
Fixed comments for linux-riscv64 GHA workflow
ilya-lavrenov Sep 27, 2023
3124a7f
Try to enable PDPD tests back
ilya-lavrenov Sep 27, 2023
77a08d6
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Sep 27, 2023
3baf64c
support paddle 2.5
AndSonder Sep 27, 2023
8160903
fix op test errors
AndSonder Sep 28, 2023
1968fda
fix op test errors
AndSonder Sep 29, 2023
cbe4e61
Merge branch 'master' into support-paddle-2.5
AndSonder Sep 29, 2023
9e5f21a
Merge from openvino master
AndSonder Sep 29, 2023
335f184
Merge branch 'support-paddle-2.5' of https://github.com/AndSonder/ope…
AndSonder Sep 29, 2023
b1bc2c0
recover openvino/src/core/src/op/swish.cpp
AndSonder Sep 29, 2023
a578e84
recover thirdparty open_model_zoo
AndSonder Sep 29, 2023
b88cb9b
update cmakelist
AndSonder Sep 29, 2023
be34ecb
disable some tests
AndSonder Sep 30, 2023
a756342
fix code style
AndSonder Sep 30, 2023
7811f1a
enable paddle ci tests
AndSonder Sep 30, 2023
81f748e
disable some tests
AndSonder Sep 30, 2023
5d26062
fix np.long error
AndSonder Oct 1, 2023
b1b4542
Merge branch 'master' into support-paddle-2.5
ilya-lavrenov Oct 1, 2023
977e064
recover reverse op
AndSonder Oct 3, 2023
f6859c3
Merge branch 'support-paddle-2.5' of https://github.com/AndSonder/ope…
AndSonder Oct 3, 2023
25ae0eb
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Oct 3, 2023
120d649
Merge branch 'master' into support-paddle-2.5
AndSonder Oct 3, 2023
f1c7fe2
update ci config
AndSonder Oct 4, 2023
e0f7af4
Merge branch 'support-paddle-2.5' of https://github.com/AndSonder/ope…
AndSonder Oct 4, 2023
3ed3612
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Oct 4, 2023
4673eca
recover set_value test codes
AndSonder Oct 7, 2023
a24d4ab
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Oct 8, 2023
b9a7c08
rm linux_debian.yml
AndSonder Oct 8, 2023
3809d1f
Compatible with cases where different paddle versions have different …
AndSonder Oct 15, 2023
3d5745d
remove set_value tests
AndSonder Oct 15, 2023
840ca24
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Oct 15, 2023
60380e0
recover save_model.py
AndSonder Oct 15, 2023
49a44f2
Added ctest labels for FE tests only if FW is found
ilya-lavrenov Oct 16, 2023
78e8276
Merge From liya:paddle
AndSonder Oct 16, 2023
cc69e2b
recover thirdparty
AndSonder Oct 16, 2023
ffe635c
merge from openvino master
AndSonder Oct 16, 2023
44f7cf2
Update CMakeLists.txt
ilya-lavrenov Oct 16, 2023
01de6c2
Merge from develop
AndSonder Oct 26, 2023
d6b66c5
Merge branch 'support-paddle-2.5' of https://github.com/AndSonder/ope…
AndSonder Oct 26, 2023
94cff0c
update paddle v2.5.1 proto file
meiyang-intel Nov 1, 2023
42c4092
Merge pull request #1 from meiyang-intel/meiyang/paddle2.5.1
AndSonder Nov 1, 2023
a933b40
recover thirdparty
AndSonder Nov 1, 2023
a86a01b
Merge branch 'support-paddle-2.5' of https://github.com/AndSonder/ope…
AndSonder Nov 1, 2023
e6efac2
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Nov 1, 2023
3db9d7a
fix Paddle_Reader_Tests.LoadModelMemoryToCore error
meiyang-intel Nov 2, 2023
6fa9c12
fix Paddle_Places test issue in v2.5.1
meiyang-intel Nov 7, 2023
c5850e7
support some tests for low version paddle
AndSonder Nov 8, 2023
7a7a505
Merge from openvino master
AndSonder Nov 8, 2023
e619a40
fix paddle FrontEndCutModelTest issue
meiyang-intel Nov 8, 2023
8b56a1e
fix
AndSonder Nov 9, 2023
8845d5f
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Nov 9, 2023
680f1e5
support all other tests for low version paddle
AndSonder Nov 9, 2023
7703529
fix codestyle
AndSonder Nov 9, 2023
78f4623
fix codestyle
AndSonder Nov 9, 2023
f43332f
Update generate_multi_tensor_split.py
AndSonder Nov 14, 2023
574cd2c
fix build error
AndSonder Nov 14, 2023
39457eb
Merge branch 'master' into support-paddle-2.5
ilya-lavrenov Nov 20, 2023
0fbf3e3
Merge branch 'master' into support-paddle-2.5
AndSonder Nov 20, 2023
daba0f1
add testUnloadLibBeforeDeletingDependentObject into paddle skip tests…
AndSonder Nov 28, 2023
4a6d806
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Nov 28, 2023
7a35c24
remove PROTOBUF_LITE from paddle CmakeList.txt
AndSonder Nov 29, 2023
8c427ac
fix path error
AndSonder Nov 29, 2023
b9f6a40
add debug info
AndSonder Nov 29, 2023
6134eff
add debug codes
AndSonder Nov 29, 2023
e22110a
use make_model_path
AndSonder Nov 30, 2023
8cc4e4b
recover
AndSonder Dec 1, 2023
a4b4cd9
add option optimize_for
AndSonder Dec 1, 2023
74ece6f
use FrontEndTestUtils in read_paddle_model_test.cpp
AndSonder Dec 1, 2023
1161847
use FrontEndTestUtils in read_paddle_model_test.cpp
AndSonder Dec 1, 2023
abbb2b4
fix grid_sample error when using dynamic shape
AndSonder Dec 2, 2023
7b6f1ff
fix error tests for 2.4 version
AndSonder Dec 3, 2023
3ce33c0
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Dec 3, 2023
638709f
add paddle version judge for floor_div
AndSonder Dec 4, 2023
d27c729
fix grid_sample and add tests
AndSonder Dec 4, 2023
869535a
fix
AndSonder Dec 4, 2023
16e6919
fix
AndSonder Dec 5, 2023
bfd97cd
recover
AndSonder Dec 5, 2023
e159bc9
recover grid_sampler
AndSonder Dec 5, 2023
674c85e
Apply suggestions from code review
meiyang-intel Dec 5, 2023
45c127e
fix
AndSonder Dec 5, 2023
aff27b6
Merge branch 'support-paddle-2.5' of https://github.com/AndSonder/ope…
AndSonder Dec 5, 2023
768eef8
Merge branch 'master' of https://github.com/openvinotoolkit/openvino …
AndSonder Dec 5, 2023
b877478
Apply suggestions from code review
AndSonder Dec 5, 2023
e6053a0
Apply suggestions from code review
AndSonder Dec 5, 2023
8ee9239
fix build error
AndSonder Dec 5, 2023
df3e8c7
Merge branch 'master' into support-paddle-2.5
meiyang-intel Dec 5, 2023
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .github/workflows/linux.yml
Original file line number Diff line number Diff line change
Expand Up @@ -745,7 +745,6 @@ jobs:
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml

- name: PaddlePaddle frontend tests
if: ${{ 'false' }}
run: |
source ${INSTALL_DIR}/setupvars.sh
${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@ class DecoderBase {
virtual size_t get_output_size() const = 0;
virtual size_t get_output_size(const std::string& port_name) const = 0;

/// \brief Get the version
virtual int64_t get_version() const = 0;

/// \brief Get output port type
///
/// Current API assumes that output port has only one output type.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,10 @@ class NodeContext : public ov::frontend::NodeContext {
return decoder->get_output_port_infos(port_name);
}

/// \brief Version of the paddle model being converted.
/// Thin accessor: the value is obtained from the wrapped decoder.
int64_t get_version() const {
    const auto model_version = decoder->get_version();
    return model_version;
}

private:
ov::Any apply_additional_conversion_rules(const ov::Any& any, const std::type_info& type_info) const override {
auto res = decoder->convert_attribute(any, type_info);
Expand Down
4 changes: 4 additions & 0 deletions src/frontends/paddle/src/decoder_proto.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,10 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
}
}

// Report the paddle framework version of the model, as exposed by the
// op place this decoder was created from.
int64_t DecoderProto::get_version() const {
    auto op_place_ptr = get_place();
    return op_place_ptr->get_version();
}

ov::Any DecoderProto::convert_attribute(const Any& data, const std::type_info& type_info) const {
if (data.is<int32_t>() && type_info == typeid(ov::element::Type)) {
return get_ov_type(static_cast<proto::VarType_Type>(data.as<int32_t>()));
Expand Down
2 changes: 2 additions & 0 deletions src/frontends/paddle/src/decoder_proto.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,8 @@ class DecoderProto : public paddle::DecoderBase {
std::map<std::string, OutputVector> map_for_each_output(
const std::function<Output<Node>(const std::string&, size_t)>& func) const;

int64_t get_version() const override;

private:
std::vector<::paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const;
std::weak_ptr<OpPlace> op_place;
Expand Down
7 changes: 7 additions & 0 deletions src/frontends/paddle/src/input_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,9 @@ class InputModel::InputModelImpl {
const std::shared_ptr<TelemetryExtension>& telemetry);
std::vector<Place::Ptr> get_inputs() const;
std::vector<Place::Ptr> get_outputs() const;
// Version of the paddle framework that saved the model, read from the
// program proto's version message (an integer encoding, e.g. compared
// against 2005000 by consumers — presumably 2.5.0; confirm against
// paddle's framework.proto).
int64_t get_version() const {
return m_fw_ptr->version().version();
}
Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const;
void override_all_outputs(const std::vector<Place::Ptr>& outputs);
void override_all_inputs(const std::vector<Place::Ptr>& inputs);
Expand Down Expand Up @@ -589,6 +592,10 @@ std::vector<Place::Ptr> InputModel::get_outputs() const {
return _impl->get_outputs();
}

// Public accessor for the paddle framework version of the loaded model;
// simply forwards to the pimpl.
int64_t InputModel::get_version() const {
    const int64_t model_version = _impl->get_version();
    return model_version;
}

Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const {
return _impl->get_place_by_tensor_name(tensorName);
}
Expand Down
1 change: 1 addition & 0 deletions src/frontends/paddle/src/input_model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ class InputModel : public ov::frontend::InputModel {
ov::PartialShape get_partial_shape(const Place::Ptr& place) const override;
void set_element_type(const Place::Ptr& place, const ov::element::Type&) override;
void set_tensor_value(const Place::Ptr& place, const void* value) override;
int64_t get_version() const;

private:
friend class ov::frontend::paddle::FrontEnd;
Expand Down
14 changes: 11 additions & 3 deletions src/frontends/paddle/src/op/argmax.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,17 @@ NamedOutputs argmax(const NodeContext& node) {
const Output<ov::Node> reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1});
auto node_reshape = std::make_shared<ov::opset6::Reshape>(data, reshape_flatten, true);
auto node_topk = std::make_shared<ov::opset6::TopK>(node_reshape, k, axis, "max", "index", index_element_type);
return node.default_single_output_mapping(
{std::make_shared<ov::opset6::Convert>(node_topk->output(1), element::i64)},
{"Out"});
const auto output_info = node.get_output_port_infos("Out");
size_t output_size = output_info[0].second.size();
if (output_size == 0) {
auto out = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1));
return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(out, element::i64)},
{"Out"});
} else {
return node.default_single_output_mapping(
{std::make_shared<ov::opset6::Convert>(node_topk->output(1), element::i64)},
{"Out"});
}
}
}

Expand Down
15 changes: 11 additions & 4 deletions src/frontends/paddle/src/op/elementwise_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,15 @@ NamedOutputs elementwise_pow(const NodeContext& node_context) {
return elementwise_ops<default_opset::Power>(node_context);
}

NamedOutputs elementwise_equal(const NodeContext& node_context) {
NamedOutputs equal(const NodeContext& node_context) {
return elementwise_ops<default_opset::Equal>(node_context);
}

NamedOutputs elementwise_greater_equal(const NodeContext& node_context) {
NamedOutputs greater_equal(const NodeContext& node_context) {
return elementwise_ops<default_opset::GreaterEqual>(node_context);
}

NamedOutputs elementwise_not_equal(const NodeContext& node_context) {
NamedOutputs not_equal(const NodeContext& node_context) {
return elementwise_ops<default_opset::NotEqual>(node_context);
}

Expand All @@ -61,10 +61,17 @@ NamedOutputs elementwise_floordiv(const NodeContext& node_context) {
if (node_context.has_attribute("axis")) {
axis = node_context.get_attribute<int>("axis");
}

int64_t pd_version = node_context.get_version();

bool python_div = false;
if (pd_version >= 2005000 || pd_version == 0) {
python_div = true;
}
return node_context.default_single_output_mapping(
{std::make_shared<default_opset::Divide>(x,
y,
false,
python_div,
ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, axis))},
{"Out"});
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,4 +121,4 @@ NamedOutputs fill_constant_batch_size_like(const NodeContext& node) {
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
} // namespace ov
4 changes: 4 additions & 0 deletions src/frontends/paddle/src/op/grid_sampler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,15 @@

#include "default_opset.hpp"
#include "openvino/frontend/paddle/node_context.hpp"
#include "openvino/op/grid_sample.hpp"

namespace ov {
namespace frontend {
namespace paddle {
namespace op {

using namespace ov::op;

NamedOutputs grid_sampler(const NodeContext& node) {
auto data = node.get_input("X");
auto grid = node.get_input("Grid");
Expand Down
4 changes: 3 additions & 1 deletion src/frontends/paddle/src/op/matmul_v2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,9 @@ NamedOutputs matmul_v2(const NodeContext& node) {
const auto mm = std::make_shared<default_opset::MatMul>(x, y, transpose_a, transpose_b);

std::shared_ptr<Node> result = mm;
if (is_scalar(mm->get_output_partial_shape(0))) {
const auto output_info = node.get_output_port_infos("Out");
size_t output_size = output_info[0].second.size();
if (is_scalar(mm->get_output_partial_shape(0)) && output_size) {
auto unsqueeze_scalar = default_opset::Constant::create(ov::element::i64, {}, {0});
result = std::make_shared<default_opset::Unsqueeze>(mm, unsqueeze_scalar);
}
Expand Down
28 changes: 15 additions & 13 deletions src/frontends/paddle/src/op/p_norm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -18,22 +18,20 @@ NamedOutputs p_norm(const NodeContext& node) {
const auto absNode = std::make_shared<default_opset::Abs>(data);
const auto axisNode = default_opset::Constant::create(ov::element::i32, {1}, {axis});

std::shared_ptr<Node> p_norm_node;
const auto input_shape = data.get_partial_shape();

if (p == std::numeric_limits<float>::infinity()) {
return node.default_single_output_mapping(
{std::make_shared<default_opset::ReduceMax>(absNode, axisNode, keepdim)},
{"Out"});
p_norm_node = std::make_shared<default_opset::ReduceMax>(absNode, axisNode, keepdim);
} else if (p == -std::numeric_limits<float>::infinity()) {
return node.default_single_output_mapping(
{std::make_shared<default_opset::ReduceMin>(absNode, axisNode, keepdim)},
{"Out"});
p_norm_node = std::make_shared<default_opset::ReduceMin>(absNode, axisNode, keepdim);
} else if (p == 0.0) {
const auto input_dtype = data.get_element_type();
const auto zero = default_opset::Constant::create(input_dtype, {1}, {0});
const auto non_zero = std::make_shared<default_opset::NotEqual>(absNode, zero);
const auto converted_non_zero = std::make_shared<default_opset::Convert>(non_zero, input_dtype);

const auto reduce_sum = std::make_shared<default_opset::ReduceSum>(converted_non_zero, axisNode, keepdim);
const auto input_shape = data.get_partial_shape();
p_norm_node = std::make_shared<default_opset::ReduceSum>(converted_non_zero, axisNode, keepdim);
// process 1-d input and keepdim=false, output shape is [1], instead of scalar.
if (!keepdim) {
PADDLE_OP_CHECK(node,
Expand All @@ -42,19 +40,23 @@ NamedOutputs p_norm(const NodeContext& node) {
const auto input_rank = input_shape.rank().get_length();
if (input_rank == 1) {
const auto one = default_opset::Constant::create(ov::element::i64, {1}, {1});
auto out = std::make_shared<default_opset::Reshape>(reduce_sum, one, false);
return node.default_single_output_mapping({out}, {"Out"});
p_norm_node = std::make_shared<default_opset::Reshape>(p_norm_node, one, false);
}
}
return node.default_single_output_mapping({reduce_sum}, {"Out"});
} else {
const auto power_factor = default_opset::Constant::create(ov::element::f32, Shape{1}, {p});
const auto powNode = std::make_shared<default_opset::Power>(absNode, power_factor);
const auto reduce_sum = std::make_shared<default_opset::ReduceSum>(powNode, axisNode, keepdim);
const auto extract_factor = default_opset::Constant::create(ov::element::f32, Shape{1}, {1.0 / p});
return node.default_single_output_mapping({std::make_shared<default_opset::Power>(reduce_sum, extract_factor)},
{"Out"});
p_norm_node = std::make_shared<default_opset::Power>(reduce_sum, extract_factor);
}

const auto output_info = node.get_output_port_infos("Out");
size_t output_size = output_info[0].second.size();
if ((axis == -1 || input_shape.size() == 1) && !keepdim && !output_size) {
p_norm_node = std::make_shared<default_opset::Squeeze>(p_norm_node);
}
return node.default_single_output_mapping({p_norm_node}, {"Out"});
}

} // namespace op
Expand Down
6 changes: 6 additions & 0 deletions src/frontends/paddle/src/op/reduce_ops.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,12 @@ NamedOutputs reduce_ops(const NodeContext& node) {
auto unsqueeze_scalar = default_opset::Constant::create(ov::element::i64, {}, {0});
result = std::make_shared<default_opset::Unsqueeze>(reduceNode, unsqueeze_scalar);
}

const auto output_info = node.get_output_port_infos("Out");
size_t output_size = output_info[0].second.size();
if (reduce_all && !output_size) {
result = std::make_shared<default_opset::Squeeze>(reduceNode);
}
return node.default_single_output_mapping({result}, {"Out"});
}

Expand Down
2 changes: 1 addition & 1 deletion src/frontends/paddle/src/op/reverse.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,4 @@ NamedOutputs reverse(const NodeContext& node) {
} // namespace op
} // namespace paddle
} // namespace frontend
} // namespace ov
} // namespace ov
31 changes: 15 additions & 16 deletions src/frontends/paddle/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,25 +29,25 @@ OP_CONVERTER(dequantize_linear);
OP_CONVERTER(dropout);
OP_CONVERTER(elementwise_add);
OP_CONVERTER(elementwise_div);
OP_CONVERTER(elementwise_equal);
OP_CONVERTER(elementwise_floordiv);
OP_CONVERTER(elementwise_greater_equal);
OP_CONVERTER(elementwise_max);
OP_CONVERTER(elementwise_min);
OP_CONVERTER(elementwise_mod);
OP_CONVERTER(elementwise_mul);
OP_CONVERTER(elementwise_not_equal);
OP_CONVERTER(elementwise_pow);
OP_CONVERTER(elementwise_sub);
OP_CONVERTER(equal);
OP_CONVERTER(greater_equal);
OP_CONVERTER(not_equal);
OP_CONVERTER(embedding);
OP_CONVERTER(exp);
OP_CONVERTER(expand_v2);
OP_CONVERTER(flip);
OP_CONVERTER(fill_any_like);
OP_CONVERTER(fill_constant_batch_size_like);
OP_CONVERTER(fill_constant);
OP_CONVERTER(flatten_contiguous_range);
OP_CONVERTER(floor);
OP_CONVERTER(fill_any_like);
OP_CONVERTER(fill_constant);
OP_CONVERTER(fill_constant_batch_size_like);
OP_CONVERTER(gather);
OP_CONVERTER(gather_nd);
OP_CONVERTER(gelu);
Expand Down Expand Up @@ -152,30 +152,30 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"depthwise_conv2d", op::conv2d},
{"depthwise_conv2d_transpose", op::conv2d_transpose},
{"dequantize_linear", op::dequantize_linear},
{"dropout", op::dropout},
{"elementwise_add", op::elementwise_add},
{"elementwise_div", op::elementwise_div},
{"elementwise_floordiv", op::elementwise_floordiv},
{"elementwise_max", op::elementwise_max},
{"elementwise_min", op::elementwise_min},
{"elementwise_mod", op::elementwise_mod},
{"elementwise_mul", op::elementwise_mul},
{"elementwise_pow", op::elementwise_pow},
{"elementwise_max", op::elementwise_max},
{"elementwise_min", op::elementwise_min},
{"elementwise_sub", op::elementwise_sub},
{"equal", op::elementwise_equal},
{"dropout", op::dropout},
{"elementwise_pow", op::elementwise_pow},
{"equal", op::equal},
{"exp", op::exp},
{"expand_v2", op::expand_v2},
{"fill_any_like", op::fill_any_like},
{"fill_constant_batch_size_like", op::fill_constant_batch_size_like},
{"fill_constant", op::fill_constant},
{"fill_constant_batch_size_like", op::fill_constant_batch_size_like},
{"flatten_contiguous_range", op::flatten_contiguous_range},
{"flip", op::flip},
{"floor", op::floor},
{"gather", op::gather},
{"gather_nd", op::gather_nd},
{"gelu", op::gelu},
{"generate_proposals_v2", op::generate_proposals_v2},
{"greater_equal", op::elementwise_greater_equal},
{"greater_equal", op::greater_equal},
{"greater_than", op::greater_than},
{"grid_sampler", op::grid_sampler},
{"group_norm", op::group_norm},
Expand All @@ -202,7 +202,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"multiclass_nms3", op::multiclass_nms},
{"nearest_interp_v2", op::nearest_interp_v2},
{"nearest_interp", op::nearest_interp_v2},
{"not_equal", op::elementwise_not_equal},
{"not_equal", op::not_equal},
{"one_hot_v2", op::one_hot_v2},
{"p_norm", op::p_norm},
{"pad3d", op::pad3d},
Expand Down Expand Up @@ -255,8 +255,7 @@ std::map<std::string, CreatorFunction> get_supported_ops() {
{"while", op::while_},
{"write_to_array", op::write_to_array},
{"where_index", op::where_index},
{"yolo_box", op::yolo_box},
{"generate_proposals_v2", op::generate_proposals_v2}};
{"yolo_box", op::yolo_box}};
};

} // namespace paddle
Expand Down
4 changes: 4 additions & 0 deletions src/frontends/paddle/src/place.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,10 @@ class Place : public ov::frontend::Place {
return m_names;
}

/// \brief Paddle framework version of the model this place belongs to.
/// Downcasts the stored frontend InputModel reference to the paddle
/// InputModel; since this is a reference dynamic_cast, it throws
/// std::bad_cast if m_input_model is not actually a paddle model.
int64_t get_version() const {
return dynamic_cast<const ov::frontend::paddle::InputModel&>(m_input_model).get_version();
}

private:
const ov::frontend::InputModel& m_input_model;
std::vector<std::string> m_names;
Expand Down
27 changes: 26 additions & 1 deletion src/frontends/paddle/src/proto/framework.proto
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,31 @@ enum AttrType {
VAR = 13;
VARS = 14;
FLOAT64 = 15;
SCALAR = 16;
SCALARS = 17;
}


// A complex number: `r` is the real part, `i` the imaginary part.
message Complex {
required double r = 1;
required double i = 2;
};

// A typed scalar attribute value (mirrors paddle's framework.proto).
// `type` indicates which of the optional payload fields below is
// meaningful — presumably b for BOOLEAN, i for LONG, r for FLOAT64 and
// c for COMPLEX128; confirm against the upstream paddle schema.
message Scalar {
enum Type {
BOOLEAN = 1;
LONG = 2;
FLOAT64 = 3;
COMPLEX128 = 4;
}
required Type type = 1;

optional bool b = 2;
optional int64 i = 3;
optional double r = 4;
optional Complex c = 5;
};

// OpDesc describes an instance of a C++ framework::OperatorBase
// derived class type.
message OpDesc {
Expand All @@ -66,6 +89,8 @@ message OpDesc {
optional string var_name = 17;
repeated string vars_name = 18;
optional double float64 = 19;
optional Scalar scalar = 20;
repeated Scalar scalars = 21;
};

message Var {
Expand Down Expand Up @@ -126,7 +151,7 @@ message VarType {
FP16 = 4;
FP32 = 5;
FP64 = 6;
// Tensor<size_t> is used in C++.
// phi::DenseTensor<size_t> is used in C++.
SIZE_T = 19;
UINT8 = 20;
INT8 = 21;
Expand Down
Loading
Loading