From cffcfd5677c91a42863a907369bd15148db7b088 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 25 Jul 2022 14:41:34 +0800
Subject: [PATCH 001/101] Update onnx_decoder.py

---
 x2paddle/decoder/onnx_decoder.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py
index db4ed90ba..578bc9861 100755
--- a/x2paddle/decoder/onnx_decoder.py
+++ b/x2paddle/decoder/onnx_decoder.py
@@ -335,8 +335,12 @@ def build_connection(self, layer_name, node):
                         node.which_child[new_nd_name] = idx
                         break
                 else:
-                    first_i = node.inputs.index(nd.name)
-                    node.which_child[nd.name] = idx
+                    # deal with the case where multiple outputs correspond to one node
+                    if self.node_map[nd.name].outputs.count(layer_name) > 1:
+                        new_child_name = "{}/{}".format(nd.name, idx)
+                        node.which_child[new_child_name] = idx
+                    else:
+                        node.which_child[nd.name] = idx
                     self.node_map[nd.name].index = 0
                     break
             if flag == 1:

From a05f36e65f9e98da16f6ee6e3402500018280b68 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 25 Jul 2022 14:45:21 +0800
Subject: [PATCH 002/101] Update opset.py

---
 x2paddle/op_mapper/onnx2paddle/opset9/opset.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
index 9c205a486..06d573709 100755
--- a/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
@@ -209,7 +209,14 @@ def directly_map(self, node, *args, **kwargs):
             attrs_name_map_dict = op_info[1]
             for onnx_attr_name, pd_attr_name in attrs_name_map_dict.items():
                 if onnx_attr_name in onnx_attrs:
-                    layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name]
+                    # trans 1 to True, 0 to False
+                    if onnx_attr_name == "keepdims":
+                        if onnx_attrs[onnx_attr_name] == 1:
+                            layer_attrs[pd_attr_name]=True
+                        else:
+                            layer_attrs[pd_attr_name]=False
+                    else:
+                        layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name]
                 else:
                     layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name]
         if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op:

From ae260eb4fdda50a532adb58870c6735c83895ceb Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 25 Jul 2022 15:02:59 +0800
Subject: [PATCH 003/101] Update opset.py

---
 x2paddle/op_mapper/onnx2paddle/opset9/opset.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
index 06d573709..5b5de8903 100755
--- a/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
@@ -32,6 +32,7 @@ def _const_weight_or_none(node,
                           necessary=False):
+    # haha
     if 'Constant' in node.layer_type:
         return node.value
     if isinstance(node, ONNXGraphDataNode):

From 02355da9d63dd9f8451ed37dae89fb1870ba720b Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 25 Jul 2022 15:03:59 +0800
Subject: [PATCH 004/101] Update opset.py

---
 x2paddle/op_mapper/onnx2paddle/opset9/opset.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset9/opset.py b/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
index 5b5de8903..06d573709 100755
--- a/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset9/opset.py
@@ -32,7 +32,6 @@ def _const_weight_or_none(node,
                           necessary=False):
-    # haha
     if 'Constant' in node.layer_type:
         return node.value
     if isinstance(node, ONNXGraphDataNode):

From 451b43aa8de5181585f0d58f96b381f83451170a Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 25 Jul 2022 18:11:42 +0800
Subject: [PATCH 005/101] Update onnx_decoder.py

---
 x2paddle/decoder/onnx_decoder.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py
index 578bc9861..f5e9d1bcf 100755
--- a/x2paddle/decoder/onnx_decoder.py
+++ b/x2paddle/decoder/onnx_decoder.py
@@ -335,6 +335,7 @@ def build_connection(self, layer_name, node):
                         node.which_child[new_nd_name] = idx
                         break
                 else:
+                    first_i = node.inputs.index(nd.name)
                     # deal with the case where multiple outputs correspond to one node
                     if self.node_map[nd.name].outputs.count(layer_name) > 1:
                         new_child_name = "{}/{}".format(nd.name, idx)

From 392a9db4b10dc7750bf11b1c2232a6abcb9de2e2 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Wed, 27 Jul 2022 14:32:35 +0800
Subject: [PATCH 006/101] fix gemm and resize

---
 x2paddle/decoder/onnx_decoder.py              |  6 +-
 .../op_mapper/onnx2paddle/opset_legacy.py     | 55 +++++++++++++++++--
 2 files changed, 54 insertions(+), 7 deletions(-)

diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py
index 1d55f64ca..aeae6f27f 100755
--- a/x2paddle/decoder/onnx_decoder.py
+++ b/x2paddle/decoder/onnx_decoder.py
@@ -337,8 +337,10 @@ def build_connection(self, layer_name, node):
                 else:
                     first_i = node.inputs.index(nd.name)
                     # deal with the case where multiple outputs correspond to one node
-                    if self.node_map[nd.name].outputs.count(layer_name) > 1:
-                        new_child_name = "{}/{}".format(nd.name, idx)
+                    if self.node_map[nd.name].outputs.count(
+                            layer_name) > 1:
+                        new_child_name = "{}/{}".format(nd.name,
+                                                        idx)
                         node.which_child[new_child_name] = idx
                     else:
                         node.which_child[nd.name] = idx
diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py
index 812e305cc..1bcef503e 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py
@@ -213,9 +213,9 @@ def directly_map(self, node, *args, **kwargs):
                     # trans 1 to True, 0 to False
                     if onnx_attr_name == "keepdims":
                         if onnx_attrs[onnx_attr_name] == 1:
-                            layer_attrs[pd_attr_name]=True
+                            layer_attrs[pd_attr_name] = True
                         else:
-                            layer_attrs[pd_attr_name]=False
+                            layer_attrs[pd_attr_name] = False
                     else:
                         layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name]
                 else:
@@ -354,6 +354,43 @@ def _interpolate(self, node):
                 # which is the same as the rank of input.
                 attrs['scale_factor'] = self.weights[val_scales.name].tolist()[
                     2:]
+                if len(val_x_shape) == 3:
+                    val_scales = self.graph.get_input_node(
+                        node, idx=2, copy=True)
+                    val_scales_values = _const_weight_or_none(val_scales)
+
+                    attrs = {
+                        "align_corners": False,
+                        "mode": string(node.get_attr('mode', 'nearest')),
+                        "scale_factor":
+                        self.weights[val_scales.name].tolist()[1:]
+                    }
+                    mode = node.get_attr('mode', 'nearest')
+                    if mode == "linear":
+                        attrs["mode"] = string("bilinear")
+                    if node.get_attr('coordinate_transformation_mode',
+                                     'half_pixel') == 'pytorch_half_pixel':
+                        attrs["align_corners"] = False
+                        attrs["align_mode"] = 0
+                    if node.get_attr('coordinate_transformation_mode',
+                                     'half_pixel') == 'align_corners':
+                        attrs["align_corners"] = True
+                    self.paddle_graph.add_layer(
+                        'paddle.unsqueeze',
+                        inputs={"x": val_x.name},
+                        outputs=[val_x.name],
+                        axis=0)
+                    self.paddle_graph.add_layer(
+                        kernel="paddle.nn.functional.interpolate",
+                        inputs=inputs,
+                        outputs=[node.name],
+                        **attrs)
+                    self.paddle_graph.add_layer(
+                        'paddle.squeeze',
+                        inputs={"x": node.name},
+                        outputs=[node.name],
+                        axis=0)
+                    return
             elif len(node.layer.input) == 4:
                 # opset 11
                 val_sizes = self.graph.get_input_node(node, idx=3, copy=True)
@@ -1629,7 +1666,6 @@ def Flatten(self, node):
     def Gemm(self, node):
         val_a = self.graph.get_input_node(node, idx=0, copy=True)
         val_b = self.graph.get_input_node(node, idx=1, copy=True)
-        val_c = self.graph.get_input_node(node, idx=2, copy=True)
         alpha = node.get_attr('alpha', 1.)  # optional
         beta = node.get_attr('beta', 1.)  # optional
         trans_a = bool(node.get_attr('transA', 0))  # optional
@@ -1646,10 +1682,19 @@ def Gemm(self, node):
             inputs=matmul_inputs,
             outputs=[val_mm],
             **attr_matmul)
-        self.paddle_graph.add_layer(
-            "paddle.scale", inputs={"x": val_mm}, outputs=[val_mm], scale=alpha)
+        if beta != 0:
+            self.paddle_graph.add_layer(
+                "paddle.scale",
+                inputs={"x": val_mm},
+                outputs=[val_mm],
+                scale=alpha)
+        else:
+            self.paddle_graph.add_layer(
+                "paddle.scale", inputs={"x": val_mm}, outputs=[node.name])

         if beta != 0:
+            # when beta is equal to 0, there is no val_c
+            val_c = self.graph.get_input_node(node, idx=2, copy=True)
             if beta == 1.:
                 add_inputs = {"x": val_mm, "y": val_c.name}
                 self.paddle_graph.add_layer(

From 1756aafa04c5450d469cc52119fa516a5c29401c Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 2 Aug 2022 19:00:15 +0800
Subject: [PATCH 007/101] add auto_test of 1 input ops

---
 .../test_auto_scan_one_input_ops_float32.py  | 2814 +++++++++++++++++
 1 file changed, 2814 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_one_input_ops_float32.py

diff --git a/tests/onnx/test_auto_scan_one_input_ops_float32.py b/tests/onnx/test_auto_scan_one_input_ops_float32.py
new file mode 100644
index 000000000..2c5a8860d
--- /dev/null
+++ b/tests/onnx/test_auto_scan_one_input_ops_float32.py
@@ -0,0 +1,2814 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
+from x2paddle.core.graph import GraphNode
+from x2paddle.core.util import *
+from functools import reduce
+import numpy as np
+import onnx
+import onnx.numpy_helper as numpy_helper
+from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
+import logging as _logging
+from collections import OrderedDict
+import math
+import os
+import copy
+import sys
+import shutil
+
+_logger = _logging.getLogger()
+
+
+def _const_weight_or_none(node, necessary=False):
+    if 'Constant' in node.layer_type:
+        return node.value
+    if isinstance(node, ONNXGraphDataNode):
+        return node.weight
+    if necessary:
+        assert '{} should be an initializer or Constant operator.'.format(
+            node.name)
+    return None
+
+
+def _rename_or_remove_weight(weights,
+                             origin_name,
+                             target_name=None,
+                             is_remove=True,
+                             rename_mapper=None):
+    '''
+    Rename parameters by Paddle's naming rule of parameters.
+
+    Args:
+        weights(dict[String:np.ndarray]): Dict that stores parameters; the key in weights is the name of a parameter.
+        origin_name(String): Name of parameter to rename or remove.
+        target_name(String, optional): if target_name is not None, add new key-value pair
+            {target_name:weights[origin_name]} to weights, and target_name must follow paddle's
+            naming rule of parameters. Default: None.
+        is_remove: if is_remove is True, remove origin key-value pair. Default: True.
+        rename_mapper: handles the case where the same data is used by multiple OPs; key is old_name, value is new_name.
+    Returns:
+        None
+    '''
+    if rename_mapper is not None and origin_name in rename_mapper:
+        origin_name = rename_mapper[origin_name]
+        is_remove = False
+    if origin_name not in weights:
+        raise KeyError('{} not a key in {}'.format(origin_name, weights.keys()))
+    if is_remove:
+        # remove weight
+        data = weights.pop(origin_name)
+    else:
+        data = weights[origin_name]
+    if target_name is not None:
+        # rename weight
+        weights[target_name] = data
+        rename_mapper[origin_name] = target_name
+
+
+def _is_static_shape(shape):
+    negtive_dims = 0
+    error_dims = 0
+    for dim in shape:
+        if dim < 0:
+            negtive_dims += 1
+        if dim < -1:
+            error_dims += 1
+    if negtive_dims > 1:
+        return False
+    if error_dims > 0:
+        return False
+    return True
+
+
+def _get_same_padding(in_size, kernel_size, stride, autopad):
+    new_size = int(math.ceil(in_size * 1.0 / stride))
+    pad_size = (new_size - 1) * stride + kernel_size - in_size
+    pad0 = int(pad_size / 2)
+    pad1 = pad_size - pad0
+    if autopad == "SAME_UPPER":
+        return [pad0, pad1]
+    if autopad == "SAME_LOWER":
+        return [pad1, pad0]
+
+
+def print_mapping_info(func):
+    def run_mapping(*args, **kwargs):
+        node = args[1]
+        try:
+            res = func(*args, **kwargs)
+        except:
+            raise Exception("convert failed node:{}, op_type is {}".format(
+                node.name[9:], node.layer_type))
+        else:
+            return res
+
+    return run_mapping
+
+
+class OpSet():
+    def __init__(self, decoder, paddle_graph):
+        super(OpSet, self).__init__()
+        self.graph = decoder.graph
+        self.paddle_graph = paddle_graph
+        self.inputs_info = dict()
+        self.weights = dict()
+        self.nn_name2id = dict()
+        self.done_weight_list = list()
+        # solve the case where the same data is used as an argument to multiple OPs.
+ # PR link(wangjunjie06): https://github.com/PaddlePaddle/X2Paddle/pull/728 + self.rename_mapper = dict() + self.elementwise_ops = { + 'Add': 'paddle.add', + 'Div': 'paddle.divide', + 'Sub': 'paddle.subtract', + 'Mul': 'paddle.multiply', + 'Pow': 'paddle.pow', + 'Less': 'paddle.less_than', + 'LessOrEqual': 'paddle.less_equal', + } + + self.directly_map_ops = { + 'Ceil': ['paddle.ceil'], + # reduce function + 'ReduceMean': [ + 'paddle.mean', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdims=True) + ], + 'ReduceMin': [ + 'paddle.min', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdim=True) + ], + 'ReduceMax': [ + 'paddle.max', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdim=True) + ], + 'ReduceProd': [ + 'paddle.prod', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdim=True) + ], + # active function + 'Relu': ['paddle.nn.ReLU'], + 'LeakyRelu': [ + 'paddle.nn.LeakyReLU', dict(alpha='negative_slope'), + dict(negative_slope=.01) + ], + 'Elu': + ['paddle.nn.functional.elu', dict(alpha='alpha'), dict(alpha=1.)], + 'ThresholdedRelu': [ + 'paddle.nn.functional.thresholded_relu', + dict(alpha='threshold'), dict(alpha=1.) + ], + 'Tanh': ['paddle.nn.Tanh'], + 'Sigmoid': ['paddle.nn.Sigmoid'], + 'Softsign': ['paddle.nn.Softsign'], + 'Softplus': [ + 'paddle.nn.Softplus', dict(threshold='threshold'), + dict(threshold=float(sys.maxsize)) + ], + 'Exp': ['paddle.exp'], + 'Log': ['paddle.log'], + 'LogSoftmax': [ + 'paddle.nn.functional.log_softmax', dict(axis='axis'), + dict(axis=1) + ], + 'Softmax': ['paddle.nn.Softmax', dict(axis='axis'), dict(axis=1)], + 'Sqrt': ['paddle.sqrt'], + 'Floor': ['paddle.floor'], + 'Abs': ['paddle.abs'], + 'Erf': ['paddle.erf'], + 'Sin': ['paddle.sin'], + 'Cos': ['paddle.cos'], + 'Atan': ['paddle.atan'], + 'Acos': ['paddle.acos'], + 'Asin': ['paddle.asin'], + 'IsInf':['paddle.isinf'], + 'IsNaN':['paddle.isnan'], + 'Cosh': ['paddle.cosh'], + 'Acosh': ['paddle.acosh'], + 'Asinh': ['paddle.asinh'], + } + + @print_mapping_info + def directly_map(self, node, *args, **kwargs): + inputs = node.layer.input + assert len(inputs) == 1, 'directly_map error with multi inputs' + input = self.graph.get_input_node(node, idx=0, copy=True) + onnx_attrs = node.attr_map + if '' in onnx_attrs: + onnx_attrs.pop('') + if '_' in onnx_attrs: + onnx_attrs.pop('_') + op_info = self.directly_map_ops[node.layer_type] + paddle_op = op_info[0] + layer_attrs = dict() + if len(op_info) > 1: + attrs_name_map_dict = op_info[1] + for onnx_attr_name, pd_attr_name in attrs_name_map_dict.items(): + if onnx_attr_name in onnx_attrs: + # trans 1 to True, 0 to False + if onnx_attr_name == "keepdims": + if onnx_attrs[onnx_attr_name] == 1: + layer_attrs[pd_attr_name] = True + else: + layer_attrs[pd_attr_name] = False + else: + layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name] + else: + layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name] + if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op: + op_name = paddle_op[10:].lower() + op_name = name_generator(op_name, self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + + self.paddle_graph.add_layer( + kernel=paddle_op, + inputs={"x": input.name}, + outputs=layer_outputs, + **layer_attrs) + else: + self.paddle_graph.add_layer( + kernel=paddle_op, + inputs={"x": input.name}, + outputs=[node.name], + **layer_attrs) + + @print_mapping_info + def elementwise_map(self, node): + op_type = self.elementwise_ops[node.layer_type] + val_x = 
self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + inputs_dict = {'x': val_x.name, 'y': val_y.name} + self.paddle_graph.add_layer( + op_type, inputs=inputs_dict, outputs=[node.name]) + + @print_mapping_info + def place_holder(self, node): + shape = node.out_shapes[0] + for i, dim_shape in enumerate(shape): + if dim_shape == 0 and i == 0: + shape[i] = 1 + if dim_shape == 0 and i != 0: + assert 'shape of input is not assigned' + self.paddle_graph.add_layer( + kernel="paddle.to_tensor", + inputs={}, + outputs=[node.name], + data=node.name) + self.inputs_info[node.name] = [shape, node.dtype] + + @print_mapping_info + def create_parameter(self, node, parameter=None): + if parameter is not None: + node = parameter + dtype = node.dtype + shape = node.out_shapes[0] + + if hasattr(node.weight, "shape") and len(node.weight.shape) == 0: + if node.weight == float('inf') or node.weight == float('-inf'): + node.weight = string(node.weight) + self.paddle_graph.add_layer( + "paddle.full", + inputs={}, + outputs=[node.name], + dtype=string(dtype), + shape=[1], + fill_value=node.weight) + else: + self.weights[node.name] = node.weight + self.paddle_graph.add_layer( + "self.create_parameter", + inputs={}, + outputs=[node.name], + shape=shape, + attr=string(node.name), + dtype=string(dtype), + default_initializer="paddle.nn.initializer.Constant(value=0.0)") + + def _pad_if_asymmetric(self, node, pads, val_name): # pads: SSEE + assert len(pads) & 1 == 0 + symmetric = True + ndims = len(pads) // 2 + for idx_dim in range(ndims): + if pads[idx_dim] != pads[ndims + idx_dim]: + symmetric = False + break + if symmetric: + return pads[:ndims], val_name + val_padded = self.Pad(node, op_independent=False) + return [0] * ndims, val_padded + + def _interpolate(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + inputs = {'x': val_x.name} + attrs = dict() + val_x_shape = val_x.out_shapes[0] + if node.layer_type == 'Resize': + if len(node.layer.input) == 2: + # opset 10 + val_scales = self.graph.get_input_node(node, idx=1, copy=True) + # TODO(syf): paddle.nn.functional.interpolate will support the length + # which is the same as the rank of input. + scale_values = _const_weight_or_none(val_scales) + if scale_values is not None: + attrs['scale_factor'] = self.weights[ + val_scales.name].tolist()[2:] + else: + var_nc, var_hw = val_scales.name + '_nc', val_scales.name + '_hw' + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_scales.name}, + outputs=[var_nc, var_hw], + num_or_sections=[2, 2], + axis=0) + inputs['scale_factor'] = var_hw + mode = node.get_attr('mode', 'nearest') + attrs.update({ + "align_corners": False, + "mode": string(mode), + "align_mode": 1 + }) + if mode == "linear" and len(val_x_shape) == 4: + attrs["mode"] = string("bilinear") + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.interpolate", + inputs=inputs, + outputs=[node.name], + **attrs) + return + elif len(node.layer.input) == 3: + # opset 11 + try: + #to avoid the error causeed by NULL value of resize inputs. + val_scales = self.graph.get_input_node( + node, idx=2, copy=True) + except: + val_scales = self.graph.get_input_node( + node, idx=1, copy=True) + # TODO(syf): paddle.nn.functional.interpolate will support the length + # which is the same as the rank of input. 
+ attrs['scale_factor'] = self.weights[val_scales.name].tolist()[ + 2:] + if len(val_x_shape) == 3: + val_scales = self.graph.get_input_node( + node, idx=2, copy=True) + val_scales_values = _const_weight_or_none(val_scales) + + attrs = { + "align_corners": False, + "mode": string(node.get_attr('mode', 'nearest')), + "scale_factor": + self.weights[val_scales.name].tolist()[1:] + } + mode = node.get_attr('mode', 'nearest') + if mode == "linear": + attrs["mode"] = string("bilinear") + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'pytorch_half_pixel': + attrs["align_corners"] = False + attrs["align_mode"] = 0 + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'align_corners': + attrs["align_corners"] = True + self.paddle_graph.add_layer( + 'paddle.unsqueeze', + inputs={"x": val_x.name}, + outputs=[val_x.name], + axis=0) + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.interpolate", + inputs=inputs, + outputs=[node.name], + **attrs) + self.paddle_graph.add_layer( + 'paddle.squeeze', + inputs={"x": node.name}, + outputs=[node.name], + axis=0) + return + elif len(node.layer.input) == 4: + # opset 11 + val_sizes = self.graph.get_input_node(node, idx=3, copy=True) + size_values = _const_weight_or_none(val_sizes) + if len(val_x_shape) == 3: + var_n, var_hw = val_sizes.name + '_n', val_sizes.name + '_hw' + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_sizes.name}, + outputs=[var_n, var_hw], + num_or_sections=[1, 2], + axis=0) + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": var_hw}, + outputs=[var_hw], + dtype=string('int32')) + inputs['size'] = var_hw + attrs = { + "align_corners": False, + "mode": string(node.get_attr('mode', 'nearest')) + } + mode = node.get_attr('mode', 'nearest') + if mode == "linear": + attrs["mode"] = string("bilinear") + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'pytorch_half_pixel': + attrs["align_corners"] = False + attrs["align_mode"] = 0 + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'align_corners': + attrs["align_corners"] = True + self.paddle_graph.add_layer( + 'paddle.unsqueeze', + inputs={"x": val_x.name}, + outputs=[val_x.name], + axis=0) + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.interpolate", + inputs=inputs, + outputs=[node.name], + **attrs) + self.paddle_graph.add_layer( + 'paddle.squeeze', + inputs={"x": node.name}, + outputs=[node.name], + axis=0) + else: + if size_values is not None: + attrs["size"] = [size_values[2], size_values[3]] + else: + var_nc, var_hw = val_sizes.name + '_nc', val_sizes.name + '_hw' + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_sizes.name}, + outputs=[var_nc, var_hw], + num_or_sections=[2, 2], + axis=0) + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": var_hw}, + outputs=[var_hw], + dtype=string('int32')) + inputs['size'] = var_hw + attrs.update({ + "align_corners": False, + "mode": string(node.get_attr('mode', 'nearest')) + }) + mode = node.get_attr('mode', 'nearest') + if mode == "linear": + attrs["mode"] = string("bilinear") + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'pytorch_half_pixel': + attrs["align_corners"] = False + attrs["align_mode"] = 0 + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'align_corners': + attrs["align_corners"] = True + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.interpolate", + inputs=inputs, + outputs=[node.name], + **attrs) + return + elif 
node.layer_type == 'Upsample': + if len(node.layer.input) == 2: + val_scales = self.graph.get_input_node(node, idx=1, copy=True) + self.paddle_graph.add_layer( + "paddle.slice", + inputs={"input": val_scales.name}, + outputs=[val_scales.name], + axes=[0], + starts=[2], + ends=[4]) + inputs['scale_factor'] = val_scales.name + else: + val_scales = node.get_attr('scales')[2:] + + mode = node.get_attr('mode', 'nearest') + attrs.update({ + "align_corners": False, + "mode": string(mode), + "align_mode": 1 + }) + if len(node.layer.input) == 1: + attrs["scale_factor"] = val_scales + if mode == "linear" and len(val_x_shape) == 4: + attrs["mode"] = string("bilinear") + if node.get_attr('coordinate_transformation_mode', + 'half_pixel') == 'pytorch_half_pixel': + attrs["align_corners"] = False + attrs["align_mode"] = 0 + else: + attrs["align_corners"] = True + self.paddle_graph.add_layer( + kernel="paddle.nn.functional.interpolate", + inputs=inputs, + outputs=[node.name], + **attrs) + + @print_mapping_info + def CumSum(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + axis = self.graph.get_input_node(node, idx=1, copy=True) + axis_values = _const_weight_or_none(axis) + assert axis_values is not None, 'Axis only support constant tensor!' + layer_attrs = {'axis': axis_values} + self.paddle_graph.add_layer( + 'paddle.cumsum', + inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + + @print_mapping_info + def HardSigmoid(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + alpha = node.get_attr('alpha', 0.2) + beta = node.get_attr('beta', 0.5) + self.paddle_graph.add_layer( + kernel="paddle.scale", + inputs={"x": val_x.name}, + outputs=[node.name + "_val"], + scale=alpha, + bias=beta) + self.paddle_graph.add_layer( + kernel="paddle.clip", + inputs={"x": node.name + "_val"}, + outputs=[node.name], + min=0.0, + max=1.0) + + @print_mapping_info + def Shape(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + kernel="paddle.shape", + inputs={"input": val_x.name}, + outputs=[node.name]) + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": node.name}, + outputs=[node.name], + dtype=string('int64')) + + @print_mapping_info + def RoiAlign(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_rois = self.graph.get_input_node(node, idx=1, copy=True) + + pooled_height = node.get_attr('output_height') + pooled_width = node.get_attr('output_width') + spatial_scale = node.get_attr('spatial_scale') + sampling_ratio = node.get_attr('sampling_ratio') + val_rois_shape = val_rois.name + '_shape' + self.paddle_graph.add_layer( + kernel="paddle.shape", + inputs={"input": val_rois.name}, + outputs=[val_rois_shape]) + val_rois_num = val_rois.name + '_num' + if len(val_rois.out_shapes[0]) == 4: + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_rois_shape}, + outputs=[val_rois_num, ' _', ' _', ' _'], + num_or_sections=[1, 1, 1, 1], + axis=0) + elif len(val_rois.out_shapes[0]) == 2: + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_rois_shape}, + outputs=[val_rois_num, ' _'], + num_or_sections=[1, 1], + axis=0) + layer_attrs = { + 'pooled_height': pooled_height, + 'pooled_width': pooled_width, + 'spatial_scale': spatial_scale, + 'sampling_ratio': sampling_ratio, + } + self.paddle_graph.add_layer( + 'custom_layer:ROIAlign', + inputs={ + 'input': val_x.name, + 'rois': val_rois.name, + 'rois_num': val_rois_num + }, + outputs=[node.name], + 
**layer_attrs) + + @print_mapping_info + def MaxRoiPool(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_rois = self.graph.get_input_node(node, idx=1, copy=True) + + spatial_scale = node.get_attr('spatial_scale') + pooled_height, pooled_width = node.get_attr('pooled_shape') + layer_attrs = { + 'pooled_height': pooled_height, + 'pooled_width': pooled_width, + 'spatial_scale': spatial_scale, + } + self.paddle_graph.add_layer( + 'custom_layer:ROIPooling', + inputs={'input': val_x.name, + 'rois': val_rois.name}, + outputs=[node.name], + **layer_attrs) + + @print_mapping_info + def Pad(self, node, op_independent=True): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + pads = node.get_attr('pads') + is_pads_attr = True + if pads is None: + val_pad = self.graph.get_input_node(node, idx=1, copy=True) + pad_shape = val_pad.out_shapes[0] + is_pads_attr = False + pads = _const_weight_or_none(val_pad) + if pads is not None: + is_pads_attr = True + mode = node.get_attr('mode', 'constant') + if mode in ["edge"]: + mode = "replicate" + value = node.get_attr('value', 0.) + data_shape = val_x.out_shapes[0] + output_shape = node.out_shapes[0] + assume_pad = False + layer_attrs = {} + layer_attrs['mode'] = string(mode) + layer_attrs['value'] = value + if not op_independent: + output_name = node.name + '_paded' + else: + output_name = node.name + nn_op_name = name_generator("pad", self.nn_name2id) + layer_outputs = [nn_op_name, output_name] + if is_pads_attr: + paddings = [] + if len(pads) == 10 and sum(pads) == 0: + pads = pads[0:6] + if len(pads) in [2, 4, 6]: + if data_shape: + assume_pad |= data_shape and 2 * (len(data_shape) - 2 + ) == len(pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * (len(output_shape) - 2 + ) == len(pads) # NCHW + if assume_pad: + paddle_op = 'paddle.nn.Pad{}D'.format(len(output_shape) - 2) + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32") + paddings = np.flip(paddings, axis=0).flatten().tolist() + layer_attrs['padding'] = paddings + else: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == len( + pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len( + output_shape) == len(pads) # NCHW + if assume_pad: + paddle_op = 'paddle.nn.functional.pad' + paddings = np.array(pads).reshape( + (2, + -1)).transpose().astype("int32").flatten().tolist() + layer_attrs['pad'] = paddings + else: + raise Exception("The padding value {} is wrong!".format( + pads)) + elif len(pads) == 8: + if data_shape: + assume_pad |= data_shape and 2 * len(data_shape) == len( + pads) # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len(output_shape) == len( + pads) # NCHW + if assume_pad: + paddle_op = 'paddle.nn.Pad2D' + # x1_begin,x2_begin,x3_begin,x4_begin,x1_end,x2_end,x3_end,x4_end->x1_begin,x1_end,x2_begin,x2_end,x3_begin,x3_end,x4_begin,x4_end + paddings = np.array(pads).reshape( + (2, -1)).transpose().astype("int32") + if mode == 'constant': + paddings = paddings.flatten().tolist() + layer_attrs['padding'] = paddings + else: + paddings = np.flip(paddings, axis=0).flatten().tolist() + if sum(paddings[:4]) == 0: + paddings = paddings[4:] + layer_attrs['padding'] = paddings + else: + layer_attrs["pad"] = paddings + paddle_op = "custom_layer:PadAllDim4WithOneInput" + else: + paddle_op = 'paddle.nn.functional.pad' + layer_attrs["pad"] = np.array(pads).tolist() + else: + pad_data_temp = pads[0::2] + pad_data_all = [] + for i in range(len(pad_data_temp)): + 
pad_data_all.append(pads[i]) + pad_data_all.append(pads[len(pad_data_temp) + i]) + + layer_attrs["pad"] = pad_data_all + self.paddle_graph.add_layer( + 'paddle.nn.functional.pad', + inputs={'x': val_x.name}, + outputs=layer_outputs[1:], + **layer_attrs) + return + + self.paddle_graph.add_layer( + paddle_op, + inputs={'x': val_x.name}, + outputs=layer_outputs[1:] + if paddle_op == 'paddle.nn.functional.pad' else layer_outputs, + **layer_attrs) + if not op_independent: + return node.name + '_paded' + else: + pads_len = val_pad.out_shapes[0][0] + if pads_len in [2, 4, 6]: + if data_shape: + assume_pad |= data_shape and 2 * (len(data_shape) - 2 + ) == pads_len # NCHW + if output_shape: + assume_pad |= output_shape and 2 * (len(output_shape) - 2 + ) == pads_len # NCHW + if assume_pad: + if pads_len == 2: + data_format = "NCL" + elif pads_len == 4: + data_format = "NCHW" + else: + data_format = "NCDHW" + self.paddle_graph.add_layer( + "custom_layer:PadWithTwoInput", + inputs={'x': val_x.name, + 'pad': val_pad.name}, + outputs=layer_outputs, + value=value, + mode=string(mode), + data_format=string(data_format)) + else: + if data_shape: + assume_pad |= data_shape and 2 * len( + data_shape) == pads_len # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len( + output_shape) == pads_len # NCHW + if assume_pad: + if pads_len == 4: + self.paddle_graph.add_layer( + "custom_layer:PadAllDim2", + inputs={'x': val_x.name, + 'pad': val_pad.name}, + outputs=layer_outputs, + value=value, + mode=string(mode)) + else: + raise Exception("The padding value is wrong!") + elif pads_len == 8: + if data_shape: + assume_pad |= data_shape and 2 * len( + data_shape) == pads_len # NCHW + if output_shape: + assume_pad |= output_shape and 2 * len( + output_shape) == pads_len # NCHW + if assume_pad: + self.paddle_graph.add_layer( + "custom_layer:PadAllDim4", + inputs={'x': val_x.name, + 'pad': val_pad.name}, + outputs=layer_outputs, + value=value, + mode=string(mode)) + else: + raise Exception("The padding value is wrong!") + if not op_independent: + return node.name + '_paded' + + @print_mapping_info + def Unsqueeze(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + axes = node.get_attr('axes') + if axes is None: + axes_node = self.graph.get_input_node(node, idx=1, copy=True) + axes = _const_weight_or_none(axes_node, necessary=True) + # deal with scalar(0D) tensor + if len(val_x.out_shapes[0]) == 0 and len(axes) == 1 and axes[0] == 0: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": val_x.name}, + outputs=[node.name], + shape=[1]) + else: + self.paddle_graph.add_layer( + 'paddle.unsqueeze', + inputs={"x": val_x.name}, + axis=axes, + outputs=[node.name]) + + @print_mapping_info + def Shrink(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + bias = node.get_attr('bias') + lambd = node.get_attr('lambd') + assert bias == 0.0, 'not support bias!=0' + self.paddle_graph.add_layer( + 'paddle.nn.functional.hardshrink', + inputs={"x": val_x.name}, + outputs=[node.name], + threshold=lambd) + + @print_mapping_info + def Constant(self, node): + val_output = self.graph.get_node(node.layer.output[0], copy=True) + + value = node.get_attr('value') + dtype = np.dtype(value.dtype) + output_dtype = val_output.dtype + if output_dtype: + assert dtype == output_dtype, 'tensor dtype unmatches storage dtype' + + shape = node.get_attr('shape', None) + + if shape is None: + shape = val_output.out_shapes[0] + if shape is None: + shape = list(value.shape) + _logger.warning('in 
(Constant -> %s): ' + 'attribute "shape" of %s not inferred, ' + 'using value as 1-D tensor may lead to fails', + val_output.name, val_output.name) + if len(value) == 1: + value = value.tolist() + value = value[0] + if value == float('inf') or value == float('-inf'): + value = string(value) + self.paddle_graph.add_layer( + "paddle.full", + inputs={}, + outputs=[node.name], + dtype=string(dtype), + shape=[1], + fill_value=value) + else: + value = np.reshape(value, shape) + self.weights[node.name] = value + self.paddle_graph.add_layer( + "self.create_parameter", + inputs={}, + outputs=[node.name], + shape=shape, + attr=string(node.name), + dtype=string(dtype), + default_initializer="paddle.nn.initializer.Constant(value=0.0)") + + @print_mapping_info + def Resize(self, node): + self._interpolate(node) + + @print_mapping_info + def Upsample(self, node): + self._interpolate(node) + + @print_mapping_info + def InstanceNormalization(self, node): + op_name = name_generator("instanse_norm", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_scale = self.graph.get_input_node(node, idx=1, copy=True) + val_b = self.graph.get_input_node(node, idx=2, copy=True) + epsilon = node.get_attr('epsilon', 1e-5) + self.weights[op_name + '.scale'] = self.weights[val_scale.name] + self.weights[op_name + '.bias'] = self.weights[val_b.name] + layer_attrs = { + 'num_features': node.out_shapes[0][1], + 'epsilon': epsilon, + } + dim = len(val_x.out_shapes[0]) + if dim == 3: + paddle_op = "paddle.nn.InstanceNorm1D" + elif dim == 4: + paddle_op = "paddle.nn.InstanceNorm2D" + elif dim == 5: + paddle_op = "paddle.nn.InstanceNorm3D" + else: + raise Exception( + "The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization." 
+ ) + self.paddle_graph.add_layer( + paddle_op, + inputs={"x": val_x.name}, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def Expand(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_shape = self.graph.get_input_node(node, idx=1, copy=True) + val_x_dtype = val_x.dtype + name_ones = node.name + '_ones' + shape_values = _const_weight_or_none(val_shape) + if shape_values is None: + attr_ones = { + 'shape': val_shape.name, + 'dtype': string(val_x_dtype), + 'fill_value': 1 + } + else: + attr_ones = { + 'shape': shape_values.tolist(), + 'dtype': string(val_x_dtype), + 'fill_value': 1 + } + self.paddle_graph.add_layer( + 'paddle.full', inputs={}, outputs=[name_ones], **attr_ones) + inputs_dict = {'x': name_ones, 'y': val_x.name} + self.paddle_graph.add_layer( + 'paddle.multiply', inputs=inputs_dict, outputs=[node.name]) + + @print_mapping_info + def GatherND(self, node): + x = self.graph.get_input_node(node, idx=0, copy=True) + index = self.graph.get_input_node(node, idx=1, copy=True) + inputs = {'x': x.name, 'index': index.name} + self.paddle_graph.add_layer( + "paddle.gather_nd", inputs=inputs, outputs=[node.name]) + + @print_mapping_info + def Gather(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + indices = self.graph.get_input_node(node, idx=1, copy=True) + indices_values = _const_weight_or_none(indices, necessary=True) + if isinstance(indices_values, np.ndarray): + indices_values = indices_values.tolist() + indices_shape = indices.out_shapes[0] + val_x_shape = val_x.out_shapes[0] + axis = node.get_attr('axis', 0) + if len(indices_shape) == 1 or \ + (indices_values is not None and isinstance(indices_values, int)) or \ + (indices_values is not None and len(indices_values) == 1): + self.paddle_graph.add_layer( + 'paddle.gather', + inputs={'x': val_x.name, + 'index': indices.name}, + outputs=[node.name], + axis=axis) + # deal with indice is scalar(0D) Tensor + if isinstance(indices_values, int) and len(val_x_shape) > 1: + self.paddle_graph.add_layer( + 'paddle.squeeze', + inputs={'x': node.name}, + outputs=[node.name], + axis=[axis]) + else: + # if val_x is DataNode, convert gather to embedding + if axis == 0 and isinstance(val_x, ONNXGraphDataNode): + indices_cast = indices.name + '_cast' + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": indices.name}, + outputs=[indices_cast], + dtype=string('int64')) + op_name = name_generator("embedding", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + self.weights[op_name + '.weight'] = _const_weight_or_none(val_x) + self.paddle_graph.add_layer( + 'paddle.nn.Embedding', + inputs={"x": indices_cast}, + outputs=layer_outputs, + num_embeddings=val_x_shape[0], + embedding_dim=val_x_shape[1]) + else: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": indices.name}, + outputs=[indices.name + "_reshape"], + shape=[-1]) + gather_1d = node.name + '_1D' + self.paddle_graph.add_layer( + 'paddle.gather', + inputs={ + 'x': val_x.name, + 'index': indices.name + "_reshape" + }, + outputs=[gather_1d], + axis=axis) + # if shape is known + if len(indices_shape) != 0 and len(val_x_shape) != 0: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={'x': gather_1d}, + outputs=[node.name], + shape=val_x_shape[:axis] + indices_shape + + val_x_shape[axis + 1:]) + else: + all_shape_name = list() + self.paddle_graph.add_layer( + kernel="paddle.shape", + inputs={"input": val_x.name}, + outputs=[val_x.name + "_shape"]) + 
self.paddle_graph.add_layer( + kernel="paddle.shape", + inputs={"input": indices.name}, + outputs=[indices.name + "_shape"]) + self.paddle_graph.add_layer( + "paddle.slice", + inputs={"input": val_x.name + "_shape"}, + outputs=[val_x.name + "_shape_slice_start"], + axes=[0], + starts=[0], + ends=[axis]) + all_shape_name.append(val_x.name + "_shape_slice_start") + all_shape_name.append(indices.name + "_shape") + self.paddle_graph.add_layer( + "paddle.slice", + inputs={"input": val_x.name + "_shape"}, + outputs=[val_x.name + "_shape_slice_end"], + axes=[0], + starts=[axis + 1], + ends=[2147483647]) + all_shape_name.append(val_x.name + "_shape_slice_end") + self.paddle_graph.add_layer( + 'paddle.concat', + inputs={"x": all_shape_name}, + outputs=[node.name + "_all_shape"], + axis=0) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={'x': gather_1d}, + outputs=[node.name], + shape=node.name + "_all_shape") + + @print_mapping_info + def ScatterND(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + indices = self.graph.get_input_node(node, idx=1, copy=True) + updates = self.graph.get_input_node(node, idx=2, copy=True) + if len(indices.out_shapes[0]) == 1: + self.paddle_graph.add_layer( + 'paddle.scatter', + inputs={ + 'x': val_x.name, + 'index': indices.name, + 'updates': updates.name + }, + outputs=[node.name]) + else: + input_inner_indices = node.name + '_input_inner_indices' + shape = val_x.out_shapes[0] + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": indices.name}, + outputs=[indices.name], + shape=indices.out_shapes[0]) + + zeros_like_val_x = val_x.name + '_zeros' + self.paddle_graph.add_layer( + 'paddle.zeros_like', + inputs={"x": val_x.name}, + outputs=[zeros_like_val_x]) + self.paddle_graph.add_layer( + 'paddle.scatter_nd_add', + inputs={ + 'x': zeros_like_val_x, + 'index': indices.name, + 'updates': updates.name + }, + outputs=[input_inner_indices]) + indices_mask = node.name + '_indices_mask' + constant_minus_one = node.name + '_constant_minus_one' + # full_like support create tensor shape like input tensor + self.paddle_graph.add_layer( + 'paddle.full_like', + inputs={"x": updates.name}, + outputs=[constant_minus_one], + dtype=string(updates.dtype), + fill_value=-1) + self.paddle_graph.add_layer( + 'paddle.scatter_nd_add', + inputs={ + 'x': zeros_like_val_x, + 'index': indices.name, + 'updates': constant_minus_one + }, + outputs=[indices_mask]) + constant_one = node.name + '_constant_1' + # full_like support create tensor shape like input tensor + self.paddle_graph.add_layer( + 'paddle.full_like', + inputs={"x": val_x.name}, + outputs=[constant_one], + dtype=string(val_x.dtype), + fill_value=1) + input_out_indices_mask = node.name + '_input_out_indices_mask' + self.paddle_graph.add_layer( + "paddle.add", + inputs={"x": indices_mask, + "y": constant_one}, + outputs=[input_out_indices_mask]) + + input_out_indices = node.name + '_input_out_indices' + self.paddle_graph.add_layer( + "paddle.multiply", + inputs={"x": val_x.name, + "y": input_out_indices_mask}, + outputs=[input_out_indices]) + + self.paddle_graph.add_layer( + "paddle.add", + inputs={"x": input_inner_indices, + "y": input_out_indices}, + outputs=[node.name]) + + @print_mapping_info + def Range(self, node): + val_start = self.graph.get_input_node(node, idx=0, copy=True) + val_limit = self.graph.get_input_node(node, idx=1, copy=True) + val_delta = self.graph.get_input_node(node, idx=2, copy=True) + dtype = val_start.dtype + inputs = { + 'start': val_start.name, + 'end': 
val_limit.name, + 'step': val_delta.name + } + self.paddle_graph.add_layer( + 'paddle.arange', + inputs=inputs, + outputs=[node.name], + dtype=string(dtype)) + + @print_mapping_info + def Slice(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + starts, ends, axes, steps = None, None, None, None + layer_attrs = {} + if val_x.dtype == 'uint8': + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": val_x.name}, + outputs=[val_x.name], + dtype=string('int32')) + if len(node.inputs) > 1: + starts = self.graph.get_input_node(node, idx=1, copy=True) + ends = self.graph.get_input_node(node, idx=2, copy=True) + starts_value = _const_weight_or_none(starts) + if starts_value is not None: + starts_value = starts_value.tolist() + ends_value = _const_weight_or_none(ends) + if ends_value is not None: + ends_value = ends_value.tolist() + if len(node.inputs) > 2: + s_len = len(val_x.out_shapes[0]) + axes = list(range(s_len)) + if len(node.inputs) > 3: + axes_node = self.graph.get_input_node(node, idx=3, copy=True) + axes = _const_weight_or_none(axes_node, necessary=True).tolist() + if len(node.inputs) > 4: + steps = self.graph.get_input_node(node, idx=4, copy=True) + steps = _const_weight_or_none(steps).tolist() + + layer_attrs = { + "axes": axes, + "starts": starts.name, + "ends": ends.name + } + if starts_value is not None and ends_value is not None and axes is not None: + starts_value = starts_value.copy() + ends_value = ends_value.copy() + for idx in range(len(ends_value)): + if len(val_x.out_shapes[0]) != 0 and starts_value[ + idx] >= val_x.out_shapes[0][axes[ + idx]] and val_x.out_shapes[0][axes[idx]] > 0: + starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1 + ends_value[idx] = val_x.out_shapes[0][axes[idx]] + elif ends_value[idx] > 2**31 - 1: + ends_value[idx] = 2**31 - 1 + + layer_attrs = { + "axes": axes, + "starts": starts_value, + "ends": ends_value + } + else: + if starts.dtype != 'int32': + starts_cast = starts.name + '_cast' + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": starts.name}, + outputs=[starts_cast], + dtype=string('int32')) + layer_attrs['starts'] = starts_cast + if ends.dtype != 'int32': + ends_cast = ends.name + '_cast' + else: + ends_cast = ends.name + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": ends.name}, + outputs=[ends_cast], + dtype=string('int32')) + layer_attrs['ends'] = ends_cast + else: + starts = node.get_attr('starts') + ends = node.get_attr('ends') + axes = node.get_attr('axes') + output_shape = val_x.out_shapes[0] + + if axes is None: + axes = [i for i in range(len(starts))] + for idx in range(len(ends)): + if ends[idx] > 2**31 - 1: + ends[idx] = 2**31 - 1 + layer_attrs = {"axes": axes, "starts": starts, "ends": ends} + + if steps is not None: + layer_attrs['strides'] = steps + self.paddle_graph.add_layer( + 'paddle.strided_slice', + inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + else: + self.paddle_graph.add_layer( + 'paddle.slice', + inputs={"input": val_x.name}, + outputs=[node.name], + **layer_attrs) + if val_x.dtype == 'uint8': + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": node.name}, + outputs=[node.name], + dtype=string('uint8')) + + @print_mapping_info + def ConstantOfShape(self, node): + val_shape = self.graph.get_input_node(node, idx=0, copy=True) + + value = node.get_attr('value') + dtype = value.dtype + value = value.tolist() + assert len(value) == 1, ('given value not Scalar, shape of value > 1, ' + 'this is not supported') + if len(value) == 1: + 
value = value[0] + if value == float('inf') or value == float('-inf'): + value = string(value) + layer_attrs = {'dtype': string(dtype), 'fill_value': value} + self.paddle_graph.add_layer( + "paddle.full", + inputs={'shape': val_shape.name}, + outputs=[node.name], + **layer_attrs) + + @print_mapping_info + def Clip(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_node(node.layer.output[0], copy=True) + max_value, min_value = None, None + if len(node.inputs) == 1: + max_value = node.get_attr('max') + min_value = node.get_attr('min') + layer_attrs = { + 'max': max_value, + 'min': min_value, + } + + self.paddle_graph.add_layer( + 'paddle.clip', + inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + else: + if len(node.inputs) == 2: + val_ipt = self.graph.get_input_node(node, idx=1, copy=True) + + index = node.get_input_index(val_ipt.name) + + val_value = _const_weight_or_none(val_ipt) + if val_value.shape == (1, ): + val_value = val_value[0] + + if index == 1: + layer_attrs = {'min': val_value} + + if index == 2: + layer_attrs = {'max': val_value} + + self.paddle_graph.add_layer( + 'paddle.clip', + inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + else: + if len(node.inputs) == 3: + min_ipt = self.graph.get_input_node(node, idx=1, copy=True) + max_ipt = self.graph.get_input_node(node, idx=2, copy=True) + self.paddle_graph.add_layer( + 'paddle.clip', + inputs={ + "x": val_x.name, + "min": min_ipt.name, + "max": max_ipt.name + }, + outputs=[node.name]) + else: + raise Exception("max_value or min_value can't be None") + + @print_mapping_info + def ReduceSum(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + if len(node.inputs) == 1: + keepdims = node.get_attr('keepdims') + if keepdims is None: + keepdims = True + axes_value = node.get_attr('axes') + layer_attrs = {'axis': axes_value, 'keepdim': keepdims} + self.paddle_graph.add_layer( + 'paddle.sum', + inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + else: + axes = self.graph.get_input_node(node, idx=1, copy=True) + axes_value = _const_weight_or_none(axes) + if axes_value.shape == (1, ): + axes_value = axes_value[0] + keepdims = node.get_attr('keepdims') + if keepdims is None: + layer_attrs = {'axis': axes_value} + else: + layer_attrs = {'axis': axes_value, 'keepdim': keepdims} + + self.paddle_graph.add_layer( + 'paddle.sum', + inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + + @print_mapping_info + def Max(self, node): + if len(node.inputs) == 2: + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + self.paddle_graph.add_layer( + "paddle.maximum", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[node.name]) + else: + val_x = self.graph.get_input_node(node, idx=0, copy=True) + temp_name = "max_" + for i in range(1, len(node.inputs)): + val_y = self.graph.get_input_node(node, idx=i, copy=True) + temp_name = temp_name + str(i) + if i == len(node.inputs) - 1: + self.paddle_graph.add_layer( + "paddle.maximum", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[node.name]) + else: + self.paddle_graph.add_layer( + "paddle.maximum", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[temp_name]) + val_x.name = temp_name + + @print_mapping_info + def Min(self, node): + if len(node.inputs) == 2: + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + 
self.paddle_graph.add_layer( + "paddle.minimum", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[node.name]) + else: + val_x = self.graph.get_input_node(node, idx=0, copy=True) + temp_name = "min_" + for i in range(1, len(node.inputs)): + val_y = self.graph.get_input_node(node, idx=i, copy=True) + temp_name = temp_name + str(i) + if i == len(node.inputs) - 1: + self.paddle_graph.add_layer( + "paddle.minimum", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[node.name]) + else: + self.paddle_graph.add_layer( + "paddle.minimum", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[temp_name]) + val_x.name = temp_name + + @print_mapping_info + def GreaterOrEqual(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + self.paddle_graph.add_layer( + "paddle.greater_equal", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[node.name]) + + @print_mapping_info + def And(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + self.paddle_graph.add_layer( + "paddle.logical_and", + inputs={"x": val_x.name, + "y": val_y.name}, + outputs=[node.name]) + + @print_mapping_info + def Split(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + paddle_op = 'split' + split = node.get_attr('split') + axis = node.get_attr('axis', 0) + if split is None: + split_num = len(node.layer.output) + try: + #split is an input of this node + split_node = self.graph.get_input_node(node, idx=1, copy=True) + split_value = _const_weight_or_none(split_node) + layer_attrs = { + 'num_or_sections': split_value.tolist(), + 'axis': axis, + } + except: + layer_attrs = { + 'num_or_sections': split_num, + 'axis': axis, + } + outputs_list = list() + for i in range(len(node.layer.output)): + if hasattr(node, 'index'): + outputs_list.append("{}_p{}".format(node.layer_name, i)) + else: + outputs_list.append("{}".format(node.layer_name)) + if split_num > 1: + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_x.name}, + outputs=outputs_list, + **layer_attrs) + else: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": val_x.name}, + outputs=outputs_list, + dtype=string(val_x.dtype)) + + else: + layer_attrs = { + 'num_or_sections': split, + 'axis': axis, + } + outputs_list = list() + if isinstance(split, list) or isinstance(split, tuple): + if len(split) == 1: + outputs_list.append(node.name) + else: + for i in range(len(split)): + outputs_list.append("{}_p{}".format(node.layer_name, i)) + else: + outputs_list.append(node.name) + if len(split) > 1: + self.paddle_graph.add_layer( + 'paddle.split', + inputs={"x": val_x.name}, + outputs=outputs_list, + **layer_attrs) + else: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": val_x.name}, + outputs=outputs_list, + dtype=string(val_x.dtype)) + + @print_mapping_info + def Reshape(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_shape = self.graph.get_input_node(node, idx=1, copy=True) + val_reshaped = self.graph.get_node(node.layer.output[0], copy=True) + shape_value = _const_weight_or_none(val_shape) + shape_dims = len(val_shape.out_shapes[0]) + + if shape_value is not None: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={'x': val_x.name}, + outputs=[node.name], + shape=shape_value.tolist()) + elif len(node.out_shapes[0]) > 0 and _is_static_shape(node.out_shapes[ + 0]): + self.paddle_graph.add_layer( + 
'paddle.reshape', + inputs={'x': val_x.name}, + outputs=[node.name], + shape=node.out_shapes[0]) + else: + # shape may be [], come form Gather by scalar indices + if len(val_shape.out_shapes[0]) > 0: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={'x': val_shape.name}, + outputs=[val_shape.name], + shape=val_shape.out_shapes[0]) + if val_shape.dtype != "int32": + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={'x': val_shape.name}, + outputs=[val_shape.name], + dtype=string("int32")) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={'x': val_x.name, + 'shape': val_shape.name}, + outputs=[node.name]) + + @print_mapping_info + def Cast(self, node): + val_input = self.graph.get_input_node(node, idx=0, copy=True) + val_output = self.graph.get_node(node.layer.output[0], copy=True) + + dtype = node.get_attr('to') + if not isinstance(dtype, np.dtype): + dtype = TENSOR_TYPE_TO_NP_TYPE[dtype] + + output_dtype = val_output.dtype + if output_dtype: + assert dtype == output_dtype, 'dtype of to unmatches output' + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={'x': val_input.name}, + outputs=[node.name], + dtype=string(dtype)) + + @print_mapping_info + def Not(self, node): + val_input = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + 'paddle.logical_not', + inputs={'x': val_input.name}, + outputs=[node.name]) + + @print_mapping_info + def AveragePool(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + + auto_pad = node.get_attr('auto_pad', 'NOTSET') + kernel_shape = node.get_attr("kernel_shape") + poolnd = len(kernel_shape) + strides = node.get_attr("strides") + pad_mode = node.get_attr("pads") + ceil_mode = bool(node.get_attr('ceil_mode', 0)) + pads = node.get_attr('pads', [0] * (poolnd * 2)) + + paddings, val_x = self._pad_if_asymmetric(node, pads, val_x) + + if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER": + input_shape = val_x.out_shapes[0] + pad_h = _get_same_padding(input_shape[2], kernel_shape[0], + strides[0], auto_pad) + pad_w = _get_same_padding(input_shape[3], kernel_shape[1], + strides[1], auto_pad) + paddings = pad_h + pad_w + + op_name = name_generator("pool", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + paddle_op = 'paddle.nn.AvgPool{}D'.format(poolnd) + assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported' + layer_attrs = { + "kernel_size": kernel_shape, + "stride": strides, + "padding": paddings, + "ceil_mode": ceil_mode, + "exclusive": 'True', + } + self.paddle_graph.add_layer( + paddle_op, + inputs={'x': val_x if isinstance(val_x, str) else val_x.name}, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def Concat(self, node): + inputs_list = [] + dtypes = set() + for i in range(len(node.layer.input)): + ipt = self.graph.get_input_node(node, idx=i, copy=True) + inputs_list.append(ipt.name) + dtypes.add(ipt.dtype) + if len(dtypes) > 1: + assert 'Unspported situation happened, please create issue on https://github.com/PaddlePaddle/X2Paddle/issues.' 
+ axis = node.get_attr('axis') + self.paddle_graph.add_layer( + 'paddle.concat', + inputs={"x": inputs_list}, + outputs=[node.name], + axis=axis) + + @print_mapping_info + def Flatten(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + output_shape = val_x.out_shapes[0] + axis = node.get_attr('axis', 1) + if axis == 0: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": val_x.name}, + outputs=[node.name], + shape=[1, -1]) + else: + if len(output_shape) != 0: + shape_list = [1, 1] + for s in output_shape[:axis]: + shape_list[0] *= s + for s in output_shape[axis:]: + shape_list[1] *= s + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": val_x.name}, + outputs=[node.name], + shape=shape_list) + else: + # flatten + reshape + self.paddle_graph.add_layer( + "paddle.flatten", + inputs={"input": val_x.name}, + outputs=[val_x.name + "_flatten"], + start_axis=[0], + stop_axis=[axis]) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={'x': val_x.name + "_flatten"}, + outputs=[node.name], + shape=[0, -1]) + + @print_mapping_info + def Gemm(self, node): + val_a = self.graph.get_input_node(node, idx=0, copy=True) + val_b = self.graph.get_input_node(node, idx=1, copy=True) + + alpha = node.get_attr('alpha', 1.) # optional + beta = node.get_attr('beta', 1.) # optional + trans_a = bool(node.get_attr('transA', 0)) # optional + trans_b = bool(node.get_attr('transB', 0)) # optional + val_mm = node.name + '_mm' + matmul_inputs = {"x": val_a.name, "y": val_b.name} + attr_matmul = { + "transpose_x": trans_a, + "transpose_y": trans_b, + } + self.paddle_graph.add_layer( + 'paddle.matmul', + inputs=matmul_inputs, + outputs=[val_mm], + **attr_matmul) + if beta != 0: + self.paddle_graph.add_layer( + "paddle.scale", + inputs={"x": val_mm}, + outputs=[val_mm], + scale=alpha) + else: + self.paddle_graph.add_layer( + "paddle.scale", inputs={"x": val_mm}, outputs=[node.name]) + + if beta != 0: + # when beta is equal to 0, there is no val_c + val_c = self.graph.get_input_node(node, idx=2, copy=True) + if beta == 1.: + add_inputs = {"x": val_mm, "y": val_c.name} + self.paddle_graph.add_layer( + "paddle.add", inputs=add_inputs, outputs=[node.name]) + else: + var_beta = node.name + '_beta' + self.paddle_graph.add_layer( + "paddle.scale", + inputs={"x": val_c.name}, + outputs=[var_beta], + scale=beta) + add_inputs = {"x": val_mm, "y": var_beta} + self.paddle_graph.add_layer( + "paddle.add", inputs=add_inputs, outputs=[node.name]) + + @print_mapping_info + def Sum(self, node): + val_inps = node.layer.input + inputs_dict = { + "x": self.graph.get_input_node( + node, idx=0, copy=True).name, + "y": self.graph.get_input_node( + node, idx=1, copy=True).name, + } + self.paddle_graph.add_layer( + "paddle.add", inputs=inputs_dict, outputs=[node.name]) + + for idx, ipt in enumerate(val_inps[2:]): + y = self.graph.get_input_node(node, idx=idx, copy=True) + inputs_dict = { + "x": node.name, + "y": y.name, + } + self.paddle_graph.add_layer( + "paddle.add", inputs=inputs_dict, outputs=[node.name]) + + @print_mapping_info + def MatMul(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + x_shape = val_x.out_shapes[0] + y_shape = val_y.out_shapes[0] + inputs_dict = {"x": val_x.name, "y": val_y.name} + if len(y_shape) != 0 and y_shape[0] == 1 and len( + x_shape) != 0 and x_shape[-1] != 1 and x_shape[0] != 1: + y_squeeze = val_y.name + '_squeeze' + self.paddle_graph.add_layer( + "paddle.squeeze", + 
inputs={"x": val_y.name}, + outputs=[y_squeeze], + axis=[0]) + inputs_dict['y'] = y_squeeze + self.paddle_graph.add_layer( + "paddle.matmul", inputs=inputs_dict, outputs=[node.name]) + else: + self.paddle_graph.add_layer( + "paddle.matmul", inputs=inputs_dict, outputs=[node.name]) + + @print_mapping_info + def BatchNormalization(self, node): + op_name = name_generator("batchnorm", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_scale = self.graph.get_input_node(node, idx=1, copy=True) + val_b = self.graph.get_input_node(node, idx=2, copy=True) + val_mean = self.graph.get_input_node(node, idx=3, copy=True) + val_var = self.graph.get_input_node(node, idx=4, copy=True) + + momentum = node.get_attr('momentum', .9) + epsilon = node.get_attr('epsilon', 1e-5) + c = val_x.out_shapes[0][1] + + # solved the same data is used as an argument to multiple OPs. + _rename_or_remove_weight( + self.weights, + val_scale.name, + op_name + '.weight', + rename_mapper=self.rename_mapper) + _rename_or_remove_weight( + self.weights, + val_b.name, + op_name + '.bias', + rename_mapper=self.rename_mapper) + _rename_or_remove_weight( + self.weights, + val_var.name, + op_name + '._variance', + rename_mapper=self.rename_mapper) + _rename_or_remove_weight( + self.weights, + val_mean.name, + op_name + '._mean', + rename_mapper=self.rename_mapper) + + # Attribute: spatial is used in BatchNormalization-1,6,7 + spatial = bool(node.get_attr('spatial')) + layer_attrs = { + "num_channels": c, + "momentum": momentum, + "epsilon": epsilon, + "is_test": True, + "use_global_stats": False, + } + self.paddle_graph.add_layer( + "paddle.nn.BatchNorm", + inputs={"x": val_x.name}, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def Transpose(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + s_len = len(val_x.out_shapes[0]) + perm_default = list(range(s_len)) + perm_default.reverse() + perm = node.get_attr('perm', perm_default) + self.paddle_graph.add_layer( + "paddle.transpose", + inputs={"x": val_x.name}, + outputs=[node.name], + perm=perm) + + @print_mapping_info + def PRelu(self, node): + op_name = name_generator("prelu", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_slope = self.graph.get_input_node(node, idx=1, copy=True) + + mode = 'channel' + shape_slope = val_slope.out_shapes[0] + if shape_slope == [1] * len(shape_slope): + mode = 'all' + + if mode == "element": + self.paddle_graph.add_layer( + "paddle.zeros", + inputs={}, + outputs=[output_name + "__zeros"], + shape=shape_slope, + dtype=string(node.dtype)) + self.paddle_graph.add_layer( + "paddle.maximum", + inputs={"x": val_x.name, + "y": output_name + "__zeros"}, + outputs=[output_name + "__max"]) + self.paddle_graph.add_layer( + "paddle.minimum", + inputs={"x": val_x.name, + "y": output_name + "__zeros"}, + outputs=[output_name + "__min"]) + self.paddle_graph.add_layer( + "paddle.multiply", + inputs={"x": val_slope.name, + "y": output_name + "__min"}, + outputs=[output_name + "__mul"]) + self.paddle_graph.add_layer( + "paddle.add", + inputs={ + "x": output_name + "__max", + "y": output_name + "__mul" + }, + outputs=[output_name]) + else: + if mode == 'channel': + slope_data = _const_weight_or_none(val_slope) + if slope_data is None: + self.paddle_graph.add_layer( + "paddle.reshape", + inputs={"x": val_slope.name}, + 
outputs=[val_slope.name], + shape=[shape_slope[0]]) + self.paddle_graph.add_layer( + "paddle.nn.functional.prelu", + inputs={"x": val_x.name, + "weight": val_slope.name}, + outputs=[node.name]) + return + _rename_or_remove_weight(self.weights, val_slope.name) + if len(shape_slope) > 1: + self.weights[op_name + '._weight'] = np.reshape( + slope_data, shape_slope[0]) + num_parameters = val_x.out_shapes[0][1] + else: + num_parameters = 1 + slope_data = self.weights[val_slope.name] + _rename_or_remove_weight(self.weights, val_slope.name) + self.weights[op_name + '._weight'] = np.reshape(slope_data, [1]) + self.paddle_graph.add_layer( + "paddle.nn.PReLU", + inputs={"x": val_x.name}, + outputs=layer_outputs, + num_parameters=num_parameters) + + @print_mapping_info + def Squeeze(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + axes = node.get_attr('axes') + if axes is None: + axes_node = self.graph.get_input_node(node, idx=1, copy=True) + axes = _const_weight_or_none(axes_node, necessary=True) + # deal with scalar(0D) tensor + if len(val_x.out_shapes[0]) <= 1 and len(axes) == 1 and axes[0] == 0: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": val_x.name}, + outputs=[node.name], + dtype=string(val_x.dtype)) + else: + self.paddle_graph.add_layer( + "paddle.squeeze", + inputs={"x": val_x.name}, + outputs=[node.name], + axis=axes) + + @print_mapping_info + def Equal(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + self.paddle_graph.add_layer( + "paddle.equal", + inputs={'x': val_x.name, + 'y': val_y.name}, + outputs=[node.name]) + + @print_mapping_info + def Greater(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_y = self.graph.get_input_node(node, idx=1, copy=True) + self.paddle_graph.add_layer( + "paddle.greater_than", + inputs={'x': val_x.name, + 'y': val_y.name}, + outputs=[node.name]) + + @print_mapping_info + def Where(self, node): + condition = self.graph.get_input_node(node, idx=0, copy=True) + val_x = self.graph.get_input_node(node, idx=1, copy=True) + val_y = self.graph.get_input_node(node, idx=2, copy=True) + + self.paddle_graph.add_layer( + "paddle.where", + inputs={ + 'condition': condition.name, + 'x': val_x.name, + 'y': val_y.name + }, + outputs=[node.name]) + + @print_mapping_info + def NonZero(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + "paddle.nonzero", + inputs={"x": val_x.name}, + outputs=[val_x.name], + as_tuple=True) + self.paddle_graph.add_layer( + "paddle.concat", inputs={"x": val_x.name}, outputs=[node.name]) + + @print_mapping_info + def Identity(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + "paddle.assign", inputs={"x": val_x.name}, outputs=[node.name]) + + @print_mapping_info + def Tile(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_repeats = self.graph.get_input_node(node, idx=1, copy=True) + repeats = _const_weight_or_none(val_repeats) + + if repeats is None: + repeats = val_repeats.name + if val_repeats.dtype != 'int32': + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": repeats}, + outputs=["{}_tmp".format(repeats)], + dtype=string("int32")) + repeats = "{}_tmp".format(repeats) + + elif isinstance(repeats, int): + repeats = [repeats] + + elif type(repeats) is np.ndarray: + repeats = repeats.tolist() + + attr = { + 'expand_times': repeats, + "name": 
string(node.name),
+        }
+        self.paddle_graph.add_layer(
+            "paddle.tile",
+            inputs={"x": val_x.name},
+            outputs=[node.name],
+            repeat_times=repeats)
+
+    @print_mapping_info
+    def MaxPool(self, node):
+        op_name = name_generator("pool", self.nn_name2id)
+        output_name = node.name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+        assert node.get_attr(
+            "dilations") is None, 'only the default dilation (1) is supported'  # optional
+
+        kernel_shape = node.get_attr("kernel_shape")
+        poolnd = len(kernel_shape)
+        strides = node.get_attr("strides")
+        pad_mode = node.get_attr("pads")
+        ceil_mode = bool(node.get_attr('ceil_mode', 0))  # optional
+        pads = node.get_attr('pads', [0] * (poolnd * 2))  # optional
+        paddle_op = 'paddle.nn.MaxPool{}D'.format(poolnd)
+        assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
+
+        paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
+
+        if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
+            input_shape = val_x.out_shapes[0]
+            pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
+                                      strides[0], auto_pad)
+            pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
+                                      strides[1], auto_pad)
+            paddings = pad_h + pad_w
+
+        layer_attrs = {
+            "kernel_size": kernel_shape,
+            "stride": strides,
+            "padding": paddings,
+            "ceil_mode": ceil_mode,
+        }
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'x': val_x if isinstance(val_x, str) else val_x.name},
+            outputs=layer_outputs,
+            **layer_attrs)
+
+    @print_mapping_info
+    def GlobalMaxPool(self, node):
+        op_name = name_generator("pool", self.nn_name2id)
+        output_name = node.name
+        layer_outputs = [op_name, output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        input_shape = val_x.out_shapes[0]
+        if len(input_shape) == 4:
+            poolnd = 2
+        elif len(input_shape) == 5:
+            poolnd = 3
+        elif len(input_shape) == 3:
+            poolnd = 1
+        paddle_op = 'paddle.nn.AdaptiveMaxPool{}D'.format(poolnd)
+        assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
+        output_shape = node.out_shapes[0]
+        self.paddle_graph.add_layer(
+            paddle_op,
+            inputs={'x': val_x.name},
+            outputs=layer_outputs,
+            output_size=output_shape[2:])
+
+    @print_mapping_info
+    def Neg(self, node):
+        import paddle
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        v0, v1, v2 = paddle.__version__.split('.')
+        # paddle.neg is available since Paddle 2.2; also accept major versions above 2
+        if int(v0) > 2 or (int(v0) == 2 and int(v1) >= 2):
+            self.paddle_graph.add_layer(
+                "paddle.neg", inputs={'x': val_x.name}, outputs=[node.name])
+        else:
+            val_y = node.name + "_y"
+            dtype = np.dtype(val_x.dtype)
+            self.paddle_graph.add_layer(
+                "paddle.full",
+                inputs={},
+                outputs=[val_y],
+                dtype=string(dtype),
+                shape=[1],
+                fill_value=-1)
+            self.paddle_graph.add_layer(
+                "paddle.multiply",
+                inputs={'x': val_x.name,
+                        'y': val_y},
+                outputs=[node.name])
+
+    @print_mapping_info
+    def SpaceToDepth(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        blocksize = node.get_attr('blocksize')
+        val_x_shape = val_x.out_shapes[0]
+        b, c, h, w = val_x_shape
+        self.paddle_graph.add_layer(
+            'paddle.reshape',
+            inputs={"x": val_x.name},
+            outputs=[node.name],
+            shape=[b, c, h // blocksize, blocksize, w // blocksize, blocksize])
+        self.paddle_graph.add_layer(
+            'paddle.transpose',
+            inputs={"x": node.name},
+            outputs=[node.name],
+            perm=[0, 3, 5, 1, 2, 4])
+        self.paddle_graph.add_layer(
+            'paddle.reshape',
+            inputs={"x": node.name},
+            outputs=[node.name],
+            shape=[b, c * (blocksize**2), h // blocksize, w // blocksize])
+
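+    # A worked instance of the SpaceToDepth mapping above, with hypothetical
+    # shapes for illustration only: blocksize=2 and an input of [1, 3, 4, 4]
+    # give
+    #   [1, 3, 2, 2, 2, 2] -> [1, 2, 2, 3, 2, 2] -> [1, 12, 2, 2]
+    # i.e. NCHW -> [N, C*blocksize^2, H/blocksize, W/blocksize].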
+ @print_mapping_info + def GatherElements(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + indices = self.graph.get_input_node(node, idx=1, copy=True) + axis = node.get_attr('axis') + val_x_shape = val_x.out_shapes[0] + indices_shape = indices.out_shapes[0] + axis = axis if axis >= 0 else axis + len(val_x_shape) + if axis == 0: + axis_perm = [i for i in range(len(val_x_shape))] + data_swaped = val_x.name + index_swaped = indices.name + else: + axis_perm = [i for i in range(len(val_x_shape))] + axis_perm[axis] = 0 + axis_perm[0] = axis + data_swaped = val_x.name + "_transpose" + self.paddle_graph.add_layer( + "paddle.transpose", + inputs={'x': val_x.name}, + perm=axis_perm, + outputs=[data_swaped]) + index_swaped = indices.name + "_transpose" + self.paddle_graph.add_layer( + "paddle.transpose", + inputs={'x': indices.name}, + perm=axis_perm, + outputs=[index_swaped]) + temp = indices_shape[0] + indices_shape[0] = indices_shape[axis] + indices_shape[axis] = temp + + idx_tensors_per_axis_pre = [ + indices_shape[i] for i in range(len(indices_shape)) + ] + name_list = list() + for i in range(len(idx_tensors_per_axis_pre)): + tensor_name = val_x.name + "_meshgrid_" + str(i) + self.paddle_graph.add_layer( + kernel="paddle.linspace", + inputs={}, + outputs=[tensor_name], + start=0, + stop=idx_tensors_per_axis_pre[i] - 1, + num=idx_tensors_per_axis_pre[i]) + name_list.append(tensor_name) + + self.paddle_graph.add_layer( + "paddle.meshgrid", inputs=name_list, outputs=name_list) + + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": index_swaped}, + outputs=[index_swaped], + dtype=string("float32")) + import copy + copy_name_list = copy.copy(name_list) + copy_name_list[0] = index_swaped + new_name_list = list() + for i in range(len(copy_name_list)): + unsqueeze_name = copy_name_list[i] + "_unsqueeze" + self.paddle_graph.add_layer( + "paddle.unsqueeze", + inputs={"x": copy_name_list[i]}, + axis=-1, + outputs=[unsqueeze_name]) + new_name_list.append(unsqueeze_name) + concat_name = val_x.name + "_concated_layer" + self.paddle_graph.add_layer( + "paddle.concat", + inputs={'x': new_name_list}, + axis=-1, + outputs=[concat_name]) + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": concat_name}, + outputs=[concat_name], + dtype=string("int32")) + gather_nd_name = "gather_nd_layer" + self.paddle_graph.add_layer( + "paddle.gather_nd", + inputs={'x': data_swaped, + "index": concat_name}, + outputs=[gather_nd_name]) + + self.paddle_graph.add_layer( + "paddle.transpose", + inputs={'x': gather_nd_name}, + perm=axis_perm, + outputs=[node.name]) + + @print_mapping_info + def GlobalAveragePool(self, node): + op_name = name_generator("pool", self.nn_name2id) + output_name = node.name + layer_outputs = [op_name, output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + input_shape = val_x.out_shapes[0] + if len(input_shape) == 4: + poolnd = 2 + elif len(input_shape) == 5: + poolnd = 3 + elif len(input_shape) == 3: + poolnd = 1 + paddle_op = 'paddle.nn.AdaptiveAvgPool{}D'.format(poolnd) + assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported' + output_shape = node.out_shapes[0] + self.paddle_graph.add_layer( + paddle_op, + inputs={'x': val_x.name}, + outputs=layer_outputs, + output_size=output_shape[2:]) + + @print_mapping_info + def Conv(self, node): + output_name = node.name + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_w = self.graph.get_input_node(node, idx=1, copy=True) + + if val_w.name in self.weights.keys(): + 
op_name = name_generator("conv", self.nn_name2id) + else: + op_name = output_name + + layer_outputs = [op_name, output_name] + + has_bias = len(node.layer.input) == 3 + if has_bias: + val_b = self.graph.get_input_node(node, idx=2, copy=True) + auto_pad = node.get_attr('auto_pad', 'NOTSET') + + kernel_shape = node.get_attr('kernel_shape') + convnd = len(kernel_shape) + assert 2 <= convnd <= 3, 'only Conv2D and Conv3D is supported' + num_out_channels = val_w.out_shapes[0][0] + num_in_channels = val_w.out_shapes[0][1] + paddle_op = 'paddle.nn.Conv{}D'.format(convnd) + + num_groups = node.get_attr('group', 1) + strides = node.get_attr('strides', [1] * convnd) + dilations = node.get_attr('dilations', [1] * convnd) + pads = node.get_attr('pads', [0] * (convnd * 2)) + + input_shape = val_x.out_shapes[0] + paddings = np.array(pads).reshape((2, -1)).transpose().astype("int32") + paddings = paddings.flatten().tolist() + + if auto_pad in ["SAME_UPPER", "SAME_LOWER"]: + # Warning: SAME_UPPER and SAME_LOWER does not yet support dynamic shapes + if input_shape[2] == -1 or input_shape[3] == -1: + _logger.warning( + 'SAME_UPPER and SAME_LOWER does not yet support dynamic shapes, the conversion result may have a diff!!!' + ) + pad_h = _get_same_padding(input_shape[2], kernel_shape[0], + strides[0], auto_pad) + pad_w = _get_same_padding(input_shape[3], kernel_shape[1], + strides[1], auto_pad) + paddings = pad_h + pad_w + + layer_inputs = {'x': val_x if isinstance(val_x, str) else val_x.name} + if val_w.name not in self.weights.keys(): + layer_attrs = { + "stride": strides, + "padding": paddings, + "dilation": dilations, + "groups": num_groups, + } + layer_inputs['weight'] = val_w.name + if has_bias: + layer_inputs['bias'] = val_b.name + + paddle_op = 'paddle.nn.functional.conv{}d'.format(convnd) + self.paddle_graph.add_layer( + paddle_op, + inputs=layer_inputs, + outputs=[node.name], + **layer_attrs) + return + + layer_attrs = { + "in_channels": num_in_channels * num_groups, + "out_channels": num_out_channels, + "kernel_size": kernel_shape, + "stride": strides, + "padding": paddings, + "dilation": dilations, + "groups": num_groups, + } + remove_weight = True if val_w.name in self.done_weight_list else False + if remove_weight: + self.done_weight_list.append(val_w.name) + _rename_or_remove_weight( + self.weights, + val_w.name, + op_name + '.weight', + remove_weight, + rename_mapper=self.rename_mapper) + if has_bias: + remove_bias = True if val_b.name in self.done_weight_list else False + if remove_bias: + self.done_weight_list.append(val_b.name) + _rename_or_remove_weight( + self.weights, + val_b.name, + op_name + '.bias', + remove_bias, + rename_mapper=self.rename_mapper) + else: + layer_attrs["bias_attr"] = False + if reduce(lambda x, y: x * y, + input_shape) in [1, -1] and 1 not in input_shape: + input_shape[1] = num_in_channels * num_groups + input_shape[0] = 0 + input_shape[2] = 0 + self.paddle_graph.add_layer( + "paddle.reshape", + inputs=layer_inputs, + outputs=[layer_inputs["x"]], + shape=input_shape) + self.paddle_graph.add_layer( + paddle_op, + inputs=layer_inputs, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def ConvTranspose(self, node): + output_name = node.name + val_x = self.graph.get_input_node(node, idx=0, copy=True) + val_w = self.graph.get_input_node(node, idx=1, copy=True) + + if val_w.name in self.weights.keys(): + op_name = name_generator("conv_trans", self.nn_name2id) + else: + op_name = output_name + + layer_outputs = [op_name, output_name] + + val_b = None + if 
+        if len(node.layer.input) > 2:
+            val_b = self.graph.get_input_node(node, idx=2, copy=True)
+        auto_pad = node.get_attr('auto_pad', 'NOTSET')
+        out_padding = node.get_attr('output_padding', [0, 0])
+        kernel_shape = node.get_attr('kernel_shape')
+        assert kernel_shape, 'kernel_shape not inferred'
+        convnd = len(kernel_shape)
+        assert 2 <= convnd <= 3, 'only Conv2DTranspose and Conv3DTranspose are supported'
+        num_in_channels = val_w.out_shapes[0][0]
+        num_out_channels = val_w.out_shapes[0][1]
+        paddle_op = 'paddle.nn.Conv{}DTranspose'.format(convnd)
+
+        num_groups = node.get_attr('group', 1)
+        strides = node.get_attr('strides', [1] * convnd)
+        dilations = node.get_attr('dilations', [1] * convnd)
+        output_size = node.get_attr('output_shape', [])
+        pads = node.get_attr('pads', [0] * (convnd * 2))
+
+        paddings = np.array(pads).reshape((2, -1)).transpose().astype("int32")
+        paddings = paddings.flatten().tolist()
+
+        if len(output_size) != 0:
+            paddings = [0] * 4
+            total_paddings = list()
+            total_paddings.append((val_x.out_shapes[0][2] - 1) * strides[
+                0] + dilations[0] * (kernel_shape[0] - 1) + 1 + out_padding[0] -
+                                  output_size[0])
+            total_paddings.append((val_x.out_shapes[0][3] - 1) * strides[
+                1] + dilations[1] * (kernel_shape[1] - 1) + 1 + out_padding[1] -
+                                  output_size[1])
+            # split each dimension's own total padding into begin/end parts
+            if auto_pad == "SAME_UPPER":
+                for i in range(len(total_paddings)):
+                    paddings[2 * i] = total_paddings[i] - total_paddings[i] // 2
+                    paddings[2 * i + 1] = total_paddings[i] // 2
+            else:
+                for i in range(len(total_paddings)):
+                    paddings[2 * i] = total_paddings[i] // 2
+                    paddings[2 * i + 1] = total_paddings[i] - total_paddings[
+                        i] // 2
+        else:
+            output_size = [0, 0]
+
+            output_size[0] = (
+                val_x.out_shapes[0][2] - 1
+            ) * strides[0] - 2 * paddings[0] + dilations[0] * (
+                kernel_shape[0] - 1) + 1 + out_padding[0]
+            output_size[1] = (
+                val_x.out_shapes[0][3] - 1
+            ) * strides[1] - 2 * paddings[1] + dilations[1] * (
+                kernel_shape[1] - 1) + 1 + out_padding[1]
+
+        # Conv2DTranspose has no output_size argument at construction time;
+        # it can only be passed to the layer's forward call.
+        inputs_dict = {'x': val_x if isinstance(val_x, str) else val_x.name}
+        if val_w.name not in self.weights.keys():
+            layer_attrs = {
+                "stride": strides,
+                "dilation": dilations,
+                "padding": paddings,
+                "groups": num_groups,
+                "output_padding": out_padding
+            }
+            paddle_op = 'paddle.nn.functional.conv{}d_transpose'.format(convnd)
+
+            inputs_dict['weight'] = val_w.name
+            if len(node.layer.input) > 2:
+                inputs_dict['bias'] = val_b.name
+
+            self.paddle_graph.add_layer(
+                paddle_op,
+                inputs=inputs_dict,
+                outputs=[node.name],
+                **layer_attrs)
+            return
+
+        layer_attrs = {
+            "in_channels": num_in_channels,
+            "out_channels": num_out_channels * num_groups,
+            "kernel_size": kernel_shape,
+            "stride": strides,
+            "dilation": dilations,
+            "padding": paddings,
+            "groups": num_groups,
+            "output_padding": out_padding
+        }
+
+        _rename_or_remove_weight(
+            self.weights,
+            val_w.name,
+            op_name + '.weight',
+            rename_mapper=self.rename_mapper)
+        if val_b is not None:
+            _rename_or_remove_weight(
+                self.weights,
+                val_b.name,
+                op_name + '.bias',
+                rename_mapper=self.rename_mapper)
+        else:
+            layer_attrs["bias_attr"] = False
+        self.paddle_graph.add_layer(
+            kernel=paddle_op,
+            inputs=inputs_dict,
+            outputs=layer_outputs,
+            **layer_attrs)
+
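+    # A worked instance of the output_size formula used above, with
+    # hypothetical values for illustration only: input H=4, stride=2,
+    # padding=1, dilation=1, kernel=3, output_padding=0 gives
+    #   (4 - 1) * 2 - 2 * 1 + 1 * (3 - 1) + 1 + 0 = 7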
+ inputs={"x": val_x.name}, + outputs=[node.name], + **layer_attrs) + + @print_mapping_info + def Size(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + "paddle.shape", inputs={"input": val_x.name}, outputs=[node.name]) + self.paddle_graph.add_layer( + 'paddle.cast', + inputs={"x": node.name}, + outputs=[node.name], + dtype=string('int64')) + self.paddle_graph.add_layer( + "paddle.prod", inputs={"x": node.name}, outputs=[node.name]) + + @print_mapping_info + def Sign(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + if node.dtype not in ["float16", "float32", "float64"]: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": val_x.name}, + outputs=[val_x.name], + dtype=string("float32")) + self.paddle_graph.add_layer( + "paddle.sign", inputs={"x": val_x.name}, outputs=[node.name]) + if node.dtype not in ["float16", "float32", "float64"]: + self.paddle_graph.add_layer( + "paddle.cast", + inputs={"x": node.name}, + outputs=[node.name], + dtype=string(node.dtype)) + + @print_mapping_info + def OneHot(self, node): + nn_op_name = name_generator("onehot", self.nn_name2id) + output_name = node.name + layer_outputs = [nn_op_name, output_name] + indices = self.graph.get_input_node(node, idx=0, copy=True) + depth = self.graph.get_input_node(node, idx=1, copy=True) + values = self.graph.get_input_node(node, idx=2, copy=True) + axis = node.get_attr('axis', -1) + self.paddle_graph.add_layer( + "custom_layer:OneHot", + inputs={ + "indices": indices.name, + "depth": depth.name, + "values": values.name + }, + outputs=layer_outputs, + axis=axis) + + @print_mapping_info + def Reciprocal(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + self.paddle_graph.add_layer( + "paddle.reciprocal", inputs={"x": val_x.name}, outputs=[node.name]) + + @print_mapping_info + def LSTM(self, node): + x = self.graph.get_input_node(node, idx=0, copy=True) + input_weight = self.graph.get_input_node(node, idx=1, copy=True) + hidden_weight = self.graph.get_input_node(node, idx=2, copy=True) + + input_nums = len(node.layer.input) + exist_input_nums = 3 + have_bias = False + if input_nums > 3 and node.layer.input[3] != '': + bias = self.graph.get_input_node( + node, idx=exist_input_nums, copy=True) + have_bias = True + exist_input_nums += 1 + if input_nums > 4 and node.layer.input[4] != '': + sequence_lens = self.graph.get_input_node( + node, idx=exist_input_nums, copy=True) + exist_input_nums += 1 + if input_nums > 5 and node.layer.input[5] != '': + init_h = self.graph.get_input_node( + node, idx=exist_input_nums, copy=True) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": init_h.name}, + outputs=[init_h.name], + shape=init_h.out_shapes[0]) + exist_input_nums += 1 + if input_nums > 6 and node.layer.input[6] != '': + init_c = self.graph.get_input_node( + node, idx=exist_input_nums, copy=True) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": init_c.name}, + outputs=[init_c.name], + shape=init_c.out_shapes[0]) + + input_weight_np = _const_weight_or_none(input_weight) + _rename_or_remove_weight(self.weights, input_weight.name) + hidden_size = node.get_attr('hidden_size', input_weight_np.shape[1] / 4) + input_size = input_weight_np.shape[2] + hidden_weight_np = _const_weight_or_none(hidden_weight) + _rename_or_remove_weight(self.weights, hidden_weight.name) + bias_np = _const_weight_or_none(bias) + _rename_or_remove_weight(self.weights, bias.name) + input_bias_np = bias_np[:, :4 * 
+        input_bias_np = bias_np[:, :4 * hidden_size]
+        hidden_bias_np = bias_np[:, 4 * hidden_size:]
+
+        # Parameter order in paddle.nn.LSTM:
+        # 1. gate order in paddle is: input, forget, cell, output.
+        # 2. gate order in onnx is: input, output, forget, cell.
+
+        def reform_weights(w, n, intervals):
+            slices = [w[:, x * n:y * n] for x, y in intervals]
+            return np.concatenate(slices, axis=1)
+
+        def transform_weight_with_bias(weights, n, intervals):
+            return [reform_weights(w, n, intervals) for w in weights]
+
+        reform_permutation = [(0, 1), (2, 4), (1, 2)]
+
+        weights = transform_weight_with_bias(
+            [input_weight_np, hidden_weight_np, input_bias_np, hidden_bias_np],
+            hidden_size, reform_permutation)
+
+        op_name = name_generator("lstm", self.nn_name2id)
+        y_out = node.output(0)
+        yh_out = node.output(1)
+        yc_out = node.output(2)
+        direction = node.get_attr('direction', 'forward')
+
+        def generate_paddle_param_names(op_name, suffix=''):
+            param_names = []
+            param_names.extend(['{}.weight_ih_l0{}', '{}.weight_hh_l0{}'])
+            if have_bias:
+                param_names.append('{}.bias_ih_l0{}')
+                param_names.append('{}.bias_hh_l0{}')
+            param_names = [x.format(op_name, suffix) for x in param_names]
+            return param_names
+
+        def assign_params(op_name, weights, weight_idx=0, suffix=''):
+            param_names = generate_paddle_param_names(op_name, suffix)
+            for param_name, weight in zip(param_names, weights):
+                self.weights[param_name] = weight[weight_idx]
+
+        if direction == 'backward':
+            raise Exception(
+                "LSTM supports 'forward' and 'bidirectional' directions, but got '{}'.".
+                format(direction))
+        else:
+            assign_params(op_name, weights)
+            if direction == 'bidirectional':
+                assign_params(op_name, weights, 1, '_reverse')
+
+        # assumes the model provides initial states (optional inputs 5 and 6)
+        self.paddle_graph.add_layer(
+            'paddle.nn.LSTM',
+            inputs={
+                'input': x.name,
+                'initial_states': (init_h.name, init_c.name)
+            },
+            outputs=[op_name, y_out, yh_out, yc_out],
+            input_size=input_size,
+            hidden_size=hidden_size,
+            num_layers=1,
+            direction=string(direction),
+            time_major=True)
+
+        self.paddle_graph.add_layer(
+            'paddle.reshape',
+            inputs={"x": y_out},
+            outputs=[y_out],
+            shape=[0, 0, -1, hidden_size])
+        self.paddle_graph.add_layer(
+            'paddle.transpose',
+            inputs={"x": y_out},
+            outputs=[y_out],
+            perm=[0, 2, 1, 3])
+
+    @print_mapping_info
+    def TopK(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_k = self.graph.get_input_node(node, idx=1, copy=True)
+        layer_attrs = dict()
+        layer_attrs["axis"] = node.get_attr('axis', -1)
+        layer_attrs["largest"] = True if node.get_attr('largest',
+                                                       1) == 1 else False
+        layer_attrs["sorted"] = True if node.get_attr('sorted',
+                                                      1) == 1 else False
+        k = _const_weight_or_none(val_k)
+        if isinstance(k, (list, tuple, np.ndarray)):
+            k = k[0]
+        # if k is known at conversion time, pass it as an attribute;
+        # otherwise pass it as an input tensor
+        if k is not None:
+            layer_attrs["k"] = k
+            self.paddle_graph.add_layer(
+                "paddle.topk",
+                inputs={"x": val_x.name},
+                outputs=[
+                    "{}_p{}".format(node.layer_name, 0),
+                    "{}_p{}".format(node.layer_name, 1)
+                ],
+                **layer_attrs)
+        else:
+            if val_k.dtype != "int32":
+                self.paddle_graph.add_layer(
+                    "paddle.cast",
+                    inputs={"x": val_k.name},
+                    outputs=[val_k.name],
+                    dtype=string('int32'))
+            self.paddle_graph.add_layer(
+                "paddle.topk",
+                inputs={"x": val_x.name,
+                        "k": val_k.name},
+                outputs=[
+                    "{}_p{}".format(node.layer_name, 0),
+                    "{}_p{}".format(node.layer_name, 1)
+                ],
+                **layer_attrs)
+
+    @print_mapping_info
+    def LRN(self, node):
+        op_name = name_generator("lrn", self.nn_name2id)
+        output_name =
node.name + layer_outputs = [op_name, output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + alpha = node.get_attr('alpha', 0.0001) + beta = node.get_attr('beta', 0.75) + bias = node.get_attr('bias', 1.0) + size = node.get_attr('size') + layer_attrs = {'size': size, 'alpha': alpha, 'beta': beta, 'k': bias} + self.paddle_graph.add_layer( + "paddle.nn.LocalResponseNorm", + inputs={"x": val_x.name}, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def DepthToSpace(self, node): + val_x = self.graph.get_input_node(node, idx=0, copy=True) + blocksize = node.get_attr('blocksize') + mode = node.get_attr('mode', "DCR") + val_x_shape = val_x.out_shapes[0] + b, c, h, w = val_x_shape + if mode == "DCR": + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": val_x.name}, + outputs=[node.name], + shape=[b, blocksize, blocksize, c // (blocksize**2), h, w]) + self.paddle_graph.add_layer( + 'paddle.transpose', + inputs={"x": node.name}, + outputs=[node.name], + perm=[0, 3, 4, 1, 5, 2]) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": node.name}, + outputs=[node.name], + shape=[b, c // (blocksize**2), h * blocksize, w * blocksize]) + else: + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": val_x.name}, + outputs=[node.name], + shape=[b, c // (blocksize**2), blocksize, blocksize, h, w]) + self.paddle_graph.add_layer( + 'paddle.transpose', + inputs={"x": node.name}, + outputs=[node.name], + perm=[0, 1, 4, 2, 5, 3]) + self.paddle_graph.add_layer( + 'paddle.reshape', + inputs={"x": node.name}, + outputs=[node.name], + shape=[b, c // (blocksize**2), h * blocksize, w * blocksize]) + + @print_mapping_info + def NonMaxSuppression(self, node): + nn_op_name = name_generator("nms", self.nn_name2id) + output_name = node.name + layer_outputs = [nn_op_name, output_name] + boxes = self.graph.get_input_node(node, idx=0, copy=True) + scores = self.graph.get_input_node(node, idx=1, copy=True) + inputs_len = len(node.layer.input) + layer_attrs = dict() + layer_attrs["keep_top_k"] = -1 + layer_attrs["nms_threshold"] = 0.0 + layer_attrs["score_threshold"] = 0.0 + if inputs_len > 2: + max_output_boxes_per_class = self.graph.get_input_node( + node, idx=2, copy=True) + max_output_boxes_per_class = _const_weight_or_none( + max_output_boxes_per_class) + if len(scores.out_shapes[0]) != 0: + num_classes = scores.out_shapes[0][1] + else: + num_classes = 1 + if max_output_boxes_per_class is not None: + max_output_boxes_per_class = max_output_boxes_per_class.tolist() + if isinstance(max_output_boxes_per_class, int): + layer_attrs[ + "keep_top_k"] = max_output_boxes_per_class * num_classes + else: + layer_attrs["keep_top_k"] = max_output_boxes_per_class[ + 0] * num_classes + if inputs_len > 3: + iou_threshold = self.graph.get_input_node(node, idx=3, copy=True) + layer_attrs["nms_threshold"] = _const_weight_or_none( + iou_threshold).tolist()[0] + if inputs_len > 4: + score_threshold = self.graph.get_input_node(node, idx=4, copy=True) + layer_attrs["score_threshold"] = _const_weight_or_none( + score_threshold).tolist()[0] + self.paddle_graph.add_layer( + "custom_layer:NMS", + inputs={"bboxes": boxes.name, + "scores": scores.name}, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def ReduceL1(self, node): + output_name = node.name + layer_outputs = [output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + axes = node.get_attr('axes') + keepdims = False if node.get_attr('keepdims') == 0 else True + layer_attrs = {'p': 1, 
'axis': axes, 'keepdim': keepdims} + self.paddle_graph.add_layer( + "paddle.norm", + inputs={"x": val_x.name}, + outputs=layer_outputs, + **layer_attrs) + + @print_mapping_info + def ReduceL2(self, node): + output_name = node.name + layer_outputs = [output_name] + val_x = self.graph.get_input_node(node, idx=0, copy=True) + axes = node.get_attr('axes') + keepdims = False if node.get_attr('keepdims') == 0 else True + layer_attrs = {'p': 2, 'axis': axes, 'keepdim': keepdims} + self.paddle_graph.add_layer( + "paddle.norm", + inputs={"x": val_x.name}, + outputs=layer_outputs, + **layer_attrs) From d22c3eab52896ceaa19486848326970c0e2efd06 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:05:28 +0800 Subject: [PATCH 008/101] Update onnx_decoder.py --- x2paddle/decoder/onnx_decoder.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py index 045530010..4d88592d1 100755 --- a/x2paddle/decoder/onnx_decoder.py +++ b/x2paddle/decoder/onnx_decoder.py @@ -336,14 +336,7 @@ def build_connection(self, layer_name, node): break else: first_i = node.inputs.index(nd.name) - # deal with Multiple outputs correspond to one node - if self.node_map[nd.name].outputs.count( - layer_name) > 1: - new_child_name = "{}/{}".format(nd.name, - idx) - node.which_child[new_child_name] = idx - else: - node.which_child[nd.name] = idx + node.which_child[new_child_name] = idx self.node_map[nd.name].index = 0 break if flag == 1: From 3beef68d6b9be918714c5e9e3ef9c184de193076 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:06:29 +0800 Subject: [PATCH 009/101] Update opset_legacy.py --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index f1c483854..fd6235258 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -213,14 +213,7 @@ def directly_map(self, node, *args, **kwargs): attrs_name_map_dict = op_info[1] for onnx_attr_name, pd_attr_name in attrs_name_map_dict.items(): if onnx_attr_name in onnx_attrs: - # trans 1 to True, 0 to False - if onnx_attr_name == "keepdims": - if onnx_attrs[onnx_attr_name] == 1: - layer_attrs[pd_attr_name] = True - else: - layer_attrs[pd_attr_name] = False - else: - layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name] + layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name] else: layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name] if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op: From 43f1fa8a65336d9d4235629e48b913041377620b Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:08:53 +0800 Subject: [PATCH 010/101] Update opset_legacy.py --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index fd6235258..e5044e203 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -1662,7 +1662,8 @@ def Flatten(self, node): def Gemm(self, node): val_a = self.graph.get_input_node(node, idx=0, copy=True) val_b = 
self.graph.get_input_node(node, idx=1, copy=True) - + val_c = self.graph.get_input_node(node, idx=2, copy=True) + alpha = node.get_attr('alpha', 1.) # optional beta = node.get_attr('beta', 1.) # optional trans_a = bool(node.get_attr('transA', 0)) # optional @@ -1678,19 +1679,11 @@ def Gemm(self, node): inputs=matmul_inputs, outputs=[val_mm], **attr_matmul) - if beta != 0: - self.paddle_graph.add_layer( - "paddle.scale", - inputs={"x": val_mm}, - outputs=[val_mm], - scale=alpha) - else: - self.paddle_graph.add_layer( + + self.paddle_graph.add_layer( "paddle.scale", inputs={"x": val_mm}, outputs=[node.name]) if beta != 0: - # when beta is equal to 0, there is no val_c - val_c = self.graph.get_input_node(node, idx=2, copy=True) if beta == 1.: add_inputs = {"x": val_mm, "y": val_c.name} self.paddle_graph.add_layer( From ca1fb1ffb74a9ab99748f99d090ac4e245ad0f7b Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:11:27 +0800 Subject: [PATCH 011/101] Update onnx_decoder.py --- x2paddle/decoder/onnx_decoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py index 4d88592d1..57e5cbe38 100755 --- a/x2paddle/decoder/onnx_decoder.py +++ b/x2paddle/decoder/onnx_decoder.py @@ -336,7 +336,7 @@ def build_connection(self, layer_name, node): break else: first_i = node.inputs.index(nd.name) - node.which_child[new_child_name] = idx + node.which_child[nd.name] = idx self.node_map[nd.name].index = 0 break if flag == 1: From 1780f92fb774dab81d87011b9d2f99abcf31fb3a Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:12:32 +0800 Subject: [PATCH 012/101] Update opset_legacy.py --- .../op_mapper/onnx2paddle/opset_legacy.py | 37 ------------------- 1 file changed, 37 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index e5044e203..d6a484412 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -350,43 +350,6 @@ def _interpolate(self, node): # which is the same as the rank of input. 
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[ 2:] - if len(val_x_shape) == 3: - val_scales = self.graph.get_input_node( - node, idx=2, copy=True) - val_scales_values = _const_weight_or_none(val_scales) - - attrs = { - "align_corners": False, - "mode": string(node.get_attr('mode', 'nearest')), - "scale_factor": - self.weights[val_scales.name].tolist()[1:] - } - mode = node.get_attr('mode', 'nearest') - if mode == "linear": - attrs["mode"] = string("bilinear") - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'pytorch_half_pixel': - attrs["align_corners"] = False - attrs["align_mode"] = 0 - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'align_corners': - attrs["align_corners"] = True - self.paddle_graph.add_layer( - 'paddle.unsqueeze', - inputs={"x": val_x.name}, - outputs=[val_x.name], - axis=0) - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.interpolate", - inputs=inputs, - outputs=[node.name], - **attrs) - self.paddle_graph.add_layer( - 'paddle.squeeze', - inputs={"x": node.name}, - outputs=[node.name], - axis=0) - return elif len(node.layer.input) == 4: # opset 11 val_sizes = self.graph.get_input_node(node, idx=3, copy=True) From 6327cea9682508e6cc00b9e915e7df82ea6a102e Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:15:02 +0800 Subject: [PATCH 013/101] Update opset_legacy.py --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index d6a484412..03948885d 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -1642,9 +1642,8 @@ def Gemm(self, node): inputs=matmul_inputs, outputs=[val_mm], **attr_matmul) - self.paddle_graph.add_layer( - "paddle.scale", inputs={"x": val_mm}, outputs=[node.name]) + "paddle.scale", inputs={"x": val_mm}, outputs=[val_mm],scale=alpha) if beta != 0: if beta == 1.: From 8f70001f41630777d4b7ee6d5fa631b17ae7be5d Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:16:35 +0800 Subject: [PATCH 014/101] Update opset_legacy.py --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index 03948885d..055749bc4 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -1643,7 +1643,7 @@ def Gemm(self, node): outputs=[val_mm], **attr_matmul) self.paddle_graph.add_layer( - "paddle.scale", inputs={"x": val_mm}, outputs=[val_mm],scale=alpha) + "paddle.scale", inputs={"x": val_mm}, outputs=[val_mm], scale=alpha) if beta != 0: if beta == 1.: From 0a16de117075a5dc6d3938656444fe10b6273456 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:21:51 +0800 Subject: [PATCH 015/101] Update test_auto_scan_one_input_ops_float32.py --- .../test_auto_scan_one_input_ops_float32.py | 2853 +---------------- 1 file changed, 62 insertions(+), 2791 deletions(-) diff --git a/tests/onnx/test_auto_scan_one_input_ops_float32.py b/tests/onnx/test_auto_scan_one_input_ops_float32.py index 2c5a8860d..97e1f697a 100644 --- a/tests/onnx/test_auto_scan_one_input_ops_float32.py +++ 
b/tests/onnx/test_auto_scan_one_input_ops_float32.py @@ -12,2803 +12,74 @@ # See the License for the specific language governing permissions and # limitations under the License. -from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode -from x2paddle.core.graph import GraphNode -from x2paddle.core.util import * -from functools import reduce -import numpy as np +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st import onnx -import onnx.numpy_helper as numpy_helper -from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE -import logging as _logging -from collections import OrderedDict -import math -import os -import copy -import sys -import shutil - -_logger = _logging.getLogger() - - -def _const_weight_or_none(node, necessary=False): - if 'Constant' in node.layer_type: - return node.value - if isinstance(node, ONNXGraphDataNode): - return node.weight - if necessary: - assert '{} should be an initializer or Constant operator.'.format( - node.name) - return None - - -def _rename_or_remove_weight(weights, - origin_name, - target_name=None, - is_remove=True, - rename_mapper=None): - ''' - Rename parameters by Paddle's naming rule of parameters. - - Args: - weights(dict[String:np.ndarray]): Dict stored paramters, the key in weights is name of parameter. - origin_name(String): Name of parameter to rename or remove. - target_name(String, optional): if target_name is not None, add new key-value pair - {target_name:weights[origin_name]} to weights, and target_name must follow paddle's - naming rule of parameters. Default: None. - is_remove: if is_remove is True, remove origin key-value pair. Default: True. - rename_mapper: Solved the same data is used for multiple OPs, key is old_name, value is new_name. - Returns: - None - ''' - if rename_mapper is not None and origin_name in rename_mapper: - origin_name = rename_mapper[origin_name] - is_remove = False - if origin_name not in weights: - raise KeyError('{} not a key in {}'.format(origin_name, weights.keys())) - if is_remove: - # remove weight - data = weights.pop(origin_name) - else: - data = weights[origin_name] - if target_name is not None: - # rename weight - weights[target_name] = data - rename_mapper[origin_name] = target_name - - -def _is_static_shape(shape): - negtive_dims = 0 - error_dims = 0 - for dim in shape: - if dim < 0: - negtive_dims += 1 - if dim < -1: - error_dims += 1 - if negtive_dims > 1: - return False - if error_dims > 0: - return False - return True - - -def _get_same_padding(in_size, kernel_size, stride, autopad): - new_size = int(math.ceil(in_size * 1.0 / stride)) - pad_size = (new_size - 1) * stride + kernel_size - in_size - pad0 = int(pad_size / 2) - pad1 = pad_size - pad0 - if autopad == "SAME_UPPER": - return [pad0, pad1] - if autopad == "SAME_LOWER": - return [pad1, pad0] - - -def print_mapping_info(func): - def run_mapping(*args, **kwargs): - node = args[1] - try: - res = func(*args, **kwargs) - except: - raise Exception("convert failed node:{}, op_type is {}".format( - node.name[9:], node.layer_type)) - else: - return res - - return run_mapping - - -class OpSet(): - def __init__(self, decoder, paddle_graph): - super(OpSet, self).__init__() - self.graph = decoder.graph - self.paddle_graph = paddle_graph - self.inputs_info = dict() - self.weights = dict() - self.nn_name2id = dict() - self.done_weight_list = list() - # solve for same data is used as an argument to multiple OPs. 
- # PR link(wangjunjie06): https://github.com/PaddlePaddle/X2Paddle/pull/728 - self.rename_mapper = dict() - self.elementwise_ops = { - 'Add': 'paddle.add', - 'Div': 'paddle.divide', - 'Sub': 'paddle.subtract', - 'Mul': 'paddle.multiply', - 'Pow': 'paddle.pow', - 'Less': 'paddle.less_than', - 'LessOrEqual': 'paddle.less_equal', - } - - self.directly_map_ops = { - 'Ceil': ['paddle.ceil'], - # reduce function - 'ReduceMean': [ - 'paddle.mean', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdims=True) - ], - 'ReduceMin': [ - 'paddle.min', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdim=True) - ], - 'ReduceMax': [ - 'paddle.max', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdim=True) - ], - 'ReduceProd': [ - 'paddle.prod', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdim=True) - ], - # active function - 'Relu': ['paddle.nn.ReLU'], - 'LeakyRelu': [ - 'paddle.nn.LeakyReLU', dict(alpha='negative_slope'), - dict(negative_slope=.01) - ], - 'Elu': - ['paddle.nn.functional.elu', dict(alpha='alpha'), dict(alpha=1.)], - 'ThresholdedRelu': [ - 'paddle.nn.functional.thresholded_relu', - dict(alpha='threshold'), dict(alpha=1.) - ], - 'Tanh': ['paddle.nn.Tanh'], - 'Sigmoid': ['paddle.nn.Sigmoid'], - 'Softsign': ['paddle.nn.Softsign'], - 'Softplus': [ - 'paddle.nn.Softplus', dict(threshold='threshold'), - dict(threshold=float(sys.maxsize)) - ], - 'Exp': ['paddle.exp'], - 'Log': ['paddle.log'], - 'LogSoftmax': [ - 'paddle.nn.functional.log_softmax', dict(axis='axis'), - dict(axis=1) +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + +min_opset_version_map = { + "IsInf": 10, + "Elu": 7, + "IsNaN": 9, + "Log": 7, + "Cosh": 9, + "Cos": 7, + "Atan": 7, + "Asinh": 9, + "Asin": 7, + "Acosh": 9, + "Acos": 7, + "Exp": 7, + "Floor": 7 +} + + +class TestIsinfConcert(OPConvertAutoScanTest): + """ + ONNX op: elementwise ops + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + + config = { + "op_names": [ + "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", + "Cosh", "Exp", "Floor" ], - 'Softmax': ['paddle.nn.Softmax', dict(axis='axis'), dict(axis=1)], - 'Sqrt': ['paddle.sqrt'], - 'Floor': ['paddle.floor'], - 'Abs': ['paddle.abs'], - 'Erf': ['paddle.erf'], - 'Sin': ['paddle.sin'], - 'Cos': ['paddle.cos'], - 'Atan': ['paddle.atan'], - 'Acos': ['paddle.acos'], - 'Asin': ['paddle.asin'], - 'IsInf':['paddle.isinf'], - 'IsNaN':['paddle.isnan'], - 'Cosh': ['paddle.cosh'], - 'Acosh': ['paddle.acosh'], - 'Asinh': ['paddle.asinh'], - } - - @print_mapping_info - def directly_map(self, node, *args, **kwargs): - inputs = node.layer.input - assert len(inputs) == 1, 'directly_map error with multi inputs' - input = self.graph.get_input_node(node, idx=0, copy=True) - onnx_attrs = node.attr_map - if '' in onnx_attrs: - onnx_attrs.pop('') - if '_' in onnx_attrs: - onnx_attrs.pop('_') - op_info = self.directly_map_ops[node.layer_type] - paddle_op = op_info[0] - layer_attrs = dict() - if len(op_info) > 1: - attrs_name_map_dict = op_info[1] - for onnx_attr_name, pd_attr_name in attrs_name_map_dict.items(): - if onnx_attr_name in onnx_attrs: - # trans 1 to True, 0 to False - if onnx_attr_name == "keepdims": - if onnx_attrs[onnx_attr_name] == 1: - layer_attrs[pd_attr_name] = True - else: - layer_attrs[pd_attr_name] = False 
- else: - layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name] - else: - layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name] - if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op: - op_name = paddle_op[10:].lower() - op_name = name_generator(op_name, self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - - self.paddle_graph.add_layer( - kernel=paddle_op, - inputs={"x": input.name}, - outputs=layer_outputs, - **layer_attrs) - else: - self.paddle_graph.add_layer( - kernel=paddle_op, - inputs={"x": input.name}, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def elementwise_map(self, node): - op_type = self.elementwise_ops[node.layer_type] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - inputs_dict = {'x': val_x.name, 'y': val_y.name} - self.paddle_graph.add_layer( - op_type, inputs=inputs_dict, outputs=[node.name]) - - @print_mapping_info - def place_holder(self, node): - shape = node.out_shapes[0] - for i, dim_shape in enumerate(shape): - if dim_shape == 0 and i == 0: - shape[i] = 1 - if dim_shape == 0 and i != 0: - assert 'shape of input is not assigned' - self.paddle_graph.add_layer( - kernel="paddle.to_tensor", - inputs={}, - outputs=[node.name], - data=node.name) - self.inputs_info[node.name] = [shape, node.dtype] - - @print_mapping_info - def create_parameter(self, node, parameter=None): - if parameter is not None: - node = parameter - dtype = node.dtype - shape = node.out_shapes[0] - - if hasattr(node.weight, "shape") and len(node.weight.shape) == 0: - if node.weight == float('inf') or node.weight == float('-inf'): - node.weight = string(node.weight) - self.paddle_graph.add_layer( - "paddle.full", - inputs={}, - outputs=[node.name], - dtype=string(dtype), - shape=[1], - fill_value=node.weight) - else: - self.weights[node.name] = node.weight - self.paddle_graph.add_layer( - "self.create_parameter", - inputs={}, - outputs=[node.name], - shape=shape, - attr=string(node.name), - dtype=string(dtype), - default_initializer="paddle.nn.initializer.Constant(value=0.0)") - - def _pad_if_asymmetric(self, node, pads, val_name): # pads: SSEE - assert len(pads) & 1 == 0 - symmetric = True - ndims = len(pads) // 2 - for idx_dim in range(ndims): - if pads[idx_dim] != pads[ndims + idx_dim]: - symmetric = False - break - if symmetric: - return pads[:ndims], val_name - val_padded = self.Pad(node, op_independent=False) - return [0] * ndims, val_padded - - def _interpolate(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - inputs = {'x': val_x.name} - attrs = dict() - val_x_shape = val_x.out_shapes[0] - if node.layer_type == 'Resize': - if len(node.layer.input) == 2: - # opset 10 - val_scales = self.graph.get_input_node(node, idx=1, copy=True) - # TODO(syf): paddle.nn.functional.interpolate will support the length - # which is the same as the rank of input. 
- scale_values = _const_weight_or_none(val_scales) - if scale_values is not None: - attrs['scale_factor'] = self.weights[ - val_scales.name].tolist()[2:] - else: - var_nc, var_hw = val_scales.name + '_nc', val_scales.name + '_hw' - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_scales.name}, - outputs=[var_nc, var_hw], - num_or_sections=[2, 2], - axis=0) - inputs['scale_factor'] = var_hw - mode = node.get_attr('mode', 'nearest') - attrs.update({ - "align_corners": False, - "mode": string(mode), - "align_mode": 1 - }) - if mode == "linear" and len(val_x_shape) == 4: - attrs["mode"] = string("bilinear") - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.interpolate", - inputs=inputs, - outputs=[node.name], - **attrs) - return - elif len(node.layer.input) == 3: - # opset 11 - try: - #to avoid the error causeed by NULL value of resize inputs. - val_scales = self.graph.get_input_node( - node, idx=2, copy=True) - except: - val_scales = self.graph.get_input_node( - node, idx=1, copy=True) - # TODO(syf): paddle.nn.functional.interpolate will support the length - # which is the same as the rank of input. - attrs['scale_factor'] = self.weights[val_scales.name].tolist()[ - 2:] - if len(val_x_shape) == 3: - val_scales = self.graph.get_input_node( - node, idx=2, copy=True) - val_scales_values = _const_weight_or_none(val_scales) - - attrs = { - "align_corners": False, - "mode": string(node.get_attr('mode', 'nearest')), - "scale_factor": - self.weights[val_scales.name].tolist()[1:] - } - mode = node.get_attr('mode', 'nearest') - if mode == "linear": - attrs["mode"] = string("bilinear") - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'pytorch_half_pixel': - attrs["align_corners"] = False - attrs["align_mode"] = 0 - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'align_corners': - attrs["align_corners"] = True - self.paddle_graph.add_layer( - 'paddle.unsqueeze', - inputs={"x": val_x.name}, - outputs=[val_x.name], - axis=0) - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.interpolate", - inputs=inputs, - outputs=[node.name], - **attrs) - self.paddle_graph.add_layer( - 'paddle.squeeze', - inputs={"x": node.name}, - outputs=[node.name], - axis=0) - return - elif len(node.layer.input) == 4: - # opset 11 - val_sizes = self.graph.get_input_node(node, idx=3, copy=True) - size_values = _const_weight_or_none(val_sizes) - if len(val_x_shape) == 3: - var_n, var_hw = val_sizes.name + '_n', val_sizes.name + '_hw' - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_sizes.name}, - outputs=[var_n, var_hw], - num_or_sections=[1, 2], - axis=0) - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": var_hw}, - outputs=[var_hw], - dtype=string('int32')) - inputs['size'] = var_hw - attrs = { - "align_corners": False, - "mode": string(node.get_attr('mode', 'nearest')) - } - mode = node.get_attr('mode', 'nearest') - if mode == "linear": - attrs["mode"] = string("bilinear") - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'pytorch_half_pixel': - attrs["align_corners"] = False - attrs["align_mode"] = 0 - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'align_corners': - attrs["align_corners"] = True - self.paddle_graph.add_layer( - 'paddle.unsqueeze', - inputs={"x": val_x.name}, - outputs=[val_x.name], - axis=0) - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.interpolate", - inputs=inputs, - outputs=[node.name], - **attrs) - self.paddle_graph.add_layer( - 
'paddle.squeeze', - inputs={"x": node.name}, - outputs=[node.name], - axis=0) - else: - if size_values is not None: - attrs["size"] = [size_values[2], size_values[3]] - else: - var_nc, var_hw = val_sizes.name + '_nc', val_sizes.name + '_hw' - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_sizes.name}, - outputs=[var_nc, var_hw], - num_or_sections=[2, 2], - axis=0) - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": var_hw}, - outputs=[var_hw], - dtype=string('int32')) - inputs['size'] = var_hw - attrs.update({ - "align_corners": False, - "mode": string(node.get_attr('mode', 'nearest')) - }) - mode = node.get_attr('mode', 'nearest') - if mode == "linear": - attrs["mode"] = string("bilinear") - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'pytorch_half_pixel': - attrs["align_corners"] = False - attrs["align_mode"] = 0 - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'align_corners': - attrs["align_corners"] = True - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.interpolate", - inputs=inputs, - outputs=[node.name], - **attrs) - return - elif node.layer_type == 'Upsample': - if len(node.layer.input) == 2: - val_scales = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.slice", - inputs={"input": val_scales.name}, - outputs=[val_scales.name], - axes=[0], - starts=[2], - ends=[4]) - inputs['scale_factor'] = val_scales.name - else: - val_scales = node.get_attr('scales')[2:] - - mode = node.get_attr('mode', 'nearest') - attrs.update({ - "align_corners": False, - "mode": string(mode), - "align_mode": 1 - }) - if len(node.layer.input) == 1: - attrs["scale_factor"] = val_scales - if mode == "linear" and len(val_x_shape) == 4: - attrs["mode"] = string("bilinear") - if node.get_attr('coordinate_transformation_mode', - 'half_pixel') == 'pytorch_half_pixel': - attrs["align_corners"] = False - attrs["align_mode"] = 0 - else: - attrs["align_corners"] = True - self.paddle_graph.add_layer( - kernel="paddle.nn.functional.interpolate", - inputs=inputs, - outputs=[node.name], - **attrs) - - @print_mapping_info - def CumSum(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - axis = self.graph.get_input_node(node, idx=1, copy=True) - axis_values = _const_weight_or_none(axis) - assert axis_values is not None, 'Axis only support constant tensor!' 
- layer_attrs = {'axis': axis_values} - self.paddle_graph.add_layer( - 'paddle.cumsum', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def HardSigmoid(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - alpha = node.get_attr('alpha', 0.2) - beta = node.get_attr('beta', 0.5) - self.paddle_graph.add_layer( - kernel="paddle.scale", - inputs={"x": val_x.name}, - outputs=[node.name + "_val"], - scale=alpha, - bias=beta) - self.paddle_graph.add_layer( - kernel="paddle.clip", - inputs={"x": node.name + "_val"}, - outputs=[node.name], - min=0.0, - max=1.0) - - @print_mapping_info - def Shape(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - self.paddle_graph.add_layer( - kernel="paddle.shape", - inputs={"input": val_x.name}, - outputs=[node.name]) - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": node.name}, - outputs=[node.name], - dtype=string('int64')) - - @print_mapping_info - def RoiAlign(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_rois = self.graph.get_input_node(node, idx=1, copy=True) - - pooled_height = node.get_attr('output_height') - pooled_width = node.get_attr('output_width') - spatial_scale = node.get_attr('spatial_scale') - sampling_ratio = node.get_attr('sampling_ratio') - val_rois_shape = val_rois.name + '_shape' - self.paddle_graph.add_layer( - kernel="paddle.shape", - inputs={"input": val_rois.name}, - outputs=[val_rois_shape]) - val_rois_num = val_rois.name + '_num' - if len(val_rois.out_shapes[0]) == 4: - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_rois_shape}, - outputs=[val_rois_num, ' _', ' _', ' _'], - num_or_sections=[1, 1, 1, 1], - axis=0) - elif len(val_rois.out_shapes[0]) == 2: - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_rois_shape}, - outputs=[val_rois_num, ' _'], - num_or_sections=[1, 1], - axis=0) - layer_attrs = { - 'pooled_height': pooled_height, - 'pooled_width': pooled_width, - 'spatial_scale': spatial_scale, - 'sampling_ratio': sampling_ratio, - } - self.paddle_graph.add_layer( - 'custom_layer:ROIAlign', - inputs={ - 'input': val_x.name, - 'rois': val_rois.name, - 'rois_num': val_rois_num - }, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def MaxRoiPool(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_rois = self.graph.get_input_node(node, idx=1, copy=True) - - spatial_scale = node.get_attr('spatial_scale') - pooled_height, pooled_width = node.get_attr('pooled_shape') - layer_attrs = { - 'pooled_height': pooled_height, - 'pooled_width': pooled_width, - 'spatial_scale': spatial_scale, + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 } - self.paddle_graph.add_layer( - 'custom_layer:ROIPooling', - inputs={'input': val_x.name, - 'rois': val_rois.name}, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def Pad(self, node, op_independent=True): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - pads = node.get_attr('pads') - is_pads_attr = True - if pads is None: - val_pad = self.graph.get_input_node(node, idx=1, copy=True) - pad_shape = val_pad.out_shapes[0] - is_pads_attr = False - pads = _const_weight_or_none(val_pad) - if pads is not None: - is_pads_attr = True - mode = node.get_attr('mode', 'constant') - if mode in ["edge"]: - mode = 
"replicate" - value = node.get_attr('value', 0.) - data_shape = val_x.out_shapes[0] - output_shape = node.out_shapes[0] - assume_pad = False - layer_attrs = {} - layer_attrs['mode'] = string(mode) - layer_attrs['value'] = value - if not op_independent: - output_name = node.name + '_paded' - else: - output_name = node.name - nn_op_name = name_generator("pad", self.nn_name2id) - layer_outputs = [nn_op_name, output_name] - if is_pads_attr: - paddings = [] - if len(pads) == 10 and sum(pads) == 0: - pads = pads[0:6] - if len(pads) in [2, 4, 6]: - if data_shape: - assume_pad |= data_shape and 2 * (len(data_shape) - 2 - ) == len(pads) # NCHW - if output_shape: - assume_pad |= output_shape and 2 * (len(output_shape) - 2 - ) == len(pads) # NCHW - if assume_pad: - paddle_op = 'paddle.nn.Pad{}D'.format(len(output_shape) - 2) - paddings = np.array(pads).reshape( - (2, -1)).transpose().astype("int32") - paddings = np.flip(paddings, axis=0).flatten().tolist() - layer_attrs['padding'] = paddings - else: - if data_shape: - assume_pad |= data_shape and 2 * len(data_shape) == len( - pads) # NCHW - if output_shape: - assume_pad |= output_shape and 2 * len( - output_shape) == len(pads) # NCHW - if assume_pad: - paddle_op = 'paddle.nn.functional.pad' - paddings = np.array(pads).reshape( - (2, - -1)).transpose().astype("int32").flatten().tolist() - layer_attrs['pad'] = paddings - else: - raise Exception("The padding value {} is wrong!".format( - pads)) - elif len(pads) == 8: - if data_shape: - assume_pad |= data_shape and 2 * len(data_shape) == len( - pads) # NCHW - if output_shape: - assume_pad |= output_shape and 2 * len(output_shape) == len( - pads) # NCHW - if assume_pad: - paddle_op = 'paddle.nn.Pad2D' - # x1_begin,x2_begin,x3_begin,x4_begin,x1_end,x2_end,x3_end,x4_end->x1_begin,x1_end,x2_begin,x2_end,x3_begin,x3_end,x4_begin,x4_end - paddings = np.array(pads).reshape( - (2, -1)).transpose().astype("int32") - if mode == 'constant': - paddings = paddings.flatten().tolist() - layer_attrs['padding'] = paddings - else: - paddings = np.flip(paddings, axis=0).flatten().tolist() - if sum(paddings[:4]) == 0: - paddings = paddings[4:] - layer_attrs['padding'] = paddings - else: - layer_attrs["pad"] = paddings - paddle_op = "custom_layer:PadAllDim4WithOneInput" - else: - paddle_op = 'paddle.nn.functional.pad' - layer_attrs["pad"] = np.array(pads).tolist() - else: - pad_data_temp = pads[0::2] - pad_data_all = [] - for i in range(len(pad_data_temp)): - pad_data_all.append(pads[i]) - pad_data_all.append(pads[len(pad_data_temp) + i]) - - layer_attrs["pad"] = pad_data_all - self.paddle_graph.add_layer( - 'paddle.nn.functional.pad', - inputs={'x': val_x.name}, - outputs=layer_outputs[1:], - **layer_attrs) - return - - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x.name}, - outputs=layer_outputs[1:] - if paddle_op == 'paddle.nn.functional.pad' else layer_outputs, - **layer_attrs) - if not op_independent: - return node.name + '_paded' - else: - pads_len = val_pad.out_shapes[0][0] - if pads_len in [2, 4, 6]: - if data_shape: - assume_pad |= data_shape and 2 * (len(data_shape) - 2 - ) == pads_len # NCHW - if output_shape: - assume_pad |= output_shape and 2 * (len(output_shape) - 2 - ) == pads_len # NCHW - if assume_pad: - if pads_len == 2: - data_format = "NCL" - elif pads_len == 4: - data_format = "NCHW" - else: - data_format = "NCDHW" - self.paddle_graph.add_layer( - "custom_layer:PadWithTwoInput", - inputs={'x': val_x.name, - 'pad': val_pad.name}, - outputs=layer_outputs, - value=value, - mode=string(mode), 
- data_format=string(data_format)) - else: - if data_shape: - assume_pad |= data_shape and 2 * len( - data_shape) == pads_len # NCHW - if output_shape: - assume_pad |= output_shape and 2 * len( - output_shape) == pads_len # NCHW - if assume_pad: - if pads_len == 4: - self.paddle_graph.add_layer( - "custom_layer:PadAllDim2", - inputs={'x': val_x.name, - 'pad': val_pad.name}, - outputs=layer_outputs, - value=value, - mode=string(mode)) - else: - raise Exception("The padding value is wrong!") - elif pads_len == 8: - if data_shape: - assume_pad |= data_shape and 2 * len( - data_shape) == pads_len # NCHW - if output_shape: - assume_pad |= output_shape and 2 * len( - output_shape) == pads_len # NCHW - if assume_pad: - self.paddle_graph.add_layer( - "custom_layer:PadAllDim4", - inputs={'x': val_x.name, - 'pad': val_pad.name}, - outputs=layer_outputs, - value=value, - mode=string(mode)) - else: - raise Exception("The padding value is wrong!") - if not op_independent: - return node.name + '_paded' - - @print_mapping_info - def Unsqueeze(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - axes = node.get_attr('axes') - if axes is None: - axes_node = self.graph.get_input_node(node, idx=1, copy=True) - axes = _const_weight_or_none(axes_node, necessary=True) - # deal with scalar(0D) tensor - if len(val_x.out_shapes[0]) == 0 and len(axes) == 1 and axes[0] == 0: - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": val_x.name}, - outputs=[node.name], - shape=[1]) - else: - self.paddle_graph.add_layer( - 'paddle.unsqueeze', - inputs={"x": val_x.name}, - axis=axes, - outputs=[node.name]) - - @print_mapping_info - def Shrink(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - bias = node.get_attr('bias') - lambd = node.get_attr('lambd') - assert bias == 0.0, 'not support bias!=0' - self.paddle_graph.add_layer( - 'paddle.nn.functional.hardshrink', - inputs={"x": val_x.name}, - outputs=[node.name], - threshold=lambd) - - @print_mapping_info - def Constant(self, node): - val_output = self.graph.get_node(node.layer.output[0], copy=True) - - value = node.get_attr('value') - dtype = np.dtype(value.dtype) - output_dtype = val_output.dtype - if output_dtype: - assert dtype == output_dtype, 'tensor dtype unmatches storage dtype' - - shape = node.get_attr('shape', None) - - if shape is None: - shape = val_output.out_shapes[0] - if shape is None: - shape = list(value.shape) - _logger.warning('in (Constant -> %s): ' - 'attribute "shape" of %s not inferred, ' - 'using value as 1-D tensor may lead to fails', - val_output.name, val_output.name) - if len(value) == 1: - value = value.tolist() - value = value[0] - if value == float('inf') or value == float('-inf'): - value = string(value) - self.paddle_graph.add_layer( - "paddle.full", - inputs={}, - outputs=[node.name], - dtype=string(dtype), - shape=[1], - fill_value=value) - else: - value = np.reshape(value, shape) - self.weights[node.name] = value - self.paddle_graph.add_layer( - "self.create_parameter", - inputs={}, - outputs=[node.name], - shape=shape, - attr=string(node.name), - dtype=string(dtype), - default_initializer="paddle.nn.initializer.Constant(value=0.0)") - - @print_mapping_info - def Resize(self, node): - self._interpolate(node) - - @print_mapping_info - def Upsample(self, node): - self._interpolate(node) - - @print_mapping_info - def InstanceNormalization(self, node): - op_name = name_generator("instanse_norm", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - 
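# (editor's note) for paddle.nn.* kernels the mapper emits two output names:
# layer_outputs[0] is the attribute name of the sub-layer created on the
# generated module, layer_outputs[1] is the tensor produced in forward;
# roughly:
#     self.instanse_norm0 = paddle.nn.InstanceNorm2D(...)  # layer_outputs[0]
#     out = self.instanse_norm0(x)                          # layer_outputs[1]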
val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_scale = self.graph.get_input_node(node, idx=1, copy=True) - val_b = self.graph.get_input_node(node, idx=2, copy=True) - epsilon = node.get_attr('epsilon', 1e-5) - self.weights[op_name + '.scale'] = self.weights[val_scale.name] - self.weights[op_name + '.bias'] = self.weights[val_b.name] - layer_attrs = { - 'num_features': node.out_shapes[0][1], - 'epsilon': epsilon, - } - dim = len(val_x.out_shapes[0]) - if dim == 3: - paddle_op = "paddle.nn.InstanceNorm1D" - elif dim == 4: - paddle_op = "paddle.nn.InstanceNorm2D" - elif dim == 5: - paddle_op = "paddle.nn.InstanceNorm3D" - else: - raise Exception( - "The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization." - ) - self.paddle_graph.add_layer( - paddle_op, - inputs={"x": val_x.name}, - outputs=layer_outputs, - **layer_attrs) - - @print_mapping_info - def Expand(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_shape = self.graph.get_input_node(node, idx=1, copy=True) - val_x_dtype = val_x.dtype - name_ones = node.name + '_ones' - shape_values = _const_weight_or_none(val_shape) - if shape_values is None: - attr_ones = { - 'shape': val_shape.name, - 'dtype': string(val_x_dtype), - 'fill_value': 1 - } - else: - attr_ones = { - 'shape': shape_values.tolist(), - 'dtype': string(val_x_dtype), - 'fill_value': 1 - } - self.paddle_graph.add_layer( - 'paddle.full', inputs={}, outputs=[name_ones], **attr_ones) - inputs_dict = {'x': name_ones, 'y': val_x.name} - self.paddle_graph.add_layer( - 'paddle.multiply', inputs=inputs_dict, outputs=[node.name]) - - @print_mapping_info - def GatherND(self, node): - x = self.graph.get_input_node(node, idx=0, copy=True) - index = self.graph.get_input_node(node, idx=1, copy=True) - inputs = {'x': x.name, 'index': index.name} - self.paddle_graph.add_layer( - "paddle.gather_nd", inputs=inputs, outputs=[node.name]) - - @print_mapping_info - def Gather(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - indices = self.graph.get_input_node(node, idx=1, copy=True) - indices_values = _const_weight_or_none(indices, necessary=True) - if isinstance(indices_values, np.ndarray): - indices_values = indices_values.tolist() - indices_shape = indices.out_shapes[0] - val_x_shape = val_x.out_shapes[0] - axis = node.get_attr('axis', 0) - if len(indices_shape) == 1 or \ - (indices_values is not None and isinstance(indices_values, int)) or \ - (indices_values is not None and len(indices_values) == 1): - self.paddle_graph.add_layer( - 'paddle.gather', - inputs={'x': val_x.name, - 'index': indices.name}, - outputs=[node.name], - axis=axis) - # deal with indice is scalar(0D) Tensor - if isinstance(indices_values, int) and len(val_x_shape) > 1: - self.paddle_graph.add_layer( - 'paddle.squeeze', - inputs={'x': node.name}, - outputs=[node.name], - axis=[axis]) - else: - # if val_x is DataNode, convert gather to embedding - if axis == 0 and isinstance(val_x, ONNXGraphDataNode): - indices_cast = indices.name + '_cast' - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": indices.name}, - outputs=[indices_cast], - dtype=string('int64')) - op_name = name_generator("embedding", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - self.weights[op_name + '.weight'] = _const_weight_or_none(val_x) - self.paddle_graph.add_layer( - 'paddle.nn.Embedding', - inputs={"x": indices_cast}, - outputs=layer_outputs, - num_embeddings=val_x_shape[0], - embedding_dim=val_x_shape[1]) - else: - 
self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": indices.name}, - outputs=[indices.name + "_reshape"], - shape=[-1]) - gather_1d = node.name + '_1D' - self.paddle_graph.add_layer( - 'paddle.gather', - inputs={ - 'x': val_x.name, - 'index': indices.name + "_reshape" - }, - outputs=[gather_1d], - axis=axis) - # if shape is known - if len(indices_shape) != 0 and len(val_x_shape) != 0: - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={'x': gather_1d}, - outputs=[node.name], - shape=val_x_shape[:axis] + indices_shape + - val_x_shape[axis + 1:]) - else: - all_shape_name = list() - self.paddle_graph.add_layer( - kernel="paddle.shape", - inputs={"input": val_x.name}, - outputs=[val_x.name + "_shape"]) - self.paddle_graph.add_layer( - kernel="paddle.shape", - inputs={"input": indices.name}, - outputs=[indices.name + "_shape"]) - self.paddle_graph.add_layer( - "paddle.slice", - inputs={"input": val_x.name + "_shape"}, - outputs=[val_x.name + "_shape_slice_start"], - axes=[0], - starts=[0], - ends=[axis]) - all_shape_name.append(val_x.name + "_shape_slice_start") - all_shape_name.append(indices.name + "_shape") - self.paddle_graph.add_layer( - "paddle.slice", - inputs={"input": val_x.name + "_shape"}, - outputs=[val_x.name + "_shape_slice_end"], - axes=[0], - starts=[axis + 1], - ends=[2147483647]) - all_shape_name.append(val_x.name + "_shape_slice_end") - self.paddle_graph.add_layer( - 'paddle.concat', - inputs={"x": all_shape_name}, - outputs=[node.name + "_all_shape"], - axis=0) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={'x': gather_1d}, - outputs=[node.name], - shape=node.name + "_all_shape") - - @print_mapping_info - def ScatterND(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - indices = self.graph.get_input_node(node, idx=1, copy=True) - updates = self.graph.get_input_node(node, idx=2, copy=True) - if len(indices.out_shapes[0]) == 1: - self.paddle_graph.add_layer( - 'paddle.scatter', - inputs={ - 'x': val_x.name, - 'index': indices.name, - 'updates': updates.name - }, - outputs=[node.name]) - else: - input_inner_indices = node.name + '_input_inner_indices' - shape = val_x.out_shapes[0] - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": indices.name}, - outputs=[indices.name], - shape=indices.out_shapes[0]) - - zeros_like_val_x = val_x.name + '_zeros' - self.paddle_graph.add_layer( - 'paddle.zeros_like', - inputs={"x": val_x.name}, - outputs=[zeros_like_val_x]) - self.paddle_graph.add_layer( - 'paddle.scatter_nd_add', - inputs={ - 'x': zeros_like_val_x, - 'index': indices.name, - 'updates': updates.name - }, - outputs=[input_inner_indices]) - indices_mask = node.name + '_indices_mask' - constant_minus_one = node.name + '_constant_minus_one' - # full_like support create tensor shape like input tensor - self.paddle_graph.add_layer( - 'paddle.full_like', - inputs={"x": updates.name}, - outputs=[constant_minus_one], - dtype=string(updates.dtype), - fill_value=-1) - self.paddle_graph.add_layer( - 'paddle.scatter_nd_add', - inputs={ - 'x': zeros_like_val_x, - 'index': indices.name, - 'updates': constant_minus_one - }, - outputs=[indices_mask]) - constant_one = node.name + '_constant_1' - # full_like support create tensor shape like input tensor - self.paddle_graph.add_layer( - 'paddle.full_like', - inputs={"x": val_x.name}, - outputs=[constant_one], - dtype=string(val_x.dtype), - fill_value=1) - input_out_indices_mask = node.name + '_input_out_indices_mask' - self.paddle_graph.add_layer( - "paddle.add", - 
inputs={"x": indices_mask, - "y": constant_one}, - outputs=[input_out_indices_mask]) - - input_out_indices = node.name + '_input_out_indices' - self.paddle_graph.add_layer( - "paddle.multiply", - inputs={"x": val_x.name, - "y": input_out_indices_mask}, - outputs=[input_out_indices]) - - self.paddle_graph.add_layer( - "paddle.add", - inputs={"x": input_inner_indices, - "y": input_out_indices}, - outputs=[node.name]) - - @print_mapping_info - def Range(self, node): - val_start = self.graph.get_input_node(node, idx=0, copy=True) - val_limit = self.graph.get_input_node(node, idx=1, copy=True) - val_delta = self.graph.get_input_node(node, idx=2, copy=True) - dtype = val_start.dtype - inputs = { - 'start': val_start.name, - 'end': val_limit.name, - 'step': val_delta.name - } - self.paddle_graph.add_layer( - 'paddle.arange', - inputs=inputs, - outputs=[node.name], - dtype=string(dtype)) - - @print_mapping_info - def Slice(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - starts, ends, axes, steps = None, None, None, None - layer_attrs = {} - if val_x.dtype == 'uint8': - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": val_x.name}, - outputs=[val_x.name], - dtype=string('int32')) - if len(node.inputs) > 1: - starts = self.graph.get_input_node(node, idx=1, copy=True) - ends = self.graph.get_input_node(node, idx=2, copy=True) - starts_value = _const_weight_or_none(starts) - if starts_value is not None: - starts_value = starts_value.tolist() - ends_value = _const_weight_or_none(ends) - if ends_value is not None: - ends_value = ends_value.tolist() - if len(node.inputs) > 2: - s_len = len(val_x.out_shapes[0]) - axes = list(range(s_len)) - if len(node.inputs) > 3: - axes_node = self.graph.get_input_node(node, idx=3, copy=True) - axes = _const_weight_or_none(axes_node, necessary=True).tolist() - if len(node.inputs) > 4: - steps = self.graph.get_input_node(node, idx=4, copy=True) - steps = _const_weight_or_none(steps).tolist() - - layer_attrs = { - "axes": axes, - "starts": starts.name, - "ends": ends.name - } - if starts_value is not None and ends_value is not None and axes is not None: - starts_value = starts_value.copy() - ends_value = ends_value.copy() - for idx in range(len(ends_value)): - if len(val_x.out_shapes[0]) != 0 and starts_value[ - idx] >= val_x.out_shapes[0][axes[ - idx]] and val_x.out_shapes[0][axes[idx]] > 0: - starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1 - ends_value[idx] = val_x.out_shapes[0][axes[idx]] - elif ends_value[idx] > 2**31 - 1: - ends_value[idx] = 2**31 - 1 - - layer_attrs = { - "axes": axes, - "starts": starts_value, - "ends": ends_value - } - else: - if starts.dtype != 'int32': - starts_cast = starts.name + '_cast' - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": starts.name}, - outputs=[starts_cast], - dtype=string('int32')) - layer_attrs['starts'] = starts_cast - if ends.dtype != 'int32': - ends_cast = ends.name + '_cast' - else: - ends_cast = ends.name - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": ends.name}, - outputs=[ends_cast], - dtype=string('int32')) - layer_attrs['ends'] = ends_cast - else: - starts = node.get_attr('starts') - ends = node.get_attr('ends') - axes = node.get_attr('axes') - output_shape = val_x.out_shapes[0] - - if axes is None: - axes = [i for i in range(len(starts))] - for idx in range(len(ends)): - if ends[idx] > 2**31 - 1: - ends[idx] = 2**31 - 1 - layer_attrs = {"axes": axes, "starts": starts, "ends": ends} - - if steps is not None: - layer_attrs['strides'] = steps - 
self.paddle_graph.add_layer( - 'paddle.strided_slice', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - else: - self.paddle_graph.add_layer( - 'paddle.slice', - inputs={"input": val_x.name}, - outputs=[node.name], - **layer_attrs) - if val_x.dtype == 'uint8': - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": node.name}, - outputs=[node.name], - dtype=string('uint8')) - - @print_mapping_info - def ConstantOfShape(self, node): - val_shape = self.graph.get_input_node(node, idx=0, copy=True) - - value = node.get_attr('value') - dtype = value.dtype - value = value.tolist() - assert len(value) == 1, ('given value not Scalar, shape of value > 1, ' - 'this is not supported') - if len(value) == 1: - value = value[0] - if value == float('inf') or value == float('-inf'): - value = string(value) - layer_attrs = {'dtype': string(dtype), 'fill_value': value} - self.paddle_graph.add_layer( - "paddle.full", - inputs={'shape': val_shape.name}, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def Clip(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_node(node.layer.output[0], copy=True) - max_value, min_value = None, None - if len(node.inputs) == 1: - max_value = node.get_attr('max') - min_value = node.get_attr('min') - layer_attrs = { - 'max': max_value, - 'min': min_value, - } - - self.paddle_graph.add_layer( - 'paddle.clip', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - else: - if len(node.inputs) == 2: - val_ipt = self.graph.get_input_node(node, idx=1, copy=True) - - index = node.get_input_index(val_ipt.name) - - val_value = _const_weight_or_none(val_ipt) - if val_value.shape == (1, ): - val_value = val_value[0] - - if index == 1: - layer_attrs = {'min': val_value} - - if index == 2: - layer_attrs = {'max': val_value} - - self.paddle_graph.add_layer( - 'paddle.clip', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - else: - if len(node.inputs) == 3: - min_ipt = self.graph.get_input_node(node, idx=1, copy=True) - max_ipt = self.graph.get_input_node(node, idx=2, copy=True) - self.paddle_graph.add_layer( - 'paddle.clip', - inputs={ - "x": val_x.name, - "min": min_ipt.name, - "max": max_ipt.name - }, - outputs=[node.name]) - else: - raise Exception("max_value or min_value can't be None") - - @print_mapping_info - def ReduceSum(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - if len(node.inputs) == 1: - keepdims = node.get_attr('keepdims') - if keepdims is None: - keepdims = True - axes_value = node.get_attr('axes') - layer_attrs = {'axis': axes_value, 'keepdim': keepdims} - self.paddle_graph.add_layer( - 'paddle.sum', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - else: - axes = self.graph.get_input_node(node, idx=1, copy=True) - axes_value = _const_weight_or_none(axes) - if axes_value.shape == (1, ): - axes_value = axes_value[0] - keepdims = node.get_attr('keepdims') - if keepdims is None: - layer_attrs = {'axis': axes_value} - else: - layer_attrs = {'axis': axes_value, 'keepdim': keepdims} - - self.paddle_graph.add_layer( - 'paddle.sum', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def Max(self, node): - if len(node.inputs) == 2: - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.maximum", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[node.name]) - 
else: - val_x = self.graph.get_input_node(node, idx=0, copy=True) - temp_name = "max_" - for i in range(1, len(node.inputs)): - val_y = self.graph.get_input_node(node, idx=i, copy=True) - temp_name = temp_name + str(i) - if i == len(node.inputs) - 1: - self.paddle_graph.add_layer( - "paddle.maximum", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[node.name]) - else: - self.paddle_graph.add_layer( - "paddle.maximum", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[temp_name]) - val_x.name = temp_name - - @print_mapping_info - def Min(self, node): - if len(node.inputs) == 2: - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.minimum", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[node.name]) - else: - val_x = self.graph.get_input_node(node, idx=0, copy=True) - temp_name = "min_" - for i in range(1, len(node.inputs)): - val_y = self.graph.get_input_node(node, idx=i, copy=True) - temp_name = temp_name + str(i) - if i == len(node.inputs) - 1: - self.paddle_graph.add_layer( - "paddle.minimum", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[node.name]) - else: - self.paddle_graph.add_layer( - "paddle.minimum", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[temp_name]) - val_x.name = temp_name - - @print_mapping_info - def GreaterOrEqual(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.greater_equal", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[node.name]) - - @print_mapping_info - def And(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.logical_and", - inputs={"x": val_x.name, - "y": val_y.name}, - outputs=[node.name]) - - @print_mapping_info - def Split(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - paddle_op = 'split' - split = node.get_attr('split') - axis = node.get_attr('axis', 0) - if split is None: - split_num = len(node.layer.output) - try: - #split is an input of this node - split_node = self.graph.get_input_node(node, idx=1, copy=True) - split_value = _const_weight_or_none(split_node) - layer_attrs = { - 'num_or_sections': split_value.tolist(), - 'axis': axis, - } - except: - layer_attrs = { - 'num_or_sections': split_num, - 'axis': axis, - } - outputs_list = list() - for i in range(len(node.layer.output)): - if hasattr(node, 'index'): - outputs_list.append("{}_p{}".format(node.layer_name, i)) - else: - outputs_list.append("{}".format(node.layer_name)) - if split_num > 1: - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_x.name}, - outputs=outputs_list, - **layer_attrs) - else: - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": val_x.name}, - outputs=outputs_list, - dtype=string(val_x.dtype)) - - else: - layer_attrs = { - 'num_or_sections': split, - 'axis': axis, - } - outputs_list = list() - if isinstance(split, list) or isinstance(split, tuple): - if len(split) == 1: - outputs_list.append(node.name) - else: - for i in range(len(split)): - outputs_list.append("{}_p{}".format(node.layer_name, i)) - else: - outputs_list.append(node.name) - if len(split) > 1: - self.paddle_graph.add_layer( - 'paddle.split', - inputs={"x": val_x.name}, - outputs=outputs_list, - **layer_attrs) - else: - 
self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": val_x.name}, - outputs=outputs_list, - dtype=string(val_x.dtype)) - - @print_mapping_info - def Reshape(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_shape = self.graph.get_input_node(node, idx=1, copy=True) - val_reshaped = self.graph.get_node(node.layer.output[0], copy=True) - shape_value = _const_weight_or_none(val_shape) - shape_dims = len(val_shape.out_shapes[0]) - - if shape_value is not None: - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={'x': val_x.name}, - outputs=[node.name], - shape=shape_value.tolist()) - elif len(node.out_shapes[0]) > 0 and _is_static_shape(node.out_shapes[ - 0]): - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={'x': val_x.name}, - outputs=[node.name], - shape=node.out_shapes[0]) - else: - # shape may be [], come form Gather by scalar indices - if len(val_shape.out_shapes[0]) > 0: - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={'x': val_shape.name}, - outputs=[val_shape.name], - shape=val_shape.out_shapes[0]) - if val_shape.dtype != "int32": - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={'x': val_shape.name}, - outputs=[val_shape.name], - dtype=string("int32")) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={'x': val_x.name, - 'shape': val_shape.name}, - outputs=[node.name]) - - @print_mapping_info - def Cast(self, node): - val_input = self.graph.get_input_node(node, idx=0, copy=True) - val_output = self.graph.get_node(node.layer.output[0], copy=True) - - dtype = node.get_attr('to') - if not isinstance(dtype, np.dtype): - dtype = TENSOR_TYPE_TO_NP_TYPE[dtype] - - output_dtype = val_output.dtype - if output_dtype: - assert dtype == output_dtype, 'dtype of to unmatches output' - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={'x': val_input.name}, - outputs=[node.name], - dtype=string(dtype)) - - @print_mapping_info - def Not(self, node): - val_input = self.graph.get_input_node(node, idx=0, copy=True) - self.paddle_graph.add_layer( - 'paddle.logical_not', - inputs={'x': val_input.name}, - outputs=[node.name]) - - @print_mapping_info - def AveragePool(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - - auto_pad = node.get_attr('auto_pad', 'NOTSET') - kernel_shape = node.get_attr("kernel_shape") - poolnd = len(kernel_shape) - strides = node.get_attr("strides") - pad_mode = node.get_attr("pads") - ceil_mode = bool(node.get_attr('ceil_mode', 0)) - pads = node.get_attr('pads', [0] * (poolnd * 2)) - - paddings, val_x = self._pad_if_asymmetric(node, pads, val_x) - - if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER": - input_shape = val_x.out_shapes[0] - pad_h = _get_same_padding(input_shape[2], kernel_shape[0], - strides[0], auto_pad) - pad_w = _get_same_padding(input_shape[3], kernel_shape[1], - strides[1], auto_pad) - paddings = pad_h + pad_w - - op_name = name_generator("pool", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - paddle_op = 'paddle.nn.AvgPool{}D'.format(poolnd) - assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported' - layer_attrs = { - "kernel_size": kernel_shape, - "stride": strides, - "padding": paddings, - "ceil_mode": ceil_mode, - "exclusive": 'True', - } - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x if isinstance(val_x, str) else val_x.name}, - outputs=layer_outputs, - **layer_attrs) - - @print_mapping_info - def Concat(self, node): - inputs_list = [] - dtypes = set() - for i in 
range(len(node.layer.input)):
-            ipt = self.graph.get_input_node(node, idx=i, copy=True)
-            inputs_list.append(ipt.name)
-            dtypes.add(ipt.dtype)
-        if len(dtypes) > 1:
-            raise Exception('Unsupported situation happened, please create '
-                            'an issue on https://github.com/PaddlePaddle/X2Paddle/issues.')
-        axis = node.get_attr('axis')
-        self.paddle_graph.add_layer(
-            'paddle.concat',
-            inputs={"x": inputs_list},
-            outputs=[node.name],
-            axis=axis)
-
-    @print_mapping_info
-    def Flatten(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        output_shape = val_x.out_shapes[0]
-        axis = node.get_attr('axis', 1)
-        if axis == 0:
-            self.paddle_graph.add_layer(
-                'paddle.reshape',
-                inputs={"x": val_x.name},
-                outputs=[node.name],
-                shape=[1, -1])
-        else:
-            if len(output_shape) != 0:
-                shape_list = [1, 1]
-                for s in output_shape[:axis]:
-                    shape_list[0] *= s
-                for s in output_shape[axis:]:
-                    shape_list[1] *= s
-                self.paddle_graph.add_layer(
-                    'paddle.reshape',
-                    inputs={"x": val_x.name},
-                    outputs=[node.name],
-                    shape=shape_list)
-            else:
-                # flatten + reshape
-                self.paddle_graph.add_layer(
-                    "paddle.flatten",
-                    inputs={"input": val_x.name},
-                    outputs=[val_x.name + "_flatten"],
-                    start_axis=0,
-                    stop_axis=axis)
-                self.paddle_graph.add_layer(
-                    'paddle.reshape',
-                    inputs={'x': val_x.name + "_flatten"},
-                    outputs=[node.name],
-                    shape=[0, -1])
-
-    @print_mapping_info
-    def Gemm(self, node):
-        val_a = self.graph.get_input_node(node, idx=0, copy=True)
-        val_b = self.graph.get_input_node(node, idx=1, copy=True)
-
-        alpha = node.get_attr('alpha', 1.)  # optional
-        beta = node.get_attr('beta', 1.)  # optional
-        trans_a = bool(node.get_attr('transA', 0))  # optional
-        trans_b = bool(node.get_attr('transB', 0))  # optional
-        val_mm = node.name + '_mm'
-        matmul_inputs = {"x": val_a.name, "y": val_b.name}
-        attr_matmul = {
-            "transpose_x": trans_a,
-            "transpose_y": trans_b,
-        }
-        self.paddle_graph.add_layer(
-            'paddle.matmul',
-            inputs=matmul_inputs,
-            outputs=[val_mm],
-            **attr_matmul)
-        if beta != 0:
-            self.paddle_graph.add_layer(
-                "paddle.scale",
-                inputs={"x": val_mm},
-                outputs=[val_mm],
-                scale=alpha)
-        else:
-            self.paddle_graph.add_layer(
-                "paddle.scale", inputs={"x": val_mm}, outputs=[node.name],
-                scale=alpha)
-
-        if beta != 0:
-            # when beta is equal to 0, there is no val_c
-            val_c = self.graph.get_input_node(node, idx=2, copy=True)
-            if beta == 1.:
-                add_inputs = {"x": val_mm, "y": val_c.name}
-                self.paddle_graph.add_layer(
-                    "paddle.add", inputs=add_inputs, outputs=[node.name])
-            else:
-                var_beta = node.name + '_beta'
-                self.paddle_graph.add_layer(
-                    "paddle.scale",
-                    inputs={"x": val_c.name},
-                    outputs=[var_beta],
-                    scale=beta)
-                add_inputs = {"x": val_mm, "y": var_beta}
-                self.paddle_graph.add_layer(
-                    "paddle.add", inputs=add_inputs, outputs=[node.name])
-
-    @print_mapping_info
-    def Sum(self, node):
-        val_inps = node.layer.input
-        inputs_dict = {
-            "x": self.graph.get_input_node(
-                node, idx=0, copy=True).name,
-            "y": self.graph.get_input_node(
-                node, idx=1, copy=True).name,
-        }
-        self.paddle_graph.add_layer(
-            "paddle.add", inputs=inputs_dict, outputs=[node.name])
-
-        for idx, ipt in enumerate(val_inps[2:]):
-            y = self.graph.get_input_node(node, idx=idx + 2, copy=True)
-            inputs_dict = {
-                "x": node.name,
-                "y": y.name,
-            }
-            self.paddle_graph.add_layer(
-                "paddle.add", inputs=inputs_dict, outputs=[node.name])
-
-    @print_mapping_info
-    def MatMul(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        val_y = self.graph.get_input_node(node, idx=1, copy=True)
-        x_shape = val_x.out_shapes[0]
-        y_shape =
val_y.out_shapes[0] - inputs_dict = {"x": val_x.name, "y": val_y.name} - if len(y_shape) != 0 and y_shape[0] == 1 and len( - x_shape) != 0 and x_shape[-1] != 1 and x_shape[0] != 1: - y_squeeze = val_y.name + '_squeeze' - self.paddle_graph.add_layer( - "paddle.squeeze", - inputs={"x": val_y.name}, - outputs=[y_squeeze], - axis=[0]) - inputs_dict['y'] = y_squeeze - self.paddle_graph.add_layer( - "paddle.matmul", inputs=inputs_dict, outputs=[node.name]) - else: - self.paddle_graph.add_layer( - "paddle.matmul", inputs=inputs_dict, outputs=[node.name]) - - @print_mapping_info - def BatchNormalization(self, node): - op_name = name_generator("batchnorm", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_scale = self.graph.get_input_node(node, idx=1, copy=True) - val_b = self.graph.get_input_node(node, idx=2, copy=True) - val_mean = self.graph.get_input_node(node, idx=3, copy=True) - val_var = self.graph.get_input_node(node, idx=4, copy=True) - - momentum = node.get_attr('momentum', .9) - epsilon = node.get_attr('epsilon', 1e-5) - c = val_x.out_shapes[0][1] - - # solved the same data is used as an argument to multiple OPs. - _rename_or_remove_weight( - self.weights, - val_scale.name, - op_name + '.weight', - rename_mapper=self.rename_mapper) - _rename_or_remove_weight( - self.weights, - val_b.name, - op_name + '.bias', - rename_mapper=self.rename_mapper) - _rename_or_remove_weight( - self.weights, - val_var.name, - op_name + '._variance', - rename_mapper=self.rename_mapper) - _rename_or_remove_weight( - self.weights, - val_mean.name, - op_name + '._mean', - rename_mapper=self.rename_mapper) - - # Attribute: spatial is used in BatchNormalization-1,6,7 - spatial = bool(node.get_attr('spatial')) - layer_attrs = { - "num_channels": c, - "momentum": momentum, - "epsilon": epsilon, - "is_test": True, - "use_global_stats": False, - } - self.paddle_graph.add_layer( - "paddle.nn.BatchNorm", - inputs={"x": val_x.name}, - outputs=layer_outputs, - **layer_attrs) - - @print_mapping_info - def Transpose(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - s_len = len(val_x.out_shapes[0]) - perm_default = list(range(s_len)) - perm_default.reverse() - perm = node.get_attr('perm', perm_default) - self.paddle_graph.add_layer( - "paddle.transpose", - inputs={"x": val_x.name}, - outputs=[node.name], - perm=perm) - - @print_mapping_info - def PRelu(self, node): - op_name = name_generator("prelu", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_slope = self.graph.get_input_node(node, idx=1, copy=True) - - mode = 'channel' - shape_slope = val_slope.out_shapes[0] - if shape_slope == [1] * len(shape_slope): - mode = 'all' - - if mode == "element": - self.paddle_graph.add_layer( - "paddle.zeros", - inputs={}, - outputs=[output_name + "__zeros"], - shape=shape_slope, - dtype=string(node.dtype)) - self.paddle_graph.add_layer( - "paddle.maximum", - inputs={"x": val_x.name, - "y": output_name + "__zeros"}, - outputs=[output_name + "__max"]) - self.paddle_graph.add_layer( - "paddle.minimum", - inputs={"x": val_x.name, - "y": output_name + "__zeros"}, - outputs=[output_name + "__min"]) - self.paddle_graph.add_layer( - "paddle.multiply", - inputs={"x": val_slope.name, - "y": output_name + "__min"}, - outputs=[output_name + "__mul"]) - self.paddle_graph.add_layer( - "paddle.add", - inputs={ - "x": output_name + 
"__max", - "y": output_name + "__mul" - }, - outputs=[output_name]) - else: - if mode == 'channel': - slope_data = _const_weight_or_none(val_slope) - if slope_data is None: - self.paddle_graph.add_layer( - "paddle.reshape", - inputs={"x": val_slope.name}, - outputs=[val_slope.name], - shape=[shape_slope[0]]) - self.paddle_graph.add_layer( - "paddle.nn.functional.prelu", - inputs={"x": val_x.name, - "weight": val_slope.name}, - outputs=[node.name]) - return - _rename_or_remove_weight(self.weights, val_slope.name) - if len(shape_slope) > 1: - self.weights[op_name + '._weight'] = np.reshape( - slope_data, shape_slope[0]) - num_parameters = val_x.out_shapes[0][1] - else: - num_parameters = 1 - slope_data = self.weights[val_slope.name] - _rename_or_remove_weight(self.weights, val_slope.name) - self.weights[op_name + '._weight'] = np.reshape(slope_data, [1]) - self.paddle_graph.add_layer( - "paddle.nn.PReLU", - inputs={"x": val_x.name}, - outputs=layer_outputs, - num_parameters=num_parameters) - - @print_mapping_info - def Squeeze(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - axes = node.get_attr('axes') - if axes is None: - axes_node = self.graph.get_input_node(node, idx=1, copy=True) - axes = _const_weight_or_none(axes_node, necessary=True) - # deal with scalar(0D) tensor - if len(val_x.out_shapes[0]) <= 1 and len(axes) == 1 and axes[0] == 0: - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": val_x.name}, - outputs=[node.name], - dtype=string(val_x.dtype)) - else: - self.paddle_graph.add_layer( - "paddle.squeeze", - inputs={"x": val_x.name}, - outputs=[node.name], - axis=axes) - - @print_mapping_info - def Equal(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.equal", - inputs={'x': val_x.name, - 'y': val_y.name}, - outputs=[node.name]) - - @print_mapping_info - def Greater(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_y = self.graph.get_input_node(node, idx=1, copy=True) - self.paddle_graph.add_layer( - "paddle.greater_than", - inputs={'x': val_x.name, - 'y': val_y.name}, - outputs=[node.name]) - - @print_mapping_info - def Where(self, node): - condition = self.graph.get_input_node(node, idx=0, copy=True) - val_x = self.graph.get_input_node(node, idx=1, copy=True) - val_y = self.graph.get_input_node(node, idx=2, copy=True) - - self.paddle_graph.add_layer( - "paddle.where", - inputs={ - 'condition': condition.name, - 'x': val_x.name, - 'y': val_y.name - }, - outputs=[node.name]) - - @print_mapping_info - def NonZero(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - self.paddle_graph.add_layer( - "paddle.nonzero", - inputs={"x": val_x.name}, - outputs=[val_x.name], - as_tuple=True) - self.paddle_graph.add_layer( - "paddle.concat", inputs={"x": val_x.name}, outputs=[node.name]) - - @print_mapping_info - def Identity(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - self.paddle_graph.add_layer( - "paddle.assign", inputs={"x": val_x.name}, outputs=[node.name]) - - @print_mapping_info - def Tile(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_repeats = self.graph.get_input_node(node, idx=1, copy=True) - repeats = _const_weight_or_none(val_repeats) - - if repeats is None: - repeats = val_repeats.name - if val_repeats.dtype != 'int32': - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": repeats}, - 
outputs=["{}_tmp".format(repeats)], - dtype=string("int32")) - repeats = "{}_tmp".format(repeats) - - elif isinstance(repeats, int): - repeats = [repeats] - - elif type(repeats) is np.ndarray: - repeats = repeats.tolist() - - attr = { - 'expand_times': repeats, - "name": string(node.name), - } - self.paddle_graph.add_layer( - "paddle.tile", - inputs={"x": val_x.name}, - outputs=[node.name], - repeat_times=repeats) - - @print_mapping_info - def MaxPool(self, node): - op_name = name_generator("pool", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - auto_pad = node.get_attr('auto_pad', 'NOTSET') - assert node.get_attr( - "dilations") is None, 'only dilations = 0 is supported' # optional - - kernel_shape = node.get_attr("kernel_shape") - poolnd = len(kernel_shape) - strides = node.get_attr("strides") - pad_mode = node.get_attr("pads") - ceil_mode = bool(node.get_attr('ceil_mode', 0)) # optional - pads = node.get_attr('pads', [0] * (poolnd * 2)) # optional - paddle_op = 'paddle.nn.MaxPool{}D'.format(poolnd) - assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported' - - paddings, val_x = self._pad_if_asymmetric(node, pads, val_x) - - if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER": - input_shape = val_x.out_shapes[0] - pad_h = _get_same_padding(input_shape[2], kernel_shape[0], - strides[0], auto_pad) - pad_w = _get_same_padding(input_shape[3], kernel_shape[1], - strides[1], auto_pad) - paddings = pad_h + pad_w - - layer_attrs = { - "kernel_size": kernel_shape, - "stride": strides, - "padding": paddings, - "ceil_mode": ceil_mode, - } - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x if isinstance(val_x, str) else val_x.name}, - outputs=layer_outputs, - **layer_attrs) - - @print_mapping_info - def GlobalMaxPool(self, node): - op_name = name_generator("pool", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - input_shape = val_x.out_shapes[0] - if len(input_shape) == 4: - poolnd = 2 - elif len(input_shape) == 5: - poolnd = 3 - elif len(input_shape) == 3: - poolnd = 1 - paddle_op = 'paddle.nn.AdaptiveMaxPool{}D'.format(poolnd) - assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported' - output_shape = node.out_shapes[0] - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x.name}, - outputs=layer_outputs, - output_size=output_shape[2:]) - - @print_mapping_info - def Neg(self, node): - import paddle - val_x = self.graph.get_input_node(node, idx=0, copy=True) - v0, v1, v2 = paddle.__version__.split('.') - if int(v0) >= 2 and int(v1) >= 2: - self.paddle_graph.add_layer( - "paddle.neg", inputs={'x': val_x.name}, outputs=[node.name]) - else: - val_y = node.name + "_y" - dtype = np.dtype(val_x.dtype) - self.paddle_graph.add_layer( - "paddle.full", - inputs={}, - outputs=[val_y], - dtype=string(dtype), - shape=[1], - fill_value=-1) - self.paddle_graph.add_layer( - "paddle.multiply", - inputs={'x': val_x.name, - 'y': val_y}, - outputs=[node.name]) - - @print_mapping_info - def SpaceToDepth(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - blocksize = node.get_attr('blocksize') - val_x_shape = val_x.out_shapes[0] - b, c, h, w = val_x_shape - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": val_x.name}, - outputs=[node.name], - shape=[b, c, h // blocksize, blocksize, w // blocksize, blocksize]) - 
self.paddle_graph.add_layer( - 'paddle.transpose', - inputs={"x": node.name}, - outputs=[node.name], - perm=[0, 3, 5, 1, 2, 4]) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": node.name}, - outputs=[node.name], - shape=[b, c * (blocksize**2), h // blocksize, w // blocksize]) - - @print_mapping_info - def GatherElements(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - indices = self.graph.get_input_node(node, idx=1, copy=True) - axis = node.get_attr('axis') - val_x_shape = val_x.out_shapes[0] - indices_shape = indices.out_shapes[0] - axis = axis if axis >= 0 else axis + len(val_x_shape) - if axis == 0: - axis_perm = [i for i in range(len(val_x_shape))] - data_swaped = val_x.name - index_swaped = indices.name - else: - axis_perm = [i for i in range(len(val_x_shape))] - axis_perm[axis] = 0 - axis_perm[0] = axis - data_swaped = val_x.name + "_transpose" - self.paddle_graph.add_layer( - "paddle.transpose", - inputs={'x': val_x.name}, - perm=axis_perm, - outputs=[data_swaped]) - index_swaped = indices.name + "_transpose" - self.paddle_graph.add_layer( - "paddle.transpose", - inputs={'x': indices.name}, - perm=axis_perm, - outputs=[index_swaped]) - temp = indices_shape[0] - indices_shape[0] = indices_shape[axis] - indices_shape[axis] = temp - - idx_tensors_per_axis_pre = [ - indices_shape[i] for i in range(len(indices_shape)) - ] - name_list = list() - for i in range(len(idx_tensors_per_axis_pre)): - tensor_name = val_x.name + "_meshgrid_" + str(i) - self.paddle_graph.add_layer( - kernel="paddle.linspace", - inputs={}, - outputs=[tensor_name], - start=0, - stop=idx_tensors_per_axis_pre[i] - 1, - num=idx_tensors_per_axis_pre[i]) - name_list.append(tensor_name) - - self.paddle_graph.add_layer( - "paddle.meshgrid", inputs=name_list, outputs=name_list) - - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": index_swaped}, - outputs=[index_swaped], - dtype=string("float32")) - import copy - copy_name_list = copy.copy(name_list) - copy_name_list[0] = index_swaped - new_name_list = list() - for i in range(len(copy_name_list)): - unsqueeze_name = copy_name_list[i] + "_unsqueeze" - self.paddle_graph.add_layer( - "paddle.unsqueeze", - inputs={"x": copy_name_list[i]}, - axis=-1, - outputs=[unsqueeze_name]) - new_name_list.append(unsqueeze_name) - concat_name = val_x.name + "_concated_layer" - self.paddle_graph.add_layer( - "paddle.concat", - inputs={'x': new_name_list}, - axis=-1, - outputs=[concat_name]) - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": concat_name}, - outputs=[concat_name], - dtype=string("int32")) - gather_nd_name = "gather_nd_layer" - self.paddle_graph.add_layer( - "paddle.gather_nd", - inputs={'x': data_swaped, - "index": concat_name}, - outputs=[gather_nd_name]) - - self.paddle_graph.add_layer( - "paddle.transpose", - inputs={'x': gather_nd_name}, - perm=axis_perm, - outputs=[node.name]) - - @print_mapping_info - def GlobalAveragePool(self, node): - op_name = name_generator("pool", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - input_shape = val_x.out_shapes[0] - if len(input_shape) == 4: - poolnd = 2 - elif len(input_shape) == 5: - poolnd = 3 - elif len(input_shape) == 3: - poolnd = 1 - paddle_op = 'paddle.nn.AdaptiveAvgPool{}D'.format(poolnd) - assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported' - output_shape = node.out_shapes[0] - self.paddle_graph.add_layer( - paddle_op, - inputs={'x': val_x.name}, 
- outputs=layer_outputs, - output_size=output_shape[2:]) - - @print_mapping_info - def Conv(self, node): - output_name = node.name - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_w = self.graph.get_input_node(node, idx=1, copy=True) - - if val_w.name in self.weights.keys(): - op_name = name_generator("conv", self.nn_name2id) - else: - op_name = output_name - - layer_outputs = [op_name, output_name] - - has_bias = len(node.layer.input) == 3 - if has_bias: - val_b = self.graph.get_input_node(node, idx=2, copy=True) - auto_pad = node.get_attr('auto_pad', 'NOTSET') - - kernel_shape = node.get_attr('kernel_shape') - convnd = len(kernel_shape) - assert 2 <= convnd <= 3, 'only Conv2D and Conv3D is supported' - num_out_channels = val_w.out_shapes[0][0] - num_in_channels = val_w.out_shapes[0][1] - paddle_op = 'paddle.nn.Conv{}D'.format(convnd) - - num_groups = node.get_attr('group', 1) - strides = node.get_attr('strides', [1] * convnd) - dilations = node.get_attr('dilations', [1] * convnd) - pads = node.get_attr('pads', [0] * (convnd * 2)) - - input_shape = val_x.out_shapes[0] - paddings = np.array(pads).reshape((2, -1)).transpose().astype("int32") - paddings = paddings.flatten().tolist() - - if auto_pad in ["SAME_UPPER", "SAME_LOWER"]: - # Warning: SAME_UPPER and SAME_LOWER does not yet support dynamic shapes - if input_shape[2] == -1 or input_shape[3] == -1: - _logger.warning( - 'SAME_UPPER and SAME_LOWER does not yet support dynamic shapes, the conversion result may have a diff!!!' - ) - pad_h = _get_same_padding(input_shape[2], kernel_shape[0], - strides[0], auto_pad) - pad_w = _get_same_padding(input_shape[3], kernel_shape[1], - strides[1], auto_pad) - paddings = pad_h + pad_w - - layer_inputs = {'x': val_x if isinstance(val_x, str) else val_x.name} - if val_w.name not in self.weights.keys(): - layer_attrs = { - "stride": strides, - "padding": paddings, - "dilation": dilations, - "groups": num_groups, - } - layer_inputs['weight'] = val_w.name - if has_bias: - layer_inputs['bias'] = val_b.name - - paddle_op = 'paddle.nn.functional.conv{}d'.format(convnd) - self.paddle_graph.add_layer( - paddle_op, - inputs=layer_inputs, - outputs=[node.name], - **layer_attrs) - return - - layer_attrs = { - "in_channels": num_in_channels * num_groups, - "out_channels": num_out_channels, - "kernel_size": kernel_shape, - "stride": strides, - "padding": paddings, - "dilation": dilations, - "groups": num_groups, - } - remove_weight = True if val_w.name in self.done_weight_list else False - if remove_weight: - self.done_weight_list.append(val_w.name) - _rename_or_remove_weight( - self.weights, - val_w.name, - op_name + '.weight', - remove_weight, - rename_mapper=self.rename_mapper) - if has_bias: - remove_bias = True if val_b.name in self.done_weight_list else False - if remove_bias: - self.done_weight_list.append(val_b.name) - _rename_or_remove_weight( - self.weights, - val_b.name, - op_name + '.bias', - remove_bias, - rename_mapper=self.rename_mapper) - else: - layer_attrs["bias_attr"] = False - if reduce(lambda x, y: x * y, - input_shape) in [1, -1] and 1 not in input_shape: - input_shape[1] = num_in_channels * num_groups - input_shape[0] = 0 - input_shape[2] = 0 - self.paddle_graph.add_layer( - "paddle.reshape", - inputs=layer_inputs, - outputs=[layer_inputs["x"]], - shape=input_shape) - self.paddle_graph.add_layer( - paddle_op, - inputs=layer_inputs, - outputs=layer_outputs, - **layer_attrs) - - @print_mapping_info - def ConvTranspose(self, node): - output_name = node.name - val_x = 
self.graph.get_input_node(node, idx=0, copy=True)
-        val_w = self.graph.get_input_node(node, idx=1, copy=True)
-
-        if val_w.name in self.weights.keys():
-            op_name = name_generator("conv_trans", self.nn_name2id)
-        else:
-            op_name = output_name
-
-        layer_outputs = [op_name, output_name]
-
-        val_b = None
-        if len(node.layer.input) > 2:
-            val_b = self.graph.get_input_node(node, idx=2, copy=True)
-        auto_pad = node.get_attr('auto_pad', 'NOTSET')
-        out_padding = node.get_attr('output_padding', [0, 0])
-        kernel_shape = node.get_attr('kernel_shape')
-        assert kernel_shape, 'kernel_shape not inferred'
-        convnd = len(kernel_shape)
-        assert 2 <= convnd <= 3, 'only Conv2DTranspose and Conv3DTranspose supported'
-        num_in_channels = val_w.out_shapes[0][0]
-        num_out_channels = val_w.out_shapes[0][1]
-        paddle_op = 'paddle.nn.Conv{}DTranspose'.format(convnd)
-
-        num_groups = node.get_attr('group', 1)
-        strides = node.get_attr('strides', [1] * convnd)
-        dilations = node.get_attr('dilations', [1] * convnd)
-        output_size = node.get_attr('output_shape', [])
-        pads = node.get_attr('pads', [0] * (convnd * 2))
-
-        paddings = np.array(pads).reshape((2, -1)).transpose().astype("int32")
-        paddings = paddings.flatten().tolist()
-
-        if len(output_size) != 0:
-            paddings = [0] * 4
-            total_paddings = list()
-            total_paddings.append((val_x.out_shapes[0][2] - 1) * strides[
-                0] + dilations[0] * (kernel_shape[0] - 1) + 1 + out_padding[0] -
-                                  output_size[0])
-            total_paddings.append((val_x.out_shapes[0][3] - 1) * strides[
-                1] + dilations[1] * (kernel_shape[1] - 1) + 1 + out_padding[1] -
-                                  output_size[1])
-            if auto_pad == "SAME_UPPER":
-                for i in range(len(total_paddings)):
-                    paddings[2 * i] = total_paddings[i] - total_paddings[i] // 2
-                    paddings[2 * i + 1] = total_paddings[i] // 2
-            else:
-                for i in range(len(total_paddings)):
-                    paddings[2 * i] = total_paddings[i] // 2
-                    paddings[2 * i + 1] = total_paddings[i] - total_paddings[i] // 2
-        else:
-            output_size = [0, 0]
-
-            output_size[0] = (
-                val_x.out_shapes[0][2] - 1
-            ) * strides[0] - 2 * paddings[0] + dilations[0] * (
-                kernel_shape[0] - 1) + 1 + out_padding[0]
-            output_size[1] = (
-                val_x.out_shapes[0][3] - 1
-            ) * strides[1] - 2 * paddings[1] + dilations[1] * (
-                kernel_shape[1] - 1) + 1 + out_padding[1]
-
-        # Conv2DTranspose lacks output_size; it can only be passed in at
-        # forward time
-        inputs_dict = {'x': val_x if isinstance(val_x, str) else val_x.name}
-        if val_w.name not in self.weights.keys():
-            layer_attrs = {
-                "stride": strides,
-                "dilation": dilations,
-                "padding": paddings,
-                "groups": num_groups,
-                "output_padding": out_padding
-            }
-            paddle_op = 'paddle.nn.functional.conv{}d_transpose'.format(convnd)
-
-            inputs_dict['weight'] = val_w.name
-            if len(node.layer.input) > 2:
-                inputs_dict['bias'] = val_b.name
-
-            self.paddle_graph.add_layer(
-                paddle_op,
-                inputs=inputs_dict,
-                outputs=[node.name],
-                **layer_attrs)
-            return
-
-        layer_attrs = {
-            "in_channels": num_in_channels,
-            "out_channels": num_out_channels * num_groups,
-            "kernel_size": kernel_shape,
-            "stride": strides,
-            "dilation": dilations,
-            "padding": paddings,
-            "groups": num_groups,
-            "output_padding": out_padding
-        }
-
-        _rename_or_remove_weight(
-            self.weights,
-            val_w.name,
-            op_name + '.weight',
-            rename_mapper=self.rename_mapper)
-        if val_b is not None:
-            _rename_or_remove_weight(
-                self.weights,
-                val_b.name,
-                op_name + '.bias',
-                rename_mapper=self.rename_mapper)
-        else:
-            layer_attrs["bias_attr"] = False
-        self.paddle_graph.add_layer(
-            kernel=paddle_op,
-            inputs=inputs_dict,
-            outputs=layer_outputs,
-
**layer_attrs) - - @print_mapping_info - def ArgMax(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - axis = node.get_attr('axis') - keepdims = False if node.get_attr('keepdims') == 0 else True - layer_attrs = {'axis': axis, 'keepdim': keepdims} - self.paddle_graph.add_layer( - 'paddle.argmax', - inputs={"x": val_x.name}, - outputs=[node.name], - **layer_attrs) - - @print_mapping_info - def Size(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - self.paddle_graph.add_layer( - "paddle.shape", inputs={"input": val_x.name}, outputs=[node.name]) - self.paddle_graph.add_layer( - 'paddle.cast', - inputs={"x": node.name}, - outputs=[node.name], - dtype=string('int64')) - self.paddle_graph.add_layer( - "paddle.prod", inputs={"x": node.name}, outputs=[node.name]) - - @print_mapping_info - def Sign(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - if node.dtype not in ["float16", "float32", "float64"]: - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": val_x.name}, - outputs=[val_x.name], - dtype=string("float32")) - self.paddle_graph.add_layer( - "paddle.sign", inputs={"x": val_x.name}, outputs=[node.name]) - if node.dtype not in ["float16", "float32", "float64"]: - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": node.name}, - outputs=[node.name], - dtype=string(node.dtype)) - - @print_mapping_info - def OneHot(self, node): - nn_op_name = name_generator("onehot", self.nn_name2id) - output_name = node.name - layer_outputs = [nn_op_name, output_name] - indices = self.graph.get_input_node(node, idx=0, copy=True) - depth = self.graph.get_input_node(node, idx=1, copy=True) - values = self.graph.get_input_node(node, idx=2, copy=True) - axis = node.get_attr('axis', -1) - self.paddle_graph.add_layer( - "custom_layer:OneHot", - inputs={ - "indices": indices.name, - "depth": depth.name, - "values": values.name - }, - outputs=layer_outputs, - axis=axis) - - @print_mapping_info - def Reciprocal(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - self.paddle_graph.add_layer( - "paddle.reciprocal", inputs={"x": val_x.name}, outputs=[node.name]) - - @print_mapping_info - def LSTM(self, node): - x = self.graph.get_input_node(node, idx=0, copy=True) - input_weight = self.graph.get_input_node(node, idx=1, copy=True) - hidden_weight = self.graph.get_input_node(node, idx=2, copy=True) - - input_nums = len(node.layer.input) - exist_input_nums = 3 - have_bias = False - if input_nums > 3 and node.layer.input[3] != '': - bias = self.graph.get_input_node( - node, idx=exist_input_nums, copy=True) - have_bias = True - exist_input_nums += 1 - if input_nums > 4 and node.layer.input[4] != '': - sequence_lens = self.graph.get_input_node( - node, idx=exist_input_nums, copy=True) - exist_input_nums += 1 - if input_nums > 5 and node.layer.input[5] != '': - init_h = self.graph.get_input_node( - node, idx=exist_input_nums, copy=True) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": init_h.name}, - outputs=[init_h.name], - shape=init_h.out_shapes[0]) - exist_input_nums += 1 - if input_nums > 6 and node.layer.input[6] != '': - init_c = self.graph.get_input_node( - node, idx=exist_input_nums, copy=True) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": init_c.name}, - outputs=[init_c.name], - shape=init_c.out_shapes[0]) - - input_weight_np = _const_weight_or_none(input_weight) - _rename_or_remove_weight(self.weights, input_weight.name) - hidden_size = node.get_attr('hidden_size', 
input_weight_np.shape[1] / 4) - input_size = input_weight_np.shape[2] - hidden_weight_np = _const_weight_or_none(hidden_weight) - _rename_or_remove_weight(self.weights, hidden_weight.name) - bias_np = _const_weight_or_none(bias) - _rename_or_remove_weight(self.weights, bias.name) - input_bias_np = bias_np[:, :4 * hidden_size] - hidden_bias_np = bias_np[:, 4 * hidden_size:] - - # parameters order in paddle:lstm: - # 1. gate order in paddle is: input, forget, cell, output. - # 2. gate orfer in onnx is: input, output, forget, cell. - - def reform_weights(w, n, intervals): - slices = [w[:, x * n:y * n] for x, y in intervals] - return np.concatenate(slices, axis=1) - - def transform_weight_with_bias(weights, n, intervals): - return [reform_weights(w, n, intervals) for w in weights] - - reform_permutation = [(0, 1), (2, 4), (1, 2)] - - weights = transform_weight_with_bias( - [input_weight_np, hidden_weight_np, input_bias_np, hidden_bias_np], - hidden_size, reform_permutation) - - op_name = name_generator("lstm", self.nn_name2id) - y_out = node.output(0) - yh_out = node.output(1) - yc_out = node.output(2) - direction = node.get_attr('direction', 'forward') - - def generate_paddle_param_names(op_name, suffix=''): - param_names = [] - param_names.extend(['{}.weight_ih_l0{}', '{}.weight_hh_l0{}']) - if have_bias != False: param_names.append('{}.bias_ih_l0{}') - if have_bias != False: param_names.append('{}.bias_hh_l0{}') - param_names = [x.format(op_name, suffix) for x in param_names] - return param_names - - def assign_params(op_name, weights, weight_idx=0, suffix=''): - param_names = generate_paddle_param_names(op_name, suffix) - for param_name, weight in zip(param_names, weights): - self.weights[param_name] = weight[weight_idx] - - if direction == 'backward': - raise Exception( - "LSTM support 'forward' or 'bidirectional', except '{}'.". 
- format(direction)) - else: - assign_params(op_name, weights) - if direction == 'bidirectional': - assign_params(op_name, weights, 1, '_reverse') - - self.paddle_graph.add_layer( - 'paddle.nn.LSTM', - inputs={ - 'input': x.name, - 'initial_states': (init_h.name, init_c.name) - }, - outputs=[op_name, y_out, yh_out, yc_out], - input_size=input_size, - hidden_size=hidden_size, - num_layers=1, - direction=string(direction), - time_major=True) - - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": y_out}, - outputs=[y_out], - shape=[0, 0, -1, hidden_size]) - self.paddle_graph.add_layer( - 'paddle.transpose', - inputs={"x": y_out}, - outputs=[y_out], - perm=[0, 2, 1, 3]) - @print_mapping_info - def TopK(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - val_k = self.graph.get_input_node(node, idx=1, copy=True) - layer_attrs = dict() - layer_attrs["axis"] = node.get_attr('axis', -1) - layer_attrs["largest"] = True if node.get_attr('largest', - 1) == 1 else False - layer_attrs["sorted"] = True if node.get_attr('sorted', - 1) == 1 else False - k = _const_weight_or_none(val_k) - if isinstance(k, (list, tuple, np.ndarray)): - k = k[0] - # If k can get the value directly, it is used as an attribute; otherwise it is used as an input tensor - if k is not None: - layer_attrs["k"] = k - self.paddle_graph.add_layer( - "paddle.topk", - inputs={"x": val_x.name}, - outputs=[ - "{}_p{}".format(node.layer_name, 0), - "{}_p{}".format(node.layer_name, 1) - ], - **layer_attrs) - else: - if val_k.dtype != "int32": - self.paddle_graph.add_layer( - "paddle.cast", - inputs={"x": val_k.name}, - outputs=[val_k.name], - dtype=string('int32')) - self.paddle_graph.add_layer( - "paddle.topk", - inputs={"x": val_x.name, - "k": val_k.name}, - outputs=[ - "{}_p{}".format(node.layer_name, 0), - "{}_p{}".format(node.layer_name, 1) - ], - **layer_attrs) + min_opset_versions = list() + for op_name in config["op_names"]: + min_opset_versions.append(min_opset_version_map[op_name]) + config["min_opset_version"] = min_opset_versions - @print_mapping_info - def LRN(self, node): - op_name = name_generator("lrn", self.nn_name2id) - output_name = node.name - layer_outputs = [op_name, output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - alpha = node.get_attr('alpha', 0.0001) - beta = node.get_attr('beta', 0.75) - bias = node.get_attr('bias', 1.0) - size = node.get_attr('size') - layer_attrs = {'size': size, 'alpha': alpha, 'beta': beta, 'k': bias} - self.paddle_graph.add_layer( - "paddle.nn.LocalResponseNorm", - inputs={"x": val_x.name}, - outputs=layer_outputs, - **layer_attrs) + attrs = {} - @print_mapping_info - def DepthToSpace(self, node): - val_x = self.graph.get_input_node(node, idx=0, copy=True) - blocksize = node.get_attr('blocksize') - mode = node.get_attr('mode', "DCR") - val_x_shape = val_x.out_shapes[0] - b, c, h, w = val_x_shape - if mode == "DCR": - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": val_x.name}, - outputs=[node.name], - shape=[b, blocksize, blocksize, c // (blocksize**2), h, w]) - self.paddle_graph.add_layer( - 'paddle.transpose', - inputs={"x": node.name}, - outputs=[node.name], - perm=[0, 3, 4, 1, 5, 2]) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": node.name}, - outputs=[node.name], - shape=[b, c // (blocksize**2), h * blocksize, w * blocksize]) - else: - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": val_x.name}, - outputs=[node.name], - shape=[b, c // (blocksize**2), blocksize, blocksize, h, w]) 
- self.paddle_graph.add_layer( - 'paddle.transpose', - inputs={"x": node.name}, - outputs=[node.name], - perm=[0, 1, 4, 2, 5, 3]) - self.paddle_graph.add_layer( - 'paddle.reshape', - inputs={"x": node.name}, - outputs=[node.name], - shape=[b, c // (blocksize**2), h * blocksize, w * blocksize]) + return (config, attrs) - @print_mapping_info - def NonMaxSuppression(self, node): - nn_op_name = name_generator("nms", self.nn_name2id) - output_name = node.name - layer_outputs = [nn_op_name, output_name] - boxes = self.graph.get_input_node(node, idx=0, copy=True) - scores = self.graph.get_input_node(node, idx=1, copy=True) - inputs_len = len(node.layer.input) - layer_attrs = dict() - layer_attrs["keep_top_k"] = -1 - layer_attrs["nms_threshold"] = 0.0 - layer_attrs["score_threshold"] = 0.0 - if inputs_len > 2: - max_output_boxes_per_class = self.graph.get_input_node( - node, idx=2, copy=True) - max_output_boxes_per_class = _const_weight_or_none( - max_output_boxes_per_class) - if len(scores.out_shapes[0]) != 0: - num_classes = scores.out_shapes[0][1] - else: - num_classes = 1 - if max_output_boxes_per_class is not None: - max_output_boxes_per_class = max_output_boxes_per_class.tolist() - if isinstance(max_output_boxes_per_class, int): - layer_attrs[ - "keep_top_k"] = max_output_boxes_per_class * num_classes - else: - layer_attrs["keep_top_k"] = max_output_boxes_per_class[ - 0] * num_classes - if inputs_len > 3: - iou_threshold = self.graph.get_input_node(node, idx=3, copy=True) - layer_attrs["nms_threshold"] = _const_weight_or_none( - iou_threshold).tolist()[0] - if inputs_len > 4: - score_threshold = self.graph.get_input_node(node, idx=4, copy=True) - layer_attrs["score_threshold"] = _const_weight_or_none( - score_threshold).tolist()[0] - self.paddle_graph.add_layer( - "custom_layer:NMS", - inputs={"bboxes": boxes.name, - "scores": scores.name}, - outputs=layer_outputs, - **layer_attrs) + def test(self): + self.run_and_statis(max_examples=50) - @print_mapping_info - def ReduceL1(self, node): - output_name = node.name - layer_outputs = [output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - axes = node.get_attr('axes') - keepdims = False if node.get_attr('keepdims') == 0 else True - layer_attrs = {'p': 1, 'axis': axes, 'keepdim': keepdims} - self.paddle_graph.add_layer( - "paddle.norm", - inputs={"x": val_x.name}, - outputs=layer_outputs, - **layer_attrs) - @print_mapping_info - def ReduceL2(self, node): - output_name = node.name - layer_outputs = [output_name] - val_x = self.graph.get_input_node(node, idx=0, copy=True) - axes = node.get_attr('axes') - keepdims = False if node.get_attr('keepdims') == 0 else True - layer_attrs = {'p': 2, 'axis': axes, 'keepdim': keepdims} - self.paddle_graph.add_layer( - "paddle.norm", - inputs={"x": val_x.name}, - outputs=layer_outputs, - **layer_attrs) +if __name__ == "__main__": + unittest.main() From 63e2f4f369b368f50f98663c557e75b1e9f6c086 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:22:55 +0800 Subject: [PATCH 016/101] Delete test_auto_scan_one_input_ops_float32.py --- .../test_auto_scan_one_input_ops_float32.py | 85 ------------------- 1 file changed, 85 deletions(-) delete mode 100644 tests/onnx/test_auto_scan_one_input_ops_float32.py diff --git a/tests/onnx/test_auto_scan_one_input_ops_float32.py b/tests/onnx/test_auto_scan_one_input_ops_float32.py deleted file mode 100644 index 97e1f697a..000000000 --- a/tests/onnx/test_auto_scan_one_input_ops_float32.py +++ 
/dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from auto_scan_test import OPConvertAutoScanTest -from hypothesis import reproduce_failure -import hypothesis.strategies as st -import onnx -from onnx import helper -from onnx import TensorProto -import numpy as np -import unittest -import random - -min_opset_version_map = { - "IsInf": 10, - "Elu": 7, - "IsNaN": 9, - "Log": 7, - "Cosh": 9, - "Cos": 7, - "Atan": 7, - "Asinh": 9, - "Asin": 7, - "Acosh": 9, - "Acos": 7, - "Exp": 7, - "Floor": 7 -} - - -class TestIsinfConcert(OPConvertAutoScanTest): - """ - ONNX op: elementwise ops - OPset version: 7~15 - """ - - def sample_convert_config(self, draw): - input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=3, max_size=5)) - - input_dtype = draw(st.sampled_from(["float32"])) - - config = { - "op_names": [ - "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", - "Cosh", "Exp", "Floor" - ], - "test_data_shapes": [input_shape], - "test_data_types": [input_dtype], - "inputs_shape": [input_shape], - "min_opset_version": 7, - "inputs_name": ["x"], - "outputs_name": ["y"], - "delta": 1e-4, - "rtol": 1e-4 - } - - min_opset_versions = list() - for op_name in config["op_names"]: - min_opset_versions.append(min_opset_version_map[op_name]) - config["min_opset_version"] = min_opset_versions - - attrs = {} - - return (config, attrs) - - def test(self): - self.run_and_statis(max_examples=50) - - -if __name__ == "__main__": - unittest.main() From 4e3bd88bf965bc6a9c1a89f744cb37fd6d44f38f Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:23:27 +0800 Subject: [PATCH 017/101] Create test_auto_scan_one_input_ops_float32.py --- .../test_auto_scan_one_input_ops_float32.py | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 tests/onnx/test_auto_scan_one_input_ops_float32.py diff --git a/tests/onnx/test_auto_scan_one_input_ops_float32.py b/tests/onnx/test_auto_scan_one_input_ops_float32.py new file mode 100644 index 000000000..97e1f697a --- /dev/null +++ b/tests/onnx/test_auto_scan_one_input_ops_float32.py @@ -0,0 +1,85 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st +import onnx +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + +min_opset_version_map = { + "IsInf": 10, + "Elu": 7, + "IsNaN": 9, + "Log": 7, + "Cosh": 9, + "Cos": 7, + "Atan": 7, + "Asinh": 9, + "Asin": 7, + "Acosh": 9, + "Acos": 7, + "Exp": 7, + "Floor": 7 +} + + +class TestIsinfConcert(OPConvertAutoScanTest): + """ + ONNX op: elementwise ops + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + + config = { + "op_names": [ + "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", + "Cosh", "Exp", "Floor" + ], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + min_opset_versions = list() + for op_name in config["op_names"]: + min_opset_versions.append(min_opset_version_map[op_name]) + config["min_opset_version"] = min_opset_versions + + attrs = {} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) + + +if __name__ == "__main__": + unittest.main() From f93208c6d57ae8956379e506b919a7162a040dc7 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Tue, 2 Aug 2022 19:54:10 +0800 Subject: [PATCH 018/101] Update opset_legacy.py --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index 055749bc4..fbad2eed3 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -194,6 +194,14 @@ def __init__(self, decoder, paddle_graph): 'Erf': ['paddle.erf'], 'Sin': ['paddle.sin'], 'Cos': ['paddle.cos'], + 'Atan': ['paddle.atan'], + 'Acos': ['paddle.acos'], + 'Asin': ['paddle.asin'], + 'IsInf':['paddle.isinf'], + 'IsNaN':['paddle.isnan'], + 'Cosh': ['paddle.cosh'], + 'Acosh': ['paddle.acosh'], + 'Asinh': ['paddle.asinh'], } @print_mapping_info From 6b7ae0894bb0d2797f3e78e57f407a75e218659f Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 15:17:21 +0800 Subject: [PATCH 019/101] test --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index fbad2eed3..f65a9fb87 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -197,11 +197,12 @@ def __init__(self, decoder, paddle_graph): 'Atan': ['paddle.atan'], 'Acos': ['paddle.acos'], 'Asin': ['paddle.asin'], - 'IsInf':['paddle.isinf'], - 'IsNaN':['paddle.isnan'], + 'IsInf': ['paddle.isinf'], + 'IsNaN': ['paddle.isnan'], 'Cosh': ['paddle.cosh'], 'Acosh': ['paddle.acosh'], 'Asinh': ['paddle.asinh'], + 'Tan': ['paddle.tan'], } @print_mapping_info @@ -1634,7 +1635,7 @@ def Gemm(self, node): val_a = self.graph.get_input_node(node, idx=0, copy=True) val_b = self.graph.get_input_node(node, idx=1, copy=True) val_c = self.graph.get_input_node(node, idx=2, copy=True) - + alpha = node.get_attr('alpha', 1.) 
# optional beta = node.get_attr('beta', 1.) # optional trans_a = bool(node.get_attr('transA', 0)) # optional From d359f2ce4bd6ccc1ef7368c2e362d9284dc091af Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 15:20:44 +0800 Subject: [PATCH 020/101] add test of Tan --- tests/onnx/test_auto_scan_one_input_ops_float32.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/onnx/test_auto_scan_one_input_ops_float32.py b/tests/onnx/test_auto_scan_one_input_ops_float32.py index 97e1f697a..15762edda 100644 --- a/tests/onnx/test_auto_scan_one_input_ops_float32.py +++ b/tests/onnx/test_auto_scan_one_input_ops_float32.py @@ -35,7 +35,8 @@ "Acosh": 9, "Acos": 7, "Exp": 7, - "Floor": 7 + "Floor": 7, + "Tan": 7 } @@ -56,7 +57,7 @@ def sample_convert_config(self, draw): config = { "op_names": [ "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", - "Cosh", "Exp", "Floor" + "Cosh", "Exp", "Floor", "Tan" ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From 2383ae7d19e50b4759ee605153741bf19955fa7d Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 16:05:31 +0800 Subject: [PATCH 021/101] rename --- tests/onnx/test_auto_scan_unarray_ops.py | 86 ++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 tests/onnx/test_auto_scan_unarray_ops.py diff --git a/tests/onnx/test_auto_scan_unarray_ops.py b/tests/onnx/test_auto_scan_unarray_ops.py new file mode 100644 index 000000000..ebfc53a2a --- /dev/null +++ b/tests/onnx/test_auto_scan_unarray_ops.py @@ -0,0 +1,86 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st +import onnx +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + +min_opset_version_map = { + "IsInf": 10, + "Elu": 7, + "IsNaN": 9, + "Log": 7, + "Cosh": 9, + "Cos": 7, + "Atan": 7, + "Asinh": 9, + "Asin": 7, + "Acosh": 9, + "Acos": 7, + "Exp": 7, + "Floor": 7, + "Tan": 7, +} + + +class TestUnarrayOpsConcert(OPConvertAutoScanTest): + """ + ONNX op: unarray ops + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + + config = { + "op_names": [ + "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", + "Cosh", "Exp", "Floor", "Tan" + ], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + min_opset_versions = list() + for op_name in config["op_names"]: + min_opset_versions.append(min_opset_version_map[op_name]) + config["min_opset_version"] = min_opset_versions + + attrs = {} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) + + +if __name__ == "__main__": + unittest.main() From 0e0e2877548bd9d39032269f23c8a58be85244bd Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 16:08:02 +0800 Subject: [PATCH 022/101] add ops in directly map --- x2paddle/op_mapper/onnx2paddle/opset7.py | 66 ++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py index 444446519..e3cbe1167 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset7.py +++ b/x2paddle/op_mapper/onnx2paddle/opset7.py @@ -13,6 +13,7 @@ # limitations under the License. from .opset_legacy import OpSet +import sys def print_mapping_info(func): @@ -32,6 +33,71 @@ def run_mapping(*args, **kwargs): class OpSet7(OpSet): def __init__(self, decoder, paddle_graph): super(OpSet7, self).__init__(decoder, paddle_graph) + self.directly_map_ops = { + 'Ceil': ['paddle.ceil'], + # reduce function + 'ReduceMean': [ + 'paddle.mean', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdims=True) + ], + 'ReduceMin': [ + 'paddle.min', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdim=True) + ], + 'ReduceMax': [ + 'paddle.max', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdim=True) + ], + 'ReduceProd': [ + 'paddle.prod', dict( + axes='axis', keepdims='keepdim'), dict( + axes=None, keepdim=True) + ], + # active function + 'Relu': ['paddle.nn.ReLU'], + 'LeakyRelu': [ + 'paddle.nn.LeakyReLU', dict(alpha='negative_slope'), + dict(negative_slope=.01) + ], + 'Elu': + ['paddle.nn.functional.elu', dict(alpha='alpha'), dict(alpha=1.)], + 'ThresholdedRelu': [ + 'paddle.nn.functional.thresholded_relu', + dict(alpha='threshold'), dict(alpha=1.) 
+ ], + 'Tanh': ['paddle.nn.Tanh'], + 'Sigmoid': ['paddle.nn.Sigmoid'], + 'Softsign': ['paddle.nn.Softsign'], + 'Softplus': [ + 'paddle.nn.Softplus', dict(threshold='threshold'), + dict(threshold=float(sys.maxsize)) + ], + 'Exp': ['paddle.exp'], + 'Log': ['paddle.log'], + 'LogSoftmax': [ + 'paddle.nn.functional.log_softmax', dict(axis='axis'), + dict(axis=1) + ], + 'Softmax': ['paddle.nn.Softmax', dict(axis='axis'), dict(axis=1)], + 'Sqrt': ['paddle.sqrt'], + 'Floor': ['paddle.floor'], + 'Abs': ['paddle.abs'], + 'Erf': ['paddle.erf'], + 'Sin': ['paddle.sin'], + 'Cos': ['paddle.cos'], + 'Atan': ['paddle.atan'], + 'Acos': ['paddle.acos'], + 'Asin': ['paddle.asin'], + 'IsInf': ['paddle.isinf'], + 'IsNaN': ['paddle.isnan'], + 'Cosh': ['paddle.cosh'], + 'Acosh': ['paddle.acosh'], + 'Asinh': ['paddle.asinh'], + 'Tan': ['paddle.tan'], + } @print_mapping_info def Unsqueeze(self, node): From 4e75ad1f328cbeadef2cc8d585d711f1c7d1dc0a Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 16:09:41 +0800 Subject: [PATCH 023/101] keep op_legacy --- x2paddle/op_mapper/onnx2paddle/opset_legacy.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py index f65a9fb87..808dd39fb 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py +++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py @@ -194,15 +194,6 @@ def __init__(self, decoder, paddle_graph): 'Erf': ['paddle.erf'], 'Sin': ['paddle.sin'], 'Cos': ['paddle.cos'], - 'Atan': ['paddle.atan'], - 'Acos': ['paddle.acos'], - 'Asin': ['paddle.asin'], - 'IsInf': ['paddle.isinf'], - 'IsNaN': ['paddle.isnan'], - 'Cosh': ['paddle.cosh'], - 'Acosh': ['paddle.acosh'], - 'Asinh': ['paddle.asinh'], - 'Tan': ['paddle.tan'], } @print_mapping_info From dfb332d6485f1af805069d9401fa1146868f0092 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Wed, 3 Aug 2022 16:13:13 +0800 Subject: [PATCH 024/101] Delete test_auto_scan_one_input_ops_float32.py --- .../test_auto_scan_one_input_ops_float32.py | 86 ------------------- 1 file changed, 86 deletions(-) delete mode 100644 tests/onnx/test_auto_scan_one_input_ops_float32.py diff --git a/tests/onnx/test_auto_scan_one_input_ops_float32.py b/tests/onnx/test_auto_scan_one_input_ops_float32.py deleted file mode 100644 index 15762edda..000000000 --- a/tests/onnx/test_auto_scan_one_input_ops_float32.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from auto_scan_test import OPConvertAutoScanTest -from hypothesis import reproduce_failure -import hypothesis.strategies as st -import onnx -from onnx import helper -from onnx import TensorProto -import numpy as np -import unittest -import random - -min_opset_version_map = { - "IsInf": 10, - "Elu": 7, - "IsNaN": 9, - "Log": 7, - "Cosh": 9, - "Cos": 7, - "Atan": 7, - "Asinh": 9, - "Asin": 7, - "Acosh": 9, - "Acos": 7, - "Exp": 7, - "Floor": 7, - "Tan": 7 -} - - -class TestIsinfConcert(OPConvertAutoScanTest): - """ - ONNX op: elementwise ops - OPset version: 7~15 - """ - - def sample_convert_config(self, draw): - input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=3, max_size=5)) - - input_dtype = draw(st.sampled_from(["float32"])) - - config = { - "op_names": [ - "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", - "Cosh", "Exp", "Floor", "Tan" - ], - "test_data_shapes": [input_shape], - "test_data_types": [input_dtype], - "inputs_shape": [input_shape], - "min_opset_version": 7, - "inputs_name": ["x"], - "outputs_name": ["y"], - "delta": 1e-4, - "rtol": 1e-4 - } - - min_opset_versions = list() - for op_name in config["op_names"]: - min_opset_versions.append(min_opset_version_map[op_name]) - config["min_opset_version"] = min_opset_versions - - attrs = {} - - return (config, attrs) - - def test(self): - self.run_and_statis(max_examples=50) - - -if __name__ == "__main__": - unittest.main() From 9e412cf152970bd817b4b4a56bf8b81ab12787a2 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 20:45:08 +0800 Subject: [PATCH 025/101] modify the name --- tests/onnx/test_auto_scan_unary_ops.py | 83 ++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 tests/onnx/test_auto_scan_unary_ops.py diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py new file mode 100644 index 000000000..e7f8207f3 --- /dev/null +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -0,0 +1,83 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st +import onnx +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + +min_opset_version_map = { + "Log": 7, + "Cosh": 9, + "Cos": 7, + "Atan": 7, + "Asinh": 9, + "Asin": 7, + "Acosh": 9, + "Acos": 7, + "Exp": 7, + "Floor": 7, + "Tan": 7, +} + + +class TestUnaryopsConcert(OPConvertAutoScanTest): + """ + ONNX op: unary ops + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + + config = { + "op_names": [ + "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + "Exp", "Floor", "Tan" + ], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + min_opset_versions = list() + for op_name in config["op_names"]: + min_opset_versions.append(min_opset_version_map[op_name]) + config["min_opset_version"] = min_opset_versions + + attrs = {} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) + + +if __name__ == "__main__": + unittest.main() From fff2ce48b599d4a51e7ca26830adf4b838e81226 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 3 Aug 2022 20:52:44 +0800 Subject: [PATCH 026/101] fix --- x2paddle/op_mapper/onnx2paddle/opset7.py | 56 +----------------------- 1 file changed, 1 insertion(+), 55 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py index e3cbe1167..65f251701 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset7.py +++ b/x2paddle/op_mapper/onnx2paddle/opset7.py @@ -13,7 +13,6 @@ # limitations under the License. from .opset_legacy import OpSet -import sys def print_mapping_info(func): @@ -33,61 +32,8 @@ def run_mapping(*args, **kwargs): class OpSet7(OpSet): def __init__(self, decoder, paddle_graph): super(OpSet7, self).__init__(decoder, paddle_graph) + self.directly_map_ops.update self.directly_map_ops = { - 'Ceil': ['paddle.ceil'], - # reduce function - 'ReduceMean': [ - 'paddle.mean', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdims=True) - ], - 'ReduceMin': [ - 'paddle.min', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdim=True) - ], - 'ReduceMax': [ - 'paddle.max', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdim=True) - ], - 'ReduceProd': [ - 'paddle.prod', dict( - axes='axis', keepdims='keepdim'), dict( - axes=None, keepdim=True) - ], - # active function - 'Relu': ['paddle.nn.ReLU'], - 'LeakyRelu': [ - 'paddle.nn.LeakyReLU', dict(alpha='negative_slope'), - dict(negative_slope=.01) - ], - 'Elu': - ['paddle.nn.functional.elu', dict(alpha='alpha'), dict(alpha=1.)], - 'ThresholdedRelu': [ - 'paddle.nn.functional.thresholded_relu', - dict(alpha='threshold'), dict(alpha=1.) 
- ], - 'Tanh': ['paddle.nn.Tanh'], - 'Sigmoid': ['paddle.nn.Sigmoid'], - 'Softsign': ['paddle.nn.Softsign'], - 'Softplus': [ - 'paddle.nn.Softplus', dict(threshold='threshold'), - dict(threshold=float(sys.maxsize)) - ], - 'Exp': ['paddle.exp'], - 'Log': ['paddle.log'], - 'LogSoftmax': [ - 'paddle.nn.functional.log_softmax', dict(axis='axis'), - dict(axis=1) - ], - 'Softmax': ['paddle.nn.Softmax', dict(axis='axis'), dict(axis=1)], - 'Sqrt': ['paddle.sqrt'], - 'Floor': ['paddle.floor'], - 'Abs': ['paddle.abs'], - 'Erf': ['paddle.erf'], - 'Sin': ['paddle.sin'], - 'Cos': ['paddle.cos'], 'Atan': ['paddle.atan'], 'Acos': ['paddle.acos'], 'Asin': ['paddle.asin'], From 86b8736a0a3430efde2b78106713d5dacefcde1c Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 10:59:38 +0800 Subject: [PATCH 027/101] add ceil op --- tests/onnx/test_auto_scan_unary_ops.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index e7f8207f3..a4de031ca 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -34,6 +34,7 @@ "Exp": 7, "Floor": 7, "Tan": 7, + "Ceil": 7 } @@ -52,10 +53,7 @@ def sample_convert_config(self, draw): input_dtype = draw(st.sampled_from(["float32"])) config = { - "op_names": [ - "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan" - ], + "op_names": ["Ceil"], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], "inputs_shape": [input_shape], From 90441affe9b42e818f1cd58f7cffa719ed4dd24e Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 11:01:03 +0800 Subject: [PATCH 028/101] modify opset7 --- x2paddle/op_mapper/onnx2paddle/opset7.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py index 65f251701..978757a21 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset7.py +++ b/x2paddle/op_mapper/onnx2paddle/opset7.py @@ -32,8 +32,7 @@ def run_mapping(*args, **kwargs): class OpSet7(OpSet): def __init__(self, decoder, paddle_graph): super(OpSet7, self).__init__(decoder, paddle_graph) - self.directly_map_ops.update - self.directly_map_ops = { + self.directly_map_ops.update({ 'Atan': ['paddle.atan'], 'Acos': ['paddle.acos'], 'Asin': ['paddle.asin'], @@ -43,7 +42,7 @@ def __init__(self, decoder, paddle_graph): 'Acosh': ['paddle.acosh'], 'Asinh': ['paddle.asinh'], 'Tan': ['paddle.tan'], - } + }) @print_mapping_info def Unsqueeze(self, node): From 10e142c71a2a1efbe16dbd7491db4efb26ba0c98 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 11:02:03 +0800 Subject: [PATCH 029/101] Delete test_auto_scan_unarray_ops.py --- tests/onnx/test_auto_scan_unarray_ops.py | 86 ------------------------ 1 file changed, 86 deletions(-) delete mode 100644 tests/onnx/test_auto_scan_unarray_ops.py diff --git a/tests/onnx/test_auto_scan_unarray_ops.py b/tests/onnx/test_auto_scan_unarray_ops.py deleted file mode 100644 index ebfc53a2a..000000000 --- a/tests/onnx/test_auto_scan_unarray_ops.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from auto_scan_test import OPConvertAutoScanTest -from hypothesis import reproduce_failure -import hypothesis.strategies as st -import onnx -from onnx import helper -from onnx import TensorProto -import numpy as np -import unittest -import random - -min_opset_version_map = { - "IsInf": 10, - "Elu": 7, - "IsNaN": 9, - "Log": 7, - "Cosh": 9, - "Cos": 7, - "Atan": 7, - "Asinh": 9, - "Asin": 7, - "Acosh": 9, - "Acos": 7, - "Exp": 7, - "Floor": 7, - "Tan": 7, -} - - -class TestUnarrayOpsConcert(OPConvertAutoScanTest): - """ - ONNX op: unarray ops - OPset version: 7~15 - """ - - def sample_convert_config(self, draw): - input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=3, max_size=5)) - - input_dtype = draw(st.sampled_from(["float32"])) - - config = { - "op_names": [ - "Elu", "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", - "Cosh", "Exp", "Floor", "Tan" - ], - "test_data_shapes": [input_shape], - "test_data_types": [input_dtype], - "inputs_shape": [input_shape], - "min_opset_version": 7, - "inputs_name": ["x"], - "outputs_name": ["y"], - "delta": 1e-4, - "rtol": 1e-4 - } - - min_opset_versions = list() - for op_name in config["op_names"]: - min_opset_versions.append(min_opset_version_map[op_name]) - config["min_opset_version"] = min_opset_versions - - attrs = {} - - return (config, attrs) - - def test(self): - self.run_and_statis(max_examples=50) - - -if __name__ == "__main__": - unittest.main() From 72a69ec1e5a705f7b8d436503b3f7ac5318603e3 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 11:46:13 +0800 Subject: [PATCH 030/101] add test of elu --- tests/onnx/test_auto_scan_elu.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/onnx/test_auto_scan_elu.py diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py new file mode 100644 index 000000000..e69de29bb From 4f88ea34056defa234260ba3a2551ad149ae97a6 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 15:00:05 +0800 Subject: [PATCH 031/101] Update test_auto_scan_unary_ops.py --- tests/onnx/test_auto_scan_unary_ops.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index a4de031ca..d51e85883 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -53,7 +53,10 @@ def sample_convert_config(self, draw): input_dtype = draw(st.sampled_from(["float32"])) config = { - "op_names": ["Ceil"], + "op_names": [ + "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + "Exp", "Floor", "Tan" + ] "test_data_shapes": [input_shape], "test_data_types": [input_dtype], "inputs_shape": [input_shape], From b83c14506175ebf8a8e36f5d5f4db73ac34ea51c Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 15:01:36 +0800 Subject: [PATCH 032/101] Update test_auto_scan_elu.py --- tests/onnx/test_auto_scan_elu.py | 64 ++++++++++++++++++++++++++++++++ 1 file changed, 64 
insertions(+) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index e69de29bb..8cdab369c 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st +import onnx +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + + +class TestEluConvert(OPConvertAutoScanTest): + """ + ONNX op: Elu + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + for i in range(2): + alpha = random.random() + + config = { + "op_names": ["Elu"], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "max_opset_version": 15, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + attrs = {"alpha": alpha} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) + + +if __name__ == "__main__": + unittest.main() From 3cee6e31ea5b25ddd0ba5d2afe6bcb7aa60493d0 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 15:04:09 +0800 Subject: [PATCH 033/101] Update test_auto_scan_elu.py --- tests/onnx/test_auto_scan_elu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index 8cdab369c..74fb119d8 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -24,7 +24,7 @@ class TestEluConvert(OPConvertAutoScanTest): - """ + """ ONNX op: Elu OPset version: 7~15 """ From c74102ae9c7fb5616cc3c82b1471e555319d0bed Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:09:45 +0800 Subject: [PATCH 034/101] fixx --- tests/onnx/test_auto_scan_elu.py | 60 ++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index e69de29bb..3d0ae8c16 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -0,0 +1,60 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st +import onnx +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + + +class TestEluConvert(OPConvertAutoScanTest): + """ + ONNX op: Elu + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + for i in range(2): + alpha = random.random() + + config = { + "op_names": ["Elu"], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "max_opset_version": 15, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + attrs = {"alpha": alpha} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) From 806aed495d0388213e6b8fdbcdaa19b1adbae7f7 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:17:42 +0800 Subject: [PATCH 035/101] test --- tests/onnx/test_auto_scan_unary_ops.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index a4de031ca..8f8c27f96 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -53,7 +53,10 @@ def sample_convert_config(self, draw): input_dtype = draw(st.sampled_from(["float32"])) config = { - "op_names": ["Ceil"], + "op_names": [ + "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + "Exp", "Floor", "Tan" + ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], "inputs_shape": [input_shape], From c7d73223961b8ceaf3fcaee45bde030abe56290b Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:18:21 +0800 Subject: [PATCH 036/101] test --- tests/onnx/test_auto_scan_unary_ops.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 8f8c27f96..91ff503b5 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -34,7 +34,8 @@ "Exp": 7, "Floor": 7, "Tan": 7, - "Ceil": 7 + "Ceil": 7, + "Erf": 9, } @@ -55,7 +56,7 @@ def sample_convert_config(self, draw): config = { "op_names": [ "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan" + "Exp", "Floor", "Tan", "Erf" ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From aa30f6aa35b7942acfcc6133cd2e37f428c77ed6 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:24:11 +0800 Subject: [PATCH 037/101] test --- tests/onnx/test_auto_scan_elu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index 3d0ae8c16..f5d1d2ed4 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -25,7 +25,7 @@ class TestEluConvert(OPConvertAutoScanTest): """ - ONNX op: Elu + ONNX op: Elu OPset version: 7~15 """ From 019f43c9bb4a972666e4ca0cd0aa6995fb879e9c Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:24:30 +0800 Subject: [PATCH 038/101] test --- tests/onnx/test_auto_scan_elu.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index f5d1d2ed4..3d0ae8c16 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -25,7 +25,7 @@ class TestEluConvert(OPConvertAutoScanTest): """ - ONNX op: Elu + ONNX op: Elu OPset version: 7~15 """ From 1636677588d0c346ce2165cc5563f4954f313ce7 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 15:26:10 +0800 Subject: [PATCH 039/101] add ref --- tests/onnx/test_auto_scan_unary_ops.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index d51e85883..91ff503b5 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -34,7 +34,8 @@ "Exp": 7, "Floor": 7, "Tan": 7, - "Ceil": 7 + "Ceil": 7, + "Erf": 9, } @@ -54,9 +55,9 @@ def sample_convert_config(self, draw): config = { "op_names": [ - "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan" - ] + "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + "Exp", "Floor", "Tan", "Erf" + ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], "inputs_shape": [input_shape], From 2bb1c0610ed1631c75cf8730883d4713429e9414 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:53:32 +0800 Subject: [PATCH 040/101] add sinh + tanh --- tests/onnx/test_auto_scan_unary_ops.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 91ff503b5..706b51b61 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -36,6 +36,9 @@ "Tan": 7, "Ceil": 7, "Erf": 9, + "Sin": 7, + "Sinh": 9, + "Tanh": 7, } @@ -56,7 +59,7 @@ def sample_convert_config(self, draw): config = { "op_names": [ "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan", "Erf" + "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh" ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From 70e11e8f1c4c29c579f5ff1d3c5d04f2f197f00d Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:57:41 +0800 Subject: [PATCH 041/101] add sinh + tanh --- tests/onnx/test_auto_scan_unary_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 706b51b61..4233a6a60 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -59,7 +59,7 @@ def sample_convert_config(self, draw): config = { "op_names": [ "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh" + "Exp", "Floor", "Tan", "Erf" ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From 100c9ffbd8680a469b8eb3a2f7c321ec0f8fb7d8 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 15:59:55 +0800 Subject: [PATCH 042/101] add sinh + tanh --- tests/onnx/test_auto_scan_unary_ops.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 4233a6a60..71d3b79cd 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -58,8 +58,21 @@ def sample_convert_config(self, draw): config = { 
"op_names": [ - "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan", "Erf" + "Log", + "Cos", + "Atan", + "Asinh", + "Asin", + "Acosh", + "Acos", + "Cosh", + "Exp", + "Floor", + "Tan", + "Erf", + "Sin", + "Sinh", + "Tanh", ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From 48b899ee57edfe29cb3c876bdae955428e435e12 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 16:00:23 +0800 Subject: [PATCH 043/101] add sin Sinh tang --- tests/onnx/test_auto_scan_unary_ops.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 91ff503b5..71d3b79cd 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -36,6 +36,9 @@ "Tan": 7, "Ceil": 7, "Erf": 9, + "Sin": 7, + "Sinh": 9, + "Tanh": 7, } @@ -55,8 +58,21 @@ def sample_convert_config(self, draw): config = { "op_names": [ - "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan", "Erf" + "Log", + "Cos", + "Atan", + "Asinh", + "Asin", + "Acosh", + "Acos", + "Cosh", + "Exp", + "Floor", + "Tan", + "Erf", + "Sin", + "Sinh", + "Tanh", ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From cda4839bd7b40c20731ec9ca88e861a2fc790ce0 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 16:02:25 +0800 Subject: [PATCH 044/101] Update opset7.py --- x2paddle/op_mapper/onnx2paddle/opset7.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py index 978757a21..d672c6558 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset7.py +++ b/x2paddle/op_mapper/onnx2paddle/opset7.py @@ -36,11 +36,6 @@ def __init__(self, decoder, paddle_graph): 'Atan': ['paddle.atan'], 'Acos': ['paddle.acos'], 'Asin': ['paddle.asin'], - 'IsInf': ['paddle.isinf'], - 'IsNaN': ['paddle.isnan'], - 'Cosh': ['paddle.cosh'], - 'Acosh': ['paddle.acosh'], - 'Asinh': ['paddle.asinh'], 'Tan': ['paddle.tan'], }) From f41ee7259736aafc48ac4fed60c7882f6450ecb5 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 16:03:26 +0800 Subject: [PATCH 045/101] add :cosh,acosh,sinh,acosh --- x2paddle/op_mapper/onnx2paddle/opset9.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/x2paddle/op_mapper/onnx2paddle/opset9.py b/x2paddle/op_mapper/onnx2paddle/opset9.py index 846e84719..5d8b58ffd 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset9.py +++ b/x2paddle/op_mapper/onnx2paddle/opset9.py @@ -32,3 +32,10 @@ def run_mapping(*args, **kwargs): class OpSet9(OpSet8): def __init__(self, decoder, paddle_graph): super(OpSet9, self).__init__(decoder, paddle_graph) + self.directly_map_ops.update({ + 'Cosh': ['paddle.cosh'], + 'Sinh': ['paddle.sinh'], + 'Acosh': ['paddle.acosh'], + 'Asinh': ['paddle.asinh'], + }) + From 0ef8816424a9deb7e89894ad815ddd86f1b48b8c Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 16:31:51 +0800 Subject: [PATCH 046/101] try --- tests/onnx/test_auto_scan_unary_ops.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 71d3b79cd..7c9777030 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ 
b/tests/onnx/test_auto_scan_unary_ops.py @@ -39,6 +39,7 @@ "Sin": 7, "Sinh": 9, "Tanh": 7, + "Atanh": 9, } @@ -58,21 +59,8 @@ def sample_convert_config(self, draw): config = { "op_names": [ - "Log", - "Cos", - "Atan", - "Asinh", - "Asin", - "Acosh", - "Acos", - "Cosh", - "Exp", - "Floor", - "Tan", - "Erf", - "Sin", - "Sinh", - "Tanh", + "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh", "Atanh" ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From a43dd0895f57607dc00bd048d3e638f093e5db65 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 16:35:17 +0800 Subject: [PATCH 047/101] try --- x2paddle/op_mapper/onnx2paddle/opset9.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/x2paddle/op_mapper/onnx2paddle/opset9.py b/x2paddle/op_mapper/onnx2paddle/opset9.py index 846e84719..1c3034cf9 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset9.py +++ b/x2paddle/op_mapper/onnx2paddle/opset9.py @@ -32,3 +32,10 @@ def run_mapping(*args, **kwargs): class OpSet9(OpSet8): def __init__(self, decoder, paddle_graph): super(OpSet9, self).__init__(decoder, paddle_graph) + self.directly_map_ops.update({ + 'Cosh': ['paddle.cosh'], + 'Sinh': ['paddle.sinh'], + 'Acosh': ['paddle.acosh'], + 'Asinh': ['paddle.asinh'], + 'Atanh': ['paddle.atanh'], + }) From 9433d2e2cc4079a2f0c00e26a76dea30cd0b13f4 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 16:35:36 +0800 Subject: [PATCH 048/101] add Atanh --- x2paddle/op_mapper/onnx2paddle/opset9.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset9.py b/x2paddle/op_mapper/onnx2paddle/opset9.py index 5d8b58ffd..1c3034cf9 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset9.py +++ b/x2paddle/op_mapper/onnx2paddle/opset9.py @@ -37,5 +37,5 @@ def __init__(self, decoder, paddle_graph): 'Sinh': ['paddle.sinh'], 'Acosh': ['paddle.acosh'], 'Asinh': ['paddle.asinh'], + 'Atanh': ['paddle.atanh'], }) - From 92e0f68e873cdab183ce30f04f8cc630a0e4a350 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 16:36:13 +0800 Subject: [PATCH 049/101] add Atanh --- tests/onnx/test_auto_scan_unary_ops.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 71d3b79cd..7c9777030 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -39,6 +39,7 @@ "Sin": 7, "Sinh": 9, "Tanh": 7, + "Atanh": 9, } @@ -58,21 +59,8 @@ def sample_convert_config(self, draw): config = { "op_names": [ - "Log", - "Cos", - "Atan", - "Asinh", - "Asin", - "Acosh", - "Acos", - "Cosh", - "Exp", - "Floor", - "Tan", - "Erf", - "Sin", - "Sinh", - "Tanh", + "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh", "Atanh" ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From 3836727ecdd966f9122a6439b82e9185a580c1bf Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 19:34:28 +0800 Subject: [PATCH 050/101] s --- tests/onnx/test_auto_scan_unary_ops.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 7c9777030..1a0662ec3 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ 
b/tests/onnx/test_auto_scan_unary_ops.py @@ -40,6 +40,7 @@ "Sinh": 9, "Tanh": 7, "Atanh": 9, + "Sqrt": 7 } @@ -58,10 +59,11 @@ def sample_convert_config(self, draw): input_dtype = draw(st.sampled_from(["float32"])) config = { - "op_names": [ - "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh", "Atanh" - ], + # "op_names": [ + # "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", + # "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh", "Atanh", + # ], + "op_names": ["Sqrt", ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], "inputs_shape": [input_shape], From db318c845c8e6b3931e3c2d6b6dfa3e2553ddae1 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 19:36:18 +0800 Subject: [PATCH 051/101] s --- tests/onnx/test_auto_scan_unary_ops.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 1a0662ec3..26bcc9e15 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -59,11 +59,25 @@ def sample_convert_config(self, draw): input_dtype = draw(st.sampled_from(["float32"])) config = { - # "op_names": [ - # "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - # "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh", "Atanh", - # ], - "op_names": ["Sqrt", ], + "op_names": [ + "Log", + "Cos", + "Atan", + "Asinh", + "Asin", + "Acosh", + "Acos", + "Cosh", + "Exp", + "Floor", + "Tan", + "Erf", + "Sin", + "Sinh", + "Tanh", + "Atanh", + "Sqrt", + ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], "inputs_shape": [input_shape], From ebf9a89159fc1ba448088564093f2c69f2721082 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Thu, 4 Aug 2022 19:36:52 +0800 Subject: [PATCH 052/101] add Sqrt --- tests/onnx/test_auto_scan_unary_ops.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 7c9777030..26bcc9e15 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -40,6 +40,7 @@ "Sinh": 9, "Tanh": 7, "Atanh": 9, + "Sqrt": 7 } @@ -59,8 +60,23 @@ def sample_convert_config(self, draw): config = { "op_names": [ - "Log", "Cos", "Atan", "Asinh", "Asin", "Acosh", "Acos", "Cosh", - "Exp", "Floor", "Tan", "Erf", "Sin", "Sinh", "Tanh", "Atanh" + "Log", + "Cos", + "Atan", + "Asinh", + "Asin", + "Acosh", + "Acos", + "Cosh", + "Exp", + "Floor", + "Tan", + "Erf", + "Sin", + "Sinh", + "Tanh", + "Atanh", + "Sqrt", ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From cc8a9d0076451da5a6170f38e1ef19051c0f6cd7 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 4 Aug 2022 19:52:38 +0800 Subject: [PATCH 053/101] s --- tests/onnx/test_auto_scan_unary_ops.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py index 26bcc9e15..4e85ec0cf 100644 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ b/tests/onnx/test_auto_scan_unary_ops.py @@ -40,7 +40,8 @@ "Sinh": 9, "Tanh": 7, "Atanh": 9, - "Sqrt": 7 + "Sqrt": 7, + "Shape": 7, } @@ -77,6 +78,7 @@ def sample_convert_config(self, draw): "Tanh", "Atanh", "Sqrt", + "Shape", ], "test_data_shapes": [input_shape], "test_data_types": [input_dtype], From 
From f1dbe66242435c88c4e5b5c987b2509c797ff6da Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Thu, 4 Aug 2022 19:53:27 +0800
Subject: [PATCH 054/101] add Shape

---
 tests/onnx/test_auto_scan_unary_ops.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py
index 26bcc9e15..4e85ec0cf 100644
--- a/tests/onnx/test_auto_scan_unary_ops.py
+++ b/tests/onnx/test_auto_scan_unary_ops.py
@@ -40,7 +40,8 @@
     "Sinh": 9,
     "Tanh": 7,
     "Atanh": 9,
-    "Sqrt": 7
+    "Sqrt": 7,
+    "Shape": 7,
 }
 
 
@@ -77,6 +78,7 @@ def sample_convert_config(self, draw):
                 "Tanh",
                 "Atanh",
                 "Sqrt",
+                "Shape",
             ],
             "test_data_shapes": [input_shape],
             "test_data_types": [input_dtype],

From 67a1e3f9a77feb41a3ade5c450a1e0ae744944f4 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Thu, 4 Aug 2022 20:00:06 +0800
Subject: [PATCH 055/101] Add Sign to the unary op tests

---
 tests/onnx/test_auto_scan_unary_ops.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py
index 4e85ec0cf..33b98ebf2 100644
--- a/tests/onnx/test_auto_scan_unary_ops.py
+++ b/tests/onnx/test_auto_scan_unary_ops.py
@@ -42,6 +42,7 @@
     "Atanh": 9,
     "Sqrt": 7,
     "Shape": 7,
+    "Sign": 9,
 }
 
 
@@ -79,6 +80,7 @@ def sample_convert_config(self, draw):
                 "Atanh",
                 "Sqrt",
                 "Shape",
+                "Sign",
             ],
             "test_data_shapes": [input_shape],
             "test_data_types": [input_dtype],

From 69b1ac26a16e7a27c3feceab0ddaa4def80e4c21 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Thu, 4 Aug 2022 20:00:50 +0800
Subject: [PATCH 056/101] add Sign

---
 tests/onnx/test_auto_scan_unary_ops.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py
index 4e85ec0cf..33b98ebf2 100644
--- a/tests/onnx/test_auto_scan_unary_ops.py
+++ b/tests/onnx/test_auto_scan_unary_ops.py
@@ -42,6 +42,7 @@
     "Atanh": 9,
     "Sqrt": 7,
     "Shape": 7,
+    "Sign": 9,
 }
 
 
@@ -79,6 +80,7 @@ def sample_convert_config(self, draw):
                 "Atanh",
                 "Sqrt",
                 "Shape",
+                "Sign",
             ],
             "test_data_shapes": [input_shape],
             "test_data_types": [input_dtype],

From d695daf738a05b63f06cb4366ecb141c754b0b02 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Fri, 5 Aug 2022 14:51:59 +0800
Subject: [PATCH 057/101] add_celu

---
 tests/onnx/test_auto_scan_celu.py | 63 +++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_celu.py

diff --git a/tests/onnx/test_auto_scan_celu.py b/tests/onnx/test_auto_scan_celu.py
new file mode 100644
index 000000000..58bc0671e
--- /dev/null
+++ b/tests/onnx/test_auto_scan_celu.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import OPConvertAutoScanTest
+from hypothesis import reproduce_failure
+import hypothesis.strategies as st
+import onnx
+from onnx import helper
+from onnx import TensorProto
+import numpy as np
+import unittest
+import random
+
+
+class TestCeluConvert(OPConvertAutoScanTest):
+    """
+    ONNX op: Celu
+    OPset version: 12~15
+    """
+
+    def sample_convert_config(self, draw):
+        input_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=20, max_value=30), min_size=3, max_size=5))
+        input_dtype = draw(st.sampled_from(["float32"]))
+        for i in range(2):
+            alpha = random.random()
+
+        config = {
+            "op_names": ["Celu"],
+            "test_data_shapes": [input_shape],
+            "test_data_types": [input_dtype],
+            "inputs_shape": [input_shape],
+            "min_opset_version": 12,
+            "max_opset_version": 15,
+            "inputs_name": ["x"],
+            "outputs_name": ["y"],
+            "delta": 1e-4,
+            "rtol": 1e-4
+        }
+
+        attrs = {"alpha": alpha}
+
+        return (config, attrs)
+
+    def test(self):
+        self.run_and_statis(max_examples=50)
+
+
+if __name__ == "__main__":
+    unittest.main()

From 5587931b1950229cc3d44ba831827f2d993c0ced Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:01:38 +0800
Subject: [PATCH 058/101] Delete test_auto_scan_elu.py

---
 tests/onnx/test_auto_scan_elu.py | 60 --------------------------------
 1 file changed, 60 deletions(-)
 delete mode 100644 tests/onnx/test_auto_scan_elu.py

diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py
deleted file mode 100644
index 3d0ae8c16..000000000
--- a/tests/onnx/test_auto_scan_elu.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import OPConvertAutoScanTest
-from hypothesis import reproduce_failure
-import hypothesis.strategies as st
-import onnx
-from onnx import helper
-from onnx import TensorProto
-import numpy as np
-import unittest
-import random
-
-
-class TestEluConvert(OPConvertAutoScanTest):
-    """
-    ONNX op: Elu
-    OPset version: 7~15
-    """
-
-    def sample_convert_config(self, draw):
-        input_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=20, max_value=30), min_size=3, max_size=5))
-
-        input_dtype = draw(st.sampled_from(["float32"]))
-        for i in range(2):
-            alpha = random.random()
-
-        config = {
-            "op_names": ["Elu"],
-            "test_data_shapes": [input_shape],
-            "test_data_types": [input_dtype],
-            "inputs_shape": [input_shape],
-            "min_opset_version": 7,
-            "max_opset_version": 15,
-            "inputs_name": ["x"],
-            "outputs_name": ["y"],
-            "delta": 1e-4,
-            "rtol": 1e-4
-        }
-
-        attrs = {"alpha": alpha}
-
-        return (config, attrs)
-
-    def test(self):
-        self.run_and_statis(max_examples=50)

From 0451348d86f18a02f7a435f273d5d5aa8cc9fc37 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:01:52 +0800
Subject: [PATCH 059/101] Delete test_auto_scan_unary_ops.py

---
 tests/onnx/test_auto_scan_unary_ops.py | 109 ------------------------
 1 file changed, 109 deletions(-)
 delete mode 100644 tests/onnx/test_auto_scan_unary_ops.py

diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py
deleted file mode 100644
index 33b98ebf2..000000000
--- a/tests/onnx/test_auto_scan_unary_ops.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import OPConvertAutoScanTest
-from hypothesis import reproduce_failure
-import hypothesis.strategies as st
-import onnx
-from onnx import helper
-from onnx import TensorProto
-import numpy as np
-import unittest
-import random
-
-min_opset_version_map = {
-    "Log": 7,
-    "Cosh": 9,
-    "Cos": 7,
-    "Atan": 7,
-    "Asinh": 9,
-    "Asin": 7,
-    "Acosh": 9,
-    "Acos": 7,
-    "Exp": 7,
-    "Floor": 7,
-    "Tan": 7,
-    "Ceil": 7,
-    "Erf": 9,
-    "Sin": 7,
-    "Sinh": 9,
-    "Tanh": 7,
-    "Atanh": 9,
-    "Sqrt": 7,
-    "Shape": 7,
-    "Sign": 9,
-}
-
-
-class TestUnaryopsConcert(OPConvertAutoScanTest):
-    """
-    ONNX op: unary ops
-    OPset version: 7~15
-    """
-
-    def sample_convert_config(self, draw):
-        input_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=20, max_value=30), min_size=3, max_size=5))
-
-        input_dtype = draw(st.sampled_from(["float32"]))
-
-        config = {
-            "op_names": [
-                "Log",
-                "Cos",
-                "Atan",
-                "Asinh",
-                "Asin",
-                "Acosh",
-                "Acos",
-                "Cosh",
-                "Exp",
-                "Floor",
-                "Tan",
-                "Erf",
-                "Sin",
-                "Sinh",
-                "Tanh",
-                "Atanh",
-                "Sqrt",
-                "Shape",
-                "Sign",
-            ],
-            "test_data_shapes": [input_shape],
-            "test_data_types": [input_dtype],
-            "inputs_shape": [input_shape],
-            "min_opset_version": 7,
-            "inputs_name": ["x"],
-            "outputs_name": ["y"],
-            "delta": 1e-4,
-            "rtol": 1e-4
-        }
-
-        min_opset_versions = list()
-        for op_name in config["op_names"]:
-            min_opset_versions.append(min_opset_version_map[op_name])
-        config["min_opset_version"] = min_opset_versions
-
-        attrs = {}
-
-        return (config, attrs)
-
-    def test(self):
-        self.run_and_statis(max_examples=50)
-
-
-if __name__ == "__main__":
-    unittest.main()

From a3937ed0ee9911a7af60df218d5cb10341c39591 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:02:13 +0800
Subject: [PATCH 060/101] Delete opset7.py

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 63 ------------------------
 1 file changed, 63 deletions(-)
 delete mode 100644 x2paddle/op_mapper/onnx2paddle/opset7.py

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
deleted file mode 100644
index 978757a21..000000000
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .opset_legacy import OpSet
-
-
-def print_mapping_info(func):
-    def run_mapping(*args, **kwargs):
-        node = args[1]
-        try:
-            res = func(*args, **kwargs)
-        except:
-            raise Exception("convert failed node:{}, op_type is {}".format(
-                node.name[9:], node.layer_type))
-        else:
-            return res
-
-    return run_mapping
-
-
-class OpSet7(OpSet):
-    def __init__(self, decoder, paddle_graph):
-        super(OpSet7, self).__init__(decoder, paddle_graph)
-        self.directly_map_ops.update({
-            'Atan': ['paddle.atan'],
-            'Acos': ['paddle.acos'],
-            'Asin': ['paddle.asin'],
-            'IsInf': ['paddle.isinf'],
-            'IsNaN': ['paddle.isnan'],
-            'Cosh': ['paddle.cosh'],
-            'Acosh': ['paddle.acosh'],
-            'Asinh': ['paddle.asinh'],
-            'Tan': ['paddle.tan'],
-        })
-
-    @print_mapping_info
-    def Unsqueeze(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        axes = node.get_attr('axes')
-        # deal with scalar(0D) tensor
-        if len(val_x.out_shapes[0]) == 0 and len(axes) == 1 and axes[0] == 0:
-            self.paddle_graph.add_layer(
-                'paddle.reshape',
-                inputs={"x": val_x.name},
-                outputs=[node.name],
-                shape=[1])
-        else:
-            self.paddle_graph.add_layer(
-                'paddle.unsqueeze',
-                inputs={"x": val_x.name},
-                axis=axes,
-                outputs=[node.name])

From abd1260cdcac8e1e3150be3b38b865e5b2d35e47 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:03:44 +0800
Subject: [PATCH 061/101] Restore opset7.py

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 59 ++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 x2paddle/op_mapper/onnx2paddle/opset7.py

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
new file mode 100644
index 000000000..76df5a16c
--- /dev/null
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .opset_legacy import OpSet
+
+
+def print_mapping_info(func):
+    def run_mapping(*args, **kwargs):
+        node = args[1]
+        try:
+            res = func(*args, **kwargs)
+        except:
+            raise Exception("convert failed node:{}, op_type is {}".format(
+                node.name[9:], node.layer_type))
+        else:
+            return res
+
+    return run_mapping
+
+
+class OpSet7(OpSet):
+    def __init__(self, decoder, paddle_graph):
+        super(OpSet7, self).__init__(decoder, paddle_graph)
+        self.directly_map_ops.update({
+            'Atan': ['paddle.atan'],
+            'Acos': ['paddle.acos'],
+            'Asin': ['paddle.asin'],
+            'Tan': ['paddle.tan'],
+            'Mean':['paddle.mean']
+        })
+
+    @print_mapping_info
+    def Unsqueeze(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        axes = node.get_attr('axes')
+        # deal with scalar(0D) tensor
+        if len(val_x.out_shapes[0]) == 0 and len(axes) == 1 and axes[0] == 0:
+            self.paddle_graph.add_layer(
+                'paddle.reshape',
+                inputs={"x": val_x.name},
+                outputs=[node.name],
+                shape=[1])
+        else:
+            self.paddle_graph.add_layer(
+                'paddle.unsqueeze',
+                inputs={"x": val_x.name},
+                axis=axes,
+                outputs=[node.name])

From f50943f097b9cd65686b28145eda815b11a253b8 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:05:26 +0800
Subject: [PATCH 062/101] add Celu

---
 x2paddle/op_mapper/onnx2paddle/opset12.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset12.py b/x2paddle/op_mapper/onnx2paddle/opset12.py
index 69f69b012..8032cc217 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset12.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset12.py
@@ -32,3 +32,16 @@ def run_mapping(*args, **kwargs):
 class OpSet12(OpSet11):
     def __init__(self, decoder, paddle_graph):
         super(OpSet12, self).__init__(decoder, paddle_graph)
+
+    @print_mapping_info
+    def Celu(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        alphas = node.get_attr('alpha', 1.0)
+        layer_attrs = dict()
+
+        self.paddle_graph.add_layer(
+            "paddle.nn.functional.celu",
+            inputs={"x": val_x.name},
+            alpha=alphas,
+            outputs=[node.name],
+            **layer_attrs)

From 00ca749a41e5f0eca7aa355b2d5107ee92d1f160 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:11:24 +0800
Subject: [PATCH 063/101] Update opset7.py

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index 76df5a16c..444446519 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -32,13 +32,6 @@ def run_mapping(*args, **kwargs):
 class OpSet7(OpSet):
     def __init__(self, decoder, paddle_graph):
         super(OpSet7, self).__init__(decoder, paddle_graph)
-        self.directly_map_ops.update({
-            'Atan': ['paddle.atan'],
-            'Acos': ['paddle.acos'],
-            'Asin': ['paddle.asin'],
-            'Tan': ['paddle.tan'],
-            'Mean':['paddle.mean']
-        })

From dae394378e6375f3ec1bdfc5493b9131cf18a103 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Fri, 5 Aug 2022 15:11:55 +0800
Subject: [PATCH 064/101] Update opset9.py

---
 x2paddle/op_mapper/onnx2paddle/opset9.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset9.py b/x2paddle/op_mapper/onnx2paddle/opset9.py
index 1c3034cf9..846e84719 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset9.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset9.py
@@ -32,10 +32,3 @@ def run_mapping(*args, **kwargs):
 class OpSet9(OpSet8):
     def __init__(self, decoder, paddle_graph):
         super(OpSet9, self).__init__(decoder, paddle_graph)
-        self.directly_map_ops.update({
-            'Cosh': ['paddle.cosh'],
-            'Sinh': ['paddle.sinh'],
-            'Acosh': ['paddle.acosh'],
-            'Asinh': ['paddle.asinh'],
-            'Atanh': ['paddle.atanh'],
-        })
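Patch 062 lowers ONNX Celu to paddle.nn.functional.celu, and both follow the same definition, celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)). A short check against a NumPy reference (a sketch, assuming a paddle version that ships F.celu, which the patch itself presumes):

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    def celu_ref(x, alpha):
        # ONNX Celu: max(0, x) + min(0, alpha * (exp(x / alpha) - 1))
        return np.maximum(0.0, x) + np.minimum(0.0, alpha * np.expm1(x / alpha))

    x = np.random.randn(3, 4).astype("float32")
    out = F.celu(paddle.to_tensor(x), alpha=0.5).numpy()
    print(np.allclose(out, celu_ref(x, 0.5), atol=1e-6))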
From 8b4b42389c00ecb193feefd5f36dd002664f5eb8 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 15:09:41 +0800
Subject: [PATCH 065/101] add selu

---
 tests/onnx/test_auto_scan_selu.py | 64 +++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_selu.py

diff --git a/tests/onnx/test_auto_scan_selu.py b/tests/onnx/test_auto_scan_selu.py
new file mode 100644
index 000000000..049d5035c
--- /dev/null
+++ b/tests/onnx/test_auto_scan_selu.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import OPConvertAutoScanTest
+from hypothesis import reproduce_failure
+import hypothesis.strategies as st
+import onnx
+from onnx import helper
+from onnx import TensorProto
+import numpy as np
+import unittest
+import random
+
+
+class TestSeluConvert(OPConvertAutoScanTest):
+    """
+    ONNX op: Selu
+    OPset version: 7~15
+    """
+
+    def sample_convert_config(self, draw):
+        input_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=20, max_value=30), min_size=3, max_size=5))
+        input_dtype = draw(st.sampled_from(["float32"]))
+        for i in range(2):
+            alpha = random.random()
+
+        gamma =random.uniform(1.1, 3)
+        config = {
+            "op_names": ["Selu"],
+            "test_data_shapes": [input_shape],
+            "test_data_types": [input_dtype],
+            "inputs_shape": [input_shape],
+            "min_opset_version": 7,
+            "max_opset_version": 15,
+            "inputs_name": ["x"],
+            "outputs_name": ["y"],
+            "delta": 1e-4,
+            "rtol": 1e-4
+        }
+
+        attrs = {"alpha": alpha,"gamma":gamma}
+
+        return (config, attrs)
+
+    def test(self):
+        self.run_and_statis(max_examples=50)
+
+
+if __name__ == "__main__":
+    unittest.main()

From 47b9979345ecd375f601808a1390da2d0630827e Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 15:11:03 +0800
Subject: [PATCH 066/101] Update opset7.py

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index 444446519..3f748790a 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -50,3 +50,17 @@ def Unsqueeze(self, node):
             inputs={"x": val_x.name},
             axis=axes,
             outputs=[node.name])
+    @print_mapping_info
+    def Selu(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        alphas = node.get_attr('alpha', 1.67326)
+        scales = node.get_attr('gamma', 1.0507)
+        layer_attrs = dict()
+
+        self.paddle_graph.add_layer(
+            "paddle.nn.SELU",
+            inputs={"x": val_x.name},
+            alpha=alphas,
+            scale=scales,
+            outputs=[node.name],
+            **layer_attrs)

From 39162be26a6a4929fba71e7b9ad9904b8ee3caaa Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 17:21:22 +0800
Subject: [PATCH 067/101] add relu

---
 tests/onnx/test_auto_scan_relu.py | 58 +++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_relu.py

diff --git a/tests/onnx/test_auto_scan_relu.py b/tests/onnx/test_auto_scan_relu.py
new file mode 100644
index 000000000..fab20f9b8
--- /dev/null
+++ b/tests/onnx/test_auto_scan_relu.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import OPConvertAutoScanTest
+from hypothesis import reproduce_failure
+from onnxbase import randtool
+import hypothesis.strategies as st
+import numpy as np
+import unittest
+
+
+class TestReluConvert(OPConvertAutoScanTest):
+    """
+    ONNX op: Relu
+    OPset version: 7~15
+    """
+
+    def sample_convert_config(self, draw):
+        input_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=2, max_value=6), min_size=2, max_size=5))
+
+        input_dtype = draw(st.sampled_from(["int32"]))
+
+        config = {
+            "op_names": ["Relu"],
+            "test_data_shapes": [input_shape],
+            "test_data_types": [[input_dtype]],
+            "inputs_shape": [input_shape],
+            "min_opset_version": 14,
+            "inputs_name": ["x"],
+            "outputs_name": ["y"],
+            "delta": 1e-4,
+            "rtol": 1e-4
+        }
+
+        attrs = {}
+
+        return (config, attrs)
+
+    def test(self):
+        self.run_and_statis(max_examples=30)
+
+
+if __name__ == "__main__":
+    unittest.main()

From e352bde2f8bfd8cfec8976af986a2724a9a5d891 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 17:26:16 +0800
Subject: [PATCH 068/101] update relu datatype

---
 tests/onnx/test_auto_scan_relu.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/onnx/test_auto_scan_relu.py b/tests/onnx/test_auto_scan_relu.py
index fab20f9b8..32420d37c 100644
--- a/tests/onnx/test_auto_scan_relu.py
+++ b/tests/onnx/test_auto_scan_relu.py
@@ -32,7 +32,7 @@ def sample_convert_config(self, draw):
                 st.integers(
                     min_value=2, max_value=6), min_size=2, max_size=5))
 
-        input_dtype = draw(st.sampled_from(["int32"]))
+        input_dtype = draw(st.sampled_from(["int32", "float32"]))
 
         config = {
             "op_names": ["Relu"],
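The Selu mapping in patch 066 renames one attribute: ONNX calls the outer multiplier gamma, while paddle.nn.SELU calls it scale (alpha keeps its name). A quick equivalence check against the ONNX formula (a sketch, assuming a working paddle install):

    import numpy as np
    import paddle

    alpha, gamma = 1.67326, 1.0507
    x = np.random.randn(8).astype("float32")
    # ONNX Selu: gamma * (max(0, x) + min(0, alpha * (exp(x) - 1)))
    ref = gamma * (np.maximum(0.0, x) + np.minimum(0.0, alpha * np.expm1(x)))
    selu = paddle.nn.SELU(scale=gamma, alpha=alpha)
    print(np.allclose(selu(paddle.to_tensor(x)).numpy(), ref, atol=1e-6))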
From 0e9be714548a048308aed3c7ddd63f1f88bba5e3 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 17:28:12 +0800
Subject: [PATCH 069/101] Update opset14.py

---
 x2paddle/op_mapper/onnx2paddle/opset14.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset14.py b/x2paddle/op_mapper/onnx2paddle/opset14.py
index 0a4f18a79..135376f8c 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset14.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset14.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from .opset13 import OpSet13
+from x2paddle.core.util import *
 
 
 def print_mapping_info(func):
@@ -32,3 +33,23 @@ def run_mapping(*args, **kwargs):
 class OpSet14(OpSet13):
     def __init__(self, decoder, paddle_graph):
         super(OpSet14, self).__init__(decoder, paddle_graph)
+
+    @print_mapping_info
+    def Relu(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+
+        # if val_x.dtypes!='float':
+        indices_cast = val_x.name + '_cast'
+        mid_relu = val_x.name + '_relu'
+        self.paddle_graph.add_layer(
+            'paddle.cast',
+            inputs={"x": val_x.name},
+            outputs=[indices_cast],
+            dtype=string('float32'))
+        self.paddle_graph.add_layer(
+            'paddle.nn.ReLU', inputs={"x": indices_cast}, outputs=[mid_relu])
+        self.paddle_graph.add_layer(
+            'paddle.cast',
+            inputs={"x": mid_relu},
+            outputs=[node.name],
+            dtype=string(val_x.dtype))

From 5f9174d98075559519800cf8a1c4b43191529525 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 17:44:33 +0800
Subject: [PATCH 070/101] fix code style

---
 tests/onnx/test_auto_scan_selu.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/onnx/test_auto_scan_selu.py b/tests/onnx/test_auto_scan_selu.py
index 049d5035c..160527c76 100644
--- a/tests/onnx/test_auto_scan_selu.py
+++ b/tests/onnx/test_auto_scan_selu.py
@@ -37,8 +37,8 @@ def sample_convert_config(self, draw):
         input_dtype = draw(st.sampled_from(["float32"]))
         for i in range(2):
             alpha = random.random()
-
-        gamma =random.uniform(1.1, 3)
+
+        gamma = random.uniform(1.1, 3)
         config = {
             "op_names": ["Selu"],
@@ -52,7 +52,7 @@ def sample_convert_config(self, draw):
             "rtol": 1e-4
         }
 
-        attrs = {"alpha": alpha,"gamma":gamma}
+        attrs = {"alpha": alpha, "gamma": gamma}
 
         return (config, attrs)

From 409be3c9c67cbe513bc6e680388c05324221fc84 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Mon, 8 Aug 2022 18:14:06 +0800
Subject: [PATCH 071/101] fix code style

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index 3f748790a..dfedd6c62 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -33,6 +33,26 @@ class OpSet7(OpSet):
     def __init__(self, decoder, paddle_graph):
         super(OpSet7, self).__init__(decoder, paddle_graph)
 
+    @print_mapping_info
+    def Or(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_input_node(node, idx=1, copy=True)
+        self.paddle_graph.add_layer(
+            "paddle.logical_or",
+            inputs={"x": val_x.name,
+                    "y": val_y.name},
+            outputs=[node.name])
+
+    @print_mapping_info
+    def Xor(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_y = self.graph.get_input_node(node, idx=1, copy=True)
+        self.paddle_graph.add_layer(
+            "paddle.logical_xor",
+            inputs={"x": val_x.name,
+                    "y": val_y.name},
+            outputs=[node.name])
+
     @print_mapping_info
     def Unsqueeze(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
@@ -50,7 +70,8 @@ def Unsqueeze(self, node):
             inputs={"x": val_x.name},
             axis=axes,
             outputs=[node.name])
-    @print_mapping_info
+
+    @print_mapping_info
     def Selu(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         alphas = node.get_attr('alpha', 1.67326)
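The Or/Xor handlers added just above assume boolean inputs, matching the ONNX spec for these ops, and paddle.logical_or / paddle.logical_xor broadcast the same way the ONNX ops do. For reference:

    import paddle

    x = paddle.to_tensor([True, True, False, False])
    y = paddle.to_tensor([True, False, True, False])
    print(paddle.logical_or(x, y).numpy())   # [ True  True  True False]
    print(paddle.logical_xor(x, y).numpy())  # [False  True  True False]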
From 6f8693e986b0bb48617e9cb4e10cc1698f13c7ea Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 16:17:10 +0800
Subject: [PATCH 072/101] add_hardsigmoid

---
 tests/onnx/test_auto_scan_hardsigmoid.py | 67 ++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_hardsigmoid.py

diff --git a/tests/onnx/test_auto_scan_hardsigmoid.py b/tests/onnx/test_auto_scan_hardsigmoid.py
new file mode 100644
index 000000000..2553e8111
--- /dev/null
+++ b/tests/onnx/test_auto_scan_hardsigmoid.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import OPConvertAutoScanTest
+from hypothesis import reproduce_failure
+import hypothesis.strategies as st
+import numpy as np
+import unittest
+import random
+
+
+class TestHardSigmoidCovert(OPConvertAutoScanTest):
+    """
+    ONNX op: HardSigmoid
+    OPset version: 7~15
+    """
+
+    def sample_convert_config(self, draw):
+        input_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=2, max_value=6), min_size=2, max_size=5))
+
+        input_dtype = draw(st.sampled_from(["float32"]))
+
+        for i in range(2):
+            alpha = random.random()
+
+        for i in range(2):
+            beta = random.random()
+
+        config = {
+            "op_names": ["HardSigmoid"],
+            "test_data_shapes": [input_shape],
+            "test_data_types": [[input_dtype]],
+            "inputs_shape": [input_shape],
+            "min_opset_version": 7,
+            "inputs_name": ["x"],
+            "outputs_name": ["y"],
+            "delta": 1e-4,
+            "rtol": 1e-4
+        }
+
+        attrs = {
+            "alpha": alpha,
+            "beta": beta,
+        }
+
+        return (config, attrs)
+
+    def test(self):
+        self.run_and_statis(max_examples=30)
+
+
+if __name__ == "__main__":
+    unittest.main()

From 21935f89daa0c1620da69364f4baf5f5f0d299d5 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 16:56:08 +0800
Subject: [PATCH 073/101] add_reduce_op

---
 tests/onnx/test_auto_scan_reduce_ops.py | 76 +++++++++++++++++++++++++
 1 file changed, 76 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_reduce_ops.py

diff --git a/tests/onnx/test_auto_scan_reduce_ops.py b/tests/onnx/test_auto_scan_reduce_ops.py
new file mode 100644
index 000000000..5f012ca32
--- /dev/null
+++ b/tests/onnx/test_auto_scan_reduce_ops.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import OPConvertAutoScanTest
+from hypothesis import reproduce_failure
+import hypothesis.strategies as st
+import onnx
+from onnx import helper
+from onnx import TensorProto
+import numpy as np
+import unittest
+import random
+
+
+class TestReduceOpsConvert(OPConvertAutoScanTest):
+    """
+    ONNX op: Reduce Ops
+    OPset version: 7~15
+    """
+
+    def sample_convert_config(self, draw):
+        input_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=20, max_value=30), min_size=3, max_size=5))
+
+        input_dtype = draw(st.sampled_from(["float32", "int32", "int64"]))
+
+        axes = draw(
+            st.lists(
+                st.integers(
+                    min_value=-len(input_shape), max_value=len(input_shape) -
+                    1),
+                min_size=1,
+                max_size=1))
+
+        keep_dim = draw(st.integers(min_value=0, max_value=1))
+
+        config = {
+            "op_names": ["ReduceL1"],
+            "test_data_shapes": [input_shape],
+            "test_data_types": [input_dtype],
+            "inputs_shape": [input_shape],
+            "min_opset_version": 7,
+            "max_opset_version": 15,
+            "inputs_name": ["x"],
+            "outputs_name": ["y"],
+            "delta": 1e-4,
+            "rtol": 1e-4,
+            "enable_onnx_checker": False,
+        }
+
+        attrs = {
+            "axes": axes,
+            "keepdims": keep_dim,
+        }
+
+        return (config, attrs)
+
+    def test(self):
+        self.run_and_statis(max_examples=50)
+
+
+if __name__ == "__main__":
+    unittest.main()
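The reduce-op test above leans on the identity that the next patch implements: ReduceL1 is sum(|x|) over the reduced axes and ReduceL2 is sqrt(sum(x**2)), i.e. paddle.norm with p=1 and p=2. A NumPy cross-check (shapes and axes are illustrative):

    import numpy as np
    import paddle

    x = np.random.randn(2, 3, 4).astype("float32")
    t = paddle.to_tensor(x)
    l1 = paddle.norm(t, p=1, axis=1, keepdim=True).numpy()
    l2 = paddle.norm(t, p=2, axis=1, keepdim=True).numpy()
    print(np.allclose(l1, np.abs(x).sum(axis=1, keepdims=True), atol=1e-5))
    print(np.allclose(l2, np.sqrt((x * x).sum(axis=1, keepdims=True)), atol=1e-5))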
From 62d5a01602d3d5a720875f53f1e2b6a0bee53c34 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 17:08:40 +0800
Subject: [PATCH 074/101] Move ReduceL1/ReduceL2 into OpSet7 and revert
 opset_legacy workarounds

---
 x2paddle/op_mapper/onnx2paddle/opset7.py      | 67 +++++++++++++++++++
 .../op_mapper/onnx2paddle/opset_legacy.py     | 60 ++---------------
 2 files changed, 71 insertions(+), 56 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index 6c0c22ece..31be1912b 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from .opset_legacy import OpSet
+from x2paddle.core.util import *
 
 
 def print_mapping_info(func):
@@ -70,3 +71,69 @@ def Unsqueeze(self, node):
             inputs={"x": val_x.name},
             axis=axes,
             outputs=[node.name])
+
+    @print_mapping_info
+    def ReduceL1(self, node):
+        output_name = node.name
+        layer_outputs = [output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        axes = node.get_attr('axes')
+        keepdims = False if node.get_attr('keepdims') == 0 else True
+        layer_attrs = {'p': 1, 'axis': axes, 'keepdim': keepdims}
+        if val_x.dtype == 'int32':
+            indices_cast = val_x.name + '_cast'
+            mid_norm = val_x.name + '_norm'
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": val_x.name},
+                outputs=[indices_cast],
+                dtype=string('float32'))
+            self.paddle_graph.add_layer(
+                "paddle.norm",
+                inputs={"x": indices_cast},
+                outputs=[mid_norm],
+                **layer_attrs)
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": mid_norm},
+                outputs=[node.name],
+                dtype=string(val_x.dtype))
+        else:
+            self.paddle_graph.add_layer(
+                "paddle.norm",
+                inputs={"x": val_x.name},
+                outputs=layer_outputs,
+                **layer_attrs)
+
+    @print_mapping_info
+    def ReduceL2(self, node):
+        output_name = node.name
+        layer_outputs = [output_name]
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        axes = node.get_attr('axes')
+        keepdims = False if node.get_attr('keepdims') == 0 else True
+        layer_attrs = {'p': 2, 'axis': axes, 'keepdim': keepdims}
+        if val_x.dtype == 'int32':
+            indices_cast = val_x.name + '_cast'
+            mid_norm = val_x.name + '_norm'
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": val_x.name},
+                outputs=[indices_cast],
+                dtype=string('float32'))
+            self.paddle_graph.add_layer(
+                "paddle.norm",
+                inputs={"x": indices_cast},
+                outputs=[mid_norm],
+                **layer_attrs)
+            self.paddle_graph.add_layer(
+                'paddle.cast',
+                inputs={"x": mid_norm},
+                outputs=[node.name],
+                dtype=string(val_x.dtype))
+        else:
+            self.paddle_graph.add_layer(
+                "paddle.norm",
+                inputs={"x": val_x.name},
+                outputs=layer_outputs,
+                **layer_attrs)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py
index f1c483854..808dd39fb 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset_legacy.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset_legacy.py
@@ -213,14 +213,7 @@ def directly_map(self, node, *args, **kwargs):
         attrs_name_map_dict = op_info[1]
         for onnx_attr_name, pd_attr_name in attrs_name_map_dict.items():
             if onnx_attr_name in onnx_attrs:
-                # trans 1 to True, 0 to False
-                if onnx_attr_name == "keepdims":
-                    if onnx_attrs[onnx_attr_name] == 1:
-                        layer_attrs[pd_attr_name] = True
-                    else:
-                        layer_attrs[pd_attr_name] = False
-                else:
-                    layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name]
+                layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name]
             else:
                 layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name]
         if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op:
@@ -357,43 +350,6 @@ def _interpolate(self, node):
                     # which is the same as the rank of input.
                     attrs['scale_factor'] = self.weights[val_scales.name].tolist()[
                         2:]
-                if len(val_x_shape) == 3:
-                    val_scales = self.graph.get_input_node(
-                        node, idx=2, copy=True)
-                    val_scales_values = _const_weight_or_none(val_scales)
-
-                    attrs = {
-                        "align_corners": False,
-                        "mode": string(node.get_attr('mode', 'nearest')),
-                        "scale_factor":
-                        self.weights[val_scales.name].tolist()[1:]
-                    }
-                    mode = node.get_attr('mode', 'nearest')
-                    if mode == "linear":
-                        attrs["mode"] = string("bilinear")
-                    if node.get_attr('coordinate_transformation_mode',
-                                     'half_pixel') == 'pytorch_half_pixel':
-                        attrs["align_corners"] = False
-                        attrs["align_mode"] = 0
-                    if node.get_attr('coordinate_transformation_mode',
-                                     'half_pixel') == 'align_corners':
-                        attrs["align_corners"] = True
-                    self.paddle_graph.add_layer(
-                        'paddle.unsqueeze',
-                        inputs={"x": val_x.name},
-                        outputs=[val_x.name],
-                        axis=0)
-                    self.paddle_graph.add_layer(
-                        kernel="paddle.nn.functional.interpolate",
-                        inputs=inputs,
-                        outputs=[node.name],
-                        **attrs)
-                    self.paddle_graph.add_layer(
-                        'paddle.squeeze',
-                        inputs={"x": node.name},
-                        outputs=[node.name],
-                        axis=0)
-                    return
                 elif len(node.layer.input) == 4:
                     # opset 11
                     val_sizes = self.graph.get_input_node(node, idx=3, copy=True)
@@ -1669,6 +1625,7 @@ def Flatten(self, node):
     def Gemm(self, node):
         val_a = self.graph.get_input_node(node, idx=0, copy=True)
         val_b = self.graph.get_input_node(node, idx=1, copy=True)
+        val_c = self.graph.get_input_node(node, idx=2, copy=True)
         alpha = node.get_attr('alpha', 1.)  # optional
         beta = node.get_attr('beta', 1.)  # optional
@@ -1685,19 +1642,10 @@ def Gemm(self, node):
             inputs=matmul_inputs,
             outputs=[val_mm],
             **attr_matmul)
-        if beta != 0:
-            self.paddle_graph.add_layer(
-                "paddle.scale",
-                inputs={"x": val_mm},
-                outputs=[val_mm],
-                scale=alpha)
-        else:
-            self.paddle_graph.add_layer(
-                "paddle.scale", inputs={"x": val_mm}, outputs=[node.name])
+        self.paddle_graph.add_layer(
+            "paddle.scale", inputs={"x": val_mm}, outputs=[val_mm], scale=alpha)
 
         if beta != 0:
-            # when beta is equal to 0, there is no val_c
-            val_c = self.graph.get_input_node(node, idx=2, copy=True)
             if beta == 1.:
                 add_inputs = {"x": val_mm, "y": val_c.name}
                 self.paddle_graph.add_layer(

From 0fc1532a17d0f53302bbdf160a16a98e692d2fbf Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 17:09:46 +0800
Subject: [PATCH 075/101] Revert the multi-output handling in onnx_decoder

---
 x2paddle/decoder/onnx_decoder.py | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/x2paddle/decoder/onnx_decoder.py b/x2paddle/decoder/onnx_decoder.py
index 045530010..57e5cbe38 100755
--- a/x2paddle/decoder/onnx_decoder.py
+++ b/x2paddle/decoder/onnx_decoder.py
@@ -336,14 +336,7 @@ def build_connection(self, layer_name, node):
                             break
                     else:
                         first_i = node.inputs.index(nd.name)
-                        # deal with Multiple outputs correspond to one node
-                        if self.node_map[nd.name].outputs.count(
-                                layer_name) > 1:
-                            new_child_name = "{}/{}".format(nd.name,
-                                                            idx)
-                            node.which_child[new_child_name] = idx
-                        else:
-                            node.which_child[nd.name] = idx
+                        node.which_child[nd.name] = idx
                         self.node_map[nd.name].index = 0
                         break
             if flag == 1:

From 721dab21cc76bdd70421cfd6785fc568a9cecaf1 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 17:13:40 +0800
Subject: [PATCH 076/101] add reduceL2

---
 tests/onnx/test_auto_scan_reduce_ops.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/onnx/test_auto_scan_reduce_ops.py b/tests/onnx/test_auto_scan_reduce_ops.py
index 5f012ca32..446cfc99d 100644
--- a/tests/onnx/test_auto_scan_reduce_ops.py
+++ b/tests/onnx/test_auto_scan_reduce_ops.py
@@ -48,7 +48,7 @@ def sample_convert_config(self, draw):
         keep_dim = draw(st.integers(min_value=0, max_value=1))
 
         config = {
-            "op_names": ["ReduceL1"],
+            "op_names": ["ReduceL1", "ReduceL2"],
            "test_data_shapes": [input_shape],
             "test_data_types": [input_dtype],
             "inputs_shape": [input_shape],

From 9885114ec7587fec2776688969d2fa00d9bb26f0 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 19:14:43 +0800
Subject: [PATCH 077/101] remove Log and Sqrt

---
 tests/onnx/test_auto_scan_unary_ops.py   | 2 --
 x2paddle/op_mapper/onnx2paddle/opset7.py | 1 -
 2 files changed, 3 deletions(-)

diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py
index 8d6f9ff80..ee7e721fc 100644
--- a/tests/onnx/test_auto_scan_unary_ops.py
+++ b/tests/onnx/test_auto_scan_unary_ops.py
@@ -23,7 +23,6 @@
 import random
 
 min_opset_version_map = {
-    "Log": 7,
     "Cosh": 9,
     "Cos": 7,
     "Atan": 7,
@@ -40,7 +39,6 @@
     "Sinh": 9,
     "Tanh": 7,
     "Atanh": 9,
-    "Sqrt": 7,
     "Shape": 7,
     "Sign": 9,
 }

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index 0cd923124..c933f5aaf 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 from .opset_legacy import OpSet
-from x2paddle.core.util import *
 
 
 def print_mapping_info(func):

From 46657e09eb3b21365219fdf02a5609c30dce91cd Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 19:20:21 +0800
Subject: [PATCH 078/101] add selu in opset7

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index c933f5aaf..a81108c8f 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -76,3 +76,18 @@ def Unsqueeze(self, node):
             inputs={"x": val_x.name},
             axis=axes,
             outputs=[node.name])
+
+    @print_mapping_info
+    def Selu(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        alphas = node.get_attr('alpha', 1.67326)
+        scales = node.get_attr('gamma', 1.0507)
+        layer_attrs = dict()
+
+        self.paddle_graph.add_layer(
+            "paddle.nn.SELU",
+            inputs={"x": val_x.name},
+            alpha=alphas,
+            scale=scales,
+            outputs=[node.name],
+            **layer_attrs)
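The Gemm handling restored in patch 074 emits the plain decomposition Y = alpha * (A @ B) + beta * C: one matmul, an unconditional scale by alpha, then the C term only when beta is nonzero. A NumPy reference of what the emitted graph computes (shapes are illustrative):

    import numpy as np

    def gemm_ref(A, B, C, alpha=1.0, beta=1.0):
        # ONNX Gemm with transA/transB already resolved by the matmul attrs.
        return alpha * (A @ B) + beta * C

    A = np.random.randn(2, 3).astype("float32")
    B = np.random.randn(3, 4).astype("float32")
    C = np.random.randn(4).astype("float32")  # C broadcasts, as in ONNX
    print(gemm_ref(A, B, C, alpha=0.5, beta=2.0).shape)  # (2, 4)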
From 374f0e584d1b73c99d9d4b3df31c64d7d1f03ad6 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:22:19 +0800
Subject: [PATCH 079/101] Delete test_auto_scan_celu.py

---
 tests/onnx/test_auto_scan_celu.py | 63 -------------------------------
 1 file changed, 63 deletions(-)
 delete mode 100644 tests/onnx/test_auto_scan_celu.py

diff --git a/tests/onnx/test_auto_scan_celu.py b/tests/onnx/test_auto_scan_celu.py
deleted file mode 100644
index 58bc0671e..000000000
--- a/tests/onnx/test_auto_scan_celu.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import OPConvertAutoScanTest
-from hypothesis import reproduce_failure
-import hypothesis.strategies as st
-import onnx
-from onnx import helper
-from onnx import TensorProto
-import numpy as np
-import unittest
-import random
-
-
-class TestCeluConvert(OPConvertAutoScanTest):
-    """
-    ONNX op: Celu
-    OPset version: 12~15
-    """
-
-    def sample_convert_config(self, draw):
-        input_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=20, max_value=30), min_size=3, max_size=5))
-        input_dtype = draw(st.sampled_from(["float32"]))
-        for i in range(2):
-            alpha = random.random()
-
-        config = {
-            "op_names": ["Celu"],
-            "test_data_shapes": [input_shape],
-            "test_data_types": [input_dtype],
-            "inputs_shape": [input_shape],
-            "min_opset_version": 12,
-            "max_opset_version": 15,
-            "inputs_name": ["x"],
-            "outputs_name": ["y"],
-            "delta": 1e-4,
-            "rtol": 1e-4
-        }
-
-        attrs = {"alpha": alpha}
-
-        return (config, attrs)
-
-    def test(self):
-        self.run_and_statis(max_examples=50)
-
-
-if __name__ == "__main__":
-    unittest.main()

From 944b7abcec05086c3bd561bf3f9594a1d3e6a3b4 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:22:32 +0800
Subject: [PATCH 080/101] Delete test_auto_scan_relu.py

---
 tests/onnx/test_auto_scan_relu.py | 58 -------------------------------
 1 file changed, 58 deletions(-)
 delete mode 100644 tests/onnx/test_auto_scan_relu.py

diff --git a/tests/onnx/test_auto_scan_relu.py b/tests/onnx/test_auto_scan_relu.py
deleted file mode 100644
index 32420d37c..000000000
--- a/tests/onnx/test_auto_scan_relu.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import OPConvertAutoScanTest
-from hypothesis import reproduce_failure
-from onnxbase import randtool
-import hypothesis.strategies as st
-import numpy as np
-import unittest
-
-
-class TestReluConvert(OPConvertAutoScanTest):
-    """
-    ONNX op: Relu
-    OPset version: 7~15
-    """
-
-    def sample_convert_config(self, draw):
-        input_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=2, max_value=6), min_size=2, max_size=5))
-
-        input_dtype = draw(st.sampled_from(["int32", "float32"]))
-
-        config = {
-            "op_names": ["Relu"],
-            "test_data_shapes": [input_shape],
-            "test_data_types": [[input_dtype]],
-            "inputs_shape": [input_shape],
-            "min_opset_version": 14,
-            "inputs_name": ["x"],
-            "outputs_name": ["y"],
-            "delta": 1e-4,
-            "rtol": 1e-4
-        }
-
-        attrs = {}
-
-        return (config, attrs)
-
-    def test(self):
-        self.run_and_statis(max_examples=30)
-
-
-if __name__ == "__main__":
-    unittest.main()

From 48088dd768dc1292688babe9cebc04ee4a78d225 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:22:51 +0800
Subject: [PATCH 081/101] Delete test_auto_scan_reduce_ops.py

---
 tests/onnx/test_auto_scan_reduce_ops.py | 76 -------------------------
 1 file changed, 76 deletions(-)
 delete mode 100644 tests/onnx/test_auto_scan_reduce_ops.py

diff --git a/tests/onnx/test_auto_scan_reduce_ops.py b/tests/onnx/test_auto_scan_reduce_ops.py
deleted file mode 100644
index 446cfc99d..000000000
--- a/tests/onnx/test_auto_scan_reduce_ops.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import OPConvertAutoScanTest
-from hypothesis import reproduce_failure
-import hypothesis.strategies as st
-import onnx
-from onnx import helper
-from onnx import TensorProto
-import numpy as np
-import unittest
-import random
-
-
-class TestReduceOpsConvert(OPConvertAutoScanTest):
-    """
-    ONNX op: Reduce Ops
-    OPset version: 7~15
-    """
-
-    def sample_convert_config(self, draw):
-        input_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=20, max_value=30), min_size=3, max_size=5))
-
-        input_dtype = draw(st.sampled_from(["float32", "int32", "int64"]))
-
-        axes = draw(
-            st.lists(
-                st.integers(
-                    min_value=-len(input_shape), max_value=len(input_shape) -
-                    1),
-                min_size=1,
-                max_size=1))
-
-        keep_dim = draw(st.integers(min_value=0, max_value=1))
-
-        config = {
-            "op_names": ["ReduceL1", "ReduceL2"],
-            "test_data_shapes": [input_shape],
-            "test_data_types": [input_dtype],
-            "inputs_shape": [input_shape],
-            "min_opset_version": 7,
-            "max_opset_version": 15,
-            "inputs_name": ["x"],
-            "outputs_name": ["y"],
-            "delta": 1e-4,
-            "rtol": 1e-4,
-            "enable_onnx_checker": False,
-        }
-
-        attrs = {
-            "axes": axes,
-            "keepdims": keep_dim,
-        }
-
-        return (config, attrs)
-
-    def test(self):
-        self.run_and_statis(max_examples=50)
-
-
-if __name__ == "__main__":
-    unittest.main()

From 12a8ec1396e2b4d08259b550ba98ba3428ad652b Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:23:02 +0800
Subject: [PATCH 082/101] Delete test_auto_scan_hardsigmoid.py

---
 tests/onnx/test_auto_scan_hardsigmoid.py | 67 ------------------------
 1 file changed, 67 deletions(-)
 delete mode 100644 tests/onnx/test_auto_scan_hardsigmoid.py

diff --git a/tests/onnx/test_auto_scan_hardsigmoid.py b/tests/onnx/test_auto_scan_hardsigmoid.py
deleted file mode 100644
index 2553e8111..000000000
--- a/tests/onnx/test_auto_scan_hardsigmoid.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from auto_scan_test import OPConvertAutoScanTest
-from hypothesis import reproduce_failure
-import hypothesis.strategies as st
-import numpy as np
-import unittest
-import random
-
-
-class TestHardSigmoidCovert(OPConvertAutoScanTest):
-    """
-    ONNX op: HardSigmoid
-    OPset version: 7~15
-    """
-
-    def sample_convert_config(self, draw):
-        input_shape = draw(
-            st.lists(
-                st.integers(
-                    min_value=2, max_value=6), min_size=2, max_size=5))
-
-        input_dtype = draw(st.sampled_from(["float32"]))
-
-        for i in range(2):
-            alpha = random.random()
-
-        for i in range(2):
-            beta = random.random()
-
-        config = {
-            "op_names": ["HardSigmoid"],
-            "test_data_shapes": [input_shape],
-            "test_data_types": [[input_dtype]],
-            "inputs_shape": [input_shape],
-            "min_opset_version": 7,
-            "inputs_name": ["x"],
-            "outputs_name": ["y"],
-            "delta": 1e-4,
-            "rtol": 1e-4
-        }
-
-        attrs = {
-            "alpha": alpha,
-            "beta": beta,
-        }
-
-        return (config, attrs)
-
-    def test(self):
-        self.run_and_statis(max_examples=30)
-
-
-if __name__ == "__main__":
-    unittest.main()

From b613e439f137b1171d627fc65314b56ad5a31776 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 19:24:17 +0800
Subject: [PATCH 083/101] Remove Selu from opset7

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index a81108c8f..c933f5aaf 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -76,18 +76,3 @@ def Unsqueeze(self, node):
             inputs={"x": val_x.name},
             axis=axes,
             outputs=[node.name])
-
-    @print_mapping_info
-    def Selu(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        alphas = node.get_attr('alpha', 1.67326)
-        scales = node.get_attr('gamma', 1.0507)
-        layer_attrs = dict()
-
-        self.paddle_graph.add_layer(
-            "paddle.nn.SELU",
-            inputs={"x": val_x.name},
-            alpha=alphas,
-            scale=scales,
-            outputs=[node.name],
-            **layer_attrs)

From 2b00d7b3ce6a47085e2f38d2410e49fad10d2ccc Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 19:24:53 +0800
Subject: [PATCH 084/101] Remove Celu from opset12

---
 x2paddle/op_mapper/onnx2paddle/opset12.py | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset12.py b/x2paddle/op_mapper/onnx2paddle/opset12.py
index 8032cc217..69f69b012 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset12.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset12.py
@@ -32,16 +32,3 @@ def run_mapping(*args, **kwargs):
 class OpSet12(OpSet11):
     def __init__(self, decoder, paddle_graph):
         super(OpSet12, self).__init__(decoder, paddle_graph)
-
-    @print_mapping_info
-    def Celu(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        alphas = node.get_attr('alpha', 1.0)
-        layer_attrs = dict()
-
-        self.paddle_graph.add_layer(
-            "paddle.nn.functional.celu",
-            inputs={"x": val_x.name},
-            alpha=alphas,
-            outputs=[node.name],
-            **layer_attrs)

From 24e66c0fe183c667adf774f802346f1e25303be1 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Tue, 9 Aug 2022 19:25:19 +0800
Subject: [PATCH 085/101] Remove Relu from opset14

---
 x2paddle/op_mapper/onnx2paddle/opset14.py | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset14.py b/x2paddle/op_mapper/onnx2paddle/opset14.py
index 9e9c89590..6337263b9 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset14.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset14.py
@@ -33,24 +33,3 @@ def run_mapping(*args, **kwargs):
 class OpSet14(OpSet13):
     def __init__(self, decoder, paddle_graph):
         super(OpSet14, self).__init__(decoder, paddle_graph)
-
-    @print_mapping_info
-    def Relu(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        if val_x.dtype != 'float':
-            indices_cast = val_x.name + '_cast'
-            mid_relu = val_x.name + '_relu'
-            self.paddle_graph.add_layer(
-                'paddle.cast',
-                inputs={"x": val_x.name},
-                outputs=[indices_cast],
-                dtype=string('float32'))
-            self.paddle_graph.add_layer(
-                'paddle.nn.ReLU',
-                inputs={"x": indices_cast},
-                outputs=[mid_relu])
-            self.paddle_graph.add_layer(
-                'paddle.cast',
-                inputs={"x": mid_relu},
-                outputs=[node.name],
-                dtype=string(val_x.dtype))

From 0ab52309a8553622b0f247010204bf1f6b4b63dc Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:28:47 +0800
Subject: [PATCH 086/101] Update opset7.py

---
 x2paddle/op_mapper/onnx2paddle/opset7.py | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py
index a81108c8f..c933f5aaf 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset7.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset7.py
@@ -76,18 +76,3 @@ def Unsqueeze(self, node):
             inputs={"x": val_x.name},
             axis=axes,
             outputs=[node.name])
-
-    @print_mapping_info
-    def Selu(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        alphas = node.get_attr('alpha', 1.67326)
-        scales = node.get_attr('gamma', 1.0507)
-        layer_attrs = dict()
-
-        self.paddle_graph.add_layer(
-            "paddle.nn.SELU",
-            inputs={"x": val_x.name},
-            alpha=alphas,
-            scale=scales,
-            outputs=[node.name],
-            **layer_attrs)

From d72db3571242085d22ef7eeb049b9e18cea6b336 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:29:10 +0800
Subject: [PATCH 087/101] Update opset12.py

---
 x2paddle/op_mapper/onnx2paddle/opset12.py | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset12.py b/x2paddle/op_mapper/onnx2paddle/opset12.py
index 8032cc217..69f69b012 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset12.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset12.py
@@ -32,16 +32,3 @@ def run_mapping(*args, **kwargs):
 class OpSet12(OpSet11):
     def __init__(self, decoder, paddle_graph):
         super(OpSet12, self).__init__(decoder, paddle_graph)
-
-    @print_mapping_info
-    def Celu(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        alphas = node.get_attr('alpha', 1.0)
-        layer_attrs = dict()
-
-        self.paddle_graph.add_layer(
-            "paddle.nn.functional.celu",
-            inputs={"x": val_x.name},
-            alpha=alphas,
-            outputs=[node.name],
-            **layer_attrs)

From 60c500cc0d084acd35ede67a908287f5cea10482 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:29:42 +0800
Subject: [PATCH 088/101] Update opset14.py

---
 x2paddle/op_mapper/onnx2paddle/opset14.py | 21 ---------------------
 1 file changed, 21 deletions(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset14.py b/x2paddle/op_mapper/onnx2paddle/opset14.py
index 9e9c89590..6337263b9 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset14.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset14.py
@@ -33,24 +33,3 @@ def run_mapping(*args, **kwargs):
 class OpSet14(OpSet13):
     def __init__(self, decoder, paddle_graph):
         super(OpSet14, self).__init__(decoder, paddle_graph)
-
-    @print_mapping_info
-    def Relu(self, node):
-        val_x = self.graph.get_input_node(node, idx=0, copy=True)
-        if val_x.dtype != 'float':
-            indices_cast = val_x.name + '_cast'
-            mid_relu = val_x.name + '_relu'
-            self.paddle_graph.add_layer(
-                'paddle.cast',
-                inputs={"x": val_x.name},
-                outputs=[indices_cast],
-                dtype=string('float32'))
-            self.paddle_graph.add_layer(
-                'paddle.nn.ReLU',
-                inputs={"x": indices_cast},
-                outputs=[mid_relu])
-            self.paddle_graph.add_layer(
-                'paddle.cast',
-                inputs={"x": mid_relu},
-                outputs=[node.name],
-                dtype=string(val_x.dtype))

From 2718c0b97dcefeb64c46f215fbe3d8b41914ca2e Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:31:14 +0800
Subject: [PATCH 089/101] Update test_auto_scan_unary_ops.py

---
 tests/onnx/test_auto_scan_unary_ops.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py
index ee7e721fc..81d37d113 100644
--- a/tests/onnx/test_auto_scan_unary_ops.py
+++ b/tests/onnx/test_auto_scan_unary_ops.py
@@ -39,7 +39,6 @@
     "Sinh": 9,
     "Tanh": 7,
     "Atanh": 9,
-    "Shape": 7,
     "Sign": 9,
 }
@@ -75,7 +74,6 @@
                 "Sinh",
                 "Tanh",
                 "Atanh",
-                "Shape",
                 "Sign",
             ],
             "test_data_shapes": [input_shape],

From 77a8e08f4fc571ed7734e22f96cc02f02c575847 Mon Sep 17 00:00:00 2001
From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com>
Date: Tue, 9 Aug 2022 19:31:48 +0800
Subject: [PATCH 090/101] Update opset14.py

---
 x2paddle/op_mapper/onnx2paddle/opset14.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset14.py b/x2paddle/op_mapper/onnx2paddle/opset14.py
index 6337263b9..0a4f18a79 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset14.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset14.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 from .opset13 import OpSet13
-from x2paddle.core.util import *
 
 
 def print_mapping_info(func):
+ +from auto_scan_test import OPConvertAutoScanTest +from hypothesis import reproduce_failure +import hypothesis.strategies as st +import onnx +from onnx import helper +from onnx import TensorProto +import numpy as np +import unittest +import random + + +class TestEluConvert(OPConvertAutoScanTest): + """ + ONNX op: Elu + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + for i in range(2): + alpha = random.random() + + config = { + "op_names": ["Elu"], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "max_opset_version": 15, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + attrs = {"alpha": alpha} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) From 2ff088b70a0238cdd255499b28cd91fcf68d4f84 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Tue, 9 Aug 2022 20:24:27 +0800 Subject: [PATCH 092/101] fix style --- tests/onnx/test_auto_scan_elu.py | 72 +++++++++++++++++--------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index cd5c9c347..b772055f6 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -24,37 +24,41 @@ class TestEluConvert(OPConvertAutoScanTest): - """ - ONNX op: Elu - OPset version: 7~15 - """ - - def sample_convert_config(self, draw): - input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=3, max_size=5)) - - input_dtype = draw(st.sampled_from(["float32"])) - for i in range(2): - alpha = random.random() - - config = { - "op_names": ["Elu"], - "test_data_shapes": [input_shape], - "test_data_types": [input_dtype], - "inputs_shape": [input_shape], - "min_opset_version": 7, - "max_opset_version": 15, - "inputs_name": ["x"], - "outputs_name": ["y"], - "delta": 1e-4, - "rtol": 1e-4 - } - - attrs = {"alpha": alpha} - - return (config, attrs) - - def test(self): - self.run_and_statis(max_examples=50) + """ + ONNX op: Elu + OPset version: 7~15 + """ + + def sample_convert_config(self, draw): + input_shape = draw( + st.lists( + st.integers( + min_value=20, max_value=30), min_size=3, max_size=5)) + + input_dtype = draw(st.sampled_from(["float32"])) + for i in range(2): + alpha = random.random() + + config = { + "op_names": ["Elu"], + "test_data_shapes": [input_shape], + "test_data_types": [input_dtype], + "inputs_shape": [input_shape], + "min_opset_version": 7, + "max_opset_version": 15, + "inputs_name": ["x"], + "outputs_name": ["y"], + "delta": 1e-4, + "rtol": 1e-4 + } + + attrs = {"alpha": alpha} + + return (config, attrs) + + def test(self): + self.run_and_statis(max_examples=50) + + +if __name__ == "__main__": + unittest.main() From 59d3758262b672d44271d33fc2a5b6af9ae2db11 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Tue, 9 Aug 2022 20:26:57 +0800 Subject: [PATCH 093/101] fix style --- tests/onnx/test_auto_scan_elu.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py index b772055f6..23aee7e74 100644 --- a/tests/onnx/test_auto_scan_elu.py +++ b/tests/onnx/test_auto_scan_elu.py @@ -15,9 +15,6 @@ from auto_scan_test import OPConvertAutoScanTest from hypothesis import reproduce_failure import 
hypothesis.strategies as st
-import onnx
-from onnx import helper
-from onnx import TensorProto
 import numpy as np
 import unittest
 import random
@@ -36,8 +33,7 @@ def sample_convert_config(self, draw):
                     min_value=20, max_value=30), min_size=3, max_size=5))
 
         input_dtype = draw(st.sampled_from(["float32"]))
-        for i in range(2):
-            alpha = random.random()
+        alpha = random.random()
 
         config = {
             "op_names": ["Elu"],

From a024017a501136980fdd1626a6d68f8c89c7dd3b Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Wed, 10 Aug 2022 15:25:45 +0800
Subject: [PATCH 094/101] add test auto scan of isinf

---
 tests/onnx/test_auto_scan_isinf.py | 58 ++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)
 create mode 100644 tests/onnx/test_auto_scan_isinf.py

diff --git a/tests/onnx/test_auto_scan_isinf.py b/tests/onnx/test_auto_scan_isinf.py
new file mode 100644
index 000000000..72291a82b
--- /dev/null
+++ b/tests/onnx/test_auto_scan_isinf.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from auto_scan_test import OPConvertAutoScanTest
+from hypothesis import reproduce_failure
+import hypothesis.strategies as st
+import numpy as np
+import unittest
+import random
+
+
+class TestIsInfConvert(OPConvertAutoScanTest):
+    """
+    ONNX op: IsInf
+    OPset version: 10~15
+    """
+
+    def sample_convert_config(self, draw):
+        input_shape = draw(
+            st.lists(
+                st.integers(
+                    min_value=20, max_value=30), min_size=3, max_size=5))
+        input_dtype = draw(st.sampled_from(["float32"]))
+        config = {
+            "op_names": ["IsInf"],
+            "test_data_shapes": [input_shape],
+            "test_data_types": [input_dtype],
+            "inputs_shape": [input_shape],
+            "min_opset_version": 10,
+            "max_opset_version": 15,
+            "inputs_name": ["x"],
+            "outputs_name": ["y"],
+            "delta": 1e-4,
+            "rtol": 1e-4,
+            "run_dynamic": True,
+        }
+
+        attrs = {}
+
+        return (config, attrs)
+
+    def test(self):
+        self.run_and_statis(max_examples=50)
+
+
+if __name__ == "__main__":
+    unittest.main()

From a86ae5ad36dcba8cd333ee16e06ae613f6a6b436 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Wed, 10 Aug 2022 15:27:20 +0800
Subject: [PATCH 095/101] add op_mapper of isinf

---
 x2paddle/op_mapper/onnx2paddle/opset10.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/x2paddle/op_mapper/onnx2paddle/opset10.py b/x2paddle/op_mapper/onnx2paddle/opset10.py
index 48df1e4bc..53822e9f3 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset10.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset10.py
@@ -34,3 +34,15 @@ def __init__(self, decoder, paddle_graph):
         super(OpSet10, self).__init__(decoder, paddle_graph)
         # Support Mod op Since opset version >= 10
         self.elementwise_ops.update({"Mod": "paddle.mod"})
+
+    @print_mapping_info
+    def IsInf(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        if node.get_attr('detect_negative') != None or node.get_attr(
+                'detect_positive') != None:
+            raise Exception(
+                "X2Paddle does not currently support IsInf with attributes 'detect_negative' and 'detect_positive'."
+ ) + else: + self.paddle_graph.add_layer( + 'paddle.isinf', inputs={"x": val_x.name}, outputs=[node.name]) From 0792193289a6208a9764c0731197411908c33454 Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Wed, 10 Aug 2022 15:33:53 +0800 Subject: [PATCH 096/101] Delete test_auto_scan_elu.py --- tests/onnx/test_auto_scan_elu.py | 60 -------------------------------- 1 file changed, 60 deletions(-) delete mode 100644 tests/onnx/test_auto_scan_elu.py diff --git a/tests/onnx/test_auto_scan_elu.py b/tests/onnx/test_auto_scan_elu.py deleted file mode 100644 index 23aee7e74..000000000 --- a/tests/onnx/test_auto_scan_elu.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from auto_scan_test import OPConvertAutoScanTest -from hypothesis import reproduce_failure -import hypothesis.strategies as st -import numpy as np -import unittest -import random - - -class TestEluConvert(OPConvertAutoScanTest): - """ - ONNX op: Elu - OPset version: 7~15 - """ - - def sample_convert_config(self, draw): - input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=3, max_size=5)) - - input_dtype = draw(st.sampled_from(["float32"])) - alpha = random.random() - - config = { - "op_names": ["Elu"], - "test_data_shapes": [input_shape], - "test_data_types": [input_dtype], - "inputs_shape": [input_shape], - "min_opset_version": 7, - "max_opset_version": 15, - "inputs_name": ["x"], - "outputs_name": ["y"], - "delta": 1e-4, - "rtol": 1e-4 - } - - attrs = {"alpha": alpha} - - return (config, attrs) - - def test(self): - self.run_and_statis(max_examples=50) - - -if __name__ == "__main__": - unittest.main() From 802d7a8af547996ea201a167d95a4b84fe521a0f Mon Sep 17 00:00:00 2001 From: qqj1130247885 <51647379+qqj1130247885@users.noreply.github.com> Date: Wed, 10 Aug 2022 15:34:03 +0800 Subject: [PATCH 097/101] Delete test_auto_scan_unary_ops.py --- tests/onnx/test_auto_scan_unary_ops.py | 103 ------------------------- 1 file changed, 103 deletions(-) delete mode 100644 tests/onnx/test_auto_scan_unary_ops.py diff --git a/tests/onnx/test_auto_scan_unary_ops.py b/tests/onnx/test_auto_scan_unary_ops.py deleted file mode 100644 index 81d37d113..000000000 --- a/tests/onnx/test_auto_scan_unary_ops.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License" -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from auto_scan_test import OPConvertAutoScanTest -from hypothesis import reproduce_failure -import hypothesis.strategies as st -import onnx -from onnx import helper -from onnx import TensorProto -import numpy as np -import unittest -import random - -min_opset_version_map = { - "Cosh": 9, - "Cos": 7, - "Atan": 7, - "Asinh": 9, - "Asin": 7, - "Acosh": 9, - "Acos": 7, - "Exp": 7, - "Floor": 7, - "Tan": 7, - "Ceil": 7, - "Erf": 9, - "Sin": 7, - "Sinh": 9, - "Tanh": 7, - "Atanh": 9, - "Sign": 9, -} - - -class TestUnaryopsConcert(OPConvertAutoScanTest): - """ - ONNX op: unary ops - OPset version: 7~15 - """ - - def sample_convert_config(self, draw): - input_shape = draw( - st.lists( - st.integers( - min_value=20, max_value=30), min_size=3, max_size=5)) - - input_dtype = draw(st.sampled_from(["float32"])) - - config = { - "op_names": [ - "Cos", - "Atan", - "Asinh", - "Asin", - "Acosh", - "Acos", - "Cosh", - "Exp", - "Floor", - "Tan", - "Erf", - "Sin", - "Sinh", - "Tanh", - "Atanh", - "Sign", - ], - "test_data_shapes": [input_shape], - "test_data_types": [input_dtype], - "inputs_shape": [input_shape], - "min_opset_version": 7, - "inputs_name": ["x"], - "outputs_name": ["y"], - "delta": 1e-4, - "rtol": 1e-4 - } - - min_opset_versions = list() - for op_name in config["op_names"]: - min_opset_versions.append(min_opset_version_map[op_name]) - config["min_opset_version"] = min_opset_versions - - attrs = {} - - return (config, attrs) - - def test(self): - self.run_and_statis(max_examples=50) - - -if __name__ == "__main__": - unittest.main() From ef59555921b2275b12674d8b2d16cf96a56a141f Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 10 Aug 2022 15:34:53 +0800 Subject: [PATCH 098/101] remove --- x2paddle/op_mapper/onnx2paddle/opset7.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset7.py b/x2paddle/op_mapper/onnx2paddle/opset7.py index c933f5aaf..6c0c22ece 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset7.py +++ b/x2paddle/op_mapper/onnx2paddle/opset7.py @@ -32,12 +32,6 @@ def run_mapping(*args, **kwargs): class OpSet7(OpSet): def __init__(self, decoder, paddle_graph): super(OpSet7, self).__init__(decoder, paddle_graph) - self.directly_map_ops.update({ - 'Atan': ['paddle.atan'], - 'Acos': ['paddle.acos'], - 'Asin': ['paddle.asin'], - 'Tan': ['paddle.tan'], - }) @print_mapping_info def Or(self, node): From 39038505e404e5896d5e5df7d0ade814c90dae04 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Wed, 10 Aug 2022 15:35:13 +0800 Subject: [PATCH 099/101] remove --- x2paddle/op_mapper/onnx2paddle/opset9.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset9.py b/x2paddle/op_mapper/onnx2paddle/opset9.py index 1c3034cf9..846e84719 100644 --- a/x2paddle/op_mapper/onnx2paddle/opset9.py +++ b/x2paddle/op_mapper/onnx2paddle/opset9.py @@ -32,10 +32,3 @@ def run_mapping(*args, **kwargs): class OpSet9(OpSet8): def __init__(self, decoder, paddle_graph): super(OpSet9, self).__init__(decoder, paddle_graph) - self.directly_map_ops.update({ - 'Cosh': ['paddle.cosh'], - 'Sinh': ['paddle.sinh'], - 'Acosh': ['paddle.acosh'], - 'Asinh': ['paddle.asinh'], - 'Atanh': ['paddle.atanh'], - }) From 22f003e29955783387854ebdf105ffb44307f5b4 Mon Sep 17 00:00:00 2001 From: qiaoqijing Date: Thu, 11 Aug 2022 19:06:02 +0800 Subject: [PATCH 100/101] add default value --- x2paddle/op_mapper/onnx2paddle/opset10.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/x2paddle/op_mapper/onnx2paddle/opset10.py 
b/x2paddle/op_mapper/onnx2paddle/opset10.py
index 53822e9f3..73476bba8 100644
--- a/x2paddle/op_mapper/onnx2paddle/opset10.py
+++ b/x2paddle/op_mapper/onnx2paddle/opset10.py
@@ -40,9 +40,11 @@ def IsInf(self, node):
         val_x = self.graph.get_input_node(node, idx=0, copy=True)
         if node.get_attr('detect_negative') != None or node.get_attr(
                 'detect_positive') != None:
-            raise Exception(
-                "X2Paddle does not currently support IsInf with attributes 'detect_negative' and 'detect_positive'."
-            )
+            if node.get_attr('detect_negative') != 1 or node.get_attr(
+                    'detect_positive') != 1:
+                raise Exception(
+                    "X2Paddle does not currently support IsInf with attributes 'detect_negative' and 'detect_positive'."
+                )
         else:
             self.paddle_graph.add_layer(
                 'paddle.isinf', inputs={"x": val_x.name}, outputs=[node.name])

From a9b58d92a97dcc73b66225be69c4f10b7a5522b5 Mon Sep 17 00:00:00 2001
From: qiaoqijing
Date: Thu, 18 Aug 2022 10:46:14 +0800
Subject: [PATCH 101/101] retest

---
 tests/onnx/test_auto_scan_isinf.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/onnx/test_auto_scan_isinf.py b/tests/onnx/test_auto_scan_isinf.py
index 72291a82b..9938b07ee 100644
--- a/tests/onnx/test_auto_scan_isinf.py
+++ b/tests/onnx/test_auto_scan_isinf.py
@@ -47,7 +47,6 @@ def sample_convert_config(self, draw):
         }
 
         attrs = {}
-
         return (config, attrs)
 
     def test(self):
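
A note for readers of this series: the auto_scan tests touched above (HardSigmoid, Elu, the unary ops, IsInf) all follow the same pattern, where the config dict describes a single-op ONNX model that the OPConvertAutoScanTest harness generates, converts to Paddle, and compares numerically. As a rough, self-contained sketch of what an entry like "op_names": ["Elu"] with a drawn alpha boils down to (the tensor names, shape, and alpha value here are illustrative, not taken from the harness):

import onnx
from onnx import helper, TensorProto

# One Elu node with an explicit alpha attribute, analogous to the models
# the auto_scan harness builds from a sampled config.
node = helper.make_node("Elu", inputs=["x"], outputs=["y"], alpha=0.42)
graph = helper.make_graph(
    [node],
    "elu_single_op",
    inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [2, 3, 4])],
    outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [2, 3, 4])])
model = helper.make_model(
    graph, opset_imports=[helper.make_opsetid("", 7)])  # "min_opset_version": 7
onnx.checker.check_model(model)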
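
On the IsInf mapper added in opset10.py: ONNX IsInf carries two optional attributes, detect_negative and detect_positive, both defaulting to 1, while paddle.isinf always flags both signs, so the mapper only accepts the default combination and raises otherwise. A plain-NumPy sketch of the ONNX attribute semantics (the function name is illustrative):

import numpy as np

def isinf_reference(x, detect_negative=1, detect_positive=1):
    # Each attribute independently enables detection of one infinity sign.
    pos = np.isposinf(x) if detect_positive else np.zeros(x.shape, dtype=bool)
    neg = np.isneginf(x) if detect_negative else np.zeros(x.shape, dtype=bool)
    return pos | neg

x = np.array([1.0, np.inf, -np.inf, np.nan], dtype="float32")
print(isinf_reference(x))                     # [False  True  True False]
print(isinf_reference(x, detect_negative=0))  # [False  True False False]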
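
And a quick sanity check, assuming a working Paddle install, that paddle.isinf matches that default behaviour, which is what the converted graphs and the auto_scan test above rely on:

import numpy as np
import paddle

x = paddle.to_tensor(np.array([1.0, np.inf, -np.inf, np.nan], dtype="float32"))
print(paddle.isinf(x).numpy())  # [False  True  True False]; both signs, NaN excluded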