From 5a6729fddf346234e89c2c3e944a3fa65af89a6e Mon Sep 17 00:00:00 2001 From: megemini Date: Sun, 13 Oct 2024 10:21:16 +0000 Subject: [PATCH 1/5] [Fix] version check --- requirements.txt | 1 + x2paddle/convert.py | 19 +++++++------------ x2paddle/utils.py | 29 ++++++++++++++++++++++------- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/requirements.txt b/requirements.txt index ded0ee752..7127bf47d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ sympy +packaging diff --git a/x2paddle/convert.py b/x2paddle/convert.py index 3372c5bc1..0a7e83a29 100644 --- a/x2paddle/convert.py +++ b/x2paddle/convert.py @@ -14,7 +14,7 @@ from six import text_type as _text_type from x2paddle import program -from x2paddle.utils import ConverterCheck +from x2paddle.utils import ConverterCheck, check_version import argparse import sys import logging @@ -449,20 +449,15 @@ def main(): assert args.save_dir is not None, "--save_dir is not defined" try: - import platform - v0, v1, v2 = platform.python_version().split('.') - if not (int(v0) >= 3 and int(v1) >= 5): - logging.info("[ERROR] python>=3.5 is required") + if not sys.version_info >= (3, 8): + logging.error("[ERROR] python>=3.8 is required") return + import paddle - v0, v1, v2 = paddle.__version__.split('.') - logging.info("paddle.__version__ = {}".format(paddle.__version__)) - if v0 == '0' and v1 == '0' and v2 == '0': - logging.info( - "[WARNING] You are use develop version of paddlepaddle") - elif int(v0) != 2 or int(v1) < 0: - logging.info("[ERROR] paddlepaddle>=2.0.0 is required") + if not check_version('2.0.0'): + logging.error("[ERROR] paddlepaddle>=2.0.0 is required") return + except: logging.info( "[ERROR] paddlepaddle not installed, use \"pip install paddlepaddle\"" diff --git a/x2paddle/utils.py b/x2paddle/utils.py index 2117a40c7..b51dda7f4 100644 --- a/x2paddle/utils.py +++ b/x2paddle/utils.py @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # 
limitations under the License. +import logging + +from packaging.version import Version + import paddle import x2paddle import hashlib @@ -30,15 +34,26 @@ def string(param): return "\'{}\'".format(param) -def check_version(): - version = paddle.__version__ - v0, v1, v2 = version.split('.') - if not ((v0 == '0' and v1 == '0' and v2 == '0') or - (int(v0) >= 2 and int(v1) >= 1)): - return False - else: +def check_version(base_version: str = '2.1.0') -> bool: + """ + Return `True` if the current version is equal or bigger than `base_version`. + The default version `2.1.0` is used for checking `is_new_version`. + """ + is_new = False + + dev_version = Version('0.0.0') + cur_version = Version(paddle.__version__) + + if cur_version == dev_version: + logging.info("[WARNING] You are use develop version of paddlepaddle") + return True + if cur_version >= Version(base_version): + return True + + return False + def _md5(text: str): '''Calculate the md5 value of the input text.''' From d8ba1b969d1b756d16acf46d7e9b70fc353368c3 Mon Sep 17 00:00:00 2001 From: megemini Date: Tue, 15 Oct 2024 18:24:18 +0800 Subject: [PATCH 2/5] [Fix] ops torch aten::pad --- test_benchmark/PyTorch/ACG_UnitTest/pd_infer.py | 8 ++------ x2paddle/op_mapper/pytorch2paddle/aten.py | 3 +++ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/test_benchmark/PyTorch/ACG_UnitTest/pd_infer.py b/test_benchmark/PyTorch/ACG_UnitTest/pd_infer.py index 6e26f94b6..1fcfbf1db 100644 --- a/test_benchmark/PyTorch/ACG_UnitTest/pd_infer.py +++ b/test_benchmark/PyTorch/ACG_UnitTest/pd_infer.py @@ -1,4 +1,3 @@ -import paddle.fluid as fluid import paddle import numpy as np import sys @@ -11,11 +10,8 @@ exe = paddle.static.Executor(paddle.CPUPlace()) # test dygraph - [prog, inputs, outputs - ] = fluid.io.load_inference_model(dirname="pd_model/inference_model/", - executor=exe, - model_filename="model.pdmodel", - params_filename="model.pdiparams") + [prog, inputs, outputs] = paddle.static.load_inference_model( + 
path_prefix="pd_model/inference_model/model", executor=exe) data = np.load('../dataset/ACG_UnitTest/input.npy') result = exe.run(prog, feed={inputs[0]: data}, fetch_list=outputs) diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py index bd4dada4a..28788765b 100755 --- a/x2paddle/op_mapper/pytorch2paddle/aten.py +++ b/x2paddle/op_mapper/pytorch2paddle/aten.py @@ -4850,6 +4850,9 @@ def aten_replication_pad1d(mapper, graph, node): return current_inputs, current_outputs +aten_pad = aten_replication_pad1d + + def aten_reshape(mapper, graph, node): """ 构造调整大小的PaddleLayer。 TorchScript示例: From 523975a926fcbd0e936b9119357f0d97c05075dc Mon Sep 17 00:00:00 2001 From: megemini Date: Wed, 16 Oct 2024 21:57:31 +0800 Subject: [PATCH 3/5] [Add] pytorch aten::pad and fix models --- .../PyTorch/Mobilestereonet/pd_infer.py | 5 +- .../PyTorch/Saicinpainting_LaMa/pd_infer.py | 8 +- test_benchmark/PyTorch/opadd/deploy_infer.py | 1 - test_benchmark/PyTorch/opadd/pd_infer.py | 8 +- x2paddle/op_mapper/pytorch2paddle/aten.py | 94 ++++++++++++++++++- 5 files changed, 96 insertions(+), 20 deletions(-) diff --git a/test_benchmark/PyTorch/Mobilestereonet/pd_infer.py b/test_benchmark/PyTorch/Mobilestereonet/pd_infer.py index 8ba7699aa..bf511d334 100644 --- a/test_benchmark/PyTorch/Mobilestereonet/pd_infer.py +++ b/test_benchmark/PyTorch/Mobilestereonet/pd_infer.py @@ -11,10 +11,7 @@ # test dygraph [prog, inputs, outputs] = paddle.static.load_inference_model( - path_prefix="pd_model/inference_model", - executor=exe, - model_filename="model.pdmodel", - params_filename="model.pdiparams") + path_prefix="pd_model/inference_model/model", executor=exe) dummy_input_left = np.load("../dataset/Mobilestereonet/input_left.npy") dummy_input_right = np.load("../dataset/Mobilestereonet/input_right.npy") result = exe.run(prog, diff --git a/test_benchmark/PyTorch/Saicinpainting_LaMa/pd_infer.py b/test_benchmark/PyTorch/Saicinpainting_LaMa/pd_infer.py index 
65b216c4a..0025d86e3 100644 --- a/test_benchmark/PyTorch/Saicinpainting_LaMa/pd_infer.py +++ b/test_benchmark/PyTorch/Saicinpainting_LaMa/pd_infer.py @@ -1,4 +1,3 @@ -import paddle.fluid as fluid import paddle import numpy as np import sys @@ -10,11 +9,8 @@ exe = paddle.static.Executor(paddle.CPUPlace()) # test dygraph - [prog, inputs, outputs] = fluid.io.load_inference_model( - dirname="pd_model_trace/inference_model/", - executor=exe, - model_filename="model.pdmodel", - params_filename="model.pdiparams") + [prog, inputs, outputs] = paddle.static.load_inference_model( + path_prefix="pd_model_trace/inference_model/model", executor=exe) data = np.load('../dataset/Saicinpainting_LaMa/input.npy') result = exe.run(prog, feed={inputs[0]: data}, fetch_list=outputs) diff --git a/test_benchmark/PyTorch/opadd/deploy_infer.py b/test_benchmark/PyTorch/opadd/deploy_infer.py index 803d98557..eb8332301 100644 --- a/test_benchmark/PyTorch/opadd/deploy_infer.py +++ b/test_benchmark/PyTorch/opadd/deploy_infer.py @@ -4,7 +4,6 @@ import numpy as np import paddle -import paddle.fluid as fluid from paddle.inference import Config from paddle.inference import create_predictor diff --git a/test_benchmark/PyTorch/opadd/pd_infer.py b/test_benchmark/PyTorch/opadd/pd_infer.py index 75a1c50fd..a7ec7c003 100644 --- a/test_benchmark/PyTorch/opadd/pd_infer.py +++ b/test_benchmark/PyTorch/opadd/pd_infer.py @@ -1,5 +1,4 @@ from __future__ import print_function -import paddle.fluid as fluid import paddle import sys import os @@ -14,11 +13,8 @@ # trace paddle.enable_static() exe = paddle.static.Executor(paddle.CPUPlace()) - [prog, inputs, outputs] = fluid.io.load_inference_model( - dirname="pd_model_trace/inference_model/", - executor=exe, - model_filename="model.pdmodel", - params_filename="model.pdiparams") + [prog, inputs, outputs] = paddle.static.load_inference_model( + path_prefix="pd_model_trace/inference_model/model", executor=exe) result = exe.run(prog, feed={inputs[0]: input_data}, 
fetch_list=outputs)
     df = pytorch_output - result
     if numpy.max(numpy.fabs(df)) > 1e-04:
diff --git a/x2paddle/op_mapper/pytorch2paddle/aten.py b/x2paddle/op_mapper/pytorch2paddle/aten.py
index 28788765b..1572e02f6 100755
--- a/x2paddle/op_mapper/pytorch2paddle/aten.py
+++ b/x2paddle/op_mapper/pytorch2paddle/aten.py
@@ -4850,9 +4850,6 @@ def aten_replication_pad1d(mapper, graph, node):
     return current_inputs, current_outputs
 
 
-aten_pad = aten_replication_pad1d
-
-
 def aten_reshape(mapper, graph, node):
     """ 构造调整大小的PaddleLayer。
     TorchScript示例:
@@ -6553,3 +6550,94 @@ def aten_topk(mapper, graph, node):
                     **layer_attrs)
 
     return current_inputs, current_outputs
+
+
+def aten_pad(mapper, graph, node):
+    """
+    TorchScript Code:
+    %input.23 : Tensor = aten::pad(%input.21, %116, %114, %113)
+    Parameter meaning:
+    %input.21 (Tensor): Input Tensor
+    %116 (list): pad
+    %114 (str): pad mode
+    %113 (float): value
+    """
+    scope_name = mapper.normalize_scope_name(node)
+    op_name = name_generator("pad", mapper.nn_name2id)
+    output_name = mapper._get_outputs_name(node)[0]
+    layer_inputs = {}
+    layer_attrs = {}
+    inputs_name, inputs_node = mapper._get_inputs_name(node)
+    # Output list
+    current_outputs = [output_name]
+    # process Input Tensor
+    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
+                        scope_name)
+
+    # process pad
+    padding_attr = None
+    if inputs_name[1] in mapper.attrs:
+        padding_attr = mapper.attrs[inputs_name[1]]
+    else:
+        mapper._check_input(graph, inputs_node[1], inputs_name[1],
+                            current_outputs, scope_name)
+        layer_inputs["pad"] = inputs_name[1]
+
+    # process `mode`
+    _pad_mode = mapper.attrs[inputs_name[2]]
+    layer_attrs["mode"] = _pad_mode
+
+    # process value, try to convert it to `float`;
+    # a `None` value raises an exception, so make `value` be `0` as default. 
+    _pad_value = mapper.attrs[inputs_name[3]]
+    _pad_value = _pad_value or 0
+    try:
+        _pad_value = float(_pad_value)
+    except ValueError:
+        _pad_value = 0
+    layer_attrs["value"] = _pad_value
+
+    # process `data_format`
+    # TODO(megemini): the latest version of `Paddle v3`,
+    # just make `data_format = string("None")`
+    # because `paddle.nn.functional.pad` can infer from input `x`
+    data_format = string("None")
+    if inputs_name[0] in mapper.attrs:
+        x_dim = len(mapper.attrs[inputs_name[0]])
+        if x_dim == 3:
+            data_format = string("NCL")
+        elif x_dim == 4:
+            data_format = string("NCHW")
+        elif x_dim == 5:
+            data_format = string("NCDHW")
+    else:
+        if len(padding_attr) == 2:
+            data_format = string("NCL")
+        elif len(padding_attr) == 4:
+            data_format = string("NCHW")
+        elif len(padding_attr) == 6:
+            data_format = string("NCDHW")
+    layer_attrs["data_format"] = data_format
+
+    # process `pad`
+    if padding_attr is not None:
+        layer_attrs["pad"] = padding_attr
+        if 'constant' in _pad_mode:
+            if len(padding_attr) == 2:
+                layer_attrs["pad"] = [0, 0, 0, 0, 0, 0] + padding_attr
+            elif len(padding_attr) == 4:
+                layer_attrs["pad"] = [0, 0, 0, 0] + padding_attr
+            elif len(padding_attr) == 6:
+                layer_attrs["pad"] = [0, 0] + padding_attr
+
+    # input and kernel
+    layer_inputs["x"] = inputs_name[0]
+    kernel_name = "paddle.nn.functional.pad"
+
+    graph.add_layer(kernel_name,
+                    inputs=layer_inputs,
+                    outputs=[output_name],
+                    scope_name=scope_name,
+                    **layer_attrs)
+    current_inputs = list(layer_inputs.values())
+
+    return current_inputs, current_outputs

From 929674f854979078b8e9db7a5fb7617a3787cb13 Mon Sep 17 00:00:00 2001
From: megemini
Date: Wed, 23 Oct 2024 15:27:46 +0800
Subject: [PATCH 4/5] [Update] black.list

---
 test_benchmark/PyTorch/black.list | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/test_benchmark/PyTorch/black.list b/test_benchmark/PyTorch/black.list
index 28f2493d5..53f23063b 100644
--- a/test_benchmark/PyTorch/black.list
+++ b/test_benchmark/PyTorch/black.list
@@ -1,4 
+1,3 @@ -ACG_UnitTest BertForMaskedLM_dccuchile BertModel_SpanBert CamembertForQuestionAnswering @@ -9,13 +8,10 @@ EasyOCR_recognizer FCN_ResNet50 GRU MiniFasNet -Mobilestereonet MockingBird Roberta -Saicinpainting_LaMa SwinTransformer XLMRobertaForTokenClassification -opadd dataset tools output From edb48d9f8de493583e868a1c9ba83556335e8817 Mon Sep 17 00:00:00 2001 From: megemini Date: Wed, 23 Oct 2024 17:25:37 +0800 Subject: [PATCH 5/5] [Update] torch load map_location --- test_benchmark/PyTorch/ACG_UnitTest/convert.py | 3 ++- test_benchmark/PyTorch/Mobilestereonet/convert.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test_benchmark/PyTorch/ACG_UnitTest/convert.py b/test_benchmark/PyTorch/ACG_UnitTest/convert.py index 2bcc07e9a..1a7caec52 100644 --- a/test_benchmark/PyTorch/ACG_UnitTest/convert.py +++ b/test_benchmark/PyTorch/ACG_UnitTest/convert.py @@ -14,7 +14,8 @@ def main(): num_blocks=2, time_window=3) - ckpt = torch.load("../dataset/ACG_UnitTest/model_best.pth") + ckpt = torch.load("../dataset/ACG_UnitTest/model_best.pth", + map_location=torch.device('cpu')) state_dict = ckpt['model'] model.load_state_dict(state_dict) diff --git a/test_benchmark/PyTorch/Mobilestereonet/convert.py b/test_benchmark/PyTorch/Mobilestereonet/convert.py index 43a3556ef..2d6b78ed5 100644 --- a/test_benchmark/PyTorch/Mobilestereonet/convert.py +++ b/test_benchmark/PyTorch/Mobilestereonet/convert.py @@ -7,7 +7,8 @@ model = __models__['MSNet2D'](192) # state_dict = torch.load('./MSNet2D_SF_DS_KITTI2015.ckpt', map_location=torch.device('cpu')) state_dict = torch.load( - '../dataset/Mobilestereonet/MSNet2D_SF_DS_KITTI2015.ckpt') + '../dataset/Mobilestereonet/MSNet2D_SF_DS_KITTI2015.ckpt', + map_location=torch.device('cpu')) param_dict = state_dict['model'] new_param_dict = {} for k, v in param_dict.items():