Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PaddleV3] 修复 pytorch aten_linear 的算子映射并修复相关模型 #1079

Merged
merged 5 commits into from
Nov 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion test_benchmark/PyTorch/MiniFasNet/convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import torch
import numpy as np
from torchvision.models import AlexNet
from torchvision.models.utils import load_state_dict_from_url
# from torchvision.models.utils import load_state_dict_from_url
from x2paddle.convert import pytorch2paddle
# 构建输入,pytorch-to-paddle
input_data = np.random.rand(1, 3, 80, 80).astype("float32")
Expand Down
1 change: 0 additions & 1 deletion test_benchmark/PyTorch/MiniFasNet/deploy_infer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@

import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.inference import Config
from paddle.inference import create_predictor

Expand Down
8 changes: 2 additions & 6 deletions test_benchmark/PyTorch/MiniFasNet/pd_infer.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from __future__ import print_function
import paddle.fluid as fluid
import paddle
import sys
import os
Expand All @@ -13,11 +12,8 @@
# trace
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
[prog, inputs, outputs] = fluid.io.load_inference_model(
dirname="pd_model_trace/inference_model/",
executor=exe,
model_filename="model.pdmodel",
params_filename="model.pdiparams")
[prog, inputs, outputs] = paddle.static.load_inference_model(
path_prefix="pd_model_trace/inference_model/model", executor=exe)
result = exe.run(prog, feed={inputs[0]: img}, fetch_list=outputs)
df = pytorch_output - result
if np.max(np.fabs(df)) > 1e-03:
Expand Down
3 changes: 2 additions & 1 deletion test_benchmark/PyTorch/SwinTransformer/convert_trace.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@
model_name = "swin_tiny_patch4_window7_224"
torch_module = SwinTransformer(**swin_model_cfg_map[model_name])
torch_state_dict = torch.load(
"../dataset/SwinTransformer/{}.pth".format(model_name))["model"]
"../dataset/SwinTransformer/{}.pth".format(model_name),
map_location=torch.device('cpu'))["model"]
torch_module.load_state_dict(torch_state_dict)
model_name = "pd_model_trace"
# 设置为eval模式
Expand Down
1 change: 0 additions & 1 deletion test_benchmark/PyTorch/SwinTransformer/deploy_infer.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@

import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.inference import Config
from paddle.inference import create_predictor

Expand Down
8 changes: 2 additions & 6 deletions test_benchmark/PyTorch/SwinTransformer/pd_infer.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
from __future__ import print_function
import paddle.fluid as fluid
import paddle
import sys
import os
Expand All @@ -14,11 +13,8 @@
# trace
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
[prog, inputs, outputs] = fluid.io.load_inference_model(
dirname="pd_model_trace/inference_model/",
executor=exe,
model_filename="model.pdmodel",
params_filename="model.pdiparams")
[prog, inputs, outputs] = paddle.static.load_inference_model(
path_prefix="pd_model_trace/inference_model/model", executor=exe)
result = exe.run(prog, feed={inputs[0]: input_data}, fetch_list=outputs)
df = pytorch_output - result
if numpy.max(numpy.fabs(df)) > 1e-04:
Expand Down
2 changes: 0 additions & 2 deletions test_benchmark/PyTorch/black.list
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
EasyOCR_detector
EasyOCR_recognizer
MiniFasNet
MockingBird
SwinTransformer
dataset
tools
output
19 changes: 17 additions & 2 deletions x2paddle/op_mapper/pytorch2paddle/aten.py
Original file line number Diff line number Diff line change
Expand Up @@ -3441,8 +3441,23 @@ def aten_linear(mapper, graph, node):
scope_name=scope_name,
**layer_attrs)
if len(inputs_name) == 3:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)
# make `bias` dtype like `weight`, and shape is `1` for broadcast
if (inputs_name[2] not in mapper.pytorch_params
and inputs_name[1] in mapper.pytorch_params):
param = mapper.pytorch_params[inputs_name[1]]
dtype = string(str(param.dtype))
shape = (1, )
mapper._check_input(graph,
inputs_node[2],
inputs_name[2],
current_outputs,
scope_name,
dtype=dtype,
shape=shape)
else:
mapper._check_input(graph, inputs_node[2], inputs_name[2],
current_outputs, scope_name)

graph.add_layer("paddle.add",
inputs={
"x": output_name,
Expand Down
48 changes: 34 additions & 14 deletions x2paddle/op_mapper/pytorch2paddle/pytorch_op_mapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,14 @@ def _get_outputs_name(self, node, attr_name=None):
outputs_name.append(output_name)
return outputs_name

def _check_input(self, graph, node, output_name, node_outputs, scope_name):
def _check_input(self,
graph,
node,
output_name,
node_outputs,
scope_name,
dtype=None,
shape=None):
if node.kind() == "prim::GetAttr":
param = self.pytorch_params[output_name]
if isinstance(param, np.ndarray):
Expand Down Expand Up @@ -231,19 +238,32 @@ def _check_input(self, graph, node, output_name, node_outputs, scope_name):
value=string(param) if isinstance(
param, str) else param)
node_outputs.append(output_name)
elif node.kind(
) == "prim::Constant" and output_name in self.pytorch_params:
param = self.pytorch_params[output_name]
self.paddle_params[output_name] = param
layer_id = graph.add_layer(
"self.create_parameter",
inputs={},
outputs=[output_name],
scope_name=scope_name,
dtype=string(str(param.dtype)),
shape=param.shape,
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
self.output2id[output_name] = layer_id
elif node.kind() == "prim::Constant":
if output_name in self.pytorch_params:
param = self.pytorch_params[output_name]
self.paddle_params[output_name] = param
layer_id = graph.add_layer(
"self.create_parameter",
inputs={},
outputs=[output_name],
scope_name=scope_name,
dtype=string(str(param.dtype)),
shape=param.shape,
default_initializer=
"paddle.nn.initializer.Constant(value=0.0)")
self.output2id[output_name] = layer_id
else:
if dtype is not None and shape is not None:
layer_id = graph.add_layer(
"self.create_parameter",
inputs={},
outputs=[output_name],
scope_name=scope_name,
dtype=dtype,
shape=shape,
default_initializer=
"paddle.nn.initializer.Constant(value=0.0)")
self.output2id[output_name] = layer_id

def _get_inputs_name(self, node):
inputs_name = []
Expand Down