support paddle_yolov5s transform to MLIR #142

Open · wants to merge 16 commits into base: master
Changes from 8 commits
python/samples/detect_yolov5.py: 7 changes (6 additions & 1 deletion), mode 100755 → 100644
@@ -15,7 +15,7 @@
import sys
import argparse
import cv2
from tools.model_runner import mlir_inference, model_inference, onnx_inference, torch_inference
from tools.model_runner import mlir_inference, model_inference, onnx_inference, torch_inference,paddle_inference
from utils.preprocess import supported_customization_format

COCO_CLASSES = ("person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck",
@@ -230,6 +230,7 @@ def preproc(img, input_size, pixel_format, channel_format, fuse_pre, swap=(2, 0,
padded_img = np.ones(input_size, dtype=np.uint8) * 114 # 114

r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])

resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
@@ -241,6 +242,7 @@ def preproc(img, input_size, pixel_format, channel_format, fuse_pre, swap=(2, 0,

if (channel_format == 'nchw'):
padded_img = padded_img.transpose(swap) # HWC to CHW
# if (pixel_format == 'rgb'):
if (pixel_format == 'rgb'):
padded_img = padded_img[::-1] # BGR to RGB

@@ -336,6 +338,8 @@ def main():
output = onnx_inference(data, args.model, False)
elif args.model.endswith('.pt') or args.model.endswith('.pth'):
output = torch_inference(data, args.model, False)
elif args.model.endswith('.pdmodel'):
output = paddle_inference(data, args.model, False)
elif args.model.endswith('.mlir'):
output = mlir_inference(data, args.model, False)
elif args.model.endswith(".bmodel"):
@@ -355,6 +359,7 @@ def main():
raise RuntimeError("model:[{}] nothing detect out:{}".format(args.model, args.input))
final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
final_boxes /= ratio

fix_img = vis(origin_img,
final_boxes,
final_scores,
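A quick worked example of the letterbox math in preproc above (the 1080x1920 source resolution is illustrative, not taken from the PR): for a 640x640 target, r = min(640/1080, 640/1920) ≈ 0.333, so the image is resized to 640x360 and pasted into the 114-filled canvas.

import numpy as np

target_h, target_w = 640, 640      # input_size[0], input_size[1]
img_h, img_w = 1080, 1920          # illustrative source resolution

r = min(target_h / img_h, target_w / img_w)     # 0.333...
new_w, new_h = int(img_w * r), int(img_h * r)   # 640, 360

canvas = np.ones((target_h, target_w, 3), dtype=np.uint8) * 114  # grey letterbox
# a real run would paste cv2.resize(img, (new_w, new_h)) into canvas[:new_h, :new_w]
print(r, (new_w, new_h), canvas.shape)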
python/tools/model_runner.py: 46 changes (41 additions & 5 deletions), mode 100755 → 100644
@@ -33,8 +33,7 @@ def pack_bmodel_context_generator(model_file, net):
out_dir = model_file.rsplit(".", maxsplit=1)[0]
tensor_loc = model_file + ".json"
if not os.path.isfile(tensor_loc):
yield None
return
return iter([None])
os.makedirs(out_dir, exist_ok=True)
shutil.copy(model_file, os.path.join(out_dir, "compilation.bmodel"))
shutil.copy(tensor_loc, os.path.join(out_dir, "tensor_location.json"))
@@ -55,8 +54,8 @@ def model_inference(inputs: dict, model_file: str, dump_all = True) -> dict:
chip = get_chip_from_model(model_file)
# trick for runtime link chip cmodel
lib_so = 'libcmodel_1684x.so'
if chip == 'BM1688' or chip == 'CV186X':
lib_so = 'libcmodel_1688.so'
if chip == 'BM1686' or chip == 'CV186X':
lib_so = 'libcmodel_1686.so'

Review thread on this change (the original lines are quoted below):

    if chip == 'BM1688' or chip == 'CV186X':
        lib_so = 'libcmodel_1688.so'

@hlt-bkx (Author), Nov 29, 2023:
Hello, has the INT8 quantization issue from yesterday been resolved?

Reply:
Yes, it is resolved; there was no problem after all, so I withdrew the comment.

elif chip == 'BM1684':
lib_so = 'libcmodel_1684.so'
elif chip == "SG2260":
@@ -104,7 +103,6 @@ def model_inference(inputs: dict, model_file: str, dump_all = True) -> dict:
size = os.path.getsize(model_file)
pack_bmodel_context = (iter([None]) if is_cv18xx else pack_bmodel_context_generator(
model_file, net))

next(pack_bmodel_context) # save input_data

if size > 0x10000000:
@@ -267,6 +265,41 @@ def generate_onnx_with_all(onnx_file: str):
return dict(filter(lambda x: isinstance(x[1], np.ndarray), zip(output_keys, outs)))


def paddle_inference(inputs : dict, paddle_file : str, dump_all : bool = True) -> dict:
import paddle
paddle.enable_static()
all_valid_nodes = dict()
if paddle_file.endswith('.pdmodel'):
paddle_file = paddle_file[:-len('.pdmodel')]
exe = paddle.static.Executor(paddle.CPUPlace())
[inference_program, feed_target_names, fetch_targets] = (
paddle.static.load_inference_model(paddle_file, exe))
out_name = list()
for o in fetch_targets:
out_name.append(o.name)
for op in inference_program.block(0).ops:
if 'Out' in op.output_names:
output_vars = op.output('Out')
elif 'Output' in op.output_names:
output_vars = op.output('Output')
all_valid_nodes[output_vars[0]] = op.type
for name in all_valid_nodes:
if name not in feed_target_names and name != 'fetch':
out_name.append(name)
feed_dict = dict()
for i in inputs:
feed_dict[feed_target_names[0]] = inputs[i].astype(np.float32)
out_data = exe.run(
inference_program,
feed = feed_dict,
fetch_list = out_name
)
outputs = dict()
for i in range(len(out_name)):
new_name = out_name[i] + f'_{all_valid_nodes[out_name[i]]}'
outputs[new_name] = out_data[i]
return outputs

def caffe_inference(inputs: dict, prototxt: str, caffemodel: str, dump_all: bool = True) -> dict:
import caffe
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
@@ -420,6 +453,9 @@ def torch_outputs(outputs: dict, names: list, tensors):
output = onnx_inference(data, args.model, args.dump_all_tensors)
elif args.model.endswith(".tflite"):
output = tflite_inference(data, args.model, args.dump_all_tensors)
elif args.model.endswith(".pdmodel"):
print("pdmodel!\n")
output = paddle_inference(data,args.model)
elif args.model.endswith(".prototxt") and args.weight.endswith(".caffemodel"):
output = caffe_inference(data, args.model, args.weight, args.dump_all_tensors)
elif args.model.endswith(".pt") or args.model.endswith(".pth"):
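For reference, a minimal standalone sketch of the inference path this hunk adds, assuming a PaddlePaddle static-graph model exported with paddle.static.save_inference_model (the yolov5s_paddle prefix and the 1x3x640x640 shape are placeholders, not values from the PR):

import numpy as np
import paddle

paddle.enable_static()

# load_inference_model expects the file prefix without the '.pdmodel'
# suffix, which is why paddle_inference strips it first.
path_prefix = "yolov5s_paddle"

exe = paddle.static.Executor(paddle.CPUPlace())
program, feed_names, fetch_targets = paddle.static.load_inference_model(path_prefix, exe)

# Dummy tensor for the first feed variable; a real run would pass the
# preprocessed image instead.
feed = {feed_names[0]: np.zeros((1, 3, 640, 640), dtype=np.float32)}
outputs = exe.run(program, feed=feed, fetch_list=fetch_targets)
print([o.shape for o in outputs])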
python/tools/model_transform.py: 22 changes (22 additions & 0 deletions), mode 100755 → 100644
@@ -209,6 +209,25 @@ def origin_inference(self, inputs: dict):
return torch_inference(inputs, self.model_def)


class PaddleTransformer(ModelTransformer):
def __init__(self,
model_name,
model_def,
input_shapes: list = [],
output_names:list = [],
preprocessor: dict = {}
):
super().__init__(model_name, model_def)
from transform.paddleConverter import PaddleConverter
self.converter = PaddleConverter(self.model_name,self.model_def,input_shapes,output_names,
preprocessor)

def origin_inference(self,inputs:dict):
from tools.model_runner import paddle_inference
return paddle_inference(inputs,self.model_def)



def get_model_transform(args):
preprocessor = preprocess()
preprocessor.config(**vars(args))
@@ -231,6 +250,9 @@ def get_model_transform(args):
elif args.model_def.endswith('.pt'):
tool = TorchTransformer(args.model_name, args.model_def, args.input_shapes,
args.input_types, args.output_names, preprocessor.to_dict())
elif args.model_def.endswith('.pdmodel'):
args.model_def = args.model_def[:-len('.pdmodel')]
tool = PaddleTransformer(args.model_name,args.model_def,args.input_shapes,args.output_names,preprocessor.to_dict())
else:
# TODO: support more AI model types
raise RuntimeError("unsupport model:{}".format(args.model_def))
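A rough sketch of exercising the new branch directly, assuming the PaddleConverter module added by this PR is importable; the model name, file prefix, shape, and input key below are placeholders:

import numpy as np
from tools.model_transform import PaddleTransformer

# get_model_transform strips '.pdmodel', so the transformer only sees the prefix.
tool = PaddleTransformer("yolov5s",
                         "yolov5s_paddle",
                         input_shapes=[[1, 3, 640, 640]],
                         output_names=[],
                         preprocessor={})

# origin_inference forwards to paddle_inference, which feeds the first
# feed target regardless of the dict key used here.
dummy = {"image": np.zeros((1, 3, 640, 640), dtype=np.float32)}
ref_outputs = tool.origin_inference(dummy)
print(list(ref_outputs.keys()))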
python/transform/MLIRImporter.py: 3 changes (2 additions & 1 deletion)
@@ -20,6 +20,7 @@ class Platform:
TFLITE = "TFLITE"
CAFFE = "CAFFE"
TPULANG = "TPULANG"
PADDLE = "PADDLE"


def get_weight_file(model_name: str, state: str, chip: str):
@@ -33,7 +34,7 @@ def __init__(self,
input_shapes: list,
output_shapes: list,
model_name: str,
platform: str = Platform.ONNX,
platform: str = Platform.PADDLE,
input_types: list = [],
output_types: list = [],
state: str = State.TOP_F32,