From c3c76009673589d5783ac569b346e665c03872c1 Mon Sep 17 00:00:00 2001
From: "MingJian.L" <45811724+matrixgame2018@users.noreply.github.com>
Date: Mon, 14 Feb 2022 14:28:15 +0800
Subject: [PATCH 01/17] Create How to support new model.md (#147)

* Create How to support new model.md

Translated the document into Chinese and create the doc in the /docs/zh_cn/tutorials/How to support new model.md

* Update and rename How to support new model.md to How_to_support_new_model.md

* Update and rename How_to_support_new_model.md to how_to_support_new_model.md

* Update how_to_support_new_model.md
---
 .../tutorials/how_to_support_new_model.md | 86 +++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 docs/zh_cn/tutorials/how_to_support_new_model.md

diff --git a/docs/zh_cn/tutorials/how_to_support_new_model.md b/docs/zh_cn/tutorials/how_to_support_new_model.md
new file mode 100644
index 0000000000..f873e90bda
--- /dev/null
+++ b/docs/zh_cn/tutorials/how_to_support_new_model.md
@@ -0,0 +1,86 @@
+## How to support new models
+
+We provide several tools to support model conversion.
+
+### Function rewriter
+
+PyTorch neural networks are written in Python, which simplifies algorithm development. At the same time, however, Python control flow and third-party libraries make it difficult to export a network to an intermediate representation. For this we provide a "monkey patch" tool that rewrites an unsupported function as another function that can be exported. Here is a concrete example:
+
+```python
+from mmdeploy.core import FUNCTION_REWRITER
+
+@FUNCTION_REWRITER.register_rewriter(
+    func_name='torch.Tensor.repeat', backend='tensorrt')
+def repeat_static(ctx, input, *size):
+    origin_func = ctx.origin_func
+    if input.dim() == 1 and len(size) == 1:
+        return origin_func(input.unsqueeze(0), *([1] + list(size))).squeeze(0)
+    else:
+        return origin_func(input, *size)
+```
+
+Using the function rewriter is easy: just add a decorator with parameters:
+
+- `func_name` is the function to be overridden. It can be a PyTorch function or a custom function. Methods of modules can also be overridden by this tool.
+- `backend` is the inference engine. The function will be overridden when the model is exported to this engine. If it is not given, this rewrite becomes the default rewrite; the default rewrite is used whenever no rewrite exists for the given backend.
+
+The arguments are the same as those of the original function, except that the context `ctx` is inserted as the first argument. The context provides some useful information, such as the deployment config `ctx.cfg` and the original function (which has now been overridden) `ctx.origin_func`.
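+As a minimal sketch of the default-rewrite mechanism described above (the decorator and the `ctx` attributes are real, but the rewrite below is a made-up illustration, not part of MMDeploy): omitting `backend` registers the rewrite that serves every backend without a dedicated one.
+
+```python
+from mmdeploy.core import FUNCTION_REWRITER
+
+# No `backend` argument: this hypothetical rewrite is the default one and is
+# used whenever no backend-specific rewrite of `torch.Tensor.size` exists.
+@FUNCTION_REWRITER.register_rewriter(func_name='torch.Tensor.size')
+def size_default(ctx, input, *args):
+    # The deployment config is always reachable through the context.
+    assert ctx.cfg is not None
+    # Fall back to the original (overridden) implementation.
+    return ctx.origin_func(input, *args)
+```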
+### Module rewriter
+
+If you want to replace a whole module with another one, we have a second rewriter, as shown below:
+
+```python
+@MODULE_REWRITER.register_rewrite_module(
+    'mmedit.models.backbones.sr_backbones.SRCNN', backend='tensorrt')
+class SRCNNWrapper(nn.Module):
+
+    def __init__(self,
+                 module,
+                 cfg,
+                 channels=(3, 64, 32, 3),
+                 kernel_sizes=(9, 1, 5),
+                 upscale_factor=4):
+        super(SRCNNWrapper, self).__init__()
+        self._module = module
+        module.img_upsampler = nn.Upsample(
+            scale_factor=module.upscale_factor,
+            mode='bilinear',
+            align_corners=False)
+
+    def forward(self, *args, **kwargs):
+        """Run forward."""
+        return self._module(*args, **kwargs)
+
+    def init_weights(self, *args, **kwargs):
+        """Initialize weights."""
+        return self._module.init_weights(*args, **kwargs)
+```
+
+Just like the function rewriter, add a decorator with parameters:
+
+- `module_type` is the module class to be rewritten.
+- `backend` is the inference engine. The module will be overridden when the model is exported to this engine. If it is not given, this rewrite becomes the default rewrite; the default rewrite is used whenever no rewrite exists for the given backend.
+
+All instances of the module in the network will be replaced with instances of this new class. The original module and the deployment config are passed as the first two arguments.
+
+### Symbolic function rewriter
+
+The mapping between PyTorch and ONNX is defined through symbolic functions in PyTorch. A custom symbolic function can help us bypass ONNX nodes that an inference engine does not support.
+
+```python
+@SYMBOLIC_REWRITER.register_symbolic('squeeze', is_pytorch=True)
+def squeeze_default(ctx, g, self, dim=None):
+    if dim is None:
+        dims = []
+        for i, size in enumerate(self.type().sizes()):
+            if size == 1:
+                dims.append(i)
+    else:
+        dims = [sym_help._get_const(dim, 'i', 'dim')]
+    return g.op('Squeeze', self, axes_i=dims)
+```
+
+Parameters of the decorator:
+
+- `func_name` is the name of the function to add the symbolic for. Use the full path of the `torch.autograd.Function` if it is a custom function; if it is a PyTorch built-in function, the name alone is enough.
+- `backend` is the inference engine. The symbolic function will be overridden when the model is exported to this engine. If it is not given, this rewrite becomes the default rewrite; the default rewrite is used whenever no rewrite exists for the given backend.
+- `is_pytorch`: True if the function is a PyTorch built-in function.
+- `arg_descriptors`: the descriptors of the symbolic function arguments, which will be passed to `torch.onnx.symbolic_helper._parse_arg`.
+
+Just like the `ctx` of the function rewriter, the first argument provides the context. The context carries some useful information, such as the deployment config `ctx.cfg` and the original function (which has been overridden) `ctx.origin_func`. Note that `ctx.origin_func` can only be used when `is_pytorch==False`.
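+As a hypothetical illustration of `arg_descriptors` (the decorator is real, but this rewrite is a sketch rather than part of MMDeploy), a built-in `hardsigmoid` could be mapped to the ONNX `HardSigmoid` op like this:
+
+```python
+from mmdeploy.core import SYMBOLIC_REWRITER
+
+# 'v' tells `_parse_arg` to treat the argument as a plain ONNX value.
+@SYMBOLIC_REWRITER.register_symbolic(
+    'hardsigmoid', is_pytorch=True, arg_descriptors=['v'])
+def hardsigmoid_default(ctx, g, self):
+    # PyTorch's hardsigmoid is clamp(x / 6 + 0.5, 0, 1), i.e. ONNX
+    # HardSigmoid with alpha=1/6 and beta=0.5.
+    return g.op('HardSigmoid', self, alpha_f=1. / 6, beta_f=0.5)
+```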
From 668fb16abce4add98094a6ba4ebbac4c61e9a714 Mon Sep 17 00:00:00 2001
From: Yifan Zhou
Date: Mon, 14 Feb 2022 17:49:49 +0800
Subject: [PATCH 02/17] [Doc] Update Nvidia Jetson TX2 benchmark (#109)

* Update benchmark
* Update table format
* Fix content
* format
* format
---
 docs/en/benchmark.md | 69 ++++++++++++++++++++++++++++++++++++-----
 docs/zh_cn/benchmark.md | 69 ++++++++++++++++++++++++++++++++++++-----
 2 files changed, 124 insertions(+), 14 deletions(-)

diff --git a/docs/en/benchmark.md
index 638c0e2e5e..f9d2418185 100644
--- a/docs/en/benchmark.md
+++ b/docs/en/benchmark.md
@@ -32,7 +32,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 MMCls
- TensorRT
+ TensorRT
 PPLNN
 NCNN
@@ -45,6 +45,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 Input
 T4
 JetsonNano2GB
+ Jetson TX2
 T4
 SnapDragon888
 Adreno660
@@ -56,6 +57,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 int8
 fp32
 fp16
+ fp32
 fp16
 fp32
 fp32
@@ -77,6 +79,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 FPS
 latency (ms)
 FPS
+ latency (ms)
+ FPS
 ResNet
@@ -92,6 +96,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 16.86
 30.54
 32.75
+ 24.13
+ 41.44
 1.30
 768.28
 33.91
@@ -114,6 +120,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 11.35
 49.18
 20.13
+ 37.45
+ 26.70
 1.36
 737.67
 133.44
@@ -136,6 +144,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 13.41
 48.78
 20.50
+ 29.62
+ 33.76
 1.91
 524.07
 107.84
@@ -158,6 +168,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 65.54
 10.23
 97.77
+ 7.37
+ 135.73
 4.69
 213.33
 9.55
@@ -175,13 +187,14 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
 MMDet
+ - + - + @@ -190,6 +203,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + @@ -197,6 +211,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + @@ -208,6 +223,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -219,6 +236,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -233,6 +252,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -247,6 +268,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -263,6 +286,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -275,6 +300,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -289,6 +316,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -303,6 +332,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -374,13 +405,14 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
MMEdit
+
MMDetTensorRTTensorRT PPLNN
Dataset Input T4Jetson TX2 T4 model config file
fp32 fp16 int8fp32 fp16
FPS latency (ms) FPSlatency (ms)FPS
YOLOv340.13 24.92 40.13-- 18.07 55.35 $MMDET_DIR/configs/yolo/yolov3_d53_320_273e_coco.py108.56 8.04 124.381.281.28 19.72 50.71 $MMDET_DIR/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py38.78 16.88 59.23780.481.28 38.34 26.08 $MMDET_DIR/configs/retinanet/retinanet_r50_fpn_1x_coco.py56.57 - --- $MMDET_DIR/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py
47.58 13.50 74.08-- 30.41 32.89 $MMDET_DIR/configs/fsaf/fsaf_r50_fpn_1x_coco.py37.70 19.14 52.23733.811.36 65.40 15.29 $MMDET_DIR/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py4.14 - --- 86.80 11.52 $MMDET_DIR/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py
- + - + @@ -388,6 +420,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + @@ -395,6 +428,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + @@ -406,6 +440,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -416,6 +452,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -429,12 +467,15 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + +
MMEditTensorRTTensorRT PPLNN
Model Input T4Jetson TX2 T4 model config file
fp32 fp16 int8fp32 fp16
FPS latency (ms) FPSlatency (ms)FPS
ESRGAN80.50 12.45 80.35-- 7.67 130.39 $MMEDIT_DIR/configs/restorers/esrgan/esrgan_psnr_x4c64b23g32_g1_1000k_div2k.py2836.62 0.26 3850.4558.8616.99 0.56 1775.11 $MMEDIT_DIR/configs/restorers/srcnn/srcnn_x4k915_g1_1000k_div2k.py
+
@@ -527,13 +568,14 @@ Users can directly test the speed through [how_to_measure_performance_of_models.
MMSeg
+ - + - + @@ -542,6 +584,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + @@ -549,6 +592,7 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + @@ -560,6 +604,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -571,6 +617,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -585,6 +633,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -599,6 +649,8 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + + @@ -613,12 +665,15 @@ Users can directly test the speed through [how_to_measure_performance_of_models. + +
MMSegTensorRTTensorRT PPLNN
Dataset Input T4Jetson TX2 T4 model config file
fp32 fp16 int8fp32 fp16
FPS latency (ms) FPSlatency (ms)FPS
FCN41.72 18.13 55.151682.540.59 27.00 37.04 $MMSEG_DIR/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py41.49 16.33 61.231586.190.63 27.26 36.69 $MMSEG_DIR/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py31.45 19.85 50.38-- 36.01 27.77 $MMSEG_DIR/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py21.26 50.38 26.672534.960.39 34.80 28.74 $MMSEG_DIR/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py
+
diff --git a/docs/zh_cn/benchmark.md b/docs/zh_cn/benchmark.md index 5ac9268397..f15629d9a9 100644 --- a/docs/zh_cn/benchmark.md +++ b/docs/zh_cn/benchmark.md @@ -33,7 +33,7 @@ GPU: ncnn, TensorRT, PPLNN MMCls - TensorRT + TensorRT PPLNN NCNN @@ -46,6 +46,7 @@ GPU: ncnn, TensorRT, PPLNN Input T4 JetsonNano2GB + Jetson TX2 T4 SnapDragon888 Adreno660 @@ -57,6 +58,7 @@ GPU: ncnn, TensorRT, PPLNN int8 fp32 fp16 + fp32 fp16 fp32 fp32 @@ -78,6 +80,8 @@ GPU: ncnn, TensorRT, PPLNN FPS latency (ms) FPS + latency (ms) + FPS ResNet @@ -93,6 +97,8 @@ GPU: ncnn, TensorRT, PPLNN 16.86 30.54 32.75 + 24.13 + 41.44 1.30 768.28 33.91 @@ -115,6 +121,8 @@ GPU: ncnn, TensorRT, PPLNN 11.35 49.18 20.13 + 37.45 + 26.70 1.36 737.67 133.44 @@ -137,6 +145,8 @@ GPU: ncnn, TensorRT, PPLNN 13.41 48.78 20.50 + 29.62 + 33.76 1.91 524.07 107.84 @@ -159,6 +169,8 @@ GPU: ncnn, TensorRT, PPLNN 65.54 10.23 97.77 + 7.37 + 135.73 4.69 213.33 9.55 @@ -176,13 +188,14 @@ GPU: ncnn, TensorRT, PPLNN
MMDet
+ - + - + @@ -191,6 +204,7 @@ GPU: ncnn, TensorRT, PPLNN + @@ -198,6 +212,7 @@ GPU: ncnn, TensorRT, PPLNN + @@ -209,6 +224,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -220,6 +237,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -234,6 +253,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -248,6 +269,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -264,6 +287,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -276,6 +301,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -290,6 +317,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -304,6 +333,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -375,13 +406,14 @@ GPU: ncnn, TensorRT, PPLNN
MMEdit
+
MMDetTensorRTTensorRT PPLNN
Dataset Input T4Jetson TX2 T4 model config file
fp32 fp16 int8fp32 fp16
FPS latency (ms) FPSlatency (ms)FPS
YOLOv340.13 24.92 40.13-- 18.07 55.35 $MMDET_DIR/configs/yolo/yolov3_d53_320_273e_coco.py108.56 8.04 124.381.281.28 19.72 50.71 $MMDET_DIR/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py38.78 16.88 59.23780.481.28 38.34 26.08 $MMDET_DIR/configs/retinanet/retinanet_r50_fpn_1x_coco.py56.57 - --- $MMDET_DIR/configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py
47.58 13.50 74.08-- 30.41 32.89 $MMDET_DIR/configs/fsaf/fsaf_r50_fpn_1x_coco.py37.70 19.14 52.23733.811.36 65.40 15.29 $MMDET_DIR/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py4.14 - --- 86.80 11.52 $MMDET_DIR/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py
- + - + @@ -389,6 +421,7 @@ GPU: ncnn, TensorRT, PPLNN + @@ -396,6 +429,7 @@ GPU: ncnn, TensorRT, PPLNN + @@ -407,6 +441,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -417,6 +453,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -430,12 +468,15 @@ GPU: ncnn, TensorRT, PPLNN + +
MMEditTensorRTTensorRT PPLNN
Model Input T4Jetson TX2 T4 model config file
fp32 fp16 int8fp32 fp16
FPS latency (ms) FPSlatency (ms)FPS
ESRGAN80.50 12.45 80.35-- 7.67 130.39 $MMEDIT_DIR/configs/restorers/esrgan/esrgan_psnr_x4c64b23g32_g1_1000k_div2k.py2836.62 0.26 3850.4558.8616.99 0.56 1775.11 $MMEDIT_DIR/configs/restorers/srcnn/srcnn_x4k915_g1_1000k_div2k.py
+
@@ -528,13 +569,14 @@ GPU: ncnn, TensorRT, PPLNN
MMSeg
+ - + - + @@ -543,6 +585,7 @@ GPU: ncnn, TensorRT, PPLNN + @@ -550,6 +593,7 @@ GPU: ncnn, TensorRT, PPLNN + @@ -561,6 +605,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -572,6 +618,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -586,6 +634,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -600,6 +650,8 @@ GPU: ncnn, TensorRT, PPLNN + + @@ -614,12 +666,15 @@ GPU: ncnn, TensorRT, PPLNN + +
MMSegTensorRTTensorRT PPLNN
Dataset Input T4Jetson TX2 T4 model config file
fp32 fp16 int8fp32 fp16
FPS latency (ms) FPSlatency (ms)FPS
FCN41.72 18.13 55.151682.540.59 27.00 37.04 $MMSEG_DIR/configs/fcn/fcn_r50-d8_512x1024_40k_cityscapes.py41.49 16.33 61.231586.190.63 27.26 36.69 $MMSEG_DIR/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py31.45 19.85 50.38-- 36.01 27.77 $MMSEG_DIR/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py21.26 50.38 26.672534.960.39 34.80 28.74 $MMSEG_DIR/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py
+
From a42c44008a4760e4e6a88a6e4d174004082dbbcb Mon Sep 17 00:00:00 2001
From: uniyushu
Date: Tue, 15 Feb 2022 14:30:11 +0800
Subject: [PATCH 03/17] [Docs] Update Chinese how_to_write_config.md (#137)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add zh-cn documentations
* Update how_to_write_config.md
* Update how_to_write_config.md
* Update how_to_write_config.md
* add zh-cn documentation
* refactor doc
* refactor doc
* [DOC] add zh-cn tutorials
* [Feature] Add Chinese how to write config doc
* Update how_to_write_config.md
* Update how_to_write_config.md
* Update how_to_write_config.md
* Update how_to_write_config.md
* Update how_to_write_config.md
* Update how_to_write_config.md
* Update how_to_write_config.md
* [Docs] Update Chinese how_to_write_config.md
* [Docs] Update Chinese how_to_write_config.md
* [Docs] Update Chinese how_to_write_config.md
* [Docs] Update Chinese how_to_write_config.md
* [Docs] Update Chinese how_to_write_config.md

Co-authored-by: yushu
Co-authored-by: uniyushu
---
 docs/zh_cn/tutorials/how_to_write_config.md | 193 ++++++++++++++++++++
 1 file changed, 193 insertions(+)
 create mode 100644 docs/zh_cn/tutorials/how_to_write_config.md

diff --git a/docs/zh_cn/tutorials/how_to_write_config.md b/docs/zh_cn/tutorials/how_to_write_config.md
new file mode 100644
index 0000000000..ff8717e39c
--- /dev/null
+++ b/docs/zh_cn/tutorials/how_to_write_config.md
@@ -0,0 +1,193 @@
+# How to write config
+
+This tutorial describes how to write a config for model conversion and deployment. A deployment config consists of `onnx config`, `codebase config` and `backend config`.
+
+- [How to write config](#how-to-write-config)
+  - [1. How to write onnx config](#1-how-to-write-onnx-config)
+    - [Description of onnx config arguments](#description-of-onnx-config-arguments)
+    - [Example](#example)
+    - [Dynamic input and output config](#dynamic-input-and-output-config)
+    - [Example](#example-1)
+  - [2. How to write codebase config](#2-how-to-write-codebase-config)
+    - [Description of codebase config arguments](#description-of-codebase-config-arguments)
+    - [Example](#example-2)
+  - [3. How to write backend config](#3-how-to-write-backend-config)
+    - [Example](#example-3)
+  - [4. A complete example of deployment config](#4-a-complete-example-of-deployment-config)
+  - [5. The naming rules of deployment config files](#5-the-naming-rules-of-deployment-config-files)
+    - [Example](#example-4)
+  - [6. How to write model config](#6-how-to-write-model-config)
+  - [7. Reminders](#7-reminders)
+  - [8. FAQs](#8-faqs)
+
+## 1. How to write onnx config
+
+The onnx config describes how to export a PyTorch model to an ONNX model.
+
+### Description of onnx config arguments
+
+- `type`: Type of the config. Defaults to `onnx`.
+- `export_params`: If specified, all model parameters will be exported. Set it to False if you only want to export an untrained model.
+- `keep_initializers_as_inputs`: If True, all the initializers (which typically correspond to parameters) will also be exported as inputs and added to the computation graph. If False, the initializers are not exported as inputs and are not added to the computation graph; only the non-parameter inputs are added.
+- `opset_version`: Opset version of ONNX. Defaults to 11.
+- `save_file`: Output ONNX model file.
+- `input_names`: Names of the input nodes of the model's computation graph.
+- `output_names`: Names of the output nodes of the model's computation graph.
+- `input_shape`: Height and width of the model's input tensor.
+
+#### Example
+
+```python
+onnx_config = dict(
+    type='onnx',
+    export_params=True,
+    keep_initializers_as_inputs=False,
+    opset_version=11,
+    save_file='end2end.onnx',
+    input_names=['input'],
+    output_names=['output'],
+    input_shape=None)
+```
+
+### Dynamic input and output config
+
+If the model requires dynamic input and output shapes, you need to add a `dynamic_axes` entry to the onnx config.
+
+- `dynamic_axes`: Describes the dynamic dimensions of the inputs and outputs.
+
+#### Example
+
+```python
+    dynamic_axes={
+        'input': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        },
+        'dets': {
+            0: 'batch',
+            1: 'num_dets',
+        },
+        'labels': {
+            0: 'batch',
+            1: 'num_dets',
+        },
+    }
+```
+
+## 2. How to write codebase config
+
+Codebase refers to the OpenMMLab series of model codebases. The codebase config consists of the short name of the codebase and the task type of the model.
+
+### Description of codebase config arguments
+
+- `type`: Short name of the OpenMMLab codebase, one of `mmcls`, `mmdet`, `mmseg`, `mmocr` and `mmedit`.
+- `task`: Task type of the model; please refer to the [task list of all codebases](#list-of-tasks-in-all-codebases).
+
+#### Example
+
+```python
+codebase_config = dict(type='mmcls', task='Classification')
+```
+
+## 3. How to write backend config
+
+The backend config is mainly used to specify the backend on which the model runs and to provide the information the model needs at runtime; see [ONNX Runtime](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/backends/onnxruntime.md), [TensorRT](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/backends/tensorrt.md), [NCNN](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/backends/ncnn.md) and [PPLNN](https://github.com/open-mmlab/mmdeploy/blob/master/docs/en/backends/pplnn.md) for details.
+
+- `type`: Backend of the model, one of `onnxruntime`, `ncnn`, `pplnn`, `tensorrt` and `openvino`.
+
+#### Example
+
+```python
+backend_config = dict(
+    type='tensorrt',
+    common_config=dict(
+        fp16_mode=False, max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 512, 1024],
+                    opt_shape=[1, 3, 1024, 2048],
+                    max_shape=[1, 3, 2048, 2048])))
+    ])
+```
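+A backend that needs no extra runtime information can be selected with nothing more than the `type` field. As a minimal sketch, using one of the `type` values listed above:
+
+```python
+# Minimal backend config: ONNX Runtime needs no extra runtime information.
+backend_config = dict(type='onnxruntime')
+```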
+## 4. A complete example of deployment config
+
+Here is a complete deployment config of an mmcls image classification task with TensorRT as the backend.
+
+```python
+
+codebase_config = dict(type='mmcls', task='Classification')
+
+backend_config = dict(
+    type='tensorrt',
+    common_config=dict(
+        fp16_mode=False,
+        max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 224, 224],
+                    opt_shape=[4, 3, 224, 224],
+                    max_shape=[64, 3, 224, 224])))])
+
+onnx_config = dict(
+    type='onnx',
+    dynamic_axes={
+        'input': {
+            0: 'batch',
+            2: 'height',
+            3: 'width'
+        },
+        'output': {
+            0: 'batch'
+        }
+    },
+    export_params=True,
+    keep_initializers_as_inputs=False,
+    opset_version=11,
+    save_file='end2end.onnx',
+    input_names=['input'],
+    output_names=['output'],
+    input_shape=[224, 224])
+```
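+Such a config can be loaded and inspected like any other OpenMMLab config. A hypothetical snippet, assuming the example above is saved as `classification_tensorrt_dynamic-224x224-224x224.py`:
+
+```python
+from mmcv import Config
+
+# Load the deploy config and check a few of its fields.
+cfg = Config.fromfile('classification_tensorrt_dynamic-224x224-224x224.py')
+print(cfg.backend_config.type)    # tensorrt
+print(cfg.onnx_config.save_file)  # end2end.onnx
+```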
+## 5. The naming rules of deployment config files
+
+We name config files in the following style. Contributors are advised to follow the same style.
+
+```bash
+(task name)_(backend name)_(dynamic or static).py
+```
+
+- `task name`: Task type of the model.
+- `backend name`: Name of the backend. Note that if you use quantization, you need to indicate the quantization type, e.g. `tensorrt-int8`.
+- `dynamic or static`: Dynamic or static export. Note that if the backend requires explicit shape information, you need to add a description of the input sizes in the format `height x width`, e.g. `dynamic-512x1024-2048x2048`, which means the minimum input shape is `512x1024` and the maximum is `2048x2048`.
+
+#### Example
+
+```bash
+detection_tensorrt-int8_dynamic-320x320-1344x1344.py
+```
+
+## 6. How to write model config
+
+Write a model config according to the codebase of the specific task. The model config is used to initialize the model; for details please refer to [MMClassification](https://github.com/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/config.md), [MMDetection](https://github.com/open-mmlab/mmdetection/blob/master/docs/zh_cn/tutorials/config.md), [MMSegmentation](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/tutorials/config.md), [MMOCR](https://github.com/open-mmlab/mmocr/blob/main/docs/en/tutorials/config.md) and [MMEditing](https://github.com/open-mmlab/mmediting/blob/master/docs/zh_cn/config.md).
+
+## 7. Reminders
+
+None
+
+## 8. FAQs
+
+None

From 787ebc23921971918a21fe66d67e203f5a7b974e Mon Sep 17 00:00:00 2001
From: RunningLeon
Date: Wed, 16 Feb 2022 11:03:12 +0800
Subject: [PATCH 04/17] [Feature]: Support mmpose (#94)

* add mmpose code
* update
* update
* add rewrites
* test trt
* test litehrnet with trt
* revert unused change
* add docs about mmpose
* add docstring and staticmethod
* update
* update
* update docs
* fix config name and docs
* add pose_detection ut
* add pose data
* fix lint of model.py
* add pose_detection_model ut
* fix docs and docstrinf
* add test_mmpose_models.py
* fix yapf
* fix lint
* fix create input
* support ort ut
* fix yapf
* fix docs
* fix createinput
* test ci bug
* rm test1.py
* fix yapf
* fix flake8
* fix yapf
* add config and update benchmark
* fix table format
* update mmpose benchmark
* update benchmark for mmpose
* run mmpose tests seperately in ci
* fix lint
* resolve comments
* add trt ut config
* fix test
* fix tests
* resolve comments
* resolve comments
* update tests

Co-authored-by: VVsssssk
Co-authored-by: hanrui1sensetime
---
 .github/workflows/build.yml | 9 +-
 .../pose-detection_ncnn_static-256x192.py | 3 +
 .../pose-detection_onnxruntime_static.py | 3 +
 .../pose-detection_openvino_static-256x192.py | 5 +
 .../pose-detection_pplnn_static-256x192.py | 5 +
 configs/mmpose/pose-detection_static.py | 3 +
 ...-detection_tensorrt-fp16_static-256x192.py | 13 +
 ...-detection_tensorrt-int8_static-256x192.py | 13 +
 .../pose-detection_tensorrt_static-256x192.py | 13 +
 demo/resources/human-pose.jpg | Bin 0 -> 39020 bytes
 docs/en/benchmark.md | 102 ++++++
 docs/en/codebases/mmpose.md | 39 +++
 docs/zh_cn/benchmark.md | 101 ++++++
 mmdeploy/codebase/mmedit/deploy/mmediting.py | 3 +-
 mmdeploy/codebase/mmpose/__init__.py | 5 +
 mmdeploy/codebase/mmpose/deploy/__init__.py | 5 +
 mmdeploy/codebase/mmpose/deploy/mmpose.py | 132 ++++++++
 .../codebase/mmpose/deploy/pose_detection.py | 297 ++++++++++++++++++
 .../mmpose/deploy/pose_detection_model.py | 168 ++++++++++
 mmdeploy/codebase/mmpose/models/__init__.py | 5 +
 .../mmpose/models/backbones/__init__.py | 5 +
 .../mmpose/models/backbones/litehrnet.py | 29 ++
 .../mmpose/models/detectors/__init__.py | 5 +
 .../mmpose/models/detectors/top_down.py | 25 ++
 .../codebase/mmpose/models/heads/__init__.py | 10 +
 .../heads/topdown_heatmap_multi_stage_head.py | 26 ++
 .../heads/topdown_heatmap_simple_head.py | 27 ++
 mmdeploy/utils/constants.py | 2 +
 requirements/optional.txt | 1 +
 .../test_mmcls/test_classification_model.py | 2 +-
 .../test_mmcls/test_mmcls_models.py | 2 +-
 .../annotations/person_keypoints_val2017.json | 1 +
 tests/test_codebase/test_mmpose/data/model.py | 250 +++++++++++++++
 .../test_mmpose/test_mmpose_models.py | 285 +++++++++++++++++
 .../test_mmpose/test_pose_detection.py | 148 +++++++++
 .../test_mmpose/test_pose_detection_model.py | 112 +++++++
 36 files changed, 1847 insertions(+), 7 deletions(-)
 create mode 100644 configs/mmpose/pose-detection_ncnn_static-256x192.py
 create mode 100644 configs/mmpose/pose-detection_onnxruntime_static.py
 create mode 100644 configs/mmpose/pose-detection_openvino_static-256x192.py
 create mode 100644 configs/mmpose/pose-detection_pplnn_static-256x192.py
 create mode 100644 configs/mmpose/pose-detection_static.py
 create mode 100644 configs/mmpose/pose-detection_tensorrt-fp16_static-256x192.py
 create mode 100644 configs/mmpose/pose-detection_tensorrt-int8_static-256x192.py
 create mode 100644 configs/mmpose/pose-detection_tensorrt_static-256x192.py
 create mode 100644 demo/resources/human-pose.jpg
 create mode 100644
docs/en/codebases/mmpose.md create mode 100644 mmdeploy/codebase/mmpose/__init__.py create mode 100644 mmdeploy/codebase/mmpose/deploy/__init__.py create mode 100644 mmdeploy/codebase/mmpose/deploy/mmpose.py create mode 100644 mmdeploy/codebase/mmpose/deploy/pose_detection.py create mode 100644 mmdeploy/codebase/mmpose/deploy/pose_detection_model.py create mode 100644 mmdeploy/codebase/mmpose/models/__init__.py create mode 100644 mmdeploy/codebase/mmpose/models/backbones/__init__.py create mode 100644 mmdeploy/codebase/mmpose/models/backbones/litehrnet.py create mode 100644 mmdeploy/codebase/mmpose/models/detectors/__init__.py create mode 100644 mmdeploy/codebase/mmpose/models/detectors/top_down.py create mode 100644 mmdeploy/codebase/mmpose/models/heads/__init__.py create mode 100644 mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_multi_stage_head.py create mode 100644 mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_simple_head.py create mode 100644 tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json create mode 100644 tests/test_codebase/test_mmpose/data/model.py create mode 100644 tests/test_codebase/test_mmpose/test_mmpose_models.py create mode 100644 tests/test_codebase/test_mmpose/test_pose_detection.py create mode 100644 tests/test_codebase/test_mmpose/test_pose_detection_model.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e3b6f45f90..d6e8263b69 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -51,7 +51,8 @@ jobs: run: rm -rf .eggs && pip install -e . - name: Run unittests and generate coverage report run: | - coverage run --branch --source mmdeploy -m pytest -rsE tests/ + coverage run --branch --source mmdeploy -m pytest -rsE tests --ignore=tests/test_codebase/test_mmpose + coverage run --branch --source mmdeploy --append -m pytest -rsE tests/test_codebase/test_mmpose coverage xml coverage report -m @@ -95,7 +96,8 @@ jobs: python tools/check_env.py - name: Run unittests and generate coverage report run: | - coverage run --branch --source mmdeploy -m pytest -rsE tests/ + coverage run --branch --source mmdeploy -m pytest -rsE tests --ignore=tests/test_codebase/test_mmpose + coverage run --branch --source mmdeploy --append -m pytest -rsE tests/test_codebase/test_mmpose coverage xml coverage report -m @@ -139,7 +141,8 @@ jobs: python tools/check_env.py - name: Run unittests and generate coverage report run: | - coverage run --branch --source mmdeploy -m pytest -rsE tests/ + coverage run --branch --source mmdeploy -m pytest -rsE tests --ignore=tests/test_codebase/test_mmpose + coverage run --branch --source mmdeploy --append -m pytest -rsE tests/test_codebase/test_mmpose coverage xml coverage report -m - name: Upload coverage to Codecov diff --git a/configs/mmpose/pose-detection_ncnn_static-256x192.py b/configs/mmpose/pose-detection_ncnn_static-256x192.py new file mode 100644 index 0000000000..573cbe8944 --- /dev/null +++ b/configs/mmpose/pose-detection_ncnn_static-256x192.py @@ -0,0 +1,3 @@ +_base_ = ['./pose-detection_static.py', '../_base_/backends/ncnn.py'] + +onnx_config = dict(input_shape=[192, 256]) diff --git a/configs/mmpose/pose-detection_onnxruntime_static.py b/configs/mmpose/pose-detection_onnxruntime_static.py new file mode 100644 index 0000000000..545b4afbca --- /dev/null +++ b/configs/mmpose/pose-detection_onnxruntime_static.py @@ -0,0 +1,3 @@ +_base_ = ['./pose-detection_static.py', '../_base_/backends/onnxruntime.py'] + +onnx_config = dict(input_shape=None) diff --git 
a/configs/mmpose/pose-detection_openvino_static-256x192.py b/configs/mmpose/pose-detection_openvino_static-256x192.py
new file mode 100644
index 0000000000..a8ec113359
--- /dev/null
+++ b/configs/mmpose/pose-detection_openvino_static-256x192.py
@@ -0,0 +1,5 @@
+_base_ = ['./pose-detection_static.py', '../_base_/backends/openvino.py']
+
+onnx_config = dict(input_shape=[192, 256])
+backend_config = dict(
+    model_inputs=[dict(opt_shapes=dict(input=[1, 3, 256, 192]))])
diff --git a/configs/mmpose/pose-detection_pplnn_static-256x192.py b/configs/mmpose/pose-detection_pplnn_static-256x192.py
new file mode 100644
index 0000000000..ad48665797
--- /dev/null
+++ b/configs/mmpose/pose-detection_pplnn_static-256x192.py
@@ -0,0 +1,5 @@
+_base_ = ['./pose-detection_static.py', '../_base_/backends/pplnn.py']
+
+onnx_config = dict(input_shape=[192, 256])
+
+backend_config = dict(model_inputs=dict(opt_shape=[1, 3, 256, 192]))
diff --git a/configs/mmpose/pose-detection_static.py b/configs/mmpose/pose-detection_static.py
new file mode 100644
index 0000000000..c3e9dbcaa6
--- /dev/null
+++ b/configs/mmpose/pose-detection_static.py
@@ -0,0 +1,3 @@
+_base_ = ['../_base_/onnx_config.py']
+
+codebase_config = dict(type='mmpose', task='PoseDetection')
diff --git a/configs/mmpose/pose-detection_tensorrt-fp16_static-256x192.py b/configs/mmpose/pose-detection_tensorrt-fp16_static-256x192.py
new file mode 100644
index 0000000000..f551d3929b
--- /dev/null
+++ b/configs/mmpose/pose-detection_tensorrt-fp16_static-256x192.py
@@ -0,0 +1,13 @@
+_base_ = ['./pose-detection_static.py', '../_base_/backends/tensorrt-fp16.py']
+
+onnx_config = dict(input_shape=[192, 256])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 256, 192],
+                    opt_shape=[1, 3, 256, 192],
+                    max_shape=[1, 3, 256, 192])))
+    ])
diff --git a/configs/mmpose/pose-detection_tensorrt-int8_static-256x192.py b/configs/mmpose/pose-detection_tensorrt-int8_static-256x192.py
new file mode 100644
index 0000000000..0a0aac31de
--- /dev/null
+++ b/configs/mmpose/pose-detection_tensorrt-int8_static-256x192.py
@@ -0,0 +1,13 @@
+_base_ = ['./pose-detection_static.py', '../_base_/backends/tensorrt-int8.py']
+
+onnx_config = dict(input_shape=[192, 256])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 256, 192],
+                    opt_shape=[1, 3, 256, 192],
+                    max_shape=[1, 3, 256, 192])))
+    ])
diff --git a/configs/mmpose/pose-detection_tensorrt_static-256x192.py b/configs/mmpose/pose-detection_tensorrt_static-256x192.py
new file mode 100644
index 0000000000..75a03ed301
--- /dev/null
+++ b/configs/mmpose/pose-detection_tensorrt_static-256x192.py
@@ -0,0 +1,13 @@
+_base_ = ['./pose-detection_static.py', '../_base_/backends/tensorrt.py']
+
+onnx_config = dict(input_shape=[192, 256])
+backend_config = dict(
+    common_config=dict(max_workspace_size=1 << 30),
+    model_inputs=[
+        dict(
+            input_shapes=dict(
+                input=dict(
+                    min_shape=[1, 3, 256, 192],
+                    opt_shape=[1, 3, 256, 192],
+                    max_shape=[1, 3, 256, 192])))
+    ])
diff --git a/demo/resources/human-pose.jpg b/demo/resources/human-pose.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8de401563e83541f016fa475bfcbf44fda25a38c
GIT binary patch literal 39020
diff --git a/docs/en/benchmark.md
index f9d2418185..8a5035de5e 100644
--- a/docs/en/benchmark.md
+++ b/docs/en/benchmark.md
@@ -1414,6 +1414,106 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](tut
+
+**MMPose**
+
+| Model     | Task           | Dataset | Metrics | PyTorch fp32 | ONNXRuntime fp32 | TensorRT fp32 | TensorRT fp16 | PPLNN fp16 | OpenVINO fp32 | Model config |
+| :-------- | :------------- | :------ | :------ | :----------: | :--------------: | :-----------: | :-----------: | :--------: | :-----------: | :----------- |
+| HRNet     | Pose Detection | COCO    | AP      | 0.748        | 0.748            | 0.748         | 0.748         | -          | 0.748         | $MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_256x192.py |
+|           |                |         | AR      | 0.802        | 0.802            | 0.802         | 0.802         | -          | 0.802         |              |
+| LiteHRNet | Pose Detection | COCO    | AP      | 0.663        | 0.663            | 0.663         | -             | -          | 0.663         | $MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/litehrnet_30_coco_256x192.py |
+|           |                |         | AR      | 0.728        | 0.728            | 0.728         | -             | -          | 0.728         |              |
+| MSPN      | Pose Detection | COCO    | AP      | 0.762        | 0.762            | 0.762         | 0.762         | -          | 0.762         | $MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/4xmspn50_coco_256x192.py |
+|           |                |         | AR      | 0.825        | 0.825            | 0.825         | 0.825         | -          | 0.825         |              |

### Notes

- As some datasets in codebases like MMDet contain images with various resolutions, the speed benchmark is gained through static configs in MMDeploy, while the performance benchmark is gained through dynamic ones.
@@ -1423,3 +1523,5 @@
- DBNet uses the interpolate mode `nearest` in the neck of the model, for which TensorRT-7 applies a quite different strategy from PyTorch. To make the repository compatible with TensorRT-7, we rewrite the neck to use the interpolate mode `bilinear`, which improves the final detection performance. To get performance matched with PyTorch, TensorRT-8+ is recommended, as its interpolate methods are the same as PyTorch's.
- Mask AP of Mask R-CNN drops by 1% for the backend. The main reason is that the predicted masks are directly interpolated to the original image in PyTorch, while in other backends they are first interpolated to the preprocessed input image of the model and then to the original image.
+
+- MMPose models are tested with `flip_test` explicitly set to `False` in model configs; see the sketch after this list.
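+
+The `flip_test` switch mentioned above lives in `model.test_cfg` of an MMPose model config. A minimal sketch of the relevant fragment, mirroring the test config shipped later in this patch (`tests/test_codebase/test_mmpose/data/model.py`):
+
+```python
+# Fragment of an MMPose model config used for benchmarking (a sketch,
+# not a complete config). Horizontal flip augmentation is disabled so the
+# deployed model and the PyTorch baseline see identical inputs.
+test_cfg = dict(
+    flip_test=False,  # the setting the benchmark note refers to
+    post_process='default',
+    shift_heatmap=False,
+    modulate_kernel=11)
+```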
diff --git a/docs/en/codebases/mmpose.md b/docs/en/codebases/mmpose.md
new file mode 100644
index 0000000000..3851782613
--- /dev/null
+++ b/docs/en/codebases/mmpose.md
@@ -0,0 +1,39 @@
+# MMPose Support
+
+[MMPose](https://github.com/open-mmlab/mmpose) is an open-source toolbox for pose estimation based on PyTorch. It is a part of the [OpenMMLab](https://openmmlab.com/) project.
+
+## MMPose installation tutorial
+
+Please refer to [official installation guide](https://mmpose.readthedocs.io/en/latest/install.html) to install the codebase.
+
+## MMPose models support
+
+| Model     | Task          | ONNX Runtime | TensorRT | NCNN | PPLNN | OpenVINO | Model config                                                                                |
+|:----------|:--------------|:------------:|:--------:|:----:|:-----:|:--------:|:-------------------------------------------------------------------------------------------:|
+| HRNet     | PoseDetection | Y            | Y        | Y    | N     | Y        | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#hrnet-cvpr-2019)     |
+| MSPN      | PoseDetection | Y            | Y        | Y    | N     | Y        | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#mspn-arxiv-2019)     |
+| LiteHRNet | PoseDetection | Y            | Y        | N    | N     | Y        | [config](https://mmpose.readthedocs.io/en/latest/papers/backbones.html#litehrnet-cvpr-2021) |
+
+### Example
+
+```bash
+python tools/deploy.py \
+configs/mmpose/posedetection_tensorrt_static-256x192.py \
+$MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_256x192.py \
+$MMPOSE_DIR/checkpoints/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth \
+$MMDEPLOY_DIR/demo/resources/human-pose.jpg \
+--work-dir work-dirs/mmpose/topdown/hrnet/trt \
+--device cuda
+```
+
+### Note
+
+- Usually, mmpose models need some extra information (a person bounding box) for the input image, which cannot be obtained from the image alone. So, when exporting the model, you can use `$MMDEPLOY_DIR/demo/resources/human-pose.jpg` as the input; a dummy box covering the whole image is generated internally, as sketched below.
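+
+A minimal sketch of that fallback, mirroring `create_input` in `mmdeploy/codebase/mmpose/deploy/pose_detection.py` from this patch (the image here is a stand-in for whatever file is passed to `tools/deploy.py`):
+
+```python
+import numpy as np
+
+# Any test image works; deploy.py passes e.g. human-pose.jpg.
+img = np.zeros((256, 192, 3), dtype=np.uint8)
+height, width = img.shape[:2]
+
+# No person detector runs at export time, so a dummy person result whose
+# box spans the whole image stands in for the missing bounding box.
+person_results = [{'bbox': np.array([0, 0, width, height])}]
+```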
+
+## Reminder
+
+None
+
+## FAQs
+
+None
diff --git a/docs/zh_cn/benchmark.md b/docs/zh_cn/benchmark.md
index f15629d9a9..e1a12af880 100644
--- a/docs/zh_cn/benchmark.md
+++ b/docs/zh_cn/benchmark.md
@@ -1415,6 +1415,105 @@ GPU: ncnn, TensorRT, PPLNN
+**MMPose**
+
+| Model     | Task           | Dataset | Metrics | PyTorch fp32 | ONNXRuntime fp32 | TensorRT fp32 | TensorRT fp16 | PPLNN fp16 | OpenVINO fp32 | Model config |
+| :-------- | :------------- | :------ | :------ | :----------: | :--------------: | :-----------: | :-----------: | :--------: | :-----------: | :----------- |
+| HRNet     | Pose Detection | COCO    | AP      | 0.748        | 0.748            | 0.748         | 0.748         | -          | 0.748         | $MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_256x192.py |
+|           |                |         | AR      | 0.802        | 0.802            | 0.802         | 0.802         | -          | 0.802         |              |
+| LiteHRNet | Pose Detection | COCO    | AP      | 0.663        | 0.663            | 0.663         | -             | -          | 0.663         | $MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/litehrnet_30_coco_256x192.py |
+|           |                |         | AR      | 0.728        | 0.728            | 0.728         | -             | -          | 0.728         |              |
+| MSPN      | Pose Detection | COCO    | AP      | 0.762        | 0.762            | 0.762         | 0.762         | -          | 0.762         | $MMPOSE_DIR/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/4xmspn50_coco_256x192.py |
+|           |                |         | AR      | 0.825        | 0.825            | 0.825         | 0.825         | -          | 0.825         |              |
### 注意 @@ -1423,3 +1522,5 @@ GPU: ncnn, TensorRT, PPLNN - TensorRT 的一些 int8 性能基准测试需要具有 tensor core 的 Nvidia 卡,否则性能会大幅下降。 - DBNet 在模型的颈部使用了`nearest`插值模式,TensorRT-7 应用了与 Pytorch 完全不同的策略。为了使与 TensorRT-7 兼容,我们重写了`neck`以使用`bilinear`插值模式,这提高了最终检测性能。为了获得与 Pytorch 匹配的性能,推荐使用 TensorRT-8+,其插值方法与 Pytorch 相同。 + +- MMPose 中的模型是在模型配置文件中 `flip_test` 设置为 `False`条件下完成的。 diff --git a/mmdeploy/codebase/mmedit/deploy/mmediting.py b/mmdeploy/codebase/mmedit/deploy/mmediting.py index 62f4b921aa..b56bcb2255 100644 --- a/mmdeploy/codebase/mmedit/deploy/mmediting.py +++ b/mmdeploy/codebase/mmedit/deploy/mmediting.py @@ -7,7 +7,7 @@ from torch.utils.data import DataLoader, Dataset from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase -from mmdeploy.utils import Codebase, get_task_type +from mmdeploy.utils import Codebase, get_task_type, load_config def __build_mmedit_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, @@ -56,7 +56,6 @@ def build_dataset(dataset_cfg: Union[str, mmcv.Config], *args, """ from mmedit.datasets import build_dataset as build_dataset_mmedit - from mmdeploy.utils import load_config dataset_cfg = load_config(dataset_cfg)[0] data = dataset_cfg.data diff --git a/mmdeploy/codebase/mmpose/__init__.py b/mmdeploy/codebase/mmpose/__init__.py new file mode 100644 index 0000000000..03b0a6b1ee --- /dev/null +++ b/mmdeploy/codebase/mmpose/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .deploy import MMPose, PoseDetection +from .models import * # noqa: F401,F403 + +__all__ = ['MMPose', 'PoseDetection'] diff --git a/mmdeploy/codebase/mmpose/deploy/__init__.py b/mmdeploy/codebase/mmpose/deploy/__init__.py new file mode 100644 index 0000000000..9218af29c0 --- /dev/null +++ b/mmdeploy/codebase/mmpose/deploy/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmdeploy.codebase.mmpose.deploy.mmpose import MMPose +from mmdeploy.codebase.mmpose.deploy.pose_detection import PoseDetection + +__all__ = ['MMPose', 'PoseDetection'] diff --git a/mmdeploy/codebase/mmpose/deploy/mmpose.py b/mmdeploy/codebase/mmpose/deploy/mmpose.py new file mode 100644 index 0000000000..25b65b4cdc --- /dev/null +++ b/mmdeploy/codebase/mmpose/deploy/mmpose.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Union + +import mmcv +import torch +from mmcv.utils import Registry +from torch.utils.data import DataLoader, Dataset + +from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase +from mmdeploy.utils import Codebase, get_task_type, load_config + + +def __build_mmpose_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str, registry: Registry) -> BaseTask: + task = get_task_type(deploy_cfg) + return registry.module_dict[task.value](model_cfg, deploy_cfg, device) + + +MMPOSE_TASK = Registry('mmpose_tasks', build_func=__build_mmpose_task) + + +@CODEBASE.register_module(Codebase.MMPOSE.value, force=True) +class MMPose(MMCodebase): + """mmpose codebase class.""" + + task_registry = MMPOSE_TASK + + def __init__(self): + super(MMCodebase, self).__init__() + + @staticmethod + def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str) -> BaseTask: + """The interface to build the task processors of mmpose. + + Args: + model_cfg (mmcv.Config): Model config file. + deploy_cfg (mmcv.Config): Deployment config file. + device (str): A string specifying device type. + + Returns: + BaseTask: A task processor. 
+ """ + return MMPOSE_TASK.build(model_cfg, deploy_cfg, device) + + @staticmethod + def build_dataset(dataset_cfg: Union[str, mmcv.Config], + dataset_type: str = 'test', + **kwargs) -> Dataset: + """Build dataset for mmpose. + + Args: + dataset_cfg (str | mmcv.Config): The input dataset config. + dataset_type (str): A string represents dataset type, e.g.: 'train' + , 'test', 'val'. Defaults to 'test'. + + Returns: + Dataset: A PyTorch dataset. + """ + from mmpose.datasets import build_dataset + + dataset_cfg = load_config(dataset_cfg)[0] + assert dataset_type in dataset_cfg.data + data_cfg = dataset_cfg.data[dataset_type] + data_cfg.test_mode = True + dataset = build_dataset(data_cfg, dict(test_mode=True)) + return dataset + + @staticmethod + def build_dataloader(dataset: Dataset, + samples_per_gpu: int, + workers_per_gpu: int, + num_gpus: int = 1, + dist: bool = False, + shuffle: bool = False, + seed: Optional[int] = None, + drop_last: bool = False, + pin_memory: bool = True, + **kwargs) -> DataLoader: + """Build PyTorch DataLoader. + + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, + i.e., batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data + loading for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed + training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: False. + seed (int): An integer set to be seed. Default is ``None``. + drop_last (bool): Whether to drop the last incomplete batch + in epoch. Default: False. + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True. + kwargs: Other keyword arguments to be used to initialize + DataLoader. + + Returns: + DataLoader: A PyTorch dataloader. + """ + from mmpose.datasets import build_dataloader + return build_dataloader( + dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=num_gpus, + dist=dist, + shuffle=shuffle, + seed=seed, + drop_last=drop_last, + pin_memory=pin_memory, + **kwargs) + + @staticmethod + def single_gpu_test(model: torch.nn.Module, data_loader: DataLoader, + show: bool, out_dir: str, **kwargs) -> list: + """Run test with single gpu. + + Args: + model (torch.nn.Module): Input model from nn.Module. + data_loader (DataLoader): PyTorch data loader. + show (bool): Specifying whether to show plotted results. Defaults + to ``False``. + out_dir (str): A directory to save results, defaults to ``None``. + Returns: + list: The prediction results. + """ + from mmpose.apis import single_gpu_test + return single_gpu_test(model, data_loader) diff --git a/mmdeploy/codebase/mmpose/deploy/pose_detection.py b/mmdeploy/codebase/mmpose/deploy/pose_detection.py new file mode 100644 index 0000000000..5c6e759acf --- /dev/null +++ b/mmdeploy/codebase/mmpose/deploy/pose_detection.py @@ -0,0 +1,297 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import logging +import os +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmcv.parallel import collate +from torch.utils.data import Dataset + +from mmdeploy.codebase.base import BaseTask +from mmdeploy.codebase.mmpose.deploy.mmpose import MMPOSE_TASK +from mmdeploy.utils import Task + + +@MMPOSE_TASK.register_module(Task.POSE_DETECTION.value) +class PoseDetection(BaseTask): + """Pose detection task class. + + Args: + model_cfg (mmcv.Config): Original PyTorch model config file. 
+ deploy_cfg (mmcv.Config): Deployment config file or loaded Config + object. + device (str): A string represents device type. + """ + + def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + super().__init__(model_cfg, deploy_cfg, device) + + def init_backend_model(self, + model_files: Sequence[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize backend model. + + Args: + model_files (Sequence[str]): Input model files. Default is None. + + Returns: + nn.Module: An initialized backend model. + """ + from .pose_detection_model import build_pose_detection_model + model = build_pose_detection_model( + model_files, self.model_cfg, self.deploy_cfg, device=self.device) + return model.eval() + + def init_pytorch_model(self, + model_checkpoint: Optional[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize torch model. + + Args: + model_checkpoint (str): The checkpoint file of torch model, + defaults to `None`. + + Returns: + nn.Module: An initialized torch model generated by other OpenMMLab + codebases. + """ + from mmcv.cnn.utils import revert_sync_batchnorm + from mmpose.apis import init_pose_model + model = init_pose_model(self.model_cfg, model_checkpoint, self.device) + model = revert_sync_batchnorm(model) + model.eval() + return model + + def create_input(self, + imgs: Union[str, np.ndarray], + input_shape: Sequence[int] = None, + **kwargs) -> Tuple[Dict, torch.Tensor]: + """Create input for pose detection. + + Args: + imgs (Any): Input image(s), accepted data type are ``str``, + ``np.ndarray``. + input_shape (list[int]): A list of two integer in (width, height) + format specifying input shape. Defaults to ``None``. + + Returns: + tuple: (data, img), meta information for the input image and input. + """ + from mmpose.apis.inference import LoadImage, _box2cs + from mmpose.datasets.dataset_info import DatasetInfo + from mmpose.datasets.pipelines import Compose + + cfg = self.model_cfg + + dataset_info = cfg.data.test.dataset_info + dataset_info = DatasetInfo(dataset_info) + + if isinstance(imgs, str): + imgs = mmcv.imread(imgs) + height, width = imgs.shape[:2] + # create dummy person results + person_results = [{'bbox': np.array([0, 0, width, height])}] + bboxes = np.array([box['bbox'] for box in person_results]) + + # build the data pipeline + channel_order = cfg.test_pipeline[0].get('channel_order', 'rgb') + test_pipeline = [LoadImage(channel_order=channel_order) + ] + cfg.test_pipeline[1:] + test_pipeline = Compose(test_pipeline) + dataset_name = dataset_info.dataset_name + flip_pairs = dataset_info.flip_pairs + batch_data = [] + if input_shape is not None: + image_size = input_shape + else: + image_size = np.array(cfg.data_cfg['image_size']) + for bbox in bboxes: + center, scale = _box2cs(cfg, bbox) + + # prepare data + data = { + 'img_or_path': + imgs, + 'center': + center, + 'scale': + scale, + 'bbox_score': + bbox[4] if len(bbox) == 5 else 1, + 'bbox_id': + 0, # need to be assigned if batch_size > 1 + 'dataset': + dataset_name, + 'joints_3d': + np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32), + 'joints_3d_visible': + np.zeros((cfg.data_cfg.num_joints, 3), dtype=np.float32), + 'rotation': + 0, + 'ann_info': { + 'image_size': image_size, + 'num_joints': cfg.data_cfg['num_joints'], + 'flip_pairs': flip_pairs + } + } + data = test_pipeline(data) + batch_data.append(data) + + batch_data = collate(batch_data, samples_per_gpu=1) + # scatter not work so just move image to cuda device + batch_data['img'] = 
batch_data['img'].to(torch.device(self.device)) + # get all img_metas of each bounding box + batch_data['img_metas'] = [ + img_metas[0] for img_metas in batch_data['img_metas'].data + ] + return batch_data, batch_data['img'] + + def visualize(self, + model: torch.nn.Module, + image: Union[str, np.ndarray], + result: list, + output_file: str, + window_name: str, + show_result: bool = False, + **kwargs): + """Visualize predictions of a model. + + Args: + model (nn.Module): Input model. + image (str | np.ndarray): Input image to draw predictions on. + result (list): A list of predictions. + output_file (str): Output file to save drawn image. + window_name (str): The name of visualization window. Defaults to + an empty string. + show_result (bool): Whether to show result in windows, defaults + to `False`. + """ + from mmpose.datasets.dataset_info import DatasetInfo + dataset_info = self.model_cfg.data.test.dataset_info + dataset_info = DatasetInfo(dataset_info) + skeleton = dataset_info.skeleton + pose_kpt_color = dataset_info.pose_kpt_color + pose_link_color = dataset_info.pose_link_color + if hasattr(model, 'module'): + model = model.module + if isinstance(image, str): + image = mmcv.imread(image) + # convert result + result = [dict(keypoints=pose) for pose in result['preds']] + model.show_result( + image, + result, + skeleton=skeleton, + pose_kpt_color=pose_kpt_color, + pose_link_color=pose_link_color, + out_file=output_file, + show=show_result, + win_name=window_name) + + @staticmethod + def evaluate_outputs(model_cfg: mmcv.Config, + outputs: Sequence, + dataset: Dataset, + metrics: Optional[str] = None, + out: Optional[str] = None, + metric_options: Optional[dict] = None, + format_only: bool = False, + **kwargs): + """Perform post-processing to predictions of model. + + Args: + model_cfg (mmcv.Config): The model config. + outputs (list): A list of predictions of model inference. + dataset (Dataset): Input dataset to run test. + metrics (str): Evaluation metrics, which depends on + the codebase and the dataset, e.g., e.g., "mIoU" for generic + datasets, and "cityscapes" for Cityscapes in mmseg. + out (str): Output result file in pickle format, defaults to `None`. + metric_options (dict): Custom options for evaluation, will be + kwargs for dataset.evaluate() function. Defaults to `None`. + format_only (bool): Format the output results without perform + evaluation. It is useful when you want to format the result + to a specific format and submit it to the test server. Defaults + to `False`. + """ + res_folder = '.' + if out: + logging.info(f'\nwriting results to {out}') + mmcv.dump(outputs, out) + res_folder, _ = os.path.split(out) + os.makedirs(res_folder, exist_ok=True) + + eval_config = model_cfg.get('evaluation', {}).copy() + if metrics is not None: + eval_config.update(dict(metric=metrics)) + + results = dataset.evaluate(outputs, res_folder, **eval_config) + for k, v in sorted(results.items()): + print(f'{k}: {v}') + + def get_model_name(self) -> str: + """Get the model name. + + Return: + str: the name of the model. + """ + assert 'type' in self.model_cfg.model, 'model config contains no type' + name = self.model_cfg.model.type.lower() + return name + + @staticmethod + def get_partition_cfg(partition_type: str, **kwargs) -> Dict: + """Get a certain partition config for mmpose. + + Args: + partition_type (str): A string specifying partition type. 
+ """ + raise NotImplementedError('Not supported yet.') + + def get_preprocess(self) -> Dict: + """Get the preprocess information for SDK.""" + raise NotImplementedError('Not supported yet.') + + def get_postprocess(self) -> Dict: + """Get the postprocess information for SDK.""" + raise NotImplementedError('Not supported yet.') + + @staticmethod + def get_tensor_from_input(input_data: Dict[str, Any], + **kwargs) -> torch.Tensor: + """Get input tensor from input data. + + Args: + input_data (dict): Input data containing meta info and image + tensor. + Returns: + torch.Tensor: An image in `Tensor`. + """ + img = input_data['img'] + if isinstance(img, (list, tuple)): + img = img[0] + return img + + @staticmethod + def run_inference(model, model_inputs: Dict[str, torch.Tensor]): + """Run inference once for a pose model of mmpose. + + Args: + model (nn.Module): Input model. + model_inputs (dict): A dict containing model inputs tensor and + meta info. + + Returns: + list: The predictions of model inference. + """ + output = model( + **model_inputs, + return_loss=False, + return_heatmap=False, + target=None, + target_weight=None) + return [output] diff --git a/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py b/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py new file mode 100644 index 0000000000..e54a2f9494 --- /dev/null +++ b/mmdeploy/codebase/mmpose/deploy/pose_detection_model.py @@ -0,0 +1,168 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Union + +import mmcv +import numpy as np +import torch + +from mmdeploy.codebase.base import BaseBackendModel +from mmdeploy.utils import Backend, get_backend, load_config + + +class End2EndModel(BaseBackendModel): + """End to end model for inference of pose detection. + + Args: + backend (Backend): The backend enum, specifying backend type. + backend_files (Sequence[str]): Paths to all required backend files(e.g. + '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn). + device (str): A string represents device type. + deploy_cfg (str | mmcv.Config): Deployment config file or loaded Config + object. + deploy_cfg (str | mmcv.Config): Model config file or loaded Config + object. + """ + + def __init__(self, + backend: Backend, + backend_files: Sequence[str], + device: str, + deploy_cfg: Union[str, mmcv.Config] = None, + model_cfg: Union[str, mmcv.Config] = None, + **kwargs): + super(End2EndModel, self).__init__(deploy_cfg=deploy_cfg) + from mmpose.models.heads.topdown_heatmap_base_head import \ + TopdownHeatmapBaseHead + + self.deploy_cfg = deploy_cfg + self.model_cfg = model_cfg + self._init_wrapper( + backend=backend, backend_files=backend_files, device=device) + # create base_head for decoding heatmap + base_head = TopdownHeatmapBaseHead() + base_head.test_cfg = model_cfg.model.test_cfg + self.base_head = base_head + + def _init_wrapper(self, backend, backend_files, device): + """Initialize backend wrapper. + + Args: + backend (Backend): The backend enum, specifying backend type. + backend_files (Sequence[str]): Paths to all required backend files + (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn). + device (str): A string specifying device type. + """ + output_names = self.output_names + self.wrapper = BaseBackendModel._build_wrapper( + backend=backend, + backend_files=backend_files, + device=device, + output_names=output_names) + + def forward(self, img: torch.Tensor, img_metas: Sequence[Sequence[dict]], + *args, **kwargs): + """Run forward inference. 
+
+        Args:
+            img (torch.Tensor): Input image(s) in [N x C x H x W] format.
+            img_metas (Sequence[Sequence[dict]]): A list of meta info for
+                image(s).
+            *args: Other arguments.
+            **kwargs: Other key-pair arguments.
+
+        Returns:
+            list: A list containing predictions.
+        """
+        input_img = img.contiguous()
+        outputs = self.forward_test(input_img, img_metas, *args, **kwargs)
+        heatmaps = outputs[0]
+        key_points = self.base_head.decode(img_metas, heatmaps)
+        return key_points
+
+    def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \
+            List[np.ndarray]:
+        """The interface for forward test.
+
+        Args:
+            imgs (torch.Tensor): Input image(s) in [N x C x H x W] format.
+
+        Returns:
+            List[np.ndarray]: A list of predicted heatmaps.
+        """
+        outputs = self.wrapper({self.input_name: imgs})
+        outputs = self.wrapper.output_to_list(outputs)
+        outputs = [out.detach().cpu().numpy() for out in outputs]
+        return outputs
+
+    def show_result(self,
+                    img: np.ndarray,
+                    result: list,
+                    win_name: str = '',
+                    skeleton: Optional[Sequence[Sequence[int]]] = None,
+                    pose_kpt_color: Optional[Sequence[Sequence[int]]] = None,
+                    pose_link_color: Optional[Sequence[Sequence[int]]] = None,
+                    show: bool = False,
+                    out_file: Optional[str] = None,
+                    **kwargs):
+        """Show predictions of pose.
+
+        Args:
+            img (np.ndarray): Input image to draw predictions on.
+            result (list): A list of predictions.
+            win_name (str): The name of visualization window. Default is ''.
+            skeleton (Sequence[Sequence[int]]): The connection of keypoints,
+                with 0-based indexing.
+            pose_kpt_color (np.array[Nx3]): Color of N keypoints.
+                If ``None``, do not draw keypoints.
+            pose_link_color (np.array[Mx3]): Color of M links.
+                If ``None``, do not draw links.
+            show (bool): Whether to show plotted image in windows.
+                Defaults to ``False``.
+            out_file (str): Output image file to save drawn predictions.
+
+        Returns:
+            np.ndarray: Drawn image, only if not ``show`` or ``out_file``.
+        """
+        from mmpose.models.detectors import TopDown
+        return TopDown.show_result(
+            self,
+            img,
+            result,
+            skeleton=skeleton,
+            pose_kpt_color=pose_kpt_color,
+            pose_link_color=pose_link_color,
+            show=show,
+            out_file=out_file,
+            win_name=win_name)
+
+
+def build_pose_detection_model(model_files: Sequence[str],
+                               model_cfg: Union[str, mmcv.Config],
+                               deploy_cfg: Union[str, mmcv.Config],
+                               device: str, **kwargs):
+    """Build pose detection model for different backends.
+
+    Args:
+        model_files (Sequence[str]): Input model file(s).
+        model_cfg (str | mmcv.Config): Input model config file or Config
+            object.
+        deploy_cfg (str | mmcv.Config): Input deployment config file or
+            Config object.
+        device (str): Device to run the model on.
+
+    Returns:
+        BaseBackendModel: Pose model for a configured backend.
+    """
+    # load cfg if necessary
+    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
+
+    backend = get_backend(deploy_cfg)
+    backend_pose_model = End2EndModel(
+        backend,
+        model_files,
+        device,
+        deploy_cfg=deploy_cfg,
+        model_cfg=model_cfg,
+        **kwargs)
+
+    return backend_pose_model
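+
+For reference, a minimal sketch of how this builder is typically called; the config and model file names are placeholders, mirroring the unit tests added later in this patch:
+
+```python
+from mmdeploy.utils import load_config
+from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \
+    build_pose_detection_model
+
+# Placeholder paths: a deployment config, an MMPose model config and an
+# exported backend file such as end2end.onnx.
+deploy_cfg, model_cfg = load_config('deploy_cfg.py', 'model_cfg.py')
+pose_model = build_pose_detection_model(['end2end.onnx'], model_cfg,
+                                        deploy_cfg, device='cpu')
+```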
diff --git a/mmdeploy/codebase/mmpose/models/__init__.py b/mmdeploy/codebase/mmpose/models/__init__.py
new file mode 100644
index 0000000000..d1fdb9eb44
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from .backbones import *  # noqa: F401,F403
+from .detectors import *  # noqa: F401,F403
+from .heads import *  # noqa: F401,F403
diff --git a/mmdeploy/codebase/mmpose/models/backbones/__init__.py b/mmdeploy/codebase/mmpose/models/backbones/__init__.py
new file mode 100644
index 0000000000..9309949c52
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/backbones/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from .litehrnet import cross_resolution_weighting__forward
+
+__all__ = ['cross_resolution_weighting__forward']
diff --git a/mmdeploy/codebase/mmpose/models/backbones/litehrnet.py b/mmdeploy/codebase/mmpose/models/backbones/litehrnet.py
new file mode 100644
index 0000000000..609eadaef6
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/backbones/litehrnet.py
@@ -0,0 +1,29 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn.functional as F
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmpose.models.backbones.litehrnet.CrossResolutionWeighting.forward')
+def cross_resolution_weighting__forward(ctx, self, x):
+    """Rewrite ``forward`` for default backend.
+
+    Rewrite this function to support exporting ``adaptive_avg_pool2d``.
+
+    Args:
+        x (list): block input.
+    """
+
+    mini_size = [int(_) for _ in x[-1].shape[-2:]]
+    out = [F.adaptive_avg_pool2d(s, mini_size) for s in x[:-1]] + [x[-1]]
+    out = torch.cat(out, dim=1)
+    out = self.conv1(out)
+    out = self.conv2(out)
+    out = torch.split(out, self.channels, dim=1)
+    out = [
+        s * F.interpolate(a, size=s.size()[-2:], mode='nearest')
+        for s, a in zip(x, out)
+    ]
+    return out
diff --git a/mmdeploy/codebase/mmpose/models/detectors/__init__.py b/mmdeploy/codebase/mmpose/models/detectors/__init__.py
new file mode 100644
index 0000000000..64d8d3262d
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/detectors/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from .top_down import top_down__forward
+
+__all__ = ['top_down__forward']
diff --git a/mmdeploy/codebase/mmpose/models/detectors/top_down.py b/mmdeploy/codebase/mmpose/models/detectors/top_down.py
new file mode 100644
index 0000000000..0fe3a24000
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/detectors/top_down.py
@@ -0,0 +1,25 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmpose.models.detectors.top_down.TopDown.forward')
+def top_down__forward(ctx, self, img, *args, **kwargs):
+    """Rewrite ``forward`` of TopDown for default backend.
+
+    Rewrite this function to run the model directly.
+
+    Args:
+        img (torch.Tensor[NxCxHxW]): Input images.
+
+    Returns:
+        torch.Tensor: The predicted heatmaps.
+    """
+    features = self.backbone(img)
+    if self.with_neck:
+        features = self.neck(features)
+    assert self.with_keypoint
+    output_heatmap = self.keypoint_head.inference_model(
+        features, flip_pairs=None)
+    return output_heatmap
diff --git a/mmdeploy/codebase/mmpose/models/heads/__init__.py b/mmdeploy/codebase/mmpose/models/heads/__init__.py
new file mode 100644
index 0000000000..f462d37c75
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/heads/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .topdown_heatmap_multi_stage_head import \
+    topdown_heatmap_msmu_head__inference_model
+from .topdown_heatmap_simple_head import \
+    topdown_heatmap_simple_head__inference_model
+
+__all__ = [
+    'topdown_heatmap_simple_head__inference_model',
+    'topdown_heatmap_msmu_head__inference_model'
+]
diff --git a/mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_multi_stage_head.py b/mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_multi_stage_head.py
new file mode 100644
index 0000000000..5bb1014a43
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_multi_stage_head.py
@@ -0,0 +1,26 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmpose.models.heads.TopdownHeatmapMSMUHead.inference_model')
+def topdown_heatmap_msmu_head__inference_model(ctx, self, x, flip_pairs=None):
+    """Rewrite ``inference_model`` for default backend.
+
+    Rewrite this function to run forward directly, so that the result does
+    not need to be transformed to np.ndarray.
+
+    Args:
+        x (list[torch.Tensor[N,K,H,W]]): Input features.
+        flip_pairs (None | list[tuple]):
+            Pairs of keypoints which are mirrored.
+
+    Returns:
+        output_heatmap (torch.Tensor): Output heatmaps.
+    """
+    assert flip_pairs is None
+    output = self.forward(x)
+    assert isinstance(output, list)
+    output = output[-1]
+    return output
diff --git a/mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_simple_head.py b/mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_simple_head.py
new file mode 100644
index 0000000000..0c0c6af8fc
--- /dev/null
+++ b/mmdeploy/codebase/mmpose/models/heads/topdown_heatmap_simple_head.py
@@ -0,0 +1,27 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from mmdeploy.core import FUNCTION_REWRITER
+
+
+@FUNCTION_REWRITER.register_rewriter(
+    'mmpose.models.heads.TopdownHeatmapSimpleHead.inference_model')
+def topdown_heatmap_simple_head__inference_model(ctx,
+                                                 self,
+                                                 x,
+                                                 flip_pairs=None):
+    """Rewrite ``inference_model`` of TopdownHeatmapSimpleHead for default
+    backend.
+
+    Rewrite this function to run forward directly, so that the result does
+    not need to be transformed to np.ndarray.
+
+    Args:
+        x (torch.Tensor[N,K,H,W]): Input features.
+        flip_pairs (None | list[tuple]):
+            Pairs of keypoints which are mirrored.
+
+    Returns:
+        output_heatmap (torch.Tensor): Output heatmaps.
+ """ + assert flip_pairs is None + output = self.forward(x) + return output diff --git a/mmdeploy/utils/constants.py b/mmdeploy/utils/constants.py index ab726fd528..da07cb28e7 100644 --- a/mmdeploy/utils/constants.py +++ b/mmdeploy/utils/constants.py @@ -24,6 +24,7 @@ class Task(AdvancedEnum): CLASSIFICATION = 'Classification' OBJECT_DETECTION = 'ObjectDetection' INSTANCE_SEGMENTATION = 'InstanceSegmentation' + POSE_DETECTION = 'PoseDetection' class Codebase(AdvancedEnum): @@ -33,6 +34,7 @@ class Codebase(AdvancedEnum): MMCLS = 'mmcls' MMOCR = 'mmocr' MMEDIT = 'mmedit' + MMPOSE = 'mmpose' class Backend(AdvancedEnum): diff --git a/requirements/optional.txt b/requirements/optional.txt index 2f7a0bbef4..e63b39de60 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -2,6 +2,7 @@ mmcls>=0.15.0,<=0.19.0 mmdet>=2.19.0,<=2.20.0 mmedit mmocr>=0.3.0 +mmpose>=0.23.0 mmsegmentation onnxruntime>=1.8.0 openvino-dev diff --git a/tests/test_codebase/test_mmcls/test_classification_model.py b/tests/test_codebase/test_mmcls/test_classification_model.py index c5a6185a77..af640ce694 100644 --- a/tests/test_codebase/test_mmcls/test_classification_model.py +++ b/tests/test_codebase/test_mmcls/test_classification_model.py @@ -107,7 +107,7 @@ def test_get_classes_from_config(from_file, data_type): @backend_checker(Backend.ONNXRUNTIME) -def test_build_classificaation_model(): +def test_build_classification_model(): model_cfg = mmcv.Config(dict(data=dict(test={'type': 'ImageNet'}))) deploy_cfg = mmcv.Config( dict( diff --git a/tests/test_codebase/test_mmcls/test_mmcls_models.py b/tests/test_codebase/test_mmcls/test_mmcls_models.py index c3b10cea8b..a3572fa38a 100644 --- a/tests/test_codebase/test_mmcls/test_mmcls_models.py +++ b/tests/test_codebase/test_mmcls/test_mmcls_models.py @@ -22,7 +22,7 @@ def get_invertedresudual_model(): return model -def test_baseclassfier_forward(): +def test_baseclassifier_forward(): from mmcls.models.classifiers import BaseClassifier class DummyClassifier(BaseClassifier): diff --git a/tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json b/tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json new file mode 100644 index 0000000000..3aefa94c65 --- /dev/null +++ b/tests/test_codebase/test_mmpose/data/annotations/person_keypoints_val2017.json @@ -0,0 +1 @@ +{"info": {"description": "COCO 2017 Dataset","url": "http://cocodataset.org","version": "1.0","year": 2017,"contributor": "COCO Consortium","date_created": "2017/09/01"},"licenses": [{"url": "http://creativecommons.org/licenses/by-nc-sa/2.0/","id": 1,"name": "Attribution-NonCommercial-ShareAlike License"},{"url": "http://creativecommons.org/licenses/by-nc/2.0/","id": 2,"name": "Attribution-NonCommercial License"},{"url": "http://creativecommons.org/licenses/by-nc-nd/2.0/","id": 3,"name": "Attribution-NonCommercial-NoDerivs License"},{"url": "http://creativecommons.org/licenses/by/2.0/","id": 4,"name": "Attribution License"},{"url": "http://creativecommons.org/licenses/by-sa/2.0/","id": 5,"name": "Attribution-ShareAlike License"},{"url": "http://creativecommons.org/licenses/by-nd/2.0/","id": 6,"name": "Attribution-NoDerivs License"},{"url": "http://flickr.com/commons/usage/","id": 7,"name": "No known copyright restrictions"},{"url": "http://www.usa.gov/copyright.shtml","id": 8,"name": "United States Government Work"}],"images": [{"license": 4,"file_name": "000000397133.jpg","coco_url": "http://images.cocodataset.org/val2017/000000397133.jpg","height": 427,"width": 
640,"date_captured": "2013-11-14 17:02:52","flickr_url": "http://farm7.staticflickr.com/6116/6255196340_da26cf2c9e_z.jpg","id": 397133}], "annotations": [{"segmentation": [[125.12,539.69,140.94,522.43,100.67,496.54,84.85,469.21,73.35,450.52,104.99,342.65,168.27,290.88,179.78,288,189.84,286.56,191.28,260.67,202.79,240.54,221.48,237.66,248.81,243.42,257.44,256.36,253.12,262.11,253.12,275.06,299.15,233.35,329.35,207.46,355.24,206.02,363.87,206.02,365.3,210.34,373.93,221.84,363.87,226.16,363.87,237.66,350.92,237.66,332.22,234.79,314.97,249.17,271.82,313.89,253.12,326.83,227.24,352.72,214.29,357.03,212.85,372.85,208.54,395.87,228.67,414.56,245.93,421.75,266.07,424.63,276.13,437.57,266.07,450.52,284.76,464.9,286.2,479.28,291.96,489.35,310.65,512.36,284.76,549.75,244.49,522.43,215.73,546.88,199.91,558.38,204.22,565.57,189.84,568.45,184.09,575.64,172.58,578.52,145.26,567.01,117.93,551.19,133.75,532.49]],"num_keypoints": 10,"area": 47803.27955,"iscrowd": 0,"keypoints": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,142,309,1,177,320,2,191,398,2,237,317,2,233,426,2,306,233,2,92,452,2,123,468,2,0,0,0,251,469,2,0,0,0,162,551,2],"image_id": 425226,"bbox": [73.35,206.02,300.58,372.5],"category_id": 1,"id": 183126}], "categories": [{"supercategory": "person","id": 1,"name": "person","keypoints": ["nose","left_eye","right_eye","left_ear","right_ear","left_shoulder","right_shoulder","left_elbow","right_elbow","left_wrist","right_wrist","left_hip","right_hip","left_knee","right_knee","left_ankle","right_ankle"],"skeleton": [[16,14],[14,12],[17,15],[15,13],[12,13],[6,12],[7,13],[6,7],[6,8],[7,9],[8,10],[9,11],[2,3],[1,2],[1,3],[2,4],[3,5],[4,6],[5,7]]}]} diff --git a/tests/test_codebase/test_mmpose/data/model.py b/tests/test_codebase/test_mmpose/data/model.py new file mode 100644 index 0000000000..947b396f5d --- /dev/null +++ b/tests/test_codebase/test_mmpose/data/model.py @@ -0,0 +1,250 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# model settings +channel_cfg = dict( + num_output_channels=17, + dataset_joints=17, + dataset_channel=[ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + ], + inference_channel=[ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 + ]) +model = dict( + type='TopDown', + pretrained=None, + backbone=dict(type='ResNet', depth=18), + keypoint_head=dict( + type='TopdownHeatmapSimpleHead', + in_channels=512, + out_channels=17, + loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)), + train_cfg=dict(), + test_cfg=dict( + flip_test=False, + post_process='default', + shift_heatmap=False, + modulate_kernel=11)) + +data_cfg = dict( + image_size=[192, 256], + heatmap_size=[48, 64], + num_output_channels=channel_cfg['num_output_channels'], + num_joints=channel_cfg['dataset_joints'], + dataset_channel=channel_cfg['dataset_channel'], + inference_channel=channel_cfg['inference_channel'], + soft_nms=False, + nms_thr=1.0, + oks_thr=0.9, + vis_thr=0.2, + # here use_gt_bbox must be true in ut, or should use predicted + # bboxes. 
+ use_gt_bbox=True, + det_bbox_thr=0.0, + bbox_file='tests/test_codebase/test_mmpose/data/coco/' + + 'person_detection_results' + + '/COCO_val2017_detections_AP_H_56_person.json', +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='TopDownAffine'), + dict(type='ToTensor'), + dict( + type='NormalizeTensor', + mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]), + dict( + type='Collect', + keys=['img'], + meta_keys=[ + 'image_file', 'center', 'scale', 'rotation', 'bbox_score', + 'flip_pairs' + ]), +] + +dataset_info = dict( + dataset_name='coco', + paper_info=dict(), + keypoint_info={ + 0: + dict(name='nose', id=0, color=[51, 153, 255], type='upper', swap=''), + 1: + dict( + name='left_eye', + id=1, + color=[51, 153, 255], + type='upper', + swap='right_eye'), + 2: + dict( + name='right_eye', + id=2, + color=[51, 153, 255], + type='upper', + swap='left_eye'), + 3: + dict( + name='left_ear', + id=3, + color=[51, 153, 255], + type='upper', + swap='right_ear'), + 4: + dict( + name='right_ear', + id=4, + color=[51, 153, 255], + type='upper', + swap='left_ear'), + 5: + dict( + name='left_shoulder', + id=5, + color=[0, 255, 0], + type='upper', + swap='right_shoulder'), + 6: + dict( + name='right_shoulder', + id=6, + color=[255, 128, 0], + type='upper', + swap='left_shoulder'), + 7: + dict( + name='left_elbow', + id=7, + color=[0, 255, 0], + type='upper', + swap='right_elbow'), + 8: + dict( + name='right_elbow', + id=8, + color=[255, 128, 0], + type='upper', + swap='left_elbow'), + 9: + dict( + name='left_wrist', + id=9, + color=[0, 255, 0], + type='upper', + swap='right_wrist'), + 10: + dict( + name='right_wrist', + id=10, + color=[255, 128, 0], + type='upper', + swap='left_wrist'), + 11: + dict( + name='left_hip', + id=11, + color=[0, 255, 0], + type='lower', + swap='right_hip'), + 12: + dict( + name='right_hip', + id=12, + color=[255, 128, 0], + type='lower', + swap='left_hip'), + 13: + dict( + name='left_knee', + id=13, + color=[0, 255, 0], + type='lower', + swap='right_knee'), + 14: + dict( + name='right_knee', + id=14, + color=[255, 128, 0], + type='lower', + swap='left_knee'), + 15: + dict( + name='left_ankle', + id=15, + color=[0, 255, 0], + type='lower', + swap='right_ankle'), + 16: + dict( + name='right_ankle', + id=16, + color=[255, 128, 0], + type='lower', + swap='left_ankle') + }, + skeleton_info={ + 0: + dict(link=('left_ankle', 'left_knee'), id=0, color=[0, 255, 0]), + 1: + dict(link=('left_knee', 'left_hip'), id=1, color=[0, 255, 0]), + 2: + dict(link=('right_ankle', 'right_knee'), id=2, color=[255, 128, 0]), + 3: + dict(link=('right_knee', 'right_hip'), id=3, color=[255, 128, 0]), + 4: + dict(link=('left_hip', 'right_hip'), id=4, color=[51, 153, 255]), + 5: + dict(link=('left_shoulder', 'left_hip'), id=5, color=[51, 153, 255]), + 6: + dict(link=('right_shoulder', 'right_hip'), id=6, color=[51, 153, 255]), + 7: + dict( + link=('left_shoulder', 'right_shoulder'), + id=7, + color=[51, 153, 255]), + 8: + dict(link=('left_shoulder', 'left_elbow'), id=8, color=[0, 255, 0]), + 9: + dict( + link=('right_shoulder', 'right_elbow'), id=9, color=[255, 128, 0]), + 10: + dict(link=('left_elbow', 'left_wrist'), id=10, color=[0, 255, 0]), + 11: + dict(link=('right_elbow', 'right_wrist'), id=11, color=[255, 128, 0]), + 12: + dict(link=('left_eye', 'right_eye'), id=12, color=[51, 153, 255]), + 13: + dict(link=('nose', 'left_eye'), id=13, color=[51, 153, 255]), + 14: + dict(link=('nose', 'right_eye'), id=14, color=[51, 153, 255]), + 15: + dict(link=('left_eye', 'left_ear'), 
id=15, color=[51, 153, 255]), + 16: + dict(link=('right_eye', 'right_ear'), id=16, color=[51, 153, 255]), + 17: + dict(link=('left_ear', 'left_shoulder'), id=17, color=[51, 153, 255]), + 18: + dict( + link=('right_ear', 'right_shoulder'), id=18, color=[51, 153, 255]) + }, + joint_weights=[ + 1., 1., 1., 1., 1., 1., 1., 1.2, 1.2, 1.5, 1.5, 1., 1., 1.2, 1.2, 1.5, + 1.5 + ], + sigmas=[ + 0.026, 0.025, 0.025, 0.035, 0.035, 0.079, 0.079, 0.072, 0.072, 0.062, + 0.062, 0.107, 0.107, 0.087, 0.087, 0.089, 0.089 + ]) + +data = dict( + samples_per_gpu=64, + workers_per_gpu=2, + test_dataloader=dict(samples_per_gpu=32), + test=dict( + type='TopDownCocoDataset', + ann_file='tests/test_codebase/test_mmpose/data/annotations/' + + 'person_keypoints_val2017.json', + img_prefix='tests/test_codebase/test_mmpose/data/val2017/', + data_cfg=data_cfg, + pipeline=test_pipeline, + dataset_info=dataset_info), +) diff --git a/tests/test_codebase/test_mmpose/test_mmpose_models.py b/tests/test_codebase/test_mmpose/test_mmpose_models.py new file mode 100644 index 0000000000..bd32927aa4 --- /dev/null +++ b/tests/test_codebase/test_mmpose/test_mmpose_models.py @@ -0,0 +1,285 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import pytest +import torch + +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Backend, Codebase, Task +from mmdeploy.utils.test import WrapModel, check_backend, get_rewrite_outputs + +try: + import_codebase(Codebase.MMPOSE) +except ImportError: + pytest.skip( + f'{Codebase.MMPOSE} is not installed.', allow_module_level=True) + + +def get_top_down_heatmap_simple_head_model(): + from mmpose.models.heads import TopdownHeatmapSimpleHead + model = TopdownHeatmapSimpleHead( + 2, + 4, + num_deconv_filters=(16, 16, 16), + loss_keypoint=dict(type='JointsMSELoss', use_target_weight=False)) + model.requires_grad_(False) + return model + + +@pytest.mark.parametrize('backend_type', + [Backend.ONNXRUNTIME, Backend.TENSORRT]) +def test_top_down_heatmap_simple_head_inference_model(backend_type: Backend): + check_backend(backend_type, True) + model = get_top_down_heatmap_simple_head_model() + model.cpu().eval() + if backend_type == Backend.TENSORRT: + deploy_cfg = mmcv.Config( + dict( + backend_config=dict( + type=backend_type.value, + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 32, 48], + opt_shape=[1, 3, 32, 48], + max_shape=[1, 3, 32, 48]))) + ]), + onnx_config=dict( + input_shape=[32, 48], output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, + task=Task.POSE_DETECTION.value))) + else: + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(input_shape=None, output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, + task=Task.POSE_DETECTION.value))) + img = torch.rand((1, 2, 32, 48)) + model_outputs = model.inference_model(img) + wrapped_model = WrapModel(model, 'inference_model') + rewrite_inputs = {'x': img} + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + if isinstance(rewrite_outputs, dict): + rewrite_outputs = rewrite_outputs['output'] + for model_output, rewrite_output in zip(model_outputs, rewrite_outputs): + if isinstance(rewrite_output, torch.Tensor): + rewrite_output = rewrite_output.cpu().numpy() + assert np.allclose( + model_output, rewrite_output, rtol=1e-03, 
atol=1e-05) + + +def get_top_down_heatmap_msmu_head_model(): + + class DummyMSMUHead(torch.nn.Module): + + def __init__(self, out_shape): + from mmpose.models.heads import TopdownHeatmapMSMUHead + super().__init__() + self.model = TopdownHeatmapMSMUHead( + out_shape, + unit_channels=2, + out_channels=17, + num_stages=1, + num_units=1, + loss_keypoint=dict( + type='JointsMSELoss', use_target_weight=False)) + + def inference_model(self, x): + assert isinstance(x, torch.Tensor) + return self.model.inference_model([[x]], flip_pairs=None) + + model = DummyMSMUHead((32, 48)) + + model.requires_grad_(False) + return model + + +@pytest.mark.parametrize('backend_type', + [Backend.ONNXRUNTIME, Backend.TENSORRT]) +def test_top_down_heatmap_msmu_head_inference_model(backend_type: Backend): + check_backend(backend_type, True) + model = get_top_down_heatmap_msmu_head_model() + model.cpu().eval() + if backend_type == Backend.TENSORRT: + deploy_cfg = mmcv.Config( + dict( + backend_config=dict( + type=backend_type.value, + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 32, 48], + opt_shape=[1, 3, 32, 48], + max_shape=[1, 3, 32, 48]))) + ]), + onnx_config=dict( + input_shape=[32, 48], output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, + task=Task.POSE_DETECTION.value))) + else: + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(input_shape=None, output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, + task=Task.POSE_DETECTION.value))) + img = torch.rand((1, 2, 32, 48)) + model_outputs = model.inference_model(img) + wrapped_model = WrapModel(model, 'inference_model') + rewrite_inputs = {'x': img} + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + if isinstance(rewrite_outputs, dict): + rewrite_outputs = rewrite_outputs['output'] + for model_output, rewrite_output in zip(model_outputs, rewrite_outputs): + if isinstance(rewrite_output, torch.Tensor): + rewrite_output = rewrite_output.cpu().numpy() + assert np.allclose( + model_output, rewrite_output, rtol=1e-03, atol=1e-05) + + +def get_cross_resolution_weighting_model(): + from mmpose.models.backbones.litehrnet import CrossResolutionWeighting + + class DummyModel(torch.nn.Module): + + def __init__(self): + super().__init__() + self.model = CrossResolutionWeighting([16, 16], ratio=8) + + def forward(self, x): + assert isinstance(x, torch.Tensor) + return self.model([x, x]) + + model = DummyModel() + model.requires_grad_(False) + return model + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_cross_resolution_weighting_forward(backend_type: Backend): + check_backend(backend_type, True) + model = get_cross_resolution_weighting_model() + model.cpu().eval() + imgs = torch.rand(1, 16, 16, 16) + + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(input_shape=None, output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, task=Task.POSE_DETECTION.value))) + rewrite_inputs = {'x': imgs} + model_outputs = model.forward(imgs) + wrapped_model = WrapModel(model, 'forward') + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + if isinstance(rewrite_outputs, dict): + rewrite_outputs = 
rewrite_outputs['output'] + for model_output, rewrite_output in zip(model_outputs, rewrite_outputs): + model_output = model_output.cpu().numpy() + if isinstance(rewrite_output, torch.Tensor): + rewrite_output = rewrite_output.detach().cpu().numpy() + assert np.allclose( + model_output, rewrite_output, rtol=1e-03, atol=1e-05) + + +def get_top_down_model(): + from mmpose.models.detectors.top_down import TopDown + model_cfg = dict( + type='TopDown', + pretrained=None, + backbone=dict(type='ResNet', depth=18), + keypoint_head=dict( + type='TopdownHeatmapSimpleHead', + in_channels=512, + out_channels=17, + loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)), + train_cfg=dict(), + test_cfg=dict( + flip_test=False, + post_process='default', + shift_heatmap=False, + modulate_kernel=11)) + model = TopDown(model_cfg['backbone'], None, model_cfg['keypoint_head'], + model_cfg['train_cfg'], model_cfg['test_cfg'], + model_cfg['pretrained']) + + model.requires_grad_(False) + return model + + +@pytest.mark.parametrize('backend_type', + [Backend.ONNXRUNTIME, Backend.TENSORRT]) +def test_top_down_forward(backend_type: Backend): + check_backend(backend_type, True) + model = get_top_down_model() + model.cpu().eval() + if backend_type == Backend.TENSORRT: + deploy_cfg = mmcv.Config( + dict( + backend_config=dict( + type=backend_type.value, + common_config=dict(max_workspace_size=1 << 30), + model_inputs=[ + dict( + input_shapes=dict( + input=dict( + min_shape=[1, 3, 32, 32], + opt_shape=[1, 3, 32, 32], + max_shape=[1, 3, 32, 32]))) + ]), + onnx_config=dict( + input_shape=[32, 32], output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, + task=Task.POSE_DETECTION.value))) + else: + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(input_shape=None, output_names=['output']), + codebase_config=dict( + type=Codebase.MMPOSE.value, + task=Task.POSE_DETECTION.value))) + img = torch.rand((1, 3, 32, 32)) + img_metas = { + 'image_file': + 'tests/test_codebase/test_mmpose' + '/data/imgs/dataset/blank.jpg', + 'center': torch.tensor([0.5, 0.5]), + 'scale': 1., + 'location': torch.tensor([0.5, 0.5]), + 'bbox_score': 0.5 + } + model_outputs = model.forward( + img, img_metas=[img_metas], return_loss=False, return_heatmap=True) + model_outputs = model_outputs['output_heatmap'] + wrapped_model = WrapModel(model, 'forward', return_loss=False) + rewrite_inputs = {'img': img} + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + if isinstance(rewrite_outputs, dict): + rewrite_outputs = rewrite_outputs['output'] + for model_output, rewrite_output in zip(model_outputs, rewrite_outputs): + if isinstance(rewrite_output, torch.Tensor): + rewrite_output = rewrite_output.cpu().numpy() + assert np.allclose( + model_output, rewrite_output, rtol=1e-03, atol=1e-05) diff --git a/tests/test_codebase/test_mmpose/test_pose_detection.py b/tests/test_codebase/test_mmpose/test_pose_detection.py new file mode 100644 index 0000000000..012c67f346 --- /dev/null +++ b/tests/test_codebase/test_mmpose/test_pose_detection.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +from tempfile import NamedTemporaryFile, TemporaryDirectory + +import mmcv +import numpy as np +import pytest +import torch + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.apis import build_task_processor +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Backend, Codebase, Task, load_config +from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper + +try: + import_codebase(Codebase.MMPOSE) +except ImportError: + pytest.skip( + f'{Codebase.MMPOSE.value} is not installed.', allow_module_level=True) + +model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py' +model_cfg = load_config(model_cfg_path)[0] +deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmpose', task='PoseDetection'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + save_file='end2end.onnx', + input_names=['input'], + output_names=['output'], + input_shape=None))) + +onnx_file = NamedTemporaryFile(suffix='.onnx').name +task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') +img_shape = (192, 256) +heatmap_shape = (48, 64) +# mmpose.apis.inference.LoadImage uses opencv, needs float32 in +# cv2.cvtColor. +img = np.random.rand(*img_shape, 3).astype(np.float32) +num_output_channels = model_cfg['data_cfg']['num_output_channels'] + + +def test_create_input(): + model_cfg = load_config(model_cfg_path)[0] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=Backend.ONNXRUNTIME.value), + codebase_config=dict( + type=Codebase.MMPOSE.value, task=Task.POSE_DETECTION.value), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + save_file='end2end.onnx', + input_names=['input'], + output_names=['output'], + input_shape=None))) + task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + inputs = task_processor.create_input(img, input_shape=img_shape) + assert isinstance(inputs, tuple) and len(inputs) == 2 + + +def test_init_pytorch_model(): + from mmpose.models.detectors.base import BasePose + model = task_processor.init_pytorch_model(None) + assert isinstance(model, BasePose) + + +@pytest.fixture +def backend_model(): + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + wrapper = SwitchBackendWrapper(ORTWrapper) + wrapper.set(outputs={ + 'output': torch.rand(1, num_output_channels, *heatmap_shape), + }) + + yield task_processor.init_backend_model(['']) + + wrapper.recover() + + +def test_init_backend_model(backend_model): + assert isinstance(backend_model, torch.nn.Module) + + +def test_run_inference(backend_model): + input_dict, _ = task_processor.create_input(img, input_shape=img_shape) + results = task_processor.run_inference(backend_model, input_dict) + assert results is not None + + +def test_visualize(backend_model): + input_dict, _ = task_processor.create_input(img, input_shape=img_shape) + results = task_processor.run_inference(backend_model, input_dict) + with TemporaryDirectory() as dir: + filename = dir + 'tmp.jpg' + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) + + +def test_get_tensor_from_input(): + input_data = {'img': torch.ones(3, 4, 5)} + inputs = task_processor.get_tensor_from_input(input_data) + assert torch.equal(inputs, torch.ones(3, 4, 5)) + + +def test_get_partition_cfg(): + try: + _ = 
task_processor.get_partition_cfg(partition_type='') + except NotImplementedError: + pass + + +def test_get_model_name(): + model_name = task_processor.get_model_name() + assert isinstance(model_name, str) and model_name is not None + + +def test_build_dataset_and_dataloader(): + from torch.utils.data import DataLoader, Dataset + dataset = task_processor.build_dataset( + dataset_cfg=model_cfg, dataset_type='test') + assert isinstance(dataset, Dataset), 'Failed to build dataset' + dataloader = task_processor.build_dataloader(dataset, 1, 1) + assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' + + +def test_single_gpu_test_and_evaluate(): + from mmcv.parallel import MMDataParallel + dataset = task_processor.build_dataset( + dataset_cfg=model_cfg, dataset_type='test') + dataloader = task_processor.build_dataloader(dataset, 1, 1) + + # Prepare dummy model + model = DummyModel(outputs=[torch.rand([1, 1000])]) + model = MMDataParallel(model, device_ids=[0]) + assert model is not None + # Run test + outputs = task_processor.single_gpu_test(model, dataloader) + assert outputs is not None + task_processor.evaluate_outputs(model_cfg, outputs, dataset) diff --git a/tests/test_codebase/test_mmpose/test_pose_detection_model.py b/tests/test_codebase/test_mmpose/test_pose_detection_model.py new file mode 100644 index 0000000000..740dc2f04b --- /dev/null +++ b/tests/test_codebase/test_mmpose/test_pose_detection_model.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from tempfile import NamedTemporaryFile + +import mmcv +import numpy as np +import pytest +import torch + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.codebase import import_codebase +from mmdeploy.utils import Backend, Codebase +from mmdeploy.utils.test import SwitchBackendWrapper, backend_checker + +IMAGE_H = 192 +IMAGE_W = 256 + +try: + import_codebase(Codebase.MMPOSE) +except ImportError: + pytest.skip( + f'{Codebase.MMPOSE} is not installed.', allow_module_level=True) + + +@backend_checker(Backend.ONNXRUNTIME) +class TestEnd2EndModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + cls.outputs = { + 'outputs': torch.rand(1, 1, IMAGE_H, IMAGE_W), + } + cls.wrapper.set(outputs=cls.outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) + + from mmdeploy.utils import load_config + model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py' + model_cfg = load_config(model_cfg_path)[0] + from mmdeploy.codebase.mmpose.deploy.pose_detection_model import \ + End2EndModel + cls.end2end_model = End2EndModel( + Backend.ONNXRUNTIME, [''], + device='cpu', + deploy_cfg=deploy_cfg, + model_cfg=model_cfg) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + def test_forward(self): + img = torch.rand(1, 3, IMAGE_H, IMAGE_W) + img_metas = [{ + 'image_file': + 'tests/test_codebase/test_mmpose' + '/data/imgs/dataset/blank.jpg', + 'center': torch.tensor([0.5, 0.5]), + 'scale': 1., + 'location': torch.tensor([0.5, 0.5]), + 'bbox_score': 0.5 + }] + results = self.end2end_model.forward(img, img_metas) + assert results is not None, 'failed to get output using '\ + 'End2EndModel' + + def test_forward_test(self): + imgs = torch.rand(2, 3, IMAGE_H, IMAGE_W) + results = 
self.end2end_model.forward_test(imgs) + assert isinstance(results[0], np.ndarray) + + def test_show_result(self): + input_img = np.zeros([IMAGE_H, IMAGE_W, 3]) + img_path = NamedTemporaryFile(suffix='.jpg').name + + pred_bbox = torch.rand(1, 5) + pred_keypoint = torch.rand((1, 10, 2)) + result = [{'bbox': pred_bbox, 'keypoints': pred_keypoint}] + self.end2end_model.show_result( + input_img, result, '', show=False, out_file=img_path) + assert osp.exists(img_path), 'Fails to create drawn image.' + + +@backend_checker(Backend.ONNXRUNTIME) +def test_build_pose_detection_model(): + from mmdeploy.utils import load_config + model_cfg_path = 'tests/test_codebase/test_mmpose/data/model.py' + model_cfg = load_config(model_cfg_path)[0] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=Backend.ONNXRUNTIME.value), + onnx_config=dict(output_names=['outputs']), + codebase_config=dict(type=Codebase.MMPOSE.value))) + + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg) + from mmdeploy.codebase.mmpose.deploy.pose_detection_model import ( + End2EndModel, build_pose_detection_model) + posedetector = build_pose_detection_model([''], model_cfg, deploy_cfg, + 'cpu') + assert isinstance(posedetector, End2EndModel) From be93956015b432872e20a3317b00c40e11692f33 Mon Sep 17 00:00:00 2001 From: RunningLeon Date: Wed, 16 Feb 2022 11:50:04 +0800 Subject: [PATCH 05/17] fix tools (#160) * add non-zero error code * update mmdeploy version --- mmdeploy/version.py | 2 +- tools/deploy.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mmdeploy/version.py b/mmdeploy/version.py index 14571e3022..106378f190 100644 --- a/mmdeploy/version.py +++ b/mmdeploy/version.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. 
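# NOTE on the tools/deploy.py change below: a bare ``exit()`` reports
# status 0 (success) to the calling shell, and negative codes wrap modulo
# 256 (``exit(-1)`` becomes 255), so this patch normalizes failures to
# ``exit(1)``. A minimal sketch of a caller that relies on the non-zero
# code (``run_deploy`` is a hypothetical helper, not part of this patch):
import subprocess


def run_deploy(*args: str) -> None:
    """Run tools/deploy.py and raise if it signals failure via exit code."""
    proc = subprocess.run(['python', 'tools/deploy.py', *args])
    if proc.returncode != 0:
        raise RuntimeError(f'deploy failed with exit code {proc.returncode}')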
from typing import Tuple -__version__ = '0.1.0' +__version__ = '0.2.0' short_version = __version__ diff --git a/tools/deploy.py b/tools/deploy.py index 9d06d24686..ef854764df 100644 --- a/tools/deploy.py +++ b/tools/deploy.py @@ -61,7 +61,7 @@ def create_process(name, target, args, kwargs, ret_value=None): if ret_value is not None: if ret_value.value != 0: logger.error(f'{name} failed.') - exit() + exit(1) else: logger.info(f'{name} success.') @@ -181,7 +181,7 @@ def main(): if not is_available_ncnn(): logger.error('ncnn support is not available.') - exit(-1) + exit(1) from mmdeploy.apis.ncnn import get_output_model_file, onnx2ncnn From 59470fef0b28e0b760c72269e0696bbdf57db7f1 Mon Sep 17 00:00:00 2001 From: TheSeriousProgrammer <26043350+TheSeriousProgrammer@users.noreply.github.com> Date: Wed, 16 Feb 2022 09:24:29 +0530 Subject: [PATCH 06/17] Lock pplcv to v0.6.1 (#143) * Lock pplcv to v0.6.1 To avoid breaking changes which arose from recent updates in pplcv , fix pplcv to v0.6.1 * Mentioned Version as ARG * Mentioned Version changes of pplcv in build.md * Update docs/en/build.md Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> * Change version stuff in cn_build.md Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com> --- docker/GPU/Dockerfile | 5 ++++- docs/en/build.md | 7 +++++-- docs/zh_cn/build.md | 3 ++- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docker/GPU/Dockerfile b/docker/GPU/Dockerfile index 60d9c75ea1..32ca12ddae 100644 --- a/docker/GPU/Dockerfile +++ b/docker/GPU/Dockerfile @@ -7,6 +7,7 @@ ARG TORCHVISION_VERSION=0.9.0 ARG ONNXRUNTIME_VERSION=1.8.1 ARG MMCV_VERSION=1.4.0 ARG CMAKE_VERSION=3.20.0 +ARG PPLCV_VERSION=0.6.1 ENV FORCE_CUDA="1" ENV DEBIAN_FRONTEND=noninteractive @@ -66,9 +67,11 @@ RUN git clone https://github.com/open-mmlab/mmdeploy &&\ pip install -e . ### build sdk -RUN git clone https://github.com/openppl-public/ppl.cv.git &&\ +RUN wget https://github.com/openppl-public/ppl.cv/archive/refs/tags/v${PPLCV_VERSION}.zip &&\ + unzip v${PPLCV_VERSION}.zip && mv ppl.cv-${PPLCV_VERSION} ppl.cv &&\ cd ppl.cv &&\ ./build.sh cuda + RUN cd /root/workspace/mmdeploy &&\ rm -rf build/CM* &&\ mkdir -p build && cd build &&\ diff --git a/docs/en/build.md b/docs/en/build.md index 0be0f137dc..005950d1ab 100644 --- a/docs/en/build.md +++ b/docs/en/build.md @@ -137,8 +137,11 @@ Each package's installation command is given based on Ubuntu 18.04. A high-performance image processing library of openPPL supporting x86 and cuda platforms.
It is **OPTIONAL** which only be needed if `cuda` platform is required. - ```bash - git clone git@github.com:openppl-public/ppl.cv.git + + Using v0.6.1, since latest updates have broughtup breaking changes + ```Bash + wget https://github.com/openppl-public/ppl.cv/archive/refs/tags/v0.6.1.zip + unzip v0.6.1.zip && mv ppl.cv-0.6.1 ppl.cv cd ppl.cv ./build.sh cuda ``` diff --git a/docs/zh_cn/build.md b/docs/zh_cn/build.md index 9fd283010c..3e0717ddc8 100644 --- a/docs/zh_cn/build.md +++ b/docs/zh_cn/build.md @@ -135,7 +135,8 @@ pip install -e . 此依赖项为可选项,只有在cuda平台下,才需安装。安装命令如下所示: ```bash - git clone git@github.com:openppl-public/ppl.cv.git + wget https://github.com/openppl-public/ppl.cv/archive/refs/tags/v0.6.1.zip + unzip v0.6.1.zip && mv ppl.cv-0.6.1 ppl.cv cd ppl.cv ./build.sh cuda ``` From 141d956636e5bc45ecdb240ef2595cdcb87ff053 Mon Sep 17 00:00:00 2001 From: Haofan Wang Date: Tue, 22 Feb 2022 15:31:21 +0800 Subject: [PATCH 07/17] [Docs] Update get_started.md (#173) * Update get_started.md * Update get_started.md --- docs/en/get_started.md | 2 +- docs/zh_cn/get_started.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/get_started.md b/docs/en/get_started.md index 9336a06c67..a151c2bb01 100644 --- a/docs/en/get_started.md +++ b/docs/en/get_started.md @@ -49,7 +49,7 @@ Now you can do model inference with the APIs provided by the backend. But what i ```python from mmdeploy.apis import inference_model -result = inference_model(model_cfg, deploy_cfg, backend_models, img=img, device=device) +result = inference_model(model_cfg, deploy_cfg, backend_files, img=img, device=device) ``` The `inference_model` will create a wrapper module and do the inference for you. The result has the same format as the original OpenMMLab repo. diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md index 414bc481d3..f817128296 100644 --- a/docs/zh_cn/get_started.md +++ b/docs/zh_cn/get_started.md @@ -49,7 +49,7 @@ python ${MMDEPLOY_DIR}/tools/deploy.py \ ```python from mmdeploy.apis import inference_model -result = inference_model(model_cfg, deploy_cfg, backend_models, img=img, device=device) +result = inference_model(model_cfg, deploy_cfg, backend_files, img=img, device=device) ``` `inference_model`会创建一个对后端模型的封装,通过该封装进行推理。推理的结果会保持与OpenMMLab中原模型同样的格式。 From 2c25eff32c75ae077690bb133a78aa38dc19e4c9 Mon Sep 17 00:00:00 2001 From: "q.yao" Date: Wed, 23 Feb 2022 10:59:05 +0800 Subject: [PATCH 08/17] [Enhancement] optimize delta2bboxes (#152) * optimize delta2bboxes * ncnn update --- .../mmdet/core/bbox/delta_xywh_bbox_coder.py | 138 +++++++----------- 1 file changed, 53 insertions(+), 85 deletions(-) diff --git a/mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py b/mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py index 1b9b7904a5..56d71499cb 100644 --- a/mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py +++ b/mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py @@ -92,48 +92,42 @@ def delta2bbox(ctx, bboxes (Tensor): Boxes with shape (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. 
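    Note:
        In vectorized form, the decoding below computes::

            wh     = rois[..., 2:] - rois[..., :2]
            center = (rois[..., :2] + rois[..., 2:]) / 2 + wh * dxy
            wh_new = wh * exp(dwh)
            x1y1   = center - wh_new / 2
            x2y2   = center + wh_new / 2

        where ``dxy`` and ``dwh`` are the first and last two channels of
        the de-normalized deltas ``deltas * stds + means``, clamped (and
        optionally center-clamped) before the exponential.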
""" - means = deltas.new_tensor(means).view(1, - -1).repeat(1, - deltas.size(-1) // 4) - stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) - denorm_deltas = deltas * stds + means - dx = denorm_deltas[..., 0::4] - dy = denorm_deltas[..., 1::4] - dw = denorm_deltas[..., 2::4] - dh = denorm_deltas[..., 3::4] + means = deltas.new_tensor(means).view(1, -1) + stds = deltas.new_tensor(stds).view(1, -1) + delta_shape = deltas.shape + reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4)) + denorm_deltas = reshaped_deltas * stds + means - x1, y1 = rois[..., 0], rois[..., 1] - x2, y2 = rois[..., 2], rois[..., 3] - # Compute center of each roi - px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx) - py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy) - # Compute width/height of each roi - pw = (x2 - x1).unsqueeze(-1).expand_as(dw) - ph = (y2 - y1).unsqueeze(-1).expand_as(dh) + dxy = denorm_deltas[..., :2] + dwh = denorm_deltas[..., 2:] - dx_width = pw * dx - dy_height = ph * dy + xy1 = rois[..., None, :2] + xy2 = rois[..., None, 2:] + + pxy = (xy1 + xy2) * 0.5 + pwh = xy2 - xy1 + dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: - dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) - dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) - dw = torch.clamp(dw, max=max_ratio) - dh = torch.clamp(dh, max=max_ratio) + dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) + dwh = torch.clamp(dwh, max=max_ratio) else: - dw = dw.clamp(min=-max_ratio, max=max_ratio) - dh = dh.clamp(min=-max_ratio, max=max_ratio) + dwh = dwh.clamp(min=-max_ratio, max=max_ratio) + # Use exp(network energy) to enlarge/shrink each roi - gw = pw * dw.exp() - gh = ph * dh.exp() + half_gwh = pwh * dwh.exp() * 0.5 # Use network energy to shift the center of each roi - gx = px + dx_width - gy = py + dy_height + gxy = pxy + dxy_wh + # Convert center-xy/width/height to top-left, bottom-right - x1 = gx - gw * 0.5 - y1 = gy - gh * 0.5 - x2 = gx + gw * 0.5 - y2 = gy + gh * 0.5 + xy1 = gxy - half_gwh + xy2 = gxy + half_gwh + + x1 = xy1[..., 0] + y1 = xy1[..., 1] + x2 = xy2[..., 0] + y2 = xy2[..., 1] if clip_border and max_shape is not None: from mmdeploy.codebase.mmdet.deploy import clip_bboxes @@ -190,68 +184,42 @@ def delta2bbox__ncnn(ctx, or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. 
""" - means = deltas.new_tensor(means).view(1, 1, - -1).repeat(1, deltas.size(-2), - deltas.size(-1) // 4).data - stds = deltas.new_tensor(stds).view(1, 1, - -1).repeat(1, deltas.size(-2), - deltas.size(-1) // 4).data - denorm_deltas = deltas * stds + means - if denorm_deltas.shape[-1] == 4: - dx = denorm_deltas[..., 0:1] - dy = denorm_deltas[..., 1:2] - dw = denorm_deltas[..., 2:3] - dh = denorm_deltas[..., 3:4] - else: - dx = denorm_deltas[..., 0::4] - dy = denorm_deltas[..., 1::4] - dw = denorm_deltas[..., 2::4] - dh = denorm_deltas[..., 3::4] + means = deltas.new_tensor(means).view(1, 1, 1, -1).data + stds = deltas.new_tensor(stds).view(1, 1, 1, -1).data + delta_shape = deltas.shape + reshaped_deltas = deltas.view(delta_shape[:-1] + (-1, 4)) + denorm_deltas = reshaped_deltas * stds + means - x1, y1 = rois[..., 0:1], rois[..., 1:2] - x2, y2 = rois[..., 2:3], rois[..., 3:4] + dxy = denorm_deltas[..., :2] + dwh = denorm_deltas[..., 2:] - # Compute center of each roi - px = (x1 + x2) * 0.5 - py = (y1 + y2) * 0.5 - # Compute width/height of each roi - pw = x2 - x1 - ph = y2 - y1 + xy1 = rois[..., None, :2] + xy2 = rois[..., None, 2:] - # do not use expand unless necessary - # since expand is a custom ops - if px.shape[-1] != 4: - px = px.expand_as(dx) - if py.shape[-1] != 4: - py = py.expand_as(dy) - if pw.shape[-1] != 4: - pw = pw.expand_as(dw) - if px.shape[-1] != 4: - ph = ph.expand_as(dh) - - dx_width = pw * dx - dy_height = ph * dy + pxy = (xy1 + xy2) * 0.5 + pwh = xy2 - xy1 + dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: - dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) - dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) - dw = torch.clamp(dw, max=max_ratio) - dh = torch.clamp(dh, max=max_ratio) + dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) + dwh = torch.clamp(dwh, max=max_ratio) else: - dw = dw.clamp(min=-max_ratio, max=max_ratio) - dh = dh.clamp(min=-max_ratio, max=max_ratio) + dwh = dwh.clamp(min=-max_ratio, max=max_ratio) + # Use exp(network energy) to enlarge/shrink each roi - gw = pw * dw.exp() - gh = ph * dh.exp() + half_gwh = pwh * dwh.exp() * 0.5 # Use network energy to shift the center of each roi - gx = px + dx_width - gy = py + dy_height + gxy = pxy + dxy_wh + # Convert center-xy/width/height to top-left, bottom-right - x1 = gx - gw * 0.5 - y1 = gy - gh * 0.5 - x2 = gx + gw * 0.5 - y2 = gy + gh * 0.5 + xy1 = gxy - half_gwh + xy2 = gxy + half_gwh + + x1 = xy1[..., 0] + y1 = xy1[..., 1] + x2 = xy2[..., 0] + y2 = xy2[..., 1] if clip_border and max_shape is not None: from mmdeploy.codebase.mmdet.deploy import clip_bboxes From f0c110e6e60454e82bf47c184d6d61b7846f6c58 Mon Sep 17 00:00:00 2001 From: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Date: Wed, 23 Feb 2022 11:38:40 +0800 Subject: [PATCH 09/17] Add a new pre-commit-hook to automatically add a copyright (#166) * Add a new pre-commit-hook to automatically add copyright * Add a new pre-commit-hook to automatically add copyright * append 'demo' and 'tools' directories to .pre-commit-config.yaml --- .pre-commit-config.yaml | 6 ++++++ demo/demo_rewrite.py | 1 + mmdeploy/codebase/mmdet/core/ops/detection_output.py | 1 + mmdeploy/codebase/mmdet/core/ops/prior_box.py | 1 + mmdeploy/codebase/mmdet/models/backbones.py | 1 + .../codebase/mmdet/models/dense_heads/base_dense_head.py | 1 + mmdeploy/codebase/mmdet/models/necks.py | 1 + tests/test_apis/test_onnx2ncnn.py | 1 + tools/onnx2ncnn.py | 1 + 9 files changed, 14 insertions(+) diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 029a2dcd69..1540e469de 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,3 +46,9 @@ repos: hooks: - id: docformatter args: ["--in-place", "--wrap-descriptions", "79"] + + - repo: https://github.com/open-mmlab/pre-commit-hooks + rev: v0.2.0 + hooks: + - id: check-copyright + args: ["csrc", "mmdeploy", "tests", "demo", "tools"] diff --git a/demo/demo_rewrite.py b/demo/demo_rewrite.py index 5adc581f30..a624c26eba 100644 --- a/demo/demo_rewrite.py +++ b/demo/demo_rewrite.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import asyncio import os import shutil diff --git a/mmdeploy/codebase/mmdet/core/ops/detection_output.py b/mmdeploy/codebase/mmdet/core/ops/detection_output.py index 48d9f84415..67809660de 100644 --- a/mmdeploy/codebase/mmdet/core/ops/detection_output.py +++ b/mmdeploy/codebase/mmdet/core/ops/detection_output.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch diff --git a/mmdeploy/codebase/mmdet/core/ops/prior_box.py b/mmdeploy/codebase/mmdet/core/ops/prior_box.py index 24efb02dee..28d76f95bf 100644 --- a/mmdeploy/codebase/mmdet/core/ops/prior_box.py +++ b/mmdeploy/codebase/mmdet/core/ops/prior_box.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch diff --git a/mmdeploy/codebase/mmdet/models/backbones.py b/mmdeploy/codebase/mmdet/models/backbones.py index 2012df0241..6520702adb 100644 --- a/mmdeploy/codebase/mmdet/models/backbones.py +++ b/mmdeploy/codebase/mmdet/models/backbones.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdeploy.core import FUNCTION_REWRITER diff --git a/mmdeploy/codebase/mmdet/models/dense_heads/base_dense_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/base_dense_head.py index d4413182a9..3c94c16250 100644 --- a/mmdeploy/codebase/mmdet/models/dense_heads/base_dense_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/base_dense_head.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, DistancePointBBoxCoder, TBLRBBoxCoder) diff --git a/mmdeploy/codebase/mmdet/models/necks.py b/mmdeploy/codebase/mmdet/models/necks.py index f430124381..2931de9b0a 100644 --- a/mmdeploy/codebase/mmdet/models/necks.py +++ b/mmdeploy/codebase/mmdet/models/necks.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdeploy.core import FUNCTION_REWRITER diff --git a/tests/test_apis/test_onnx2ncnn.py b/tests/test_apis/test_onnx2ncnn.py index 57199e37ff..8073b77548 100644 --- a/tests/test_apis/test_onnx2ncnn.py +++ b/tests/test_apis/test_onnx2ncnn.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import tempfile diff --git a/tools/onnx2ncnn.py b/tools/onnx2ncnn.py index 6f2bb6190b..0bddd6e036 100644 --- a/tools/onnx2ncnn.py +++ b/tools/onnx2ncnn.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
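# NOTE: the check-copyright hook registered in .pre-commit-config.yaml
# above is what adds headers like the one on the previous line. A minimal
# sketch of the idea (a simplified stand-in, not the actual
# open-mmlab/pre-commit-hooks implementation):
import pathlib

COPYRIGHT = '# Copyright (c) OpenMMLab. All rights reserved.\n'


def ensure_copyright(root: str) -> int:
    """Prepend the header to every .py file under `root` that lacks it."""
    fixed = 0
    for path in sorted(pathlib.Path(root).rglob('*.py')):
        text = path.read_text(encoding='utf-8')
        if not text.startswith(COPYRIGHT):
            path.write_text(COPYRIGHT + text, encoding='utf-8')
            fixed += 1
    return fixed


# e.g. for directory in ('csrc', 'mmdeploy', 'tests', 'demo', 'tools'):
#          ensure_copyright(directory)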
import argparse import logging From aeee4aa48f81b3bcfb046829a40a97131805d84d Mon Sep 17 00:00:00 2001 From: VVsssssk <88368822+VVsssssk@users.noreply.github.com> Date: Wed, 23 Feb 2022 20:18:30 +0800 Subject: [PATCH 10/17] [Docs]add mmrotate link (#171) * add mmrotate link * Update README_zh-CN.md * fix docs --- README.md | 1 + README_zh-CN.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index ab05a73ad0..503588d833 100644 --- a/README.md +++ b/README.md @@ -128,3 +128,4 @@ If you find this project useful in your research, please consider cite: - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning Toolbox and Benchmark. - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab Model Compression Toolbox and Benchmark. - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. diff --git a/README_zh-CN.md b/README_zh-CN.md index 3ccf14e8af..0ed95fa110 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -127,6 +127,7 @@ MMDeploy 是一个开源深度学习模型部署工具箱,它是 [OpenMMLab](h - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准 - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 ## 欢迎加入 OpenMMLab 社区 From 486d45e739d0b2c606869b07676290b5671e133e Mon Sep 17 00:00:00 2001 From: tripleMu <92794867+q3394101@users.noreply.github.com> Date: Thu, 24 Feb 2022 16:09:58 +0800 Subject: [PATCH 11/17] [FIX] update docs and configs about openvino ssd deployment (#175) * add openvino static config for docs' ssd deploy * fix docs of openvino.md * fix ssd openvino deployment * rename openvino config * remove some files --- configs/mmdet/_base_/base_openvino_dynamic-300x300.py | 6 ++++++ .../mmdet/detection/detection_openvino_dynamic-300x300.py | 1 + docs/en/backends/openvino.md | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 configs/mmdet/_base_/base_openvino_dynamic-300x300.py create mode 100644 configs/mmdet/detection/detection_openvino_dynamic-300x300.py diff --git a/configs/mmdet/_base_/base_openvino_dynamic-300x300.py b/configs/mmdet/_base_/base_openvino_dynamic-300x300.py new file mode 100644 index 0000000000..ae1116df91 --- /dev/null +++ b/configs/mmdet/_base_/base_openvino_dynamic-300x300.py @@ -0,0 +1,6 @@ +_base_ = ['./base_dynamic.py', '../../_base_/backends/openvino.py'] + +onnx_config = dict(input_shape=None) + +backend_config = dict( + model_inputs=[dict(opt_shapes=dict(input=[1, 3, 300, 300]))]) diff --git a/configs/mmdet/detection/detection_openvino_dynamic-300x300.py b/configs/mmdet/detection/detection_openvino_dynamic-300x300.py new file mode 100644 index 0000000000..1df7d12114 --- /dev/null +++ b/configs/mmdet/detection/detection_openvino_dynamic-300x300.py @@ -0,0 +1 @@ +_base_ = ['../_base_/base_openvino_dynamic.py'] diff --git a/docs/en/backends/openvino.md b/docs/en/backends/openvino.md index d043c578ac..12a6686d36 100644 --- a/docs/en/backends/openvino.md +++ b/docs/en/backends/openvino.md @@ -27,7 +27,7 @@ sudo apt-get install libpython3.7 Example: ```bash python tools/deploy.py \ - configs/mmdet/detection/detection_openvino_dynamic.py \ + configs/mmdet/detection/detection_openvino_static-300x300.py \ /mmdetection_dir/mmdetection/configs/ssd/ssd300_coco.py \ 
/tmp/snapshots/ssd300_coco_20210803_015428-d231a06e.pth \ tests/data/tiger.jpeg \ From e9ee21fc1d64590f941fb96bff717537c3f6b3a6 Mon Sep 17 00:00:00 2001 From: "q.yao" Date: Thu, 24 Feb 2022 16:10:42 +0800 Subject: [PATCH 12/17] support export hardsigmoid in torch<=1.8 (#169) * support export hardsigmoid in torch<=1.8 * fix lint --- mmdeploy/pytorch/ops/__init__.py | 4 +++- mmdeploy/pytorch/ops/hardsigmoid.py | 12 ++++++++++++ tests/test_pytorch/test_pytorch_ops.py | 7 +++++++ 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 mmdeploy/pytorch/ops/hardsigmoid.py diff --git a/mmdeploy/pytorch/ops/__init__.py b/mmdeploy/pytorch/ops/__init__.py index 48f9c90137..0608aadf79 100644 --- a/mmdeploy/pytorch/ops/__init__.py +++ b/mmdeploy/pytorch/ops/__init__.py @@ -3,6 +3,7 @@ adaptive_avg_pool2d__default, adaptive_avg_pool3d__default) from .grid_sampler import grid_sampler__default +from .hardsigmoid import hardsigmoid__default from .instance_norm import instance_norm__tensorrt from .lstm import generic_rnn__ncnn from .squeeze import squeeze__default @@ -10,5 +11,6 @@ __all__ = [ 'adaptive_avg_pool1d__default', 'adaptive_avg_pool2d__default', 'adaptive_avg_pool3d__default', 'grid_sampler__default', - 'instance_norm__tensorrt', 'generic_rnn__ncnn', 'squeeze__default' + 'hardsigmoid__default', 'instance_norm__tensorrt', 'generic_rnn__ncnn', + 'squeeze__default' ] diff --git a/mmdeploy/pytorch/ops/hardsigmoid.py b/mmdeploy/pytorch/ops/hardsigmoid.py new file mode 100644 index 0000000000..a4d14173ed --- /dev/null +++ b/mmdeploy/pytorch/ops/hardsigmoid.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# Modified from: +# https://github.com/pytorch/pytorch/blob/9ade03959392e5a90b74261012de1d806cab2253/torch/onnx/symbolic_opset9.py +from mmdeploy.core import SYMBOLIC_REWRITER + + +@SYMBOLIC_REWRITER.register_symbolic( + 'hardsigmoid', is_pytorch=True, arg_descriptors=['v']) +def hardsigmoid__default(ctx, g, self): + """Support export hardsigmoid This rewrite enable export hardsigmoid in + torch<=1.8.2.""" + return g.op('HardSigmoid', self, alpha_f=1 / 6) diff --git a/tests/test_pytorch/test_pytorch_ops.py b/tests/test_pytorch/test_pytorch_ops.py index 69c9e12ed7..9a03148817 100644 --- a/tests/test_pytorch/test_pytorch_ops.py +++ b/tests/test_pytorch/test_pytorch_ops.py @@ -116,3 +116,10 @@ def test_squeeze(self): nodes = get_model_onnx_nodes(model, x) assert nodes[0].attribute[0].ints == [0] assert nodes[0].op_type == 'Squeeze' + + +def test_hardsigmoid(): + x = torch.rand(1, 2, 3, 4) + model = torch.nn.Hardsigmoid().eval() + nodes = get_model_onnx_nodes(model, x) + assert nodes[0].op_type == 'HardSigmoid' From 640aa03538e370d247fc71e3c2aa36c72a82f177 Mon Sep 17 00:00:00 2001 From: lzhangzz Date: Thu, 24 Feb 2022 20:08:44 +0800 Subject: [PATCH 13/17] Support Windows (#106) * minor changes * support windows * fix GCC build * fix lint * reformat * fix Windows build * fix GCC build * search backend ops for onnxruntime * fix lint * fix lint * code clean-up * code clean-up * fix clang build * fix trt support * fix cmake for ncnn * fix cmake for openvino * fix SDK Python API * handle ops for other backends (ncnn, trt) * handle SDK Python API library location * robustify linkage * fix cuda * minor fix for openvino & ncnn * use CMAKE_CUDA_ARCHITECTURES if set * fix cuda preprocessor * fix misc * fix pplnn & pplcv, drop support for pplcv<0.6.0 * robustify cmake * update build.md (#2) * build dynamic modules as module library & fix demo (partially) * fix 
candidate path for mmdeploy_python * move "enable CUDA" to cmake config for demo * refine demo cmake * add comment * fix ubuntu build * revert docs/en/build.md * fix C API * fix lint * Windows build doc (#3) * check in docs related to mmdeploy build on windows * update build guide on windows platform * update build guide on windows platform * make path of thirdparty libraries consistent * make path consistency * correct build command for custom ops * correct build command for sdk * update sdk build instructions * update doc * correct build command * fix lint * correct build command and fix lint Co-authored-by: lvhan * trailing whitespace (#4) * minor fix * fix sr sdk model * fix type deduction * fix cudaFree after driver shutting down * update ppl.cv installation warning (#5) * fix device allocator threshold & fix lint * update doc (#6) * update ppl.cv installation warning * missing 'git clone' Co-authored-by: chenxin Co-authored-by: zhangli Co-authored-by: lvhan028 Co-authored-by: lvhan --- .gitignore | 4 + CMakeLists.txt | 51 ++- cmake/MMDeploy.cmake | 151 ++++++++ cmake/MMDeployConfig.cmake.in | 17 +- cmake/common.cmake | 108 ------ cmake/cuda.cmake | 44 ++- cmake/loader.cpp.in | 39 ++ csrc/CMakeLists.txt | 1 + csrc/apis/c/CMakeLists.txt | 7 +- csrc/apis/c/classifier.cpp | 27 +- csrc/apis/c/classifier.h | 26 +- csrc/apis/c/common.h | 18 +- csrc/apis/c/detector.cpp | 27 +- csrc/apis/c/detector.h | 26 +- csrc/apis/c/handle.h | 4 +- csrc/apis/c/model.cpp | 14 +- csrc/apis/c/model.h | 14 +- csrc/apis/c/restorer.cpp | 8 +- csrc/apis/c/restorer.h | 24 +- csrc/apis/c/segmentor.cpp | 26 +- csrc/apis/c/segmentor.h | 24 +- csrc/apis/c/text_detector.cpp | 29 +- csrc/apis/c/text_detector.h | 29 +- csrc/apis/c/text_recognizer.cpp | 8 +- csrc/apis/c/text_recognizer.h | 35 +- csrc/apis/python/CMakeLists.txt | 11 +- csrc/archive/CMakeLists.txt | 1 - csrc/archive/json_archive.h | 2 +- csrc/archive/value_archive.h | 4 + csrc/backend_ops/CMakeLists.txt | 32 +- csrc/backend_ops/ncnn/CMakeLists.txt | 31 +- csrc/backend_ops/ncnn/ops/CMakeLists.txt | 26 +- csrc/backend_ops/ncnn/ops/ncnn_ops_register.h | 7 +- .../ncnn/pyncnn_ext/CMakeLists.txt | 5 +- csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp | 2 +- csrc/backend_ops/onnxruntime/CMakeLists.txt | 31 +- .../onnxruntime/common/onnxruntime_register.h | 5 +- .../onnxruntime/onnxruntime_register.cpp | 1 - csrc/backend_ops/tensorrt/CMakeLists.txt | 40 +-- csrc/codebase/CMakeLists.txt | 4 +- csrc/codebase/common.h | 8 +- csrc/codebase/mmcls/CMakeLists.txt | 6 +- csrc/codebase/mmcls/linear_cls.cpp | 11 +- csrc/codebase/mmcls/mmcls.cpp | 8 +- csrc/codebase/mmcls/mmcls.h | 7 +- csrc/codebase/mmdet/CMakeLists.txt | 9 +- csrc/codebase/mmdet/instance_segmentation.cpp | 32 +- csrc/codebase/mmdet/mmdet.cpp | 8 +- csrc/codebase/mmdet/mmdet.h | 10 +- csrc/codebase/mmdet/object_detection.cpp | 24 +- csrc/codebase/mmedit/CMakeLists.txt | 7 +- csrc/codebase/mmedit/mmedit.cpp | 8 +- csrc/codebase/mmedit/mmedit.h | 8 +- csrc/codebase/mmedit/restorer.cpp | 4 +- csrc/codebase/mmocr/CMakeLists.txt | 8 +- csrc/codebase/mmocr/crnn.cpp | 13 +- csrc/codebase/mmocr/dbnet.cpp | 18 +- csrc/codebase/mmocr/mmocr.cpp | 8 +- csrc/codebase/mmocr/mmocr.h | 8 +- csrc/codebase/mmocr/resize_ocr.cpp | 12 +- csrc/codebase/mmocr/warp.cpp | 2 +- csrc/codebase/mmseg/CMakeLists.txt | 7 +- csrc/codebase/mmseg/mmseg.cpp | 8 +- csrc/codebase/mmseg/mmseg.h | 8 +- csrc/codebase/mmseg/segment.cpp | 20 +- csrc/core/CMakeLists.txt | 17 +- csrc/core/device.h | 19 +- csrc/core/device_impl.cpp | 2 +- 
csrc/core/graph.cpp | 13 +- csrc/core/graph.h | 20 +- csrc/core/logger.cpp | 3 + csrc/core/logger.h | 20 +- csrc/core/macro.h | 117 +++++- csrc/core/mat.h | 2 +- csrc/core/model.cpp | 32 +- csrc/core/model.h | 9 +- csrc/core/module.cpp | 6 +- csrc/core/module.h | 5 +- csrc/core/net.cpp | 6 +- csrc/core/net.h | 2 + csrc/core/operator.cpp | 2 + csrc/core/operator.h | 25 +- csrc/core/registry.cpp | 46 +++ csrc/core/registry.h | 97 +++-- csrc/core/serialization.h | 64 +--- csrc/core/status_code.h | 5 +- csrc/core/tensor.cpp | 20 +- csrc/core/tensor.h | 2 +- csrc/core/utils/device_utils.h | 6 +- csrc/core/utils/filesystem.h | 15 + csrc/core/utils/formatter.h | 2 +- csrc/core/utils/source_location.h | 2 +- csrc/core/utils/stacktrace.h | 1 + csrc/core/value.h | 8 +- csrc/device/cpu/CMakeLists.txt | 12 +- csrc/device/cuda/CMakeLists.txt | 10 +- csrc/device/cuda/buddy_allocator.h | 13 +- csrc/device/cuda/cuda_builtin_kernels.cu | 6 +- csrc/device/cuda/cuda_device.cpp | 6 +- csrc/device/cuda/cuda_device.h | 10 + csrc/device/cuda/default_allocator.h | 14 +- csrc/device/cuda/linear_allocator.h | 6 +- csrc/device/device_allocator.h | 18 +- csrc/experimental/collection.h | 186 +++++----- csrc/experimental/module_adapter.h | 2 +- csrc/experimental/token.h | 140 ++++---- csrc/graph/CMakeLists.txt | 6 +- csrc/graph/common.cpp | 2 +- csrc/graph/common.h | 5 +- csrc/graph/flatten.cpp | 2 +- csrc/graph/inference.cpp | 4 +- csrc/graph/pipeline.cpp | 4 +- csrc/graph/task.cpp | 7 +- csrc/model/CMakeLists.txt | 15 +- csrc/model/directory_model_impl.cpp | 11 +- csrc/model/zip_model_impl.cpp | 31 +- csrc/net/CMakeLists.txt | 6 +- csrc/net/ncnn/CMakeLists.txt | 28 +- csrc/net/ncnn/ncnn_net.cpp | 5 +- csrc/net/net_module.cpp | 14 +- csrc/net/openvino/CMakeLists.txt | 6 +- csrc/net/openvino/openvino_net.cpp | 32 +- csrc/net/ort/CMakeLists.txt | 14 +- csrc/net/ort/ort_net.cpp | 25 +- csrc/net/ppl/CMakeLists.txt | 8 +- csrc/net/ppl/ppl_net.cpp | 25 +- csrc/net/trt/CMakeLists.txt | 16 +- csrc/net/trt/trt_net.cpp | 25 +- csrc/preprocess/CMakeLists.txt | 7 +- csrc/preprocess/cpu/CMakeLists.txt | 11 +- csrc/preprocess/cpu/pad_impl.cpp | 2 +- csrc/preprocess/cuda/CMakeLists.txt | 38 +- csrc/preprocess/cuda/crop_impl.cpp | 6 +- csrc/preprocess/cuda/load_impl.cpp | 8 +- csrc/preprocess/cuda/normalize.cu | 4 +- csrc/preprocess/cuda/normalize_impl.cpp | 6 +- csrc/preprocess/cuda/pad_impl.cpp | 18 +- csrc/preprocess/cuda/resize_impl.cpp | 20 +- csrc/preprocess/transform/CMakeLists.txt | 26 +- csrc/preprocess/transform/collect.cpp | 8 +- csrc/preprocess/transform/collect.h | 6 +- csrc/preprocess/transform/compose.cpp | 6 +- csrc/preprocess/transform/compose.h | 2 +- csrc/preprocess/transform/crop.cpp | 7 +- csrc/preprocess/transform/crop.h | 8 +- csrc/preprocess/transform/image2tensor.cpp | 7 +- csrc/preprocess/transform/image2tensor.h | 6 +- csrc/preprocess/transform/load.cpp | 9 +- csrc/preprocess/transform/load.h | 6 +- csrc/preprocess/transform/normalize.cpp | 14 +- csrc/preprocess/transform/normalize.h | 6 +- csrc/preprocess/transform/pad.cpp | 10 +- csrc/preprocess/transform/pad.h | 9 +- csrc/preprocess/transform/resize.cpp | 18 +- csrc/preprocess/transform/resize.h | 9 +- csrc/preprocess/transform/transform.cpp | 2 + csrc/preprocess/transform/transform.h | 31 +- csrc/preprocess/transform_module.cpp | 11 +- csrc/utils/CMakeLists.txt | 3 + csrc/utils/opencv/CMakeLists.txt | 17 + .../cpu => utils/opencv}/opencv_utils.cpp | 20 +- .../cpu => utils/opencv}/opencv_utils.h | 38 +- demo/csrc/CMakeLists.txt | 13 +- 
demo/csrc/image_classification.cpp | 2 +- demo/csrc/image_restorer.cpp | 2 +- demo/csrc/image_segmentation.cpp | 4 +- demo/csrc/object_detection.cpp | 2 +- demo/csrc/ocr.cpp | 2 +- docs/en/build/linux.md | 1 + docs/en/build/windows.md | 1 + docs/zh_cn/build/linux.md | 1 + docs/zh_cn/build/windows.md | 336 ++++++++++++++++++ mmdeploy/backend/ncnn/init_plugins.py | 27 +- mmdeploy/backend/onnxruntime/init_plugins.py | 16 +- mmdeploy/backend/sdk/__init__.py | 20 +- mmdeploy/backend/tensorrt/init_plugins.py | 16 +- .../mmedit/deploy/super_resolution_model.py | 6 +- mmdeploy/utils/__init__.py | 4 +- mmdeploy/utils/utils.py | 20 ++ tests/test_csrc/CMakeLists.txt | 95 ++--- .../test_csrc/archive/test_value_archive.cpp | 14 +- tests/test_csrc/capi/test_classifier.cpp | 10 +- tests/test_csrc/capi/test_detector.cpp | 21 +- tests/test_csrc/capi/test_model.cpp | 2 +- tests/test_csrc/capi/test_restorer.cpp | 4 +- tests/test_csrc/capi/test_segmentor.cpp | 4 +- tests/test_csrc/capi/test_text_detector.cpp | 10 +- tests/test_csrc/capi/test_text_recognizer.cpp | 16 +- tests/test_csrc/core/test_mat.cpp | 2 + tests/test_csrc/core/test_status_code.cpp | 4 +- tests/test_csrc/core/test_token.cpp | 36 -- tests/test_csrc/core/test_value.cpp | 4 +- tests/test_csrc/device/test_cpu_device.cpp | 87 ----- .../test_csrc/model/test_directory_model.cpp | 8 +- tests/test_csrc/model/test_model.cpp | 5 +- tests/test_csrc/model/test_zip_model.cpp | 12 +- tests/test_csrc/net/test_ncnn_net.cpp | 2 +- tests/test_csrc/net/test_openvino_net.cpp | 2 +- tests/test_csrc/net/test_ort_net.cpp | 2 +- tests/test_csrc/net/test_ppl_net.cpp | 2 +- tests/test_csrc/net/test_trt_net.cpp | 2 +- tests/test_csrc/preprocess/test_compose.cpp | 2 +- tests/test_csrc/preprocess/test_crop.cpp | 2 +- .../preprocess/test_image2tensor.cpp | 2 +- tests/test_csrc/preprocess/test_load.cpp | 2 +- tests/test_csrc/preprocess/test_normalize.cpp | 2 +- tests/test_csrc/preprocess/test_pad.cpp | 2 +- tests/test_csrc/preprocess/test_resize.cpp | 2 +- tests/test_csrc/test_resource.h | 43 +-- 209 files changed, 2208 insertions(+), 1572 deletions(-) create mode 100644 cmake/MMDeploy.cmake delete mode 100644 cmake/common.cmake create mode 100644 cmake/loader.cpp.in create mode 100644 csrc/core/registry.cpp create mode 100644 csrc/core/utils/filesystem.h create mode 100644 csrc/utils/CMakeLists.txt create mode 100644 csrc/utils/opencv/CMakeLists.txt rename csrc/{preprocess/cpu => utils/opencv}/opencv_utils.cpp (91%) rename csrc/{preprocess/cpu => utils/opencv}/opencv_utils.h (69%) create mode 100644 docs/en/build/linux.md create mode 100644 docs/en/build/windows.md create mode 100644 docs/zh_cn/build/linux.md create mode 100644 docs/zh_cn/build/windows.md delete mode 100644 tests/test_csrc/core/test_token.cpp diff --git a/.gitignore b/.gitignore index 09a9677680..e5d951cc20 100644 --- a/.gitignore +++ b/.gitignore @@ -131,3 +131,7 @@ work_dirs/ # the generated header files /tests/test_csrc/test_define.h + +# +!docs/zh_cn/build +!docs/en/build diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f8f8e4672..3ae98d745b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,7 +11,11 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +if (MSVC) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +else () + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) +endif () set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) # options @@ -39,12 +43,20 @@ endif 
() # notice that ubsan has linker issues for ubuntu < 18.04, see # https://stackoverflow.com/questions/50024731/ld-unrecognized-option-push-state-no-as-needed if (MMDEPLOY_UBSAN_ENABLE) - add_compile_options($<$:-fsanitize=undefined>) - add_link_options(-fsanitize=undefined) + add_compile_options($<$:-fsanitize=undefined>) + add_link_options(-fsanitize=undefined) +endif () + +if (MSVC) + add_compile_options($<$:/diagnostics:classic>) + add_compile_options($<$:/Zc:preprocessor>) # /experimental:preprocessor on VS2017 + add_compile_options($<$:/wd4251>) +else () + add_compile_options($<$:-fvisibility=hidden>) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -# set INTERFACE target to gather linked modules +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) + add_library(MMDeployStaticModules INTERFACE) add_library(MMDeployDynamicModules INTERFACE) add_library(MMDeployLibs INTERFACE) @@ -52,25 +64,10 @@ add_library(MMDeployLibs INTERFACE) add_subdirectory(csrc) if (MMDEPLOY_BUILD_SDK) - # get static modules and dynamic modules from ${MMDeployStaticModules} and ${MMDeployDynamicModules}, respectively - set(STATIC_MODULES) - get_target_property(STATIC_MODULES MMDeployStaticModules INTERFACE_LINK_LIBRARIES) - get_target_list("${STATIC_MODULES}" FILTERED_MODULES) - set(MMDEPLOY_STATIC_MODULES "${FILTERED_MODULES}" CACHE STRING "MMDeploy's static modules") - message(STATUS "MMDEPLOY_STATIC_MODULES: ${MMDEPLOY_STATIC_MODULES}") - - set(DYNAMIC_MODULES) - get_target_property(DYNAMIC_MODULES MMDeployDynamicModules INTERFACE_LINK_LIBRARIES) - get_target_list("${DYNAMIC_MODULES}" FILTERED_MODULES) - set(MMDEPLOY_DYNAMIC_MODULES "${FILTERED_MODULES}" CACHE STRING "MMDeploy's dynamic modules") - message(STATUS "MMDEPLOY_DYNAMIC_MODULES: ${MMDEPLOY_DYNAMIC_MODULES}") - - # get libs from ${MMDeployLibs} - set(LIBS) - get_target_property(LIBS MMDeployLibs INTERFACE_LINK_LIBRARIES) - get_target_list("${LIBS}" FILTERED_LIBS) - set(MMDEPLOY_LIBS "${FILTERED_LIBS}" CACHE STRING "MMDeploy's libs that can be linked directly by application") - message(STATUS "MMDEPLOY_LIBS: ${MMDEPLOY_LIBS}") + install(TARGETS MMDeployStaticModules + MMDeployDynamicModules + MMDeployLibs + EXPORT MMDeployTargets) if (MMDEPLOY_BUILD_TEST) add_subdirectory(tests/test_csrc) @@ -78,13 +75,11 @@ if (MMDEPLOY_BUILD_SDK) if (MMDEPLOY_BUILD_SDK_PYTHON_API) add_subdirectory(csrc/apis/python) - endif() + endif () # export MMDeploy package install(EXPORT MMDeployTargets - # NAMESPACE mmdeploy:: FILE MMDeployTargets.cmake - #EXPORT_LINK_INTERFACE_LIBRARIES DESTINATION lib/cmake/MMDeploy) include(CMakePackageConfigHelpers) @@ -105,6 +100,8 @@ if (MMDEPLOY_BUILD_SDK) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/MMDeployConfigVersion.cmake + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/MMDeploy.cmake + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/loader.cpp.in DESTINATION lib/cmake/MMDeploy ) diff --git a/cmake/MMDeploy.cmake b/cmake/MMDeploy.cmake new file mode 100644 index 0000000000..086b45681f --- /dev/null +++ b/cmake/MMDeploy.cmake @@ -0,0 +1,151 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
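# Consumer-side sketch of the helpers defined below (target names are
# hypothetical; assumes this file is pulled in via MMDeployConfig.cmake):
#
#   mmdeploy_add_module(my_transform STATIC my_transform.cpp)
#   add_executable(my_app main.cpp)
#   mmdeploy_load_static(my_app MMDeployStaticModules)    # whole-archive link
#   mmdeploy_load_dynamic(my_app MMDeployDynamicModules)  # eager load at startup
#
# Static modules need whole-archive linking so their self-registration
# objects are not discarded; dynamic modules need eager loading because
# no symbol in the executable references them.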
+ +function (mmdeploy_export NAME) + set(_LIB_DIR lib) + if (MSVC) + set(_LIB_DIR bin) + endif () + install(TARGETS ${NAME} + EXPORT MMDeployTargets + ARCHIVE DESTINATION lib + LIBRARY DESTINATION ${_LIB_DIR} + RUNTIME DESTINATION bin) +endfunction () + + +function (mmdeploy_add_library NAME) + cmake_parse_arguments(_MMDEPLOY "EXCLUDE" "" "" ${ARGN}) + add_library(${NAME} ${_MMDEPLOY_UNPARSED_ARGUMENTS}) + target_compile_definitions(${NAME} PRIVATE -DMMDEPLOY_API_EXPORTS=1) + get_target_property(_TYPE ${NAME} TYPE) + if (_TYPE STREQUAL STATIC_LIBRARY) + set_target_properties(${NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) + elseif (_TYPE STREQUAL SHARED_LIBRARY) + else () + message(FATAL_ERROR "unsupported type: ${_TYPE}") + endif () + if (NOT _MMDEPLOY_EXCLUDE) + target_link_libraries(MMDeployLibs INTERFACE ${NAME}) + mmdeploy_export(${NAME}) + endif () +endfunction () + + +function (mmdeploy_add_module NAME) + # EXCLUDE: exclude from registering & exporting as SDK module + # LIBRARY: the module is also a library (add_libray with SHARED instead of MODULE) + cmake_parse_arguments(_MMDEPLOY "EXCLUDE;LIBRARY" "" "" ${ARGN}) + # search for add_library keywords + cmake_parse_arguments(_KW "STATIC;SHARED;MODULE" "" "" ${_MMDEPLOY_UNPARSED_ARGUMENTS}) + + set(_MAYBE_MODULE) + # no library type specified + if (NOT (_KW_STATIC OR _KW_SHARED OR _KW_MODULE)) + # shared but not marked as a library, build module library so that no .lib dependency + # will be generated for MSVC + if (MSVC AND BUILD_SHARED_LIBS AND NOT _MMDEPLOY_LIBRARY) + set(_MAYBE_MODULE MODULE) + endif () + endif () + + add_library(${NAME} ${_MAYBE_MODULE} ${_MMDEPLOY_UNPARSED_ARGUMENTS}) + + # automatically link mmdeploy::core if exists + if (TARGET mmdeploy::core) + target_link_libraries(${NAME} PRIVATE mmdeploy::core) + endif () + + # export public symbols when marked as a library + if (_MMDEPLOY_LIBRARY) + target_compile_definitions(${NAME} PRIVATE -DMMDEPLOY_API_EXPORTS=1) + endif () + + get_target_property(_TYPE ${NAME} TYPE) + if (_TYPE STREQUAL STATIC_LIBRARY) + set_target_properties(${NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) + if (MSVC) + target_link_options(${NAME} INTERFACE "/WHOLEARCHIVE:${NAME}") + endif () + # register static modules + if (NOT _MMDEPLOY_EXCLUDE) + target_link_libraries(MMDeployStaticModules INTERFACE ${NAME}) + endif () + elseif (_TYPE STREQUAL SHARED_LIBRARY OR _TYPE STREQUAL MODULE_LIBRARY) + # register dynamic modules + if (NOT _MMDEPLOY_EXCLUDE) + target_link_libraries(MMDeployDynamicModules INTERFACE ${NAME}) + endif () + else () + message(FATAL_ERROR "unsupported type: ${_TYPE}") + endif () + if (NOT _MMDEPLOY_EXCLUDE) + mmdeploy_export(${NAME}) + endif () +endfunction () + + +function (_mmdeploy_flatten_modules RETVAL) + set(_RETVAL) + foreach (ARG IN LISTS ARGN) + get_target_property(TYPE ${ARG} TYPE) + if (TYPE STREQUAL "INTERFACE_LIBRARY") + get_target_property(LIBS ${ARG} INTERFACE_LINK_LIBRARIES) + if (LIBS) + # pattern for 3.17+ + list(FILTER LIBS EXCLUDE REGEX "^::@") + # pattern for 3.13-3.16 + list(TRANSFORM LIBS REPLACE "(.+)::@.*" "\\1") + list(APPEND _RETVAL ${LIBS}) + endif () + else () + list(APPEND _RETVAL ${ARG}) + endif () + endforeach () + set(${RETVAL} ${_RETVAL} PARENT_SCOPE) +endfunction () + + +function (mmdeploy_load_static NAME) + if (MSVC) + target_link_libraries(${NAME} PRIVATE ${ARGN}) + else () + _mmdeploy_flatten_modules(_MODULE_LIST ${ARGN}) + target_link_libraries(${NAME} PRIVATE + -Wl,--whole-archive + ${_MODULE_LIST} + -Wl,--no-whole-archive) + endif () 
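    # Rationale: a static module registers itself through global
    # constructors that nothing in the executable references directly, so
    # without --whole-archive (or /WHOLEARCHIVE, attached to the target in
    # mmdeploy_add_module for MSVC) the linker would drop those objects
    # and the module would silently never register.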
+endfunction () + +function (mmdeploy_load_dynamic NAME) + _mmdeploy_flatten_modules(_MODULE_LIST ${ARGN}) + if (MSVC) + if (NOT _MODULE_LIST) + return () + endif () + # MSVC has nothing like "-Wl,--no-as-needed ... -Wl,--as-needed", as a + # workaround we build a static module which loads the dynamic modules + set(_MODULE_STR ${_MODULE_LIST}) + list(TRANSFORM _MODULE_STR REPLACE "(.+)" "\"\\1\"") + string(JOIN ",\n " _MODULE_STR ${_MODULE_STR}) + set(_MMDEPLOY_DYNAMIC_MODULES ${_MODULE_STR}) + + set(_LOADER_NAME ${NAME}_loader) + + add_dependencies(${NAME} ${_MODULE_LIST}) + + set(_LOADER_PATH ${CMAKE_BINARY_DIR}/${_LOADER_NAME}.cpp) + # ! CMAKE_CURRENT_FUNCTION_LIST_DIR requires cmake 3.17+ + configure_file( + ${CMAKE_CURRENT_FUNCTION_LIST_DIR}/loader.cpp.in + ${_LOADER_PATH}) + + mmdeploy_add_module(${_LOADER_NAME} STATIC EXCLUDE ${_LOADER_PATH}) + mmdeploy_load_static(${NAME} ${_LOADER_NAME}) + else () + target_link_libraries(${NAME} PRIVATE + -Wl,--no-as-needed + ${_MODULE_LIST} + -Wl,--as-needed) + endif () +endfunction () diff --git a/cmake/MMDeployConfig.cmake.in b/cmake/MMDeployConfig.cmake.in index 7a23a9e153..4bd05489e4 100644 --- a/cmake/MMDeployConfig.cmake.in +++ b/cmake/MMDeployConfig.cmake.in @@ -2,23 +2,26 @@ cmake_minimum_required(VERSION 3.14) -include ("${CMAKE_CURRENT_LIST_DIR}/MMDeployTargets.cmake") +include("${CMAKE_CURRENT_LIST_DIR}/MMDeployTargets.cmake") set(MMDEPLOY_CODEBASES @MMDEPLOY_CODEBASES@) set(MMDEPLOY_TARGET_DEVICES @MMDEPLOY_TARGET_DEVICES@) set(MMDEPLOY_TARGET_BACKENDS @MMDEPLOY_TARGET_BACKENDS@) set(MMDEPLOY_BUILD_TYPE @CMAKE_BUILD_TYPE@) -set(MMDEPLOY_STATIC_MODULES @MMDEPLOY_STATIC_MODULES@) -set(MMDEPLOY_DYNAMIC_MODULES @MMDEPLOY_DYNAMIC_MODULES@) set(MMDEPLOY_BUILD_SHARED @BUILD_SHARED_LIBS@) -set(MMDEPLOY_LIBS @MMDEPLOY_LIBS@) if (NOT MMDEPLOY_BUILD_SHARED) if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) + set(CMAKE_CUDA_RUNTIME_LIBRARY Shared) + enable_language(CUDA) find_package(pplcv REQUIRED) endif () endif () -set(MMDeploy_LIBS ${MMDEPLOY_LIBS} - -Wl,--no-as-needed ${MMDEPLOY_DYNAMIC_MODULES} -Wl,--as-needed - -Wl,--whole-archive ${MMDEPLOY_STATIC_MODULES} -Wl,--no-whole-archive) +find_package(spdlog REQUIRED) +find_package(OpenCV REQUIRED) + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) + +include("${CMAKE_CURRENT_LIST_DIR}/MMDeploy.cmake") diff --git a/cmake/common.cmake b/cmake/common.cmake deleted file mode 100644 index fae162bfe2..0000000000 --- a/cmake/common.cmake +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -function(set_targets PROJECT_NAME OBJ_TARGET STATIC_TARGET SHARED_TARGET) - set(${OBJ_TARGET} ${PROJECT_NAME}_obj PARENT_SCOPE) - set(${STATIC_TARGET} ${PROJECT_NAME}_static PARENT_SCOPE) - set(${SHARED_TARGET} ${PROJECT_NAME} PARENT_SCOPE) -endfunction() - -function(install_targets TARGET_NAMES) - foreach (TARGET_NAME ${TARGET_NAMES}) - install(TARGETS ${TARGET_NAME} - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - RUNTIME DESTINATION bin - ) - endforeach () -endfunction() - -function(build_target TARGET_NAME TARGET_SRCS) - add_library(${TARGET_NAME} ${TARGET_SRCS}) - set_target_properties(${TARGET_NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) -endfunction() - -# When the object target ${TARGET_NAME} has more than one source file, -# "${SRCS_VARIABLE}" MUST be passed to ${TARGET_SRCS}. The quotation marks CANNOT be dismissed. 
-function(build_object_target TARGET_NAME TARGET_SRCS) - add_library(${TARGET_NAME} OBJECT) - target_sources(${TARGET_NAME} PRIVATE ${TARGET_SRCS}) - set_target_properties(${TARGET_NAME} PROPERTIES POSITION_INDEPENDENT_CODE 1) -endfunction() - -function(build_static_target TARGET_NAME OBJECT_TARGET LINK_TYPE) - add_library(${TARGET_NAME} STATIC $) - if (${LINK_TYPE} STREQUAL "PRIVATE") - target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "PUBLIC") - target_link_libraries(${TARGET_NAME} PUBLIC ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "INTERFACE") - target_link_libraries(${TARGET_NAME} INTERFACE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "") - target_link_libraries(${TARGET_NAME} ${OBJECT_TARGET}) - else () - message(FATAL_ERROR "Incorrect link type: ${LINK_TYPE}") - endif () -endfunction() - -function(build_shared_target TARGET_NAME OBJECT_TARGET LINK_TYPE) - add_library(${TARGET_NAME} SHARED $) - if (${LINK_TYPE} STREQUAL "PRIVATE") - target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "PUBLIC") - target_link_libraries(${TARGET_NAME} PUBLIC ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "INTERFACE") - target_link_libraries(${TARGET_NAME} INTERFACE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "") - target_link_libraries(${TARGET_NAME} ${OBJECT_TARGET}) - else () - message(FATAL_ERROR "Incorrect link type: ${LINK_TYPE}") - endif () -endfunction() - -function(build_module_target TARGET_NAME OBJECT_TARGET LINK_TYPE) - add_library(${TARGET_NAME} MODULE $) - if (${LINK_TYPE} STREQUAL "PRIVATE") - target_link_libraries(${TARGET_NAME} PRIVATE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "PUBLIC") - target_link_libraries(${TARGET_NAME} PUBLIC ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "INTERFACE") - target_link_libraries(${TARGET_NAME} INTERFACE ${OBJECT_TARGET}) - elseif (${LINK_TYPE} STREQUAL "") - target_link_libraries(${TARGET_NAME} ${OBJECT_TARGET}) - else () - message(FATAL_ERROR "Incorrect link type: ${LINK_TYPE}") - endif () -endfunction() - - -function(export_target TARGET_NAME) - target_link_libraries(MMDeployLibs INTERFACE ${TARGET_NAME}) - install(TARGETS ${TARGET_NAME} - EXPORT MMDeployTargets - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - ) -endfunction() - -function(export_module TARGET_NAME) - get_target_property(TARGET_TYPE ${TARGET_NAME} TYPE) - if (${TARGET_TYPE} STREQUAL "STATIC_LIBRARY") - target_link_libraries(MMDeployStaticModules INTERFACE ${TARGET_NAME}) - elseif (${TARGET_TYPE} STREQUAL "SHARED_LIBRARY") - target_link_libraries(MMDeployDynamicModules INTERFACE ${TARGET_NAME}) - endif () - install(TARGETS ${TARGET_NAME} - EXPORT MMDeployTargets - ARCHIVE DESTINATION lib - LIBRARY DESTINATION lib - ) -endfunction() - -function(get_target_list INPUT_TARGETS OUTPUT_TARGETS) - set(FILTERED_TARGETS) - foreach (INPUT_TARGET IN LISTS INPUT_TARGETS) - if (TARGET ${INPUT_TARGET}) - list(APPEND FILTERED_TARGETS ${INPUT_TARGET}) - endif() - endforeach () - set(${OUTPUT_TARGETS} "${FILTERED_TARGETS}" PARENT_SCOPE) -endfunction() diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake index 158e542e16..9fe42596c4 100644 --- a/cmake/cuda.cmake +++ b/cmake/cuda.cmake @@ -23,35 +23,41 @@ else () set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER}) set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=-fPIC,-Wall,-fvisibility=hidden") - set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler=-fno-gnu-unique") + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU") + set(CUDA_NVCC_FLAGS 
"${CUDA_NVCC_FLAGS} -Xcompiler=-fno-gnu-unique") + endif () endif () enable_language(CUDA) # set virtual compute architecture and real ones set(_NVCC_FLAGS) -set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52") -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "8") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_60,code=sm_60") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_61,code=sm_61") -endif () -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "9") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_70,code=sm_70") -endif () -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "10") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_72,code=sm_72") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75") -endif () -if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "11") - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80") - if (CUDA_VERSION_MINOR VERSION_GREATER_EQUAL "1") - # cuda doesn't support `sm_86` until version 11.1 - set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_86,code=sm_86") +if (NOT CMAKE_CUDA_ARCHITECTURES) + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_52,code=sm_52") + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "8") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_60,code=sm_60") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_61,code=sm_61") + endif () + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "9") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_70,code=sm_70") + endif () + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "10") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_72,code=sm_72") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_75,code=sm_75") + endif () + if (CUDA_VERSION_MAJOR VERSION_GREATER_EQUAL "11") + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_80,code=sm_80") + if (CUDA_VERSION_MINOR VERSION_GREATER_EQUAL "1") + # cuda doesn't support `sm_86` until version 11.1 + set(_NVCC_FLAGS "${_NVCC_FLAGS} -gencode arch=compute_86,code=sm_86") + endif () endif () endif () set(CUDA_NVCC_FLAGS_DEBUG "-g -O0") set(CUDA_NVCC_FLAGS_RELEASE "-O3") set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") -set(CMAKE_CUDA_STANDARD 14) +if (NOT MSVC) + set(CMAKE_CUDA_STANDARD 14) +endif () set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${CUDA_NVCC_FLAGS} ${_NVCC_FLAGS}") diff --git a/cmake/loader.cpp.in b/cmake/loader.cpp.in new file mode 100644 index 0000000000..6627d6e2e7 --- /dev/null +++ b/cmake/loader.cpp.in @@ -0,0 +1,39 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
+ +#include + +#include + +namespace mmdeploy { +namespace { + +void* mmdeploy_load_library(const char* name) { + fprintf(stderr, "loading %s ...\n", name); + auto handle = LoadLibraryA(name); + if (!handle) { + fprintf(stderr, "failed to load library %s\n", name); + return nullptr; + } + return handle; +} + +// clang-format off + +class Loader { + public: + Loader() { + const char* modules[] = { + @_MMDEPLOY_DYNAMIC_MODULES@ + }; + for (const auto name : modules) { + mmdeploy_load_library(name); + } + } +}; + +// clang-format on + +static Loader loader; + +} // namespace +} // namespace mmdeploy diff --git a/csrc/CMakeLists.txt b/csrc/CMakeLists.txt index 889e54bb5d..b14c81c136 100644 --- a/csrc/CMakeLists.txt +++ b/csrc/CMakeLists.txt @@ -4,6 +4,7 @@ add_subdirectory(backend_ops) if (MMDEPLOY_BUILD_SDK) add_subdirectory(core) + add_subdirectory(utils) add_subdirectory(archive) add_subdirectory(device) add_subdirectory(graph) diff --git a/csrc/apis/c/CMakeLists.txt b/csrc/apis/c/CMakeLists.txt index 81da0a3fb1..f1809995bb 100644 --- a/csrc/apis/c/CMakeLists.txt +++ b/csrc/apis/c/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(capis) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) if ("all" IN_LIST MMDEPLOY_CODEBASES) set(TASK_LIST "classifier;detector;segmentor;text_detector;text_recognizer;restorer;model") @@ -28,16 +28,13 @@ endif () foreach (TASK ${TASK_LIST}) set(TARGET_NAME mmdeploy_${TASK}) - build_target(${TARGET_NAME} ${TASK}.cpp) + mmdeploy_add_library(${TARGET_NAME} ${TASK}.cpp) target_link_libraries(${TARGET_NAME} PRIVATE mmdeploy::core) target_include_directories(${TARGET_NAME} PUBLIC $ $) - export_target(${TARGET_NAME}) - install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${TASK}.h DESTINATION include/c) - endforeach () install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/common.h diff --git a/csrc/apis/c/classifier.cpp b/csrc/apis/c/classifier.cpp index 9236f5eae0..ecdfaafc87 100644 --- a/csrc/apis/c/classifier.cpp +++ b/csrc/apis/c/classifier.cpp @@ -55,28 +55,28 @@ int mmdeploy_classifier_create_impl(ModelType&& m, const char* device_name, int return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_classifier_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle) { return mmdeploy_classifier_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_class_t** results, int** result_count) { +int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_class_t** results, int** result_count) { if (handle == nullptr || mats == nullptr || mat_count == 0) { return MM_E_INVALID_ARG; } @@ -92,7 +92,7 @@ MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mat } auto output = classifier->Run(std::move(input)).value().front(); - DEBUG("output: {}", output); + MMDEPLOY_DEBUG("output: {}", output); auto classify_outputs = from_value>(output); @@ -124,20 +124,19 @@ MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mat return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, - int count) { +void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, int count) { delete[] results; delete[] result_count; } -MM_SDK_API void mmdeploy_classifier_destroy(mm_handle_t handle) { +void mmdeploy_classifier_destroy(mm_handle_t handle) { if (handle != nullptr) { auto classifier = static_cast(handle); delete classifier; diff --git a/csrc/apis/c/classifier.h b/csrc/apis/c/classifier.h index 6834b8e401..a2209792ba 100644 --- a/csrc/apis/c/classifier.h +++ b/csrc/apis/c/classifier.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_class_t { int label_id; float score; @@ -25,8 +29,8 @@ typedef struct mm_class_t { * by \ref mmdeploy_classifier_destroy * @return status of creating classifier's handle */ -MM_SDK_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_classifier_create(mm_model_t model, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Create classifier's handle @@ -37,8 +41,8 @@ MM_SDK_API int mmdeploy_classifier_create(mm_model_t model, const char* device_n * by \ref mmdeploy_classifier_destroy * @return status of creating classifier's handle */ -MM_SDK_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_classifier_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Use classifier created by \ref 
mmdeploy_classifier_create_by_path to get label @@ -53,8 +57,8 @@ MM_SDK_API int mmdeploy_classifier_create_by_path(const char* model_path, const * mmdeploy_classifier_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_class_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_class_t** results, int** result_count); /** * @brief Release the inference result buffer created \ref mmdeploy_classifier_apply @@ -62,13 +66,17 @@ MM_SDK_API int mmdeploy_classifier_apply(mm_handle_t handle, const mm_mat_t* mat * @param[in] result_count \p results size buffer * @param[in] count length of \p result_count */ -MM_SDK_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, - int count); +MMDEPLOY_API void mmdeploy_classifier_release_result(mm_class_t* results, const int* result_count, + int count); /** * @brief Destroy classifier's handle * @param[in] handle classifier's handle created by \ref mmdeploy_classifier_create_by_path */ -MM_SDK_API void mmdeploy_classifier_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_classifier_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_CLASSIFIER_H diff --git a/csrc/apis/c/common.h b/csrc/apis/c/common.h index 1809f77727..dc82d44292 100644 --- a/csrc/apis/c/common.h +++ b/csrc/apis/c/common.h @@ -3,9 +3,23 @@ #ifndef MMDEPLOY_COMMON_H #define MMDEPLOY_COMMON_H -#include +#include -#define MM_SDK_API +#ifndef MMDEPLOY_EXPORT +#ifdef _MSC_VER +#define MMDEPLOY_EXPORT __declspec(dllexport) +#else +#define MMDEPLOY_EXPORT __attribute__((visibility("default"))) +#endif +#endif + +#ifndef MMDEPLOY_API +#ifdef MMDEPLOY_API_EXPORTS +#define MMDEPLOY_API MMDEPLOY_EXPORT +#else +#define MMDEPLOY_API +#endif +#endif // clang-format off diff --git a/csrc/apis/c/detector.cpp b/csrc/apis/c/detector.cpp index 190b8bf7d5..4dbb573f96 100644 --- a/csrc/apis/c/detector.cpp +++ b/csrc/apis/c/detector.cpp @@ -55,27 +55,27 @@ int mmdeploy_detector_create_impl(ModelType&& m, const char* device_name, int de return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
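
The `common.h` change is the core of this patch: `MM_SDK_API`, previously a no-op, becomes `MMDEPLOY_API`, which expands to `__declspec(dllexport)` on MSVC or default ELF visibility elsewhere, but only in targets that define `MMDEPLOY_API_EXPORTS`; consumers see a plain declaration, which is enough for linking against the import library. The three-macro pattern in isolation, with hypothetical stand-in names:

```cpp
// Export-macro pattern; the MYLIB_* names are hypothetical stand-ins.
#ifndef MYLIB_EXPORT
#ifdef _MSC_VER
#define MYLIB_EXPORT __declspec(dllexport)
#else
#define MYLIB_EXPORT __attribute__((visibility("default")))
#endif
#endif

#ifndef MYLIB_API
#ifdef MYLIB_API_EXPORTS  // defined only while building the library itself
#define MYLIB_API MYLIB_EXPORT
#else
#define MYLIB_API
#endif
#endif

MYLIB_API int mylib_version(void);  // exported when built, plain for consumers
```
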
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_detector_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_detector_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_detect_t** results, int** result_count) { +int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_detect_t** results, int** result_count) { if (handle == nullptr || mats == nullptr || mat_count == 0) { return MM_E_INVALID_ARG; } @@ -91,7 +91,7 @@ MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, } auto output = detector->Run(std::move(input)).value().front(); - DEBUG("output: {}", output); + MMDEPLOY_DEBUG("output: {}", output); auto detector_outputs = from_value>(output); @@ -142,15 +142,14 @@ MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, - int count) { +void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, int count) { auto result_ptr = results; for (int i = 0; i < count; ++i) { for (int j = 0; j < result_count[i]; ++j, ++result_ptr) { @@ -164,7 +163,7 @@ MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int delete[] result_count; } -MM_SDK_API void mmdeploy_detector_destroy(mm_handle_t handle) { +void mmdeploy_detector_destroy(mm_handle_t handle) { if (handle != nullptr) { auto detector = static_cast(handle); delete detector; diff --git a/csrc/apis/c/detector.h b/csrc/apis/c/detector.h index 59689dd0de..bfcf0a8acb 100644 --- a/csrc/apis/c/detector.h +++ b/csrc/apis/c/detector.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_instance_mask_t { char* data; int height; @@ -32,8 +36,8 @@ typedef struct mm_detect_t { * @param[out] handle instance of a detector * @return status of creating detector's handle */ -MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_detector_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Create detector's handle @@ -43,8 +47,8 @@ MM_SDK_API int mmdeploy_detector_create(mm_model_t model, const char* device_nam * @param[out] handle instance of a detector * @return status of creating detector's handle */ -MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int 
mmdeploy_detector_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Apply detector to batch images and get their inference results @@ -58,21 +62,25 @@ MM_SDK_API int mmdeploy_detector_create_by_path(const char* model_path, const ch * mmdeploy_detector_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_detect_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_detect_t** results, int** result_count); /** @brief Release the inference result buffer created by \ref mmdeploy_detector_apply * @param[in] results detection results buffer * @param[in] result_count \p results size buffer * @param[in] count length of \p result_count */ -MM_SDK_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, - int count); +MMDEPLOY_API void mmdeploy_detector_release_result(mm_detect_t* results, const int* result_count, + int count); /** * @brief Destroy detector's handle * @param[in] handle detector's handle created by \ref mmdeploy_detector_create_by_path */ -MM_SDK_API void mmdeploy_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_detector_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_DETECTOR_H diff --git a/csrc/apis/c/handle.h b/csrc/apis/c/handle.h index 2af9c0da58..4223452995 100644 --- a/csrc/apis/c/handle.h +++ b/csrc/apis/c/handle.h @@ -20,12 +20,12 @@ class Handle { config["context"].update({{"device", device_}, {"stream", stream_}}); auto creator = Registry::Get().GetCreator("Pipeline"); if (!creator) { - ERROR("failed to find Pipeline creator"); + MMDEPLOY_ERROR("failed to find Pipeline creator"); throw_exception(eEntryNotFound); } pipeline_ = creator->Create(config); if (!pipeline_) { - ERROR("create pipeline failed"); + MMDEPLOY_ERROR("create pipeline failed"); throw_exception(eFail); } pipeline_->Build(graph_); diff --git a/csrc/apis/c/model.cpp b/csrc/apis/c/model.cpp index 9834071c70..5101b92a47 100644 --- a/csrc/apis/c/model.cpp +++ b/csrc/apis/c/model.cpp @@ -1,11 +1,13 @@ // Copyright (c) OpenMMLab. All rights reserved. -#include "core/model.h" +// clang-format off +#include "model.h" #include #include "core/logger.h" -#include "model.h" +#include "core/model.h" +// clang-format on using namespace mmdeploy; @@ -15,9 +17,9 @@ int mmdeploy_model_create_by_path(const char *path, mm_model_t *model) { *model = ptr.release(); return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("failed to create model: {}", e.what()); + MMDEPLOY_ERROR("failed to create model: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } @@ -28,9 +30,9 @@ int mmdeploy_model_create(const void *buffer, int size, mm_model_t *model) { *model = ptr.release(); return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("failed to create model: {}", e.what()); + MMDEPLOY_ERROR("failed to create model: {}", e.what()); } catch (...) 
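
`model.cpp`, like every C entry point in this patch, funnels C++ failures into integer status codes: the body runs inside `try`, errors are logged with the renamed `MMDEPLOY_ERROR` macro, and no exception ever crosses the C ABI. The idiom reduced to a standalone sketch (the status values are stand-ins for `MM_SUCCESS`/`MM_E_FAIL`):

```cpp
#include <cstdio>
#include <exception>

enum { kSuccess = 0, kFail = -1 };  // stand-ins for MM_SUCCESS / MM_E_FAIL

int c_entry_point() {
  try {
    // ... C++ implementation that may throw ...
    return kSuccess;
  } catch (const std::exception& e) {
    std::fprintf(stderr, "exception caught: %s\n", e.what());
  } catch (...) {
    std::fprintf(stderr, "unknown exception caught\n");
  }
  return kFail;  // never let an exception escape into C callers
}
```
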
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } diff --git a/csrc/apis/c/model.h b/csrc/apis/c/model.h index 731bb0270b..6151ba43a5 100644 --- a/csrc/apis/c/model.h +++ b/csrc/apis/c/model.h @@ -10,13 +10,17 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + /** * @brief Create SDK Model instance from given model path * @param[in] path model path * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model); +MMDEPLOY_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model); /** * @brief Create SDK Model instance from memory @@ -25,13 +29,17 @@ MM_SDK_API int mmdeploy_model_create_by_path(const char* path, mm_model_t* model * @param[out] model sdk model instance that must be destroyed by \ref mmdeploy_model_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_model_create(const void* buffer, int size, mm_model_t* model); +MMDEPLOY_API int mmdeploy_model_create(const void* buffer, int size, mm_model_t* model); /** * @brief Destroy model instance * @param[in] model sdk model instance created by \ref mmdeploy_model_create_by_path or \ref * mmdeploy_model_create */ -MM_SDK_API void mmdeploy_model_destroy(mm_model_t model); +MMDEPLOY_API void mmdeploy_model_destroy(mm_model_t model); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SRC_APIS_C_MODEL_H_ diff --git a/csrc/apis/c/restorer.cpp b/csrc/apis/c/restorer.cpp index 0e12fa02c6..95e3679584 100644 --- a/csrc/apis/c/restorer.cpp +++ b/csrc/apis/c/restorer.cpp @@ -51,9 +51,9 @@ int mmdeploy_restorer_create_impl(ModelType &&m, const char *device_name, int de return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } @@ -105,9 +105,9 @@ int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t *images, int coun *results = _results.release(); return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } diff --git a/csrc/apis/c/restorer.h b/csrc/apis/c/restorer.h index 5d568cd418..4ae491a236 100644 --- a/csrc/apis/c/restorer.h +++ b/csrc/apis/c/restorer.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + /** * @brief Create a restorer instance * @param[in] model an instance of image restoration model created by @@ -20,8 +24,8 @@ * by \ref mmdeploy_restorer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_restorer_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_restorer_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Create a restorer instance @@ -32,8 +36,8 @@ MM_SDK_API int mmdeploy_restorer_create(mm_model_t model, const char* device_nam * by \ref mmdeploy_restorer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_restorer_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_restorer_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Apply restorer to a batch of images @@ -44,19 +48,23 @@ MM_SDK_API int mmdeploy_restorer_create_by_path(const char* model_path, const ch * by \ref mmdeploy_restorer_release_result * @return status code of the operation */ -MM_SDK_API int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t* images, int count, - mm_mat_t** results); +MMDEPLOY_API int mmdeploy_restorer_apply(mm_handle_t handle, const mm_mat_t* images, int count, + mm_mat_t** results); /** @brief Release result buffer returned by \ref mmdeploy_restorer_apply * @param[in] results result buffer by restorer * @param[in] count length of \p result */ -MM_SDK_API void mmdeploy_restorer_release_result(mm_mat_t* results, int count); +MMDEPLOY_API void mmdeploy_restorer_release_result(mm_mat_t* results, int count); /** * @brief destroy restorer * @param[in] handle handle of restorer created by \ref mmdeploy_restorer_create_by_path */ -MM_SDK_API void mmdeploy_restorer_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_restorer_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SRC_APIS_C_RESTORER_H_ diff --git a/csrc/apis/c/segmentor.cpp b/csrc/apis/c/segmentor.cpp index 2c578de321..bcdca722a7 100644 --- a/csrc/apis/c/segmentor.cpp +++ b/csrc/apis/c/segmentor.cpp @@ -53,28 +53,28 @@ int mmdeploy_segmentor_create_impl(ModelType&& m, const char* device_name, int d return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_segmentor_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle) { return mmdeploy_segmentor_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_segment_t** results) { +int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_segment_t** results) { if (handle == nullptr || mats == nullptr || mat_count == 0 || results == nullptr) { return MM_E_INVALID_ARG; } @@ -97,7 +97,7 @@ MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats auto results_ptr = _results.get(); for (auto i = 0; i < mat_count; ++i, ++results_ptr) { auto& output_item = output[i]; - DEBUG("the {}-th item in output: {}", i, output_item); + MMDEPLOY_DEBUG("the {}-th item in output: {}", i, output_item); auto segmentor_output = from_value(output_item); results_ptr->height = segmentor_output.height; results_ptr->width = segmentor_output.width; @@ -110,14 +110,14 @@ MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count) { +void mmdeploy_segmentor_release_result(mm_segment_t* results, int count) { if (results == nullptr) { return; } @@ -128,7 +128,7 @@ MM_SDK_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int cou delete[] results; } -MM_SDK_API void mmdeploy_segmentor_destroy(mm_handle_t handle) { +void mmdeploy_segmentor_destroy(mm_handle_t handle) { if (handle != nullptr) { auto segmentor = static_cast(handle); delete segmentor; diff --git a/csrc/apis/c/segmentor.h b/csrc/apis/c/segmentor.h index 4abcd3cf68..741fbd9633 100644 --- a/csrc/apis/c/segmentor.h +++ b/csrc/apis/c/segmentor.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_segment_t { int height; ///< height of \p mask that equals to the input image's height int width; ///< width of \p mask that equals to the input image's width @@ -28,8 +32,8 @@ typedef struct mm_segment_t { * by \ref mmdeploy_segmentor_destroy * @return status of creating segmentor's handle */ -MM_SDK_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Create segmentor's handle @@ -40,8 +44,8 @@ MM_SDK_API int mmdeploy_segmentor_create(mm_model_t model, const char* device_na * by \ref mmdeploy_segmentor_destroy * @return status of creating segmentor's handle */ -MM_SDK_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_segmentor_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Apply segmentor to batch images and get their inference results @@ -53,19 +57,23 @@ MM_SDK_API int mmdeploy_segmentor_create_by_path(const char* model_path, const c * image. 
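
Every task in the C API comes as a create/apply/release/destroy quartet, so C++ callers can bind the handle's lifetime to scope. A sketch assuming `mm_handle_t` is a plain pointer typedef, which the `static_cast`s in the `.cpp` files suggest:

```cpp
#include <memory>

#include "segmentor.h"

// Deleter that forwards to the C API's destroy function.
struct SegmentorDeleter {
  void operator()(void* h) const {
    if (h) mmdeploy_segmentor_destroy(h);
  }
};
using ScopedSegmentor = std::unique_ptr<void, SegmentorDeleter>;

ScopedSegmentor make_segmentor(const char* model_path) {
  mm_handle_t h{};
  if (mmdeploy_segmentor_create_by_path(model_path, "cpu", 0, &h) != MM_SUCCESS) {
    return nullptr;
  }
  return ScopedSegmentor(h);
}
```
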
It must be released by \ref mmdeploy_segmentor_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_segment_t** results); +MMDEPLOY_API int mmdeploy_segmentor_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_segment_t** results); /** @brief Release result buffer returned by \ref mmdeploy_segmentor_apply * @param[in] results result buffer * @param[in] count length of \p results */ -MM_SDK_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count); +MMDEPLOY_API void mmdeploy_segmentor_release_result(mm_segment_t* results, int count); /** * @brief Destroy segmentor's handle * @param[in] handle segmentor's handle created by \ref mmdeploy_segmentor_create_by_path */ -MM_SDK_API void mmdeploy_segmentor_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_segmentor_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SEGMENTOR_H diff --git a/csrc/apis/c/text_detector.cpp b/csrc/apis/c/text_detector.cpp index 1f0479f30d..6369044bfd 100644 --- a/csrc/apis/c/text_detector.cpp +++ b/csrc/apis/c/text_detector.cpp @@ -53,29 +53,28 @@ int mmdeploy_text_detector_create_impl(ModelType&& m, const char* device_name, i return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } } // namespace -MM_SDK_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle) { +int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, int device_id, + mm_handle_t* handle) { return mmdeploy_text_detector_create_impl(*static_cast(model), device_name, device_id, handle); } -MM_SDK_API int mmdeploy_text_detector_create_by_path(const char* model_path, - const char* device_name, int device_id, - mm_handle_t* handle) { +int mmdeploy_text_detector_create_by_path(const char* model_path, const char* device_name, + int device_id, mm_handle_t* handle) { return mmdeploy_text_detector_create_impl(model_path, device_name, device_id, handle); } -MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_text_detect_t** results, int** result_count) { +int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, + mm_text_detect_t** results, int** result_count) { if (handle == nullptr || mats == nullptr || mat_count == 0) { return MM_E_INVALID_ARG; } @@ -91,7 +90,7 @@ MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* } auto output = text_detector->Run(std::move(input)).value().front(); - DEBUG("output: {}", output); + MMDEPLOY_DEBUG("output: {}", output); auto detector_outputs = from_value>(output); vector _result_count; @@ -125,20 +124,20 @@ MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* return MM_SUCCESS; } catch (const std::exception& e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } -MM_SDK_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, - const int* result_count, int count) { +void mmdeploy_text_detector_release_result(mm_text_detect_t* results, const int* result_count, + int count) { delete[] results; delete[] result_count; } -MM_SDK_API void mmdeploy_text_detector_destroy(mm_handle_t handle) { +void mmdeploy_text_detector_destroy(mm_handle_t handle) { if (handle != nullptr) { auto text_detector = static_cast(handle); delete text_detector; diff --git a/csrc/apis/c/text_detector.h b/csrc/apis/c/text_detector.h index 06cb78558c..0ca39b9003 100644 --- a/csrc/apis/c/text_detector.h +++ b/csrc/apis/c/text_detector.h @@ -10,6 +10,10 @@ #include "common.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_text_detect_t { mm_pointf_t bbox[4]; ///< a text bounding box of which the vertex are in clock-wise float score; @@ -25,8 +29,8 @@ typedef struct mm_text_detect_t { * by \ref mmdeploy_text_detector_destroy * @return status of creating text-detector's handle */ -MM_SDK_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_detector_create(mm_model_t model, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Create text-detector's handle @@ -37,9 +41,9 @@ MM_SDK_API int mmdeploy_text_detector_create(mm_model_t model, const char* devic * by \ref mmdeploy_text_detector_destroy * @return status of creating text-detector's handle */ -MM_SDK_API int mmdeploy_text_detector_create_by_path(const char* model_path, - const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_detector_create_by_path(const char* model_path, + const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Apply text-detector to batch images and get their inference results @@ -52,22 +56,27 @@ MM_SDK_API int mmdeploy_text_detector_create_by_path(const char* model_path, * results of each image. 
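
`mm_text_detect_t` packs each detection as four clockwise vertices plus a score, and `result_count` holds one entry per input image, so iterating the flat result buffer looks like the sketch below (assuming `mm_pointf_t` carries `x`/`y` floats, as the name suggests):

```cpp
#include <cstdio>

#include "text_detector.h"

void print_detections(const mm_text_detect_t* results, const int* result_count,
                      int image_count) {
  const mm_text_detect_t* det = results;
  for (int i = 0; i < image_count; ++i) {
    for (int j = 0; j < result_count[i]; ++j, ++det) {
      std::printf("image %d box %d score %.3f:", i, j, det->score);
      for (const auto& p : det->bbox) {
        std::printf(" (%.1f, %.1f)", p.x, p.y);
      }
      std::printf("\n");
    }
  }
}
```
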
It must be released by \ref mmdeploy_detector_release_result * @return status of inference */ -MM_SDK_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, int mat_count, - mm_text_detect_t** results, int** result_count); +MMDEPLOY_API int mmdeploy_text_detector_apply(mm_handle_t handle, const mm_mat_t* mats, + int mat_count, mm_text_detect_t** results, + int** result_count); /** @brief Release the inference result buffer returned by \ref mmdeploy_text_detector_apply * @param[in] results text detection result buffer * @param[in] result_count \p results size buffer * @param[in] count the length of buffer \p result_count */ -MM_SDK_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, - const int* result_count, int count); +MMDEPLOY_API void mmdeploy_text_detector_release_result(mm_text_detect_t* results, + const int* result_count, int count); /** * @brief Destroy text-detector's handle * @param[in] handle text-detector's handle created by \ref mmdeploy_text_detector_create_by_path or * \ref mmdeploy_text_detector_create */ -MM_SDK_API void mmdeploy_text_detector_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_text_detector_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_TEXT_DETECTOR_H diff --git a/csrc/apis/c/text_recognizer.cpp b/csrc/apis/c/text_recognizer.cpp index 1ecf29e220..9458712b54 100644 --- a/csrc/apis/c/text_recognizer.cpp +++ b/csrc/apis/c/text_recognizer.cpp @@ -74,9 +74,9 @@ int mmdeploy_text_recognizer_create_impl(ModelType &&m, const char *device_name, return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) { - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } @@ -178,9 +178,9 @@ int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t *imag return MM_SUCCESS; } catch (const std::exception &e) { - ERROR("exception caught: {}", e.what()); + MMDEPLOY_ERROR("exception caught: {}", e.what()); } catch (...) 
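
`mmdeploy_text_recognizer_apply_bbox` consumes the text detector's output directly, so a two-stage OCR pipeline is a handful of calls. A hedged sketch chaining the two APIs for a single image:

```cpp
#include "text_detector.h"
#include "text_recognizer.h"

int ocr_one(mm_handle_t detector, mm_handle_t recognizer, const mm_mat_t* image) {
  mm_text_detect_t* boxes{};
  int* box_count{};
  int ec = mmdeploy_text_detector_apply(detector, image, 1, &boxes, &box_count);
  if (ec != MM_SUCCESS) return ec;

  mm_text_recognize_t* texts{};
  ec = mmdeploy_text_recognizer_apply_bbox(recognizer, image, 1, boxes,
                                           box_count, &texts);
  if (ec == MM_SUCCESS) {
    // box_count[0] recognized strings were produced for the single image.
    mmdeploy_text_recognizer_release_result(texts, box_count[0]);
  }
  mmdeploy_text_detector_release_result(boxes, box_count, 1);
  return ec;
}
```
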
{ - ERROR("unknown exception caught"); + MMDEPLOY_ERROR("unknown exception caught"); } return MM_E_FAIL; } diff --git a/csrc/apis/c/text_recognizer.h b/csrc/apis/c/text_recognizer.h index d5bbd5e1ca..e257da5833 100644 --- a/csrc/apis/c/text_recognizer.h +++ b/csrc/apis/c/text_recognizer.h @@ -11,6 +11,10 @@ #include "common.h" #include "text_detector.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct mm_text_recognize_t { char* text; float* score; @@ -27,8 +31,8 @@ typedef struct mm_text_recognize_t { * by \ref mmdeploy_text_recognizer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* device_name, - int device_id, mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* device_name, + int device_id, mm_handle_t* handle); /** * @brief Create a text recognizer instance @@ -39,9 +43,9 @@ MM_SDK_API int mmdeploy_text_recognizer_create(mm_model_t model, const char* dev * by \ref mmdeploy_text_recognizer_destroy * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, - const char* device_name, int device_id, - mm_handle_t* handle); +MMDEPLOY_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, + const char* device_name, int device_id, + mm_handle_t* handle); /** * @brief Apply text recognizer to a batch of text images @@ -53,8 +57,8 @@ MM_SDK_API int mmdeploy_text_recognizer_create_by_path(const char* model_path, * by \ref mmdeploy_text_recognizer_release_result * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t* images, int count, - mm_text_recognize_t** results); +MMDEPLOY_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t* images, + int count, mm_text_recognize_t** results); /** * @brief Apply text recognizer to a batch of images supplied with text bboxes @@ -68,23 +72,28 @@ MM_SDK_API int mmdeploy_text_recognizer_apply(mm_handle_t handle, const mm_mat_t * bboxes, must be release by \ref mmdeploy_text_recognizer_release_result * @return status code of the operation */ -MM_SDK_API int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t* images, - int image_count, const mm_text_detect_t* bboxes, - const int* bbox_count, - mm_text_recognize_t** results); +MMDEPLOY_API int mmdeploy_text_recognizer_apply_bbox(mm_handle_t handle, const mm_mat_t* images, + int image_count, + const mm_text_detect_t* bboxes, + const int* bbox_count, + mm_text_recognize_t** results); /** @brief Release result buffer returned by \ref mmdeploy_text_recognizer_apply or \ref * mmdeploy_text_recognizer_apply_bbox * @param[in] results result buffer by text recognizer * @param[in] count length of \p result */ -MM_SDK_API void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int count); +MMDEPLOY_API void mmdeploy_text_recognizer_release_result(mm_text_recognize_t* results, int count); /** * @brief destroy text recognizer * @param[in] handle handle of text recognizer created by \ref * mmdeploy_text_recognizer_create_by_path or \ref mmdeploy_text_recognizer_create */ -MM_SDK_API void mmdeploy_text_recognizer_destroy(mm_handle_t handle); +MMDEPLOY_API void mmdeploy_text_recognizer_destroy(mm_handle_t handle); + +#ifdef __cplusplus +} +#endif #endif // MMDEPLOY_SRC_APIS_C_TEXT_RECOGNIZER_H_ diff --git a/csrc/apis/python/CMakeLists.txt b/csrc/apis/python/CMakeLists.txt index 
1d98d84d4b..0730268f07 100644 --- a/csrc/apis/python/CMakeLists.txt +++ b/csrc/apis/python/CMakeLists.txt @@ -23,11 +23,10 @@ mmdeploy_python_add_module(restorer) pybind11_add_module(${PROJECT_NAME} ${MMDEPLOY_PYTHON_SRCS}) -target_link_libraries(${PROJECT_NAME} PRIVATE - ${MMDEPLOY_LIBS} - -Wl,--whole-archive ${MMDEPLOY_STATIC_MODULES} -Wl,--no-whole-archive - -Wl,--no-as-needed ${MMDEPLOY_DYNAMIC_MODULES} -Wl,--as-need) +mmdeploy_load_static(${PROJECT_NAME} MMDeployStaticModules) +mmdeploy_load_dynamic(${PROJECT_NAME} MMDeployDynamicModules) +target_link_libraries(${PROJECT_NAME} PRIVATE MMDeployLibs) target_include_directories(${PROJECT_NAME} PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/../..) + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR}/../..) diff --git a/csrc/archive/CMakeLists.txt b/csrc/archive/CMakeLists.txt index 2b34ab1bb8..2645974152 100644 --- a/csrc/archive/CMakeLists.txt +++ b/csrc/archive/CMakeLists.txt @@ -6,7 +6,6 @@ add_library(${PROJECT_NAME} INTERFACE) target_link_libraries(${PROJECT_NAME} INTERFACE mmdeploy::core) add_library(mmdeploy::archive ALIAS mmdeploy_archive) -export_target(${PROJECT_NAME}) install(DIRECTORY ${CMAKE_SOURCE_DIR}/src/archive DESTINATION include/cpp FILES_MATCHING PATTERN "*.h") diff --git a/csrc/archive/json_archive.h b/csrc/archive/json_archive.h index 31666d89be..6f137b9a7c 100644 --- a/csrc/archive/json_archive.h +++ b/csrc/archive/json_archive.h @@ -133,7 +133,7 @@ inline Value json_to_value(const nlohmann::json& json) { return value; } default: - ERROR("unsupported json type: {}", json.type_name()); + MMDEPLOY_ERROR("unsupported json type: {}", json.type_name()); return {}; } } diff --git a/csrc/archive/value_archive.h b/csrc/archive/value_archive.h index 8500cbc424..f4115b3cb0 100644 --- a/csrc/archive/value_archive.h +++ b/csrc/archive/value_archive.h @@ -114,6 +114,8 @@ inline T from_value(const Value& value) { return x; } +namespace detail { + inline void load(ValueInputArchive& archive, Value& v) { archive.native(v); } template , Value>::value, bool> = true> @@ -121,6 +123,8 @@ inline void save(ValueOutputArchive& archive, T&& v) { archive.native(std::forward(v)); } +} // namespace detail + } // namespace mmdeploy #endif // MMDEPLOY_SRC_ARCHIVE_VALUE_ARCHIVE_H_ diff --git a/csrc/backend_ops/CMakeLists.txt b/csrc/backend_ops/CMakeLists.txt index 91117d2dbf..a9eac86ae4 100644 --- a/csrc/backend_ops/CMakeLists.txt +++ b/csrc/backend_ops/CMakeLists.txt @@ -1,26 +1,28 @@ -set(CMAKE_CXX_STANDARD 14) +if (NOT MSVC) + set(CMAKE_CXX_STANDARD 14) +endif () set(CMAKE_CXX_FLAGS_RELEASE "-O3") # build ONNXRUNTIME ops if ("ort" IN_LIST MMDEPLOY_TARGET_BACKENDS) - if (NOT DEFINED ONNXRUNTIME_DIR) - set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR}) - endif () - if (NOT ONNXRUNTIME_DIR) - message(FATAL_ERROR " ONNXRUNTIME_DIR is not found.") - else () - message(STATUS "Build ONNXRUNTIME custom ops.") - add_subdirectory(onnxruntime) - endif () + if (NOT DEFINED ONNXRUNTIME_DIR) + set(ONNXRUNTIME_DIR $ENV{ONNXRUNTIME_DIR}) + endif () + if (NOT ONNXRUNTIME_DIR) + message(FATAL_ERROR " ONNXRUNTIME_DIR is not found.") + else () + message(STATUS "Build ONNXRUNTIME custom ops.") + add_subdirectory(onnxruntime) + endif () endif () # build TensorRT ops if ("trt" IN_LIST MMDEPLOY_TARGET_BACKENDS) - if (NOT DEFINED TENSORRT_DIR) - set(TENSORRT_DIR $ENV{TENSORRT_DIR}) - endif () - message(STATUS "Build TensorRT custom ops.") - add_subdirectory(tensorrt) + if (NOT DEFINED TENSORRT_DIR) + set(TENSORRT_DIR $ENV{TENSORRT_DIR}) + endif () 
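
The `value_archive.h` hunk tucks the `load`/`save` overloads for `Value` into a `detail` namespace; plausibly this keeps them out of unqualified and argument-dependent lookup so that generic archive code only finds them through explicit qualification. A toy illustration of that lookup discipline (all names hypothetical):

```cpp
#include <iostream>

namespace lib {
struct Archive {};

namespace detail {
// Reachable only via qualified lookup; unrelated unqualified calls to
// save(...) elsewhere will not accidentally pick this overload up.
inline void save(Archive&, int v) { std::cout << "saving " << v << "\n"; }
}  // namespace detail

template <typename T>
void serialize(Archive& a, const T& v) {
  detail::save(a, v);  // explicit qualification instead of ADL
}
}  // namespace lib

int main() {
  lib::Archive a;
  lib::serialize(a, 42);
  return 0;
}
```
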
+ message(STATUS "Build TensorRT custom ops.") + add_subdirectory(tensorrt) endif () # build NCNN ops diff --git a/csrc/backend_ops/ncnn/CMakeLists.txt b/csrc/backend_ops/ncnn/CMakeLists.txt index 6345448e6a..9580d3b96c 100755 --- a/csrc/backend_ops/ncnn/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/CMakeLists.txt @@ -4,21 +4,20 @@ cmake_minimum_required(VERSION 3.14) # ncnn find_package(ncnn) -if(ncnn_FOUND) - message(STATUS "ncnn library found!") -else() - message(FATAL_ERROR "Could not locate ncnn") -endif() +if (ncnn_FOUND) + message(STATUS "ncnn library found!") +else () + message(FATAL_ERROR "Could not locate ncnn") +endif () -set_targets(mmdeploy_ncnn_ops NCNN_OPS_OBJ NCNN_OPS_STATIC NCNN_OPS_SHARED) -if(NOT ANDROID AND NOT IOS) - add_subdirectory(ops) - add_subdirectory(onnx2ncnn) - add_subdirectory(pyncnn_ext) -else() - # In case of embedded platform, like android, or ios, we only build custom ncnn - # ops, and leave the executable converter(onnx2ncnn, pyncnn_ext) built under - # the host platforms - add_subdirectory(ops) -endif() +if (NOT ANDROID AND NOT IOS) + add_subdirectory(ops) + add_subdirectory(onnx2ncnn) + add_subdirectory(pyncnn_ext) +else () + # In case of embedded platform, like android, or ios, we only build custom ncnn + # ops, and leave the executable converter(onnx2ncnn, pyncnn_ext) built under + # the host platforms + add_subdirectory(ops) +endif () diff --git a/csrc/backend_ops/ncnn/ops/CMakeLists.txt b/csrc/backend_ops/ncnn/ops/CMakeLists.txt index aa89729843..4a4334518c 100755 --- a/csrc/backend_ops/ncnn/ops/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/ops/CMakeLists.txt @@ -2,18 +2,24 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_ncnn_ops) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) # add plugin source file(GLOB_RECURSE NCNN_OPS_SRCS *.cpp) -build_object_target(${NCNN_OPS_OBJ} "${NCNN_OPS_SRCS}") -target_link_libraries(${NCNN_OPS_OBJ} ncnn) +add_library(${PROJECT_NAME}_obj OBJECT "${NCNN_OPS_SRCS}") +set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) +target_include_directories(${PROJECT_NAME}_obj PUBLIC + $) +set(_COMMON_INCLUDE_DIRS + $ + $) +target_include_directories(${PROJECT_NAME}_obj + PUBLIC ${_COMMON_INCLUDE_DIRS}) +mmdeploy_export(${PROJECT_NAME}_obj) -build_shared_target(${NCNN_OPS_SHARED} ${NCNN_OPS_OBJ} "PRIVATE") -install_targets(${NCNN_OPS_SHARED}) +mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "") +target_link_libraries(${PROJECT_NAME} PRIVATE ${PROJECT_NAME}_obj) +target_include_directories(${PROJECT_NAME} + PUBLIC ${_COMMON_INCLUDE_DIRS}) -if (MMDEPLOY_BUILD_SDK) - ## Build static library. 
SDK's uses it to build `ncnn_net` module - build_static_target(${NCNN_OPS_STATIC} ${NCNN_OPS_OBJ} "PRIVATE") - add_library(mmdeploy::ncnn_ops::static ALIAS ${NCNN_OPS_STATIC}) -endif () +add_library(mmdeploy::ncnn_ops ALIAS ${PROJECT_NAME}) diff --git a/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h b/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h index 2fb07d8b0f..b265f6d4af 100755 --- a/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h +++ b/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h @@ -5,13 +5,14 @@ #include #include +#include "core/macro.h" #include "net.h" extern "C" { -std::map& get_mmdeploy_layer_creator(); -std::map& get_mmdeploy_layer_destroyer(); +MMDEPLOY_API std::map& get_mmdeploy_layer_creator(); +MMDEPLOY_API std::map& get_mmdeploy_layer_destroyer(); -int register_mmdeploy_custom_layers(ncnn::Net& net); +MMDEPLOY_API int register_mmdeploy_custom_layers(ncnn::Net& net); } #endif diff --git a/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt b/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt index f0d4148963..b60c91006d 100755 --- a/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt @@ -6,9 +6,10 @@ project(ncnn_ext) if (NOT TARGET pybind11) add_subdirectory(${CMAKE_SOURCE_DIR}/third_party/pybind11 pybind11) endif () -include_directories(${pybind11_INCLUDE_DIR} ${PYTHON_INCLUDE_DIRS}) + pybind11_add_module(ncnn_ext ncnn_ext.cpp) -target_link_libraries(ncnn_ext PUBLIC ncnn ${NCNN_OPS_SHARED}) + +target_link_libraries(ncnn_ext PUBLIC mmdeploy_ncnn_ops ncnn) set_target_properties( ncnn_ext PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn) diff --git a/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp b/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp index e9ff04eb41..ac158b9edb 100755 --- a/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp +++ b/csrc/backend_ops/ncnn/pyncnn_ext/ncnn_ext.cpp @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. 
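
`ncnn_ops_register.h` now exports its hooks with `MMDEPLOY_API`, so client code linking the shared ops library can register MMDeploy's custom layers before loading a network. A usage sketch; the param/model paths are placeholders:

```cpp
#include "ncnn_ops_register.h"
#include "net.h"

int load_net(ncnn::Net& net) {
  // Register MMDeploy's custom layers so load_param can resolve them.
  register_mmdeploy_custom_layers(net);
  if (net.load_param("model.param") != 0) return -1;  // placeholder path
  if (net.load_model("model.bin") != 0) return -1;    // placeholder path
  return 0;
}
```
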
#include -#include "../ops/ncnn_ops_register.h" +#include "ncnn_ops_register.h" #include "net.h" PYBIND11_MODULE(ncnn_ext, m) { diff --git a/csrc/backend_ops/onnxruntime/CMakeLists.txt b/csrc/backend_ops/onnxruntime/CMakeLists.txt index f646bbc98a..613a60881e 100644 --- a/csrc/backend_ops/onnxruntime/CMakeLists.txt +++ b/csrc/backend_ops/onnxruntime/CMakeLists.txt @@ -2,26 +2,23 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_onnxruntime_ops) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -set_targets(${PROJECT_NAME} ORT_OPS_OBJ ORT_OPS_STATIC ORT_OPS_MODULE) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) # add plugin source file(GLOB_RECURSE ORT_OPS_SRCS *.cpp) -build_object_target(${ORT_OPS_OBJ} "${ORT_OPS_SRCS}") -target_include_directories(${ORT_OPS_OBJ} PUBLIC +add_library(${PROJECT_NAME}_obj OBJECT "${ORT_OPS_SRCS}") +target_compile_definitions(${PROJECT_NAME}_obj PRIVATE -DMMDEPLOY_API_EXPORTS=1) +set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) +mmdeploy_export(${PROJECT_NAME}_obj) + +target_include_directories(${PROJECT_NAME}_obj PUBLIC $ - $) -target_link_directories(${ORT_OPS_OBJ} PUBLIC + $ + $) +target_link_directories(${PROJECT_NAME}_obj PUBLIC ${ONNXRUNTIME_DIR}/lib) -target_link_libraries(${ORT_OPS_OBJ} PUBLIC onnxruntime) - -add_library(${ORT_OPS_MODULE} MODULE $) -target_link_libraries(${ORT_OPS_MODULE} PRIVATE ${ORT_OPS_OBJ}) -add_library(mmdeploy::onnxruntime::ops ALIAS ${ORT_OPS_MODULE}) -install_targets(${ORT_OPS_MODULE}) +target_link_libraries(${PROJECT_NAME}_obj PUBLIC onnxruntime) -if (MMDEPLOY_BUILD_SDK) - ## Build static library. SDK's uses it to build `ort_net` module - build_static_target(${ORT_OPS_STATIC} ${ORT_OPS_OBJ} "PRIVATE") - add_library(mmdeploy::onnxruntime::ops::static ALIAS ${ORT_OPS_STATIC}) -endif () +mmdeploy_add_library(${PROJECT_NAME} SHARED EXCLUDE "") +target_link_libraries(${PROJECT_NAME} PUBLIC ${PROJECT_NAME}_obj) +add_library(mmdeploy::onnxruntime::ops ALIAS ${PROJECT_NAME}) diff --git a/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h b/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h index 84318bc818..344031e791 100644 --- a/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h +++ b/csrc/backend_ops/onnxruntime/common/onnxruntime_register.h @@ -3,11 +3,14 @@ #define ONNXRUNTIME_REGISTER_H #include +#include "core/macro.h" + #ifdef __cplusplus extern "C" { #endif -OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, const OrtApiBase *api); +MMDEPLOY_API OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, + const OrtApiBase *api); #ifdef __cplusplus } diff --git a/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp b/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp index 9f2ce2cc0f..f7b9cedff8 100644 --- a/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp +++ b/csrc/backend_ops/onnxruntime/onnxruntime_register.cpp @@ -7,7 +7,6 @@ const char *c_MMDeployOpDomain = "mmdeploy"; OrtStatus *ORT_API_CALL RegisterCustomOps(OrtSessionOptions *options, const OrtApiBase *api) { const OrtApi *kOrtApi = api->GetApi(ORT_API_VERSION); - OrtStatus *status = nullptr; for (auto &_op_list_pair : mmdeploy::get_mmdeploy_custom_ops()) { OrtCustomOpDomain *domain = nullptr; diff --git a/csrc/backend_ops/tensorrt/CMakeLists.txt b/csrc/backend_ops/tensorrt/CMakeLists.txt index 88a0176df6..796a600eb7 100644 --- a/csrc/backend_ops/tensorrt/CMakeLists.txt +++ b/csrc/backend_ops/tensorrt/CMakeLists.txt @@ -3,41 +3,35 @@ 
cmake_minimum_required(VERSION 3.14) include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake NO_POLICY_SCOPE) project(mmdeploy_tensorrt_ops CUDA CXX) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) include(${CMAKE_SOURCE_DIR}/cmake/cuda.cmake NO_POLICY_SCOPE) include(${CMAKE_SOURCE_DIR}/cmake/tensorrt.cmake NO_POLICY_SCOPE) -set_targets(${PROJECT_NAME} BACKEND_OPS_OBJ BACKEND_OPS_STATIC BACKEND_OPS_MODULE) - # cub if (NOT DEFINED CUB_ROOT_DIR) if (CUDA_VERSION VERSION_LESS 11.0) set(CUB_ROOT_DIR "${CMAKE_SOURCE_DIR}/third_party/cub") - endif() + endif () endif () file(GLOB_RECURSE BACKEND_OPS_SRCS *.cpp *.cu) -build_object_target(${BACKEND_OPS_OBJ} "${BACKEND_OPS_SRCS}") -target_compile_definitions(${BACKEND_OPS_OBJ} +add_library(${PROJECT_NAME}_obj OBJECT "${BACKEND_OPS_SRCS}") +set_target_properties(${PROJECT_NAME}_obj PROPERTIES POSITION_INDEPENDENT_CODE 1) +target_compile_definitions(${PROJECT_NAME}_obj PRIVATE -DTHRUST_IGNORE_DEPRECATED_CPP_DIALECT=1) -target_include_directories(${BACKEND_OPS_OBJ} +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/common) -target_include_directories(${BACKEND_OPS_OBJ} +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include) -target_include_directories(${BACKEND_OPS_OBJ} PRIVATE ${TENSORRT_INCLUDE_DIR}) -target_include_directories(${BACKEND_OPS_OBJ} PRIVATE ${CUDNN_DIR}/include) -target_include_directories(${BACKEND_OPS_OBJ} PRIVATE ${CUB_ROOT_DIR}) -target_link_directories(${BACKEND_OPS_OBJ} PUBLIC ${CUDNN_DIR}/lib64) -target_link_libraries(${BACKEND_OPS_OBJ} - PRIVATE ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn) +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${TENSORRT_INCLUDE_DIR}) +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUDNN_DIR}/include) +target_include_directories(${PROJECT_NAME}_obj PRIVATE ${CUB_ROOT_DIR}) +target_link_directories(${PROJECT_NAME}_obj PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64) +target_link_libraries(${PROJECT_NAME}_obj + PUBLIC ${TENSORRT_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} cudnn) +mmdeploy_export(${PROJECT_NAME}_obj) # Build module library. It is used to convert onnx model to tensorrt engine -build_module_target(${BACKEND_OPS_MODULE} ${BACKEND_OPS_OBJ} "PRIVATE") -add_library(mmdeploy::tensorrt_ops ALIAS ${BACKEND_OPS_MODULE}) -install_targets(${BACKEND_OPS_MODULE}) - -if (MMDEPLOY_BUILD_SDK) - ## Build static library. 
SDK's uses it to build `trt_net` module - build_static_target(${BACKEND_OPS_STATIC} ${BACKEND_OPS_OBJ} "PRIVATE") - add_library(mmdeploy::tensorrt_ops::static ALIAS ${BACKEND_OPS_STATIC}) -endif () +mmdeploy_add_module(${PROJECT_NAME} MODULE EXCLUDE "") +target_link_libraries(${PROJECT_NAME} PRIVATE ${PROJECT_NAME}_obj) +add_library(mmdeploy::tensorrt_ops ALIAS ${PROJECT_NAME}) diff --git a/csrc/codebase/CMakeLists.txt b/csrc/codebase/CMakeLists.txt index 023be7668d..9ef6490a8c 100644 --- a/csrc/codebase/CMakeLists.txt +++ b/csrc/codebase/CMakeLists.txt @@ -9,8 +9,8 @@ if ("all" IN_LIST MMDEPLOY_CODEBASES) list(APPEND CODEBASES "mmseg") list(APPEND CODEBASES "mmocr") list(APPEND CODEBASES "mmedit") -else() - set (CODEBASES ${MMDEPLOY_CODEBASES}) +else () + set(CODEBASES ${MMDEPLOY_CODEBASES}) endif () foreach (codebase IN LISTS CODEBASES) diff --git a/csrc/codebase/common.h b/csrc/codebase/common.h index b0b164ddb7..c815aa11f7 100644 --- a/csrc/codebase/common.h +++ b/csrc/codebase/common.h @@ -14,7 +14,7 @@ namespace mmdeploy { class Context { public: explicit Context(const Value& config) { - DEBUG("config: {}", config); + MMDEPLOY_DEBUG("config: {}", config); device_ = config["context"]["device"].get(); stream_ = config["context"]["stream"].get(); } @@ -35,17 +35,17 @@ class CodebaseCreator : public Creator { std::unique_ptr Create(const Value& cfg) override { constexpr auto key{"component"}; if (!cfg.contains(key)) { - ERROR("no key '{}' in config {}", key, cfg); + MMDEPLOY_ERROR("no key '{}' in config {}", key, cfg); throw_exception(eInvalidArgument); } if (!cfg[key].is_string()) { - ERROR("key '{}' is not a string", key); + MMDEPLOY_ERROR("key '{}' is not a string", key); throw_exception(eInvalidArgument); } auto postprocess_type = cfg[key].get(); auto creator = Registry::Get().GetCreator(postprocess_type); if (creator == nullptr) { - ERROR("could not found entry '{}' in {}", postprocess_type, Tag::name); + MMDEPLOY_ERROR("could not found entry '{}' in {}", postprocess_type, Tag::name); throw_exception(eEntryNotFound); } return creator->Create(cfg); diff --git a/csrc/codebase/mmcls/CMakeLists.txt b/csrc/codebase/mmcls/CMakeLists.txt index c2b254149c..259b653cd1 100644 --- a/csrc/codebase/mmcls/CMakeLists.txt +++ b/csrc/codebase/mmcls/CMakeLists.txt @@ -2,10 +2,8 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmcls) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") add_library(mmdeploy::mmcls ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmcls/linear_cls.cpp b/csrc/codebase/mmcls/linear_cls.cpp index 07704cd088..8b14f4e926 100644 --- a/csrc/codebase/mmcls/linear_cls.cpp +++ b/csrc/codebase/mmcls/linear_cls.cpp @@ -1,5 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
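
`CodebaseCreator::Create` in `codebase/common.h` is a thin dispatcher: it validates the `component` key, looks the name up in a registry keyed by the codebase tag, and throws `eEntryNotFound` on a miss. The lookup idiom reduced to a standalone sketch (the real code goes through `Registry::Get().GetCreator(...)`):

```cpp
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct Module {
  virtual ~Module() = default;
};

// name -> factory map playing the role of the creator registry.
std::map<std::string, std::function<std::unique_ptr<Module>()>>& registry() {
  static std::map<std::string, std::function<std::unique_ptr<Module>()>> r;
  return r;
}

std::unique_ptr<Module> create(const std::string& component) {
  auto it = registry().find(component);
  if (it == registry().end()) {
    throw std::runtime_error("could not find entry '" + component + "'");
  }
  return it->second();
}
```
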
+#include #include #include "codebase/mmcls/mmcls.h" @@ -18,19 +19,19 @@ class LinearClsHead : public MMClassification { if (cfg.contains("params")) { topk_ = cfg["params"].value("topk", 1); if (topk_ <= 0) { - ERROR("'topk' should be greater than 0, but got '{}'", topk_); + MMDEPLOY_ERROR("'topk' should be greater than 0, but got '{}'", topk_); throw_exception(eInvalidArgument); } } } Result operator()(const Value& infer_res) { - DEBUG("infer_res: {}", infer_res); + MMDEPLOY_DEBUG("infer_res: {}", infer_res); auto output = infer_res["output"].get(); if (!(output.shape().size() >= 2 && output.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", output.shape(), - (int)output.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", output.shape(), + (int)output.data_type()); return Status(eNotSupported); } @@ -53,7 +54,7 @@ class LinearClsHead : public MMClassification { [&](int i, int j) { return scores_data[i] > scores_data[j]; }); for (int i = 0; i < topk_; ++i) { auto label = ClassifyOutput::Label{idx[i], scores_data[idx[i]]}; - DEBUG("label_id: {}, score: {}", label.label_id, label.score); + MMDEPLOY_DEBUG("label_id: {}, score: {}", label.label_id, label.score); output.labels.push_back(label); } return to_value(std::move(output)); diff --git a/csrc/codebase/mmcls/mmcls.cpp b/csrc/codebase/mmcls/mmcls.cpp index cd59907244..973a4c6d80 100644 --- a/csrc/codebase/mmcls/mmcls.cpp +++ b/csrc/codebase/mmcls/mmcls.cpp @@ -2,8 +2,12 @@ #include "codebase/mmcls/mmcls.h" -namespace mmdeploy::mmcls { +namespace mmdeploy { +namespace mmcls { REGISTER_CODEBASE(MMClassification); -} // namespace mmdeploy::mmcls +} + +MMDEPLOY_DEFINE_REGISTRY(mmcls::MMClassification); +} // namespace mmdeploy diff --git a/csrc/codebase/mmcls/mmcls.h b/csrc/codebase/mmcls/mmcls.h index 5cddfd197d..2b87b2d538 100644 --- a/csrc/codebase/mmcls/mmcls.h +++ b/csrc/codebase/mmcls/mmcls.h @@ -8,7 +8,8 @@ #include "core/module.h" #include "core/serialization.h" -namespace mmdeploy::mmcls { +namespace mmdeploy { +namespace mmcls { struct ClassifyOutput { struct Label { @@ -21,7 +22,9 @@ struct ClassifyOutput { }; DECLARE_CODEBASE(MMClassification, mmcls); +} // namespace mmcls -} // namespace mmdeploy::mmcls +MMDEPLOY_DECLARE_REGISTRY(mmcls::MMClassification); +} // namespace mmdeploy #endif // MMDEPLOY_SRC_CODEBASE_MMCLS_MMCLS_H_ diff --git a/csrc/codebase/mmdet/CMakeLists.txt b/csrc/codebase/mmdet/CMakeLists.txt index 31a00813bb..75ae6c4d4b 100644 --- a/csrc/codebase/mmdet/CMakeLists.txt +++ b/csrc/codebase/mmdet/CMakeLists.txt @@ -3,10 +3,11 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmdet) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} + PRIVATE mmdeploy_opencv_utils) + add_library(mmdeploy::mmdet ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmdet/instance_segmentation.cpp b/csrc/codebase/mmdet/instance_segmentation.cpp index 481e0b1e23..638ce0b80a 100644 --- a/csrc/codebase/mmdet/instance_segmentation.cpp +++ b/csrc/codebase/mmdet/instance_segmentation.cpp @@ -5,7 +5,7 @@ #include "experimental/module_adapter.h" #include "object_detection.h" 
#include "opencv2/imgproc/imgproc.hpp" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" namespace mmdeploy::mmdet { @@ -19,35 +19,35 @@ class ResizeInstanceMask : public ResizeBBox { // TODO: remove duplication Result operator()(const Value& prep_res, const Value& infer_res) { - DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); + MMDEPLOY_DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); try { auto dets = infer_res["dets"].get(); auto labels = infer_res["labels"].get(); auto masks = infer_res["masks"].get(); - DEBUG("dets.shape: {}", dets.shape()); - DEBUG("labels.shape: {}", labels.shape()); - DEBUG("masks.shape: {}", masks.shape()); + MMDEPLOY_DEBUG("dets.shape: {}", dets.shape()); + MMDEPLOY_DEBUG("labels.shape: {}", labels.shape()); + MMDEPLOY_DEBUG("masks.shape: {}", masks.shape()); // `dets` is supposed to have 3 dims. They are 'batch', 'bboxes_number' // and 'channels' respectively if (!(dets.shape().size() == 3 && dets.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), - (int)dets.data_type()); + MMDEPLOY_ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), + (int)dets.data_type()); return Status(eNotSupported); } // `labels` is supposed to have 2 dims, which are 'batch' and // 'bboxes_number' if (labels.shape().size() != 2) { - ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), - (int)labels.data_type()); + MMDEPLOY_ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), + (int)labels.data_type()); return Status(eNotSupported); } if (!(masks.shape().size() == 4 && masks.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `mask` tensor, shape: {}, dtype: {}", masks.shape(), - (int)masks.data_type()); + MMDEPLOY_ERROR("unsupported `mask` tensor, shape: {}, dtype: {}", masks.shape(), + (int)masks.data_type()); return Status(eNotSupported); } @@ -65,7 +65,7 @@ class ResizeInstanceMask : public ResizeBBox { return to_value(result); } catch (const std::exception& e) { - ERROR("{}", e.what()); + MMDEPLOY_ERROR("{}", e.what()); return Status(eFail); } } @@ -74,10 +74,10 @@ class ResizeInstanceMask : public ResizeBBox { void ProcessMasks(DetectorOutput& result, Tensor cpu_masks, int img_w, int img_h) const { auto shape = TensorShape{cpu_masks.shape(1), cpu_masks.shape(2), cpu_masks.shape(3)}; cpu_masks.Reshape(shape); - + MMDEPLOY_DEBUG("{}, {}", cpu_masks.shape(), cpu_masks.data_type()); for (auto& det : result.detections) { auto mask = cpu_masks.Slice(det.index); - cv::Mat mask_mat(mask.shape(1), mask.shape(2), CV_32F, mask.data()); + cv::Mat mask_mat((int)mask.shape(1), (int)mask.shape(2), CV_32F, mask.data()); cv::Mat warped_mask; auto& bbox = det.bbox; // same as mmdet with skip_empty = True @@ -97,7 +97,9 @@ class ResizeInstanceMask : public ResizeBBox { cv::warpAffine(mask_mat, warped_mask, m, cv::Size{width, height}, cv::INTER_LINEAR | cv::WARP_INVERSE_MAP); warped_mask = warped_mask > mask_thr_binary_; - det.mask = cpu::CVMat2Mat(warped_mask, PixelFormat::kGRAYSCALE); + + det.mask = Mat(height, width, PixelFormat::kGRAYSCALE, DataType::kINT8, + std::shared_ptr(warped_mask.data, [mat = warped_mask](void*) {})); } } diff --git a/csrc/codebase/mmdet/mmdet.cpp b/csrc/codebase/mmdet/mmdet.cpp index 218b73c502..45fe21f9b0 100644 --- a/csrc/codebase/mmdet/mmdet.cpp +++ b/csrc/codebase/mmdet/mmdet.cpp @@ -2,8 +2,12 @@ #include "codebase/mmdet/mmdet.h" -namespace mmdeploy::mmdet { +namespace mmdeploy { +namespace mmdet { 
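
The rewritten mask hand-off in `instance_segmentation.cpp` is worth a note: the `Mat` is built over `warped_mask.data` with a `shared_ptr` whose deleter captures the `cv::Mat` by value, so the captured copy holds a reference count on the pixel buffer and the no-op deleter body releases it only when the `shared_ptr` itself dies. The keep-alive idiom on its own:

```cpp
#include <memory>

#include <opencv2/core.hpp>

// Returns a pointer that shares ownership of the Mat's pixel buffer: the
// captured copy of `m` keeps OpenCV's refcount alive, and destroying the
// shared_ptr drops it again.
std::shared_ptr<void> share_pixels(const cv::Mat& m) {
  return std::shared_ptr<void>(m.data, [keep_alive = m](void*) {});
}
```
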
REGISTER_CODEBASE(MMDetection); -} // namespace mmdeploy::mmdet +} + +MMDEPLOY_DEFINE_REGISTRY(mmdet::MMDetection); +} // namespace mmdeploy diff --git a/csrc/codebase/mmdet/mmdet.h b/csrc/codebase/mmdet/mmdet.h index a663004038..dcb1e18194 100644 --- a/csrc/codebase/mmdet/mmdet.h +++ b/csrc/codebase/mmdet/mmdet.h @@ -3,13 +3,17 @@ #ifndef MMDEPLOY_SRC_CODEBASE_MMDET_MMDET_H_ #define MMDEPLOY_SRC_CODEBASE_MMDET_MMDET_H_ +#include + #include "codebase/common.h" #include "core/device.h" #include "core/mat.h" #include "core/module.h" +#include "core/registry.h" #include "core/serialization.h" -namespace mmdeploy::mmdet { +namespace mmdeploy { +namespace mmdet { struct DetectorOutput { struct Detection { @@ -25,7 +29,9 @@ struct DetectorOutput { }; DECLARE_CODEBASE(MMDetection, mmdet); +} // namespace mmdet -} // namespace mmdeploy::mmdet +MMDEPLOY_DECLARE_REGISTRY(mmdet::MMDetection); +} // namespace mmdeploy #endif // MMDEPLOY_SRC_CODEBASE_MMDET_MMDET_H_ diff --git a/csrc/codebase/mmdet/object_detection.cpp b/csrc/codebase/mmdet/object_detection.cpp index 6a7c6d6a37..62a5c21017 100644 --- a/csrc/codebase/mmdet/object_detection.cpp +++ b/csrc/codebase/mmdet/object_detection.cpp @@ -17,26 +17,27 @@ ResizeBBox::ResizeBBox(const Value& cfg) : MMDetection(cfg) { } } Result ResizeBBox::operator()(const Value& prep_res, const Value& infer_res) { - DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); + MMDEPLOY_DEBUG("prep_res: {}\ninfer_res: {}", prep_res, infer_res); try { auto dets = infer_res["dets"].get(); auto labels = infer_res["labels"].get(); - DEBUG("dets.shape: {}", dets.shape()); - DEBUG("labels.shape: {}", labels.shape()); + MMDEPLOY_DEBUG("dets.shape: {}", dets.shape()); + MMDEPLOY_DEBUG("labels.shape: {}", labels.shape()); // `dets` is supposed to have 3 dims. 
They are 'batch', 'bboxes_number' // and 'channels' respectively if (!(dets.shape().size() == 3 && dets.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), (int)dets.data_type()); + MMDEPLOY_ERROR("unsupported `dets` tensor, shape: {}, dtype: {}", dets.shape(), + (int)dets.data_type()); return Status(eNotSupported); } // `labels` is supposed to have 2 dims, which are 'batch' and // 'bboxes_number' if (labels.shape().size() != 2) { - ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), - (int)labels.data_type()); + MMDEPLOY_ERROR("unsupported `labels`, tensor, shape: {}, dtype: {}", labels.shape(), + (int)labels.data_type()); return Status(eNotSupported); } @@ -98,16 +99,17 @@ Result ResizeBBox::GetBBoxes(const Value& prep_res, const Tensor auto right = dets_ptr[2]; auto bottom = dets_ptr[3]; - DEBUG("ori left {}, top {}, right {}, bottom {}, label {}", left, top, right, bottom, - *labels_ptr); + MMDEPLOY_DEBUG("ori left {}, top {}, right {}, bottom {}, label {}", left, top, right, bottom, + *labels_ptr); auto rect = MapToOriginImage(left, top, right, bottom, scale_factor.data(), w_offset, h_offset, ori_width, ori_height); if (rect[2] - rect[0] < min_bbox_size_ || rect[3] - rect[1] < min_bbox_size_) { - DEBUG("ignore small bbox with width '{}' and height '{}", rect[2] - rect[0], - rect[3] - rect[1]); + MMDEPLOY_DEBUG("ignore small bbox with width '{}' and height '{}", rect[2] - rect[0], + rect[3] - rect[1]); continue; } - DEBUG("remap left {}, top {}, right {}, bottom {}", rect[0], rect[1], rect[2], rect[3]); + MMDEPLOY_DEBUG("remap left {}, top {}, right {}, bottom {}", rect[0], rect[1], rect[2], + rect[3]); DetectorOutput::Detection det{}; det.index = i; det.label_id = static_cast(*labels_ptr); diff --git a/csrc/codebase/mmedit/CMakeLists.txt b/csrc/codebase/mmedit/CMakeLists.txt index a546642551..59646d0f6a 100644 --- a/csrc/codebase/mmedit/CMakeLists.txt +++ b/csrc/codebase/mmedit/CMakeLists.txt @@ -3,10 +3,9 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmedit) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} PRIVATE opencv_core) add_library(mmdeploy::mmedit ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmedit/mmedit.cpp b/csrc/codebase/mmedit/mmedit.cpp index 28a8dfa1d2..b10c084b24 100644 --- a/csrc/codebase/mmedit/mmedit.cpp +++ b/csrc/codebase/mmedit/mmedit.cpp @@ -4,8 +4,12 @@ #include "core/registry.h" -namespace mmdeploy::mmedit { +namespace mmdeploy { +namespace mmedit { REGISTER_CODEBASE(MMEdit); -} // namespace mmdeploy::mmedit +} // namespace mmedit + +MMDEPLOY_DEFINE_REGISTRY(mmedit::MMEdit); +} // namespace mmdeploy diff --git a/csrc/codebase/mmedit/mmedit.h b/csrc/codebase/mmedit/mmedit.h index ed9c3cdc88..e7c4285134 100644 --- a/csrc/codebase/mmedit/mmedit.h +++ b/csrc/codebase/mmedit/mmedit.h @@ -9,12 +9,16 @@ #include "core/module.h" #include "core/serialization.h" -namespace mmdeploy::mmedit { +namespace mmdeploy { +namespace mmedit { using RestorerOutput = Mat; DECLARE_CODEBASE(MMEdit, mmedit); -} // namespace mmdeploy::mmedit +} // namespace mmedit + 
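
`GetBBoxes` maps each box from the resized, padded network frame back to original-image coordinates before filtering by `min_bbox_size_`. A hedged sketch of that mapping; the real `MapToOriginImage` also clamps to the image bounds, and the four `scale_factor` entries are assumed to follow mmdet's `(w, h, w, h)` convention:

```cpp
#include <algorithm>
#include <array>

std::array<float, 4> map_to_origin(float left, float top, float right,
                                   float bottom, const float scale_factor[4],
                                   float w_offset, float h_offset,
                                   int ori_width, int ori_height) {
  auto clampf = [](float v, float lo, float hi) {
    return std::min(std::max(v, lo), hi);
  };
  // Undo the padding offset, then the resize scale, then clamp to the image.
  return {clampf((left - w_offset) / scale_factor[0], 0.f, ori_width - 1.f),
          clampf((top - h_offset) / scale_factor[1], 0.f, ori_height - 1.f),
          clampf((right - w_offset) / scale_factor[2], 0.f, ori_width - 1.f),
          clampf((bottom - h_offset) / scale_factor[3], 0.f, ori_height - 1.f)};
}
```
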
+MMDEPLOY_DECLARE_REGISTRY(mmedit::MMEdit); +} // namespace mmdeploy #endif // MMDEPLOY_SRC_CODEBASE_MMEDIT_MMEDIT_H_ diff --git a/csrc/codebase/mmedit/restorer.cpp b/csrc/codebase/mmedit/restorer.cpp index da06075a4d..84626d15be 100644 --- a/csrc/codebase/mmedit/restorer.cpp +++ b/csrc/codebase/mmedit/restorer.cpp @@ -32,8 +32,8 @@ class TensorToImg : public MMEdit { mat_hwc.convertTo(rescale_uint8, CV_8UC(channels), 255.f); return mat; } else { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", upscale.shape(), - (int)upscale.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", upscale.shape(), + (int)upscale.data_type()); return Status(eNotSupported); } } diff --git a/csrc/codebase/mmocr/CMakeLists.txt b/csrc/codebase/mmocr/CMakeLists.txt index 42e63a0dff..60ac5c6ff4 100644 --- a/csrc/codebase/mmocr/CMakeLists.txt +++ b/csrc/codebase/mmocr/CMakeLists.txt @@ -3,13 +3,13 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmocr) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR} OCR_SRCS) aux_source_directory(${CMAKE_SOURCE_DIR}/third_party/clipper CLIPPER_SRCS) set(SRCS ${OCR_SRCS} ${CLIPPER_SRCS}) -build_target(${PROJECT_NAME} "${SRCS}") +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_SOURCE_DIR}/third_party/clipper) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core opencv_imgproc) +target_link_libraries(${PROJECT_NAME} + PRIVATE mmdeploy::transform mmdeploy_opencv_utils) add_library(mmdeploy::mmocr ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmocr/crnn.cpp b/csrc/codebase/mmocr/crnn.cpp index bd6c4a6179..fc611e8588 100644 --- a/csrc/codebase/mmocr/crnn.cpp +++ b/csrc/codebase/mmocr/crnn.cpp @@ -1,5 +1,6 @@ // Copyright (c) OpenMMLab. All rights reserved. 
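The hunks above repeat one pattern for every codebase: the `namespace mmdeploy::mmdet { ... }` style is split into nested `namespace mmdeploy { namespace mmdet {` blocks so that the registry can be declared at `mmdeploy` scope, next to the codebase that owns it, and defined exactly once in the matching .cpp. A minimal sketch of the pattern for a hypothetical `mmfoo` codebase (the name is illustrative, not part of this patch):

```cpp
// mmfoo.h -- declare the codebase and its registry (sketch; "mmfoo" is hypothetical)
namespace mmdeploy {
namespace mmfoo {

DECLARE_CODEBASE(MMFoo, mmfoo);

}  // namespace mmfoo

// Declares the specialization `Registry<mmfoo::MMFoo>& Registry<mmfoo::MMFoo>::Get()`.
// The specialization must live in namespace mmdeploy, which is why the patch
// switches every codebase to the nested-namespace form.
MMDEPLOY_DECLARE_REGISTRY(mmfoo::MMFoo);
}  // namespace mmdeploy

// mmfoo.cpp -- register the codebase and define the registry singleton once
namespace mmdeploy {
namespace mmfoo {

REGISTER_CODEBASE(MMFoo);

}  // namespace mmfoo

MMDEPLOY_DEFINE_REGISTRY(mmfoo::MMFoo);
}  // namespace mmdeploy
```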
+#include #include #include "core/device.h" @@ -22,7 +23,7 @@ class CTCConvertor : public MMOCR { explicit CTCConvertor(const Value& cfg) : MMOCR(cfg) { auto model = cfg["context"]["model"].get(); if (!cfg.contains("params")) { - ERROR("'params' is required, but it's not in the config"); + MMDEPLOY_ERROR("'params' is required, but it's not in the config"); throw_exception(eInvalidArgument); } // BaseConverter @@ -40,11 +41,11 @@ class CTCConvertor : public MMOCR { } else if (dict_type == "DICT90") { idx2char_ = SplitChars(DICT90); } else { - ERROR("unknown dict_type: {}", dict_type); + MMDEPLOY_ERROR("unknown dict_type: {}", dict_type); throw_exception(eInvalidArgument); } } else { - ERROR("either dict_file, dict_list or dict_type must be specified"); + MMDEPLOY_ERROR("either dict_file, dict_list or dict_type must be specified"); throw_exception(eInvalidArgument); } // CTCConverter @@ -62,8 +63,8 @@ class CTCConvertor : public MMOCR { auto d_conf = _prob["output"].get(); if (!(d_conf.shape().size() == 3 && d_conf.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", d_conf.shape(), - (int)d_conf.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", d_conf.shape(), + (int)d_conf.data_type()); return Status(eNotSupported); } @@ -80,7 +81,7 @@ class CTCConvertor : public MMOCR { auto [indexes, scores] = Tensor2Idx(data, w, c, valid_ratio); auto text = Idx2Str(indexes); - DEBUG("text: {}", text); + MMDEPLOY_DEBUG("text: {}", text); TextRecognizerOutput output{text, scores}; diff --git a/csrc/codebase/mmocr/dbnet.cpp b/csrc/codebase/mmocr/dbnet.cpp index 93a3d0400b..bd90dca3f3 100644 --- a/csrc/codebase/mmocr/dbnet.cpp +++ b/csrc/codebase/mmocr/dbnet.cpp @@ -13,7 +13,7 @@ #include "core/value.h" #include "experimental/module_adapter.h" #include "mmocr.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" namespace mmdeploy::mmocr { @@ -37,21 +37,21 @@ class DBHead : public MMOCR { } Result operator()(const Value& _data, const Value& _prob) { - DEBUG("preprocess_result: {}", _data); - DEBUG("inference_result: {}", _prob); + MMDEPLOY_DEBUG("preprocess_result: {}", _data); + MMDEPLOY_DEBUG("inference_result: {}", _prob); auto img = _data["img"].get(); - DEBUG("img shape: {}", img.shape()); + MMDEPLOY_DEBUG("img shape: {}", img.shape()); Device cpu_device{"cpu"}; OUTCOME_TRY(auto conf, MakeAvailableOnDevice(_prob["output"].get(), cpu_device, stream_)); OUTCOME_TRY(stream_.Wait()); - DEBUG("shape: {}", conf.shape()); + MMDEPLOY_DEBUG("shape: {}", conf.shape()); if (!(conf.shape().size() == 4 && conf.data_type() == DataType::kFLOAT)) { - ERROR("unsupported `output` tensor, shape: {}, dtype: {}", conf.shape(), - (int)conf.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", conf.shape(), + (int)conf.data_type()); return Status(eNotSupported); } @@ -103,7 +103,7 @@ class DBHead : public MMOCR { } else { assert(0); } - DEBUG("score: {}", score); + MMDEPLOY_DEBUG("score: {}", score); // cv::drawContours(score_map, vector>{approx}, -1, 1); vector scaled(begin(approx), end(approx)); @@ -133,7 +133,7 @@ class DBHead : public MMOCR { cv::Mat mask(rect.size(), CV_8U, cv::Scalar(0)); - cv::fillPoly(mask, std::vector{box}, 1, cv::LINE_8, 0, -rect.tl()); + cv::fillPoly(mask, std::vector>{box}, 1, cv::LINE_8, 0, -rect.tl()); auto mean = cv::mean(bitmap(rect), mask)[0]; return static_cast(mean); } diff --git a/csrc/codebase/mmocr/mmocr.cpp b/csrc/codebase/mmocr/mmocr.cpp index 
2935f03b5a..f34f918afa 100644 --- a/csrc/codebase/mmocr/mmocr.cpp +++ b/csrc/codebase/mmocr/mmocr.cpp @@ -5,8 +5,12 @@ #include "core/registry.h" #include "core/utils/formatter.h" -namespace mmdeploy::mmocr { +namespace mmdeploy { +namespace mmocr { REGISTER_CODEBASE(MMOCR); -} // namespace mmdeploy::mmocr +} // namespace mmocr + +MMDEPLOY_DEFINE_REGISTRY(mmocr::MMOCR); +} // namespace mmdeploy diff --git a/csrc/codebase/mmocr/mmocr.h b/csrc/codebase/mmocr/mmocr.h index 42098af746..1871b6755c 100644 --- a/csrc/codebase/mmocr/mmocr.h +++ b/csrc/codebase/mmocr/mmocr.h @@ -7,7 +7,8 @@ #include "core/device.h" #include "core/module.h" -namespace mmdeploy::mmocr { +namespace mmdeploy { +namespace mmocr { struct TextDetectorOutput { std::vector> boxes; @@ -23,6 +24,9 @@ struct TextRecognizerOutput { DECLARE_CODEBASE(MMOCR, mmocr); -} // namespace mmdeploy::mmocr +} // namespace mmocr + +MMDEPLOY_DECLARE_REGISTRY(mmocr::MMOCR); +} // namespace mmdeploy #endif // MMDEPLOY_MMOCR_H diff --git a/csrc/codebase/mmocr/resize_ocr.cpp b/csrc/codebase/mmocr/resize_ocr.cpp index 2da9bac7a7..69d7602d18 100644 --- a/csrc/codebase/mmocr/resize_ocr.cpp +++ b/csrc/codebase/mmocr/resize_ocr.cpp @@ -4,12 +4,14 @@ #include "archive/json_archive.h" #include "archive/value_archive.h" +#include "core/registry.h" #include "core/tensor.h" #include "core/utils/device_utils.h" #include "core/utils/formatter.h" #include "opencv2/imgproc.hpp" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/resize.h" +#include "preprocess/transform/transform.h" using namespace std; @@ -37,7 +39,7 @@ class ResizeOCRImpl : public Module { ~ResizeOCRImpl() override = default; Result Process(const Value& input) override { - DEBUG("input: {}", input); + MMDEPLOY_DEBUG("input: {}", input); auto dst_height = height_; auto dst_min_width = min_width_; auto dst_max_width = max_width_; @@ -84,7 +86,7 @@ class ResizeOCRImpl : public Module { output["resize_shape"] = to_value(img_resize.desc().shape); output["pad_shape"] = output["resize_shape"]; output["valid_ratio"] = valid_ratio; - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } @@ -95,7 +97,7 @@ class ResizeOCRImpl : public Module { int h = desc.shape[1]; int w = desc.shape[2]; int c = desc.shape[3]; - assert(c == 3 or c == 1); + assert(c == 3 || c == 1); cv::Mat src_mat, dst_mat; if (3 == c) { // rgb src_mat = cv::Mat(h, w, CV_8UC3, const_cast(img.data())); @@ -135,6 +137,8 @@ class ResizeOCRImplCreator : public Creator { ReturnType Create(const Value& args) override { return std::make_unique(args); } }; +MMDEPLOY_DEFINE_REGISTRY(ResizeOCRImpl); + REGISTER_MODULE(ResizeOCRImpl, ResizeOCRImplCreator); class ResizeOCR : public Transform { diff --git a/csrc/codebase/mmocr/warp.cpp b/csrc/codebase/mmocr/warp.cpp index b54e6a7897..56566f6d12 100644 --- a/csrc/codebase/mmocr/warp.cpp +++ b/csrc/codebase/mmocr/warp.cpp @@ -8,7 +8,7 @@ #include "core/utils/formatter.h" #include "core/value.h" #include "experimental/module_adapter.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" namespace mmdeploy { diff --git a/csrc/codebase/mmseg/CMakeLists.txt b/csrc/codebase/mmseg/CMakeLists.txt index 55bb7e9d08..89a15bb32f 100644 --- a/csrc/codebase/mmseg/CMakeLists.txt +++ b/csrc/codebase/mmseg/CMakeLists.txt @@ -3,10 +3,9 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_mmseg) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) 
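`resize_ocr.cpp` above shows the complete registration recipe for a module implementation: a `Creator` subclass, one `MMDEPLOY_DEFINE_REGISTRY` for the entry type, and a `REGISTER_MODULE` line that hooks the creator into the registry at static-initialization time. A condensed sketch, with a hypothetical `MyOCRImpl` standing in for `ResizeOCRImpl`:

```cpp
#include <memory>

#include "core/registry.h"

// Sketch of the registration recipe used by resize_ocr.cpp above.
// MyOCRImpl and the "MyOCR" name are hypothetical stand-ins.
class MyOCRImplCreator : public Creator<MyOCRImpl> {
 public:
  const char* GetName() const override { return "MyOCR"; }
  int GetVersion() const override { return 1; }
  ReturnType Create(const Value& args) override { return std::make_unique<MyOCRImpl>(args); }
};

MMDEPLOY_DEFINE_REGISTRY(MyOCRImpl);           // one registry definition per entry type
REGISTER_MODULE(MyOCRImpl, MyOCRImplCreator);  // static Registerer adds the creator

// Lookup side, e.g. in a dispatcher:
//   auto* creator = Registry<MyOCRImpl>::Get().GetCreator("MyOCR", /*version=*/1);
//   auto impl = creator->Create(args);
```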
-include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS ${CMAKE_CURRENT_SOURCE_DIR} "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core opencv_core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_opencv_utils) add_library(mmdeploy::mmseg ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/codebase/mmseg/mmseg.cpp b/csrc/codebase/mmseg/mmseg.cpp index 78e1d490e5..6f080b8221 100644 --- a/csrc/codebase/mmseg/mmseg.cpp +++ b/csrc/codebase/mmseg/mmseg.cpp @@ -4,8 +4,12 @@ using namespace std; -namespace mmdeploy::mmseg { +namespace mmdeploy { +namespace mmseg { REGISTER_CODEBASE(MMSegmentation); -} // namespace mmdeploy::mmseg +} + +MMDEPLOY_DEFINE_REGISTRY(mmseg::MMSegmentation); +} // namespace mmdeploy diff --git a/csrc/codebase/mmseg/mmseg.h b/csrc/codebase/mmseg/mmseg.h index 3685c35bf2..9122047836 100644 --- a/csrc/codebase/mmseg/mmseg.h +++ b/csrc/codebase/mmseg/mmseg.h @@ -8,7 +8,8 @@ #include "core/module.h" #include "core/tensor.h" -namespace mmdeploy::mmseg { +namespace mmdeploy { +namespace mmseg { struct SegmentorOutput { Tensor mask; @@ -20,6 +21,9 @@ struct SegmentorOutput { DECLARE_CODEBASE(MMSegmentation, mmseg); -} // namespace mmdeploy::mmseg +} // namespace mmseg + +MMDEPLOY_DECLARE_REGISTRY(mmseg::MMSegmentation); +} // namespace mmdeploy #endif // MMDEPLOY_MMSEG_H diff --git a/csrc/codebase/mmseg/segment.cpp b/csrc/codebase/mmseg/segment.cpp index 48afa9b572..8d5aeef08e 100644 --- a/csrc/codebase/mmseg/segment.cpp +++ b/csrc/codebase/mmseg/segment.cpp @@ -4,7 +4,7 @@ #include "core/tensor.h" #include "core/utils/device_utils.h" #include "core/utils/formatter.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" namespace mmdeploy::mmseg { @@ -15,19 +15,19 @@ class ResizeMask : public MMSegmentation { try { classes_ = cfg["params"]["num_classes"].get(); } catch (const std::exception &e) { - ERROR("no ['params']['num_classes'] is specified in cfg: {}", cfg); + MMDEPLOY_ERROR("no ['params']['num_classes'] is specified in cfg: {}", cfg); throw_exception(eInvalidArgument); } } Result operator()(const Value &preprocess_result, const Value &inference_result) { - DEBUG("preprocess: {}\ninference: {}", preprocess_result, inference_result); + MMDEPLOY_DEBUG("preprocess: {}\ninference: {}", preprocess_result, inference_result); auto mask = inference_result["output"].get(); - DEBUG("tensor.name: {}, tensor.shape: {}, tensor.data_type: {}", mask.name(), mask.shape(), - mask.data_type()); + MMDEPLOY_DEBUG("tensor.name: {}, tensor.shape: {}, tensor.data_type: {}", mask.name(), + mask.shape(), mask.data_type()); if (!(mask.shape().size() == 4 && mask.shape(0) == 1 && mask.shape(1) == 1)) { - ERROR("unsupported `output` tensor, shape: {}", mask.shape()); + MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}", mask.shape()); return Status(eNotSupported); } @@ -40,16 +40,14 @@ class ResizeMask : public MMSegmentation { OUTCOME_TRY(stream_.Wait()); if (mask.data_type() == DataType::kINT64) { // change kINT64 to 2 INT32 - TensorDesc desc{.device = host_tensor.device(), - .data_type = DataType::kINT32, - .shape = {1, 2, height, width}, - .name = host_tensor.name()}; + TensorDesc desc{ + host_tensor.device(), DataType::kINT32, {1, 2, height, width}, host_tensor.name()}; Tensor _host_tensor(desc, mask.buffer()); return 
MaskResize(_host_tensor, input_height, input_width); } else if (mask.data_type() == DataType::kINT32) { return MaskResize(host_tensor, input_height, input_width); } else { - ERROR("unsupported `output` tensor, dtype: {}", (int)mask.data_type()); + MMDEPLOY_ERROR("unsupported `output` tensor, dtype: {}", (int)mask.data_type()); return Status(eNotSupported); } } diff --git a/csrc/core/CMakeLists.txt b/csrc/core/CMakeLists.txt index ef539a05b2..19b9a64a97 100644 --- a/csrc/core/CMakeLists.txt +++ b/csrc/core/CMakeLists.txt @@ -11,15 +11,14 @@ reliably on all generators, or if a new generator is added in the future that ca projects using it will be stuck. Even if CONFIGURE_DEPENDS works reliably, there is still a cost to perform the check on every rebuild. #]==] -# file(GLOB_RECURSE CORE_SRCS *.cpp) set(SPDLOG_LIB) find_package(spdlog QUIET) if (spdlog_FOUND) message(STATUS "spdlog is found") - set(SPDLOG_LIB $) + set(SPDLOG_LIB spdlog::spdlog) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS device_impl.cpp @@ -32,11 +31,14 @@ set(SRCS operator.cpp status_code.cpp tensor.cpp + registry.cpp utils/device_utils.cpp utils/formatter.cpp utils/stacktrace.cpp) -build_target(${PROJECT_NAME} "${SRCS}") + +mmdeploy_add_library(${PROJECT_NAME} ${SRCS}) target_compile_definitions(${PROJECT_NAME} PUBLIC -DMMDEPLOY_STATUS_USE_SOURCE_LOCATION=1) + target_include_directories(${PROJECT_NAME} PUBLIC $ @@ -45,7 +47,10 @@ target_include_directories(${PROJECT_NAME} $ $ ) -target_link_libraries(${PROJECT_NAME} PUBLIC ${SPDLOG_LIB} stdc++fs) +target_link_libraries(${PROJECT_NAME} PUBLIC ${SPDLOG_LIB}) +if (NOT MSVC) + target_link_libraries(${PROJECT_NAME} PUBLIC stdc++fs) +endif () add_library(mmdeploy::core ALIAS ${PROJECT_NAME}) install(DIRECTORY ${CMAKE_SOURCE_DIR}/csrc/core @@ -57,5 +62,3 @@ install(FILES ${CMAKE_SOURCE_DIR}/third_party/outcome/outcome-experimental.hpp install(DIRECTORY ${CMAKE_SOURCE_DIR}/csrc/experimental DESTINATION include/cpp FILES_MATCHING PATTERN "*.h") - -export_target(${PROJECT_NAME}) diff --git a/csrc/core/device.h b/csrc/core/device.h index 6695ca3807..f337ad4dcd 100644 --- a/csrc/core/device.h +++ b/csrc/core/device.h @@ -10,6 +10,7 @@ #include #include +#include "core/macro.h" #include "core/status_code.h" namespace mmdeploy { @@ -67,7 +68,7 @@ class Device { constexpr explicit Device(int platform_id, int device_id = 0) : platform_id_(platform_id), device_id_(device_id) {} - explicit Device(const char *platform_name, int device_id = 0); + MMDEPLOY_API explicit Device(const char *platform_name, int device_id = 0); constexpr int device_id() const noexcept { return device_id_; } @@ -100,7 +101,7 @@ class Device { enum class MemcpyKind : int { HtoD, DtoH, DtoD }; -class Platform { +class MMDEPLOY_API Platform { public: // throws if not found explicit Platform(const char *platform_name); @@ -133,7 +134,7 @@ Platform GetPlatform(int platform_id); Platform GetPlatform(const char *platform_name); -class Stream { +class MMDEPLOY_API Stream { public: Stream() = default; @@ -187,7 +188,7 @@ T GetNative(Stream &stream, ErrorCode *ec = nullptr) { return reinterpret_cast(stream.GetNative(ec)); } -class Event { +class MMDEPLOY_API Event { public: Event() = default; @@ -226,7 +227,7 @@ T GetNative(Event &event, ErrorCode *ec = nullptr) { return reinterpret_cast(event.GetNative(ec)); } -class Kernel { +class MMDEPLOY_API Kernel { public: Kernel() = default; explicit Kernel(std::shared_ptr impl) : impl_(std::move(impl)) {} @@ 
-246,7 +247,7 @@ T GetNative(Kernel &kernel, ErrorCode *ec = nullptr) { return reinterpret_cast(kernel.GetNative(ec)); } -class Allocator { +class MMDEPLOY_API Allocator { friend class Access; public: @@ -259,7 +260,7 @@ class Allocator { std::shared_ptr impl_; }; -class Buffer { +class MMDEPLOY_API Buffer { public: Buffer() = default; @@ -304,7 +305,7 @@ T GetNative(const Buffer &buffer, ErrorCode *ec = nullptr) { return reinterpret_cast(buffer.GetNative(ec)); } -class PlatformRegistry { +class MMDEPLOY_API PlatformRegistry { public: using Creator = std::function()>; @@ -332,6 +333,6 @@ class PlatformRegistry { std::vector entries_; }; -PlatformRegistry &gPlatformRegistry(); +MMDEPLOY_API PlatformRegistry &gPlatformRegistry(); } // namespace mmdeploy diff --git a/csrc/core/device_impl.cpp b/csrc/core/device_impl.cpp index 04b772314e..32ed9e104f 100644 --- a/csrc/core/device_impl.cpp +++ b/csrc/core/device_impl.cpp @@ -144,7 +144,7 @@ Stream::Stream(Device device, uint64_t flags) { r.error().throw_exception(); } } else { - ERROR("{}, {}", device.device_id(), device.platform_id()); + MMDEPLOY_ERROR("{}, {}", device.device_id(), device.platform_id()); throw_exception(eInvalidArgument); } } diff --git a/csrc/core/graph.cpp b/csrc/core/graph.cpp index a824630059..524542928e 100644 --- a/csrc/core/graph.cpp +++ b/csrc/core/graph.cpp @@ -3,8 +3,10 @@ #include "core/graph.h" #include "archive/value_archive.h" +#include "core/registry.h" -namespace mmdeploy::graph { +namespace mmdeploy { +namespace graph { TaskGraph::Handle* TaskGraph::Add(TaskFunction fn) { function_.push_back(std::move(fn)); @@ -14,7 +16,8 @@ TaskGraph::Handle* TaskGraph::Add(TaskFunction fn) { TaskGraph::~TaskGraph() { for (int i = 0; i < time_.size(); ++i) { - INFO("node {} ({}): {} ms", i, handle_[i]->name(), static_cast(time_[i]) / count_); + MMDEPLOY_INFO("node {} ({}): {} ms", i, handle_[i]->name(), + static_cast(time_[i]) / count_); } } @@ -75,4 +78,8 @@ std::vector> Context::Execute(Span()>> return graph_->Execute(tasks); } -} // namespace mmdeploy::graph +} // namespace graph + +MMDEPLOY_DEFINE_REGISTRY(graph::Node); + +} // namespace mmdeploy diff --git a/csrc/core/graph.h b/csrc/core/graph.h index 37c1aa189f..d55afe70fa 100644 --- a/csrc/core/graph.h +++ b/csrc/core/graph.h @@ -14,7 +14,9 @@ #include "taskflow/taskflow.hpp" #endif -namespace mmdeploy::graph { +namespace mmdeploy { + +namespace graph { using std::pair; using std::string; @@ -24,7 +26,7 @@ using std::vector; class TaskGraph; class Node; -class Context { +class MMDEPLOY_API Context { public: explicit Context(TaskGraph* graph) : graph_(graph) {} @@ -48,7 +50,7 @@ class Context { TaskGraph* graph_; }; -class TaskGraph { +class MMDEPLOY_API TaskGraph { friend class Context; public: @@ -65,6 +67,10 @@ class TaskGraph { ~TaskGraph(); + TaskGraph() = default; + TaskGraph(const TaskGraph&) = delete; + TaskGraph& operator=(const TaskGraph&) = delete; + Handle* Add(TaskFunction fn); Result Run(Value inputs); @@ -82,7 +88,7 @@ class TaskGraph { int64_t count_{}; }; -class Node { +class MMDEPLOY_API Node { public: virtual ~Node() = default; virtual void Build(TaskGraph& graph) = 0; @@ -96,6 +102,10 @@ class Node { std::vector outputs_; }; -} // namespace mmdeploy::graph +} // namespace graph + +MMDEPLOY_DECLARE_REGISTRY(graph::Node); + +} // namespace mmdeploy #endif // MMDEPLOY_SRC_EXPERIMENTAL_PIPELINE_IR_H_ diff --git a/csrc/core/logger.cpp b/csrc/core/logger.cpp index b858ce785c..a1499cc072 100644 --- a/csrc/core/logger.cpp +++ b/csrc/core/logger.cpp @@ -9,6 
+9,9 @@ #include #else #include +#if defined(_MSC_VER) +#include +#endif #endif #endif diff --git a/csrc/core/logger.h b/csrc/core/logger.h index 4f7c2ebb84..ff326c4511 100644 --- a/csrc/core/logger.h +++ b/csrc/core/logger.h @@ -5,11 +5,13 @@ #include +#include "core/macro.h" + namespace mmdeploy { -spdlog::logger *GetLogger(); +MMDEPLOY_API spdlog::logger *GetLogger(); -void SetLogger(spdlog::logger *logger); +MMDEPLOY_API void SetLogger(spdlog::logger *logger); } // namespace mmdeploy @@ -86,18 +88,4 @@ void SetLogger(spdlog::logger *logger); #define MMDEPLOY_CRITICAL(...) (void)0; #endif -#undef CRITICAL -#undef ERROR -#undef WARN -#undef INFO -#undef DEBUG -#undef TRACE - -#define CRITICAL MMDEPLOY_CRITICAL -#define ERROR MMDEPLOY_ERROR -#define WARN MMDEPLOY_WARN -#define INFO MMDEPLOY_INFO -#define DEBUG MMDEPLOY_DEBUG -#define TRACE MMDEPLOY_TRACE - #endif // !CORE_LOG_H diff --git a/csrc/core/macro.h b/csrc/core/macro.h index f9822094e9..6f52f0b5fa 100644 --- a/csrc/core/macro.h +++ b/csrc/core/macro.h @@ -3,34 +3,119 @@ #ifndef MMDEPLOY_SRC_CORE_MARCO_H_ #define MMDEPLOY_SRC_CORE_MARCO_H_ +#ifndef MMDEPLOY_EXPORT #ifdef _MSC_VER -#ifdef SDK_EXPORTS -#define MM_SDK_API __declspec(dllexport) +#define MMDEPLOY_EXPORT __declspec(dllexport) #else -#define MM_SDK_API -#endif -#else /* _MSC_VER */ -#ifdef SDK_EXPORTS -#define MM_SDK_API __attribute__((visibility("default"))) -#else -#define MM_SDK_API +#define MMDEPLOY_EXPORT __attribute__((visibility("default"))) #endif #endif -#ifdef __cplusplus -#define CV_SDK_API extern "C" MM_SDK_API +#ifndef MMDEPLOY_API +#ifdef MMDEPLOY_API_EXPORTS +#define MMDEPLOY_API MMDEPLOY_EXPORT #else -#define CV_SDK_API MM_SDK_API +#define MMDEPLOY_API +#endif #endif -#define MMDEPLOY_CONCATENATE_IMPL(s1, s2) s1##s2 -#define MMDEPLOY_CONCATENATE(s1, s2) MMDEPLOY_CONCATENATE_IMPL(s1, s2) +#define _MMDEPLOY_PP_CONCAT_IMPL(s1, s2) s1##s2 +#define MMDEPLOY_PP_CONCAT(s1, s2) _MMDEPLOY_PP_CONCAT_IMPL(s1, s2) // ! Be aware of ODR violation when using __COUNTER__ #ifdef __COUNTER__ -#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_CONCATENATE(str, __COUNTER__) +#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_PP_CONCAT(str, __COUNTER__) #else -#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_CONCATENATE(str, __LINE__) +#define MMDEPLOY_ANONYMOUS_VARIABLE(str) MMDEPLOY_PP_CONCAT(str, __LINE__) #endif +#define MMDEPLOY_PP_NARG(...) _MMDEPLOY_PP_NARG(__VA_ARGS__, _MMDEPLOY_PP_RESQ_N()) + +#define _MMDEPLOY_PP_NARG(...) _MMDEPLOY_PP_ARG_N(__VA_ARGS__) + +#define _MMDEPLOY_PP_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \ + _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, \ + _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, \ + _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, \ + _59, _60, _61, _62, _63, N, ...) \ + N + +#define _MMDEPLOY_PP_RESQ_N() \ + 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \ + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, \ + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 + +#define MMDEPLOY_PP_MAP_1(f, x) f(x) +#define MMDEPLOY_PP_MAP_2(f, x, ...) f(x), MMDEPLOY_PP_MAP_1(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_3(f, x, ...) f(x), MMDEPLOY_PP_MAP_2(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_4(f, x, ...) f(x), MMDEPLOY_PP_MAP_3(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_5(f, x, ...) 
f(x), MMDEPLOY_PP_MAP_4(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_6(f, x, ...) f(x), MMDEPLOY_PP_MAP_5(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_7(f, x, ...) f(x), MMDEPLOY_PP_MAP_6(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_8(f, x, ...) f(x), MMDEPLOY_PP_MAP_7(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_9(f, x, ...) f(x), MMDEPLOY_PP_MAP_8(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_10(f, x, ...) f(x), MMDEPLOY_PP_MAP_9(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_11(f, x, ...) f(x), MMDEPLOY_PP_MAP_10(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_12(f, x, ...) f(x), MMDEPLOY_PP_MAP_11(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_13(f, x, ...) f(x), MMDEPLOY_PP_MAP_12(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_14(f, x, ...) f(x), MMDEPLOY_PP_MAP_13(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_15(f, x, ...) f(x), MMDEPLOY_PP_MAP_14(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_16(f, x, ...) f(x), MMDEPLOY_PP_MAP_15(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_17(f, x, ...) f(x), MMDEPLOY_PP_MAP_16(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_18(f, x, ...) f(x), MMDEPLOY_PP_MAP_17(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_19(f, x, ...) f(x), MMDEPLOY_PP_MAP_18(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_20(f, x, ...) f(x), MMDEPLOY_PP_MAP_19(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_21(f, x, ...) f(x), MMDEPLOY_PP_MAP_20(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_22(f, x, ...) f(x), MMDEPLOY_PP_MAP_21(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_23(f, x, ...) f(x), MMDEPLOY_PP_MAP_22(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_24(f, x, ...) f(x), MMDEPLOY_PP_MAP_23(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_25(f, x, ...) f(x), MMDEPLOY_PP_MAP_24(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_26(f, x, ...) f(x), MMDEPLOY_PP_MAP_25(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_27(f, x, ...) f(x), MMDEPLOY_PP_MAP_26(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_28(f, x, ...) f(x), MMDEPLOY_PP_MAP_27(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_29(f, x, ...) f(x), MMDEPLOY_PP_MAP_28(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_30(f, x, ...) f(x), MMDEPLOY_PP_MAP_29(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_31(f, x, ...) f(x), MMDEPLOY_PP_MAP_30(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_32(f, x, ...) f(x), MMDEPLOY_PP_MAP_31(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_33(f, x, ...) f(x), MMDEPLOY_PP_MAP_32(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_34(f, x, ...) f(x), MMDEPLOY_PP_MAP_33(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_35(f, x, ...) f(x), MMDEPLOY_PP_MAP_34(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_36(f, x, ...) f(x), MMDEPLOY_PP_MAP_35(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_37(f, x, ...) f(x), MMDEPLOY_PP_MAP_36(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_38(f, x, ...) f(x), MMDEPLOY_PP_MAP_37(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_39(f, x, ...) f(x), MMDEPLOY_PP_MAP_38(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_40(f, x, ...) f(x), MMDEPLOY_PP_MAP_39(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_41(f, x, ...) f(x), MMDEPLOY_PP_MAP_40(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_42(f, x, ...) f(x), MMDEPLOY_PP_MAP_41(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_43(f, x, ...) f(x), MMDEPLOY_PP_MAP_42(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_44(f, x, ...) f(x), MMDEPLOY_PP_MAP_43(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_45(f, x, ...) f(x), MMDEPLOY_PP_MAP_44(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_46(f, x, ...) f(x), MMDEPLOY_PP_MAP_45(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_47(f, x, ...) f(x), MMDEPLOY_PP_MAP_46(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_48(f, x, ...) f(x), MMDEPLOY_PP_MAP_47(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_49(f, x, ...) 
f(x), MMDEPLOY_PP_MAP_48(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_50(f, x, ...) f(x), MMDEPLOY_PP_MAP_49(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_51(f, x, ...) f(x), MMDEPLOY_PP_MAP_50(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_52(f, x, ...) f(x), MMDEPLOY_PP_MAP_51(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_53(f, x, ...) f(x), MMDEPLOY_PP_MAP_52(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_54(f, x, ...) f(x), MMDEPLOY_PP_MAP_53(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_55(f, x, ...) f(x), MMDEPLOY_PP_MAP_54(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_56(f, x, ...) f(x), MMDEPLOY_PP_MAP_55(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_57(f, x, ...) f(x), MMDEPLOY_PP_MAP_56(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_58(f, x, ...) f(x), MMDEPLOY_PP_MAP_57(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_59(f, x, ...) f(x), MMDEPLOY_PP_MAP_58(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_60(f, x, ...) f(x), MMDEPLOY_PP_MAP_59(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_61(f, x, ...) f(x), MMDEPLOY_PP_MAP_60(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_62(f, x, ...) f(x), MMDEPLOY_PP_MAP_61(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_63(f, x, ...) f(x), MMDEPLOY_PP_MAP_62(f, __VA_ARGS__) +#define MMDEPLOY_PP_MAP_64(f, x, ...) f(x), MMDEPLOY_PP_MAP_63(f, __VA_ARGS__) + +#define MMDEPLOY_PP_MAP(f, ...) \ + _MMDEPLOY_PP_MAP_IMPL1(f, MMDEPLOY_PP_NARG(__VA_ARGS__), __VA_ARGS__) + +#define _MMDEPLOY_PP_MAP_IMPL1(f, n, ...) \ + _MMDEPLOY_PP_MAP_IMPL2(f, MMDEPLOY_PP_CONCAT(MMDEPLOY_PP_MAP_, n), __VA_ARGS__) + +#define _MMDEPLOY_PP_MAP_IMPL2(f, M_, ...) M_(f, __VA_ARGS__) + #endif // MMDEPLOY_SRC_CORE_MARCO_H_ diff --git a/csrc/core/mat.h b/csrc/core/mat.h index d6ec4ba206..4702df7e9e 100644 --- a/csrc/core/mat.h +++ b/csrc/core/mat.h @@ -11,7 +11,7 @@ namespace mmdeploy { -class Mat final { +class MMDEPLOY_API Mat final { public: Mat() = default; diff --git a/csrc/core/model.cpp b/csrc/core/model.cpp index 080504a2a6..d4b6361a91 100644 --- a/csrc/core/model.cpp +++ b/csrc/core/model.cpp @@ -4,14 +4,7 @@ #include "core/logger.h" #include "core/model_impl.h" - -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif +#include "core/utils/filesystem.h" using namespace std; @@ -19,7 +12,7 @@ namespace mmdeploy { Model::Model(const std::string& model_path) { if (auto r = Model::Init(model_path); !r) { - ERROR("load model failed. Its file path is '{}'", model_path); + MMDEPLOY_ERROR("load model failed. 
Its file path is '{}'", model_path); r.error().throw_exception(); } } @@ -28,7 +21,7 @@ Model::Model(const void* buffer, size_t size) { Init(buffer, size).value(); } Result Model::Init(const std::string& model_path) { if (!fs::exists(model_path)) { - ERROR("'{}' doesn't exist", model_path); + MMDEPLOY_ERROR("'{}' doesn't exist", model_path); return Status(eFileNotExist); } @@ -42,13 +35,13 @@ Result Model::Init(const std::string& model_path) { } OUTCOME_TRY(auto meta, impl->ReadMeta()); - INFO("{} successfully load sdk model {}", entry.name, model_path); + MMDEPLOY_INFO("{} successfully load sdk model {}", entry.name, model_path); impl_ = std::move(impl); meta_ = std::move(meta); return success(); } - ERROR("no ModelImpl can read sdk_model {}", model_path); + MMDEPLOY_ERROR("no ModelImpl can read sdk_model {}", model_path); return Status(eNotSupported); } @@ -63,13 +56,13 @@ Result Model::Init(const void* buffer, size_t size) { } OUTCOME_TRY(auto meta, impl->ReadMeta()); - INFO("{} successfully load sdk model {}", entry.name); + MMDEPLOY_INFO("{} successfully load sdk model {}", entry.name); impl_ = std::move(impl); meta_ = std::move(meta); return success(); } - ERROR("no ModelImpl can parse buffer"); + MMDEPLOY_ERROR("no ModelImpl can parse buffer"); return Status(eNotSupported); } @@ -79,7 +72,7 @@ Result Model::GetModelConfig(const std::string& name) const { return info; } } - ERROR("cannot find model '{}' in meta file", name); + MMDEPLOY_ERROR("cannot find model '{}' in meta file", name); return Status(eEntryNotFound); } @@ -87,14 +80,19 @@ Result Model::ReadFile(const std::string& file_path) noexcept { return impl_->ReadFile(file_path); } +ModelRegistry& ModelRegistry::Get() { + static ModelRegistry inst; + return inst; +} + Result ModelRegistry::Register(const std::string& name, Creator creator) { for (auto& entry : entries_) { if (entry.name == name) { - ERROR("{} is already registered", name); + MMDEPLOY_ERROR("{} is already registered", name); return Status(eFail); } } - INFO("Register '{}'", name); + MMDEPLOY_INFO("Register '{}'", name); entries_.push_back({name, std::move(creator)}); return success(); } diff --git a/csrc/core/model.h b/csrc/core/model.h index a9ce11eff3..5193128c31 100644 --- a/csrc/core/model.h +++ b/csrc/core/model.h @@ -39,7 +39,7 @@ class ModelImpl; * in case of faster-rcnn model, it splits into two models, one is rpn and the * other is cnn for roi classification. 
*/ -class Model { +class MMDEPLOY_API Model { public: Model() = default; @@ -115,7 +115,7 @@ class Model { * }; * ANewModelImplRegister a_new_model_impl_register; */ -class ModelRegistry { +class MMDEPLOY_API ModelRegistry { public: using Creator = std::function()>; struct Entry { @@ -126,10 +126,7 @@ class ModelRegistry { /** * @brief Return global instance of `ModelRegistry` */ - static ModelRegistry& Get() { - static ModelRegistry inst; - return inst; - } + static ModelRegistry& Get(); /** * @brief Register an sdk model format denoted by an specified `ModelImpl` diff --git a/csrc/core/module.cpp b/csrc/core/module.cpp index d21ea31bdc..28857f4206 100644 --- a/csrc/core/module.cpp +++ b/csrc/core/module.cpp @@ -6,7 +6,9 @@ namespace mmdeploy { -template class Registry; -template class Creator; +// template class Registry; +// template class Creator; + +MMDEPLOY_DEFINE_REGISTRY(Module); } // namespace mmdeploy diff --git a/csrc/core/module.h b/csrc/core/module.h index 96d0c5cffa..6debc6a6dd 100644 --- a/csrc/core/module.h +++ b/csrc/core/module.h @@ -4,17 +4,20 @@ #define MMDEPLOY_SRC_CORE_MODULE_H_ #include "core/macro.h" +#include "core/registry.h" #include "core/status_code.h" #include "core/value.h" namespace mmdeploy { -class MM_SDK_API Module { +class MMDEPLOY_API Module { public: virtual ~Module() = default; virtual Result Process(const Value& args) = 0; }; +MMDEPLOY_DECLARE_REGISTRY(Module); + } // namespace mmdeploy #endif // MMDEPLOY_SRC_CORE_MODULE_H_ diff --git a/csrc/core/net.cpp b/csrc/core/net.cpp index f548042507..9f057dc88e 100644 --- a/csrc/core/net.cpp +++ b/csrc/core/net.cpp @@ -6,7 +6,9 @@ namespace mmdeploy { -template class Registry; -template class Creator; +// template class Registry; +// template class Creator; + +MMDEPLOY_DEFINE_REGISTRY(Net); } // namespace mmdeploy diff --git a/csrc/core/net.h b/csrc/core/net.h index b96551cf90..c49a7ceeb7 100644 --- a/csrc/core/net.h +++ b/csrc/core/net.h @@ -22,6 +22,8 @@ class Net { virtual Result ForwardAsync(Event* event) = 0; }; +MMDEPLOY_DECLARE_REGISTRY(Net); + } // namespace mmdeploy #endif // MMDEPLOY_SRC_CORE_NET_H_ diff --git a/csrc/core/operator.cpp b/csrc/core/operator.cpp index c40bfee508..e4e536b2c5 100644 --- a/csrc/core/operator.cpp +++ b/csrc/core/operator.cpp @@ -2,6 +2,8 @@ #include "operator.h" +#include + namespace mmdeploy::graph { Result Gather(const Value::Array& array, const vector& idxs, Value::Array& output) { diff --git a/csrc/core/operator.h b/csrc/core/operator.h index 0936fbfaf3..c71c62f051 100644 --- a/csrc/core/operator.h +++ b/csrc/core/operator.h @@ -11,13 +11,18 @@ using std::string; using std::tuple; using std::vector; -Result Gather(const Value::Array& array, const vector& idxs, Value::Array& output); -Result Gather(Value::Array&& array, const vector& idxs, Value::Array& output); -Result Gather(const Value::Object& object, const vector& keys, - Value::Array& output); -Result Gather(Value::Object&& object, const vector& keys, Value::Array& output); -Result Scatter(Value::Array array, const vector& idxs, Value::Array& output); -Result Scatter(Value::Array array, const vector& keys, Value::Object& output); +MMDEPLOY_API Result Gather(const Value::Array& array, const vector& idxs, + Value::Array& output); +MMDEPLOY_API Result Gather(Value::Array&& array, const vector& idxs, + Value::Array& output); +MMDEPLOY_API Result Gather(const Value::Object& object, const vector& keys, + Value::Array& output); +MMDEPLOY_API Result Gather(Value::Object&& object, const vector& keys, + Value::Array& 
output); +MMDEPLOY_API Result Scatter(Value::Array array, const vector& idxs, + Value::Array& output); +MMDEPLOY_API Result Scatter(Value::Array array, const vector& keys, + Value::Object& output); inline Result Gather(const Value::Array& array, const vector& idxs) { Value::Array output; @@ -95,13 +100,13 @@ Result Unflatten(V&& input, const vector& idxs) { } // object of arrays -> array of objects, all arrays must be of same length -Result DistribOA(const Value& oa); +MMDEPLOY_API Result DistribOA(const Value& oa); // array of objects -> object of arrays, all objects must be isomorphic -Result DistribAO(const Value& ao); +MMDEPLOY_API Result DistribAO(const Value& ao); // array of arrays -> array of arrays, this is equivalent to transpose -Result DistribAA(const Value& a); +MMDEPLOY_API Result DistribAA(const Value& a); } // namespace mmdeploy::graph diff --git a/csrc/core/registry.cpp b/csrc/core/registry.cpp new file mode 100644 index 0000000000..d0d543ee5d --- /dev/null +++ b/csrc/core/registry.cpp @@ -0,0 +1,46 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#include "core/registry.h" + +namespace mmdeploy { + +Registry::Registry() = default; + +Registry::~Registry() = default; + +bool Registry::AddCreator(Creator &creator) { + MMDEPLOY_DEBUG("Adding creator: {}", creator.GetName()); + auto key = creator.GetName(); + if (entries_.find(key) == entries_.end()) { + entries_.insert(std::make_pair(key, &creator)); + return true; + } + + for (auto iter = entries_.lower_bound(key); iter != entries_.upper_bound(key); ++iter) { + if (iter->second->GetVersion() == creator.GetVersion()) { + return false; + } + } + + entries_.insert(std::make_pair(key, &creator)); + return true; +} + +Creator *Registry::GetCreator(const std::string &type, int version) { + auto iter = entries_.find(type); + if (iter == entries_.end()) { + return nullptr; + } + if (0 == version) { + return iter->second; + } + + for (auto iter = entries_.lower_bound(type); iter != entries_.upper_bound(type); ++iter) { + if (iter->second->GetVersion() == version) { + return iter->second; + } + } + return nullptr; +} + +} // namespace mmdeploy diff --git a/csrc/core/registry.h b/csrc/core/registry.h index 03d5e3f234..bde878a35c 100644 --- a/csrc/core/registry.h +++ b/csrc/core/registry.h @@ -9,6 +9,7 @@ #include #include +#include "macro.h" #include "value.h" namespace mmdeploy { @@ -30,73 +31,58 @@ using get_return_type_t = typename get_return_type::type; } // namespace detail +template +class Creator; + +template <> +class Creator { + public: + virtual ~Creator() = default; + virtual const char *GetName() const = 0; + virtual int GetVersion() const { return 0; } +}; + template -class Creator { +class Creator : public Creator { public: using ReturnType = detail::get_return_type_t; public: - virtual ~Creator() = default; - virtual const char *GetName() const = 0; - virtual int GetVersion() const = 0; virtual ReturnType Create(const Value &args) = 0; }; -template -class Registry { +template +class Registry; + +template <> +class MMDEPLOY_API Registry { public: - static Registry &Get() { - static Registry registry; - return registry; - } + Registry(); - bool AddCreator(Creator &creator) { - auto key = creator.GetName(); - if (entries_.find(key) == entries_.end()) { - entries_.insert(std::make_pair(key, &creator)); - return true; - } - - for (auto iter = entries_.lower_bound(key); iter != entries_.upper_bound(key); ++iter) { - if (iter->second->GetVersion() == creator.GetVersion()) { - return false; - } - } - - 
entries_.insert(std::make_pair(key, &creator)); - return true; - } + ~Registry(); + + bool AddCreator(Creator &creator); + + Creator *GetCreator(const std::string &type, int version = 0); + + private: + std::multimap *> entries_; +}; + +template +class Registry : public Registry { + public: + bool AddCreator(Creator &creator) { return Registry::AddCreator(creator); } Creator *GetCreator(const std::string &type, int version = 0) { - auto iter = entries_.find(type); - if (iter == entries_.end()) { - return nullptr; - } - if (0 == version) { - return iter->second; - } - - for (auto iter = entries_.lower_bound(type); iter != entries_.upper_bound(type); ++iter) { - if (iter->second->GetVersion() == version) { - return iter->second; - } - } - return nullptr; + auto creator = Registry::GetCreator(type, version); + return static_cast *>(creator); } - std::vector ListCreators() { - std::vector keys; - for (const auto &[key, _] : entries_) { - keys.push_back(key); - } - return keys; - } + static Registry &Get(); private: Registry() = default; - - private: - std::multimap *> entries_; }; template @@ -110,6 +96,17 @@ class Registerer { } // namespace mmdeploy +#define MMDEPLOY_DECLARE_REGISTRY(EntryType) \ + template <> \ + Registry &Registry::Get(); + +#define MMDEPLOY_DEFINE_REGISTRY(EntryType) \ + template <> \ + MMDEPLOY_EXPORT Registry &Registry::Get() { \ + static Registry v; \ + return v; \ + } + #define REGISTER_MODULE(EntryType, CreatorType) \ static ::mmdeploy::Registerer g_register_##EntryType##_##CreatorType{}; diff --git a/csrc/core/serialization.h b/csrc/core/serialization.h index aeea43bb50..6a37d8c3ff 100644 --- a/csrc/core/serialization.h +++ b/csrc/core/serialization.h @@ -8,47 +8,14 @@ #include #include +#include "core/macro.h" #include "core/status_code.h" #include "mpl/detected.h" #include "mpl/type_traits.h" namespace mmdeploy { -#define _MMDEPLOY_NTH_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \ - N, ...) \ - N - -#define _MMDEPLOY_ARCHIVE_1(x) MMDEPLOY_NVP(x) -#define _MMDEPLOY_ARCHIVE_2(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_1(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_3(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_2(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_4(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_3(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_5(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_4(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_6(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_5(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_7(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_6(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_8(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_7(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_9(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_8(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_10(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_9(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_11(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_10(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_12(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_11(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_13(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_12(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_14(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_13(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_15(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_14(__VA_ARGS__) -#define _MMDEPLOY_ARCHIVE_16(x, ...) MMDEPLOY_NVP(x), _MMDEPLOY_ARCHIVE_15(__VA_ARGS__) - -#define _MMDEPLOY_ARCHIVE_DISPATCH(...) 
\ - _MMDEPLOY_NTH_ARG(__VA_ARGS__, _MMDEPLOY_ARCHIVE_16(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_15(__VA_ARGS__), _MMDEPLOY_ARCHIVE_14(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_13(__VA_ARGS__), _MMDEPLOY_ARCHIVE_12(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_11(__VA_ARGS__), _MMDEPLOY_ARCHIVE_10(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_9(__VA_ARGS__), _MMDEPLOY_ARCHIVE_8(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_7(__VA_ARGS__), _MMDEPLOY_ARCHIVE_6(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_5(__VA_ARGS__), _MMDEPLOY_ARCHIVE_4(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_3(__VA_ARGS__), _MMDEPLOY_ARCHIVE_2(__VA_ARGS__), \ - _MMDEPLOY_ARCHIVE_1(__VA_ARGS__)) - -#define MMDEPLOY_ARCHIVE_NVP(archive, ...) archive(_MMDEPLOY_ARCHIVE_DISPATCH(__VA_ARGS__)) - -#define MMDEPLOY_ARCHIVE(archive, ...) archive(__VA_ARGS__) +#define MMDEPLOY_ARCHIVE_NVP(archive, ...) archive(MMDEPLOY_PP_MAP(MMDEPLOY_NVP, __VA_ARGS__)) #define MMDEPLOY_ARCHIVE_MEMBERS(...) \ template \ @@ -273,32 +240,26 @@ void load(Archive &&archive, T &&object) { } } -template -using save_t = decltype(save(std::declval(), std::declval())); - struct save_fn { template - auto operator()(Archive &&a, T &&v) const -> save_t { + auto operator()(Archive &&a, T &&v) const + -> decltype(save(std::forward(a), std::forward(v))) { return save(std::forward(a), std::forward(v)); } }; -template -using load_t = decltype(load(std::declval(), std::declval())); - struct load_fn { template - auto operator()(Archive &&a, T &&v) const -> load_t { + auto operator()(Archive &&a, T &&v) const + -> decltype(load(std::forward(a), std::forward(v))) { return load(std::forward(a), std::forward(v)); } }; -template -using serialize_t = decltype(serialize(std::declval(), std::declval())); - struct serialize_fn { template - auto operator()(Archive &&a, T &&v) const -> serialize_t { + auto operator()(Archive &&a, T &&v) const + -> decltype(serialize(std::forward(a), std::forward(v))) { return serialize(std::forward(a), std::forward(v)); } }; @@ -319,15 +280,18 @@ struct adl_serializer; template struct adl_serializer { template - static auto save(Archive &&a, T &&v) -> detail::save_t { + static auto save(Archive &&a, T &&v) + -> decltype(::mmdeploy::save(std::forward(a), std::forward(v))) { ::mmdeploy::save(std::forward(a), std::forward(v)); } template - static auto load(Archive &&a, T &&v) -> detail::load_t { + static auto load(Archive &&a, T &&v) + -> decltype(::mmdeploy::load(std::forward(a), std::forward(v))) { ::mmdeploy::load(std::forward(a), std::forward(v)); } template - static auto serialize(Archive &&a, T &&v) -> detail::serialize_t { + static auto serialize(Archive &&a, T &&v) + -> decltype(::mmdeploy::serialize(std::forward(a), std::forward(v))) { ::mmdeploy::serialize(std::forward(a), std::forward(v)); } }; diff --git a/csrc/core/status_code.h b/csrc/core/status_code.h index 3b719e677a..f84ce8a886 100644 --- a/csrc/core/status_code.h +++ b/csrc/core/status_code.h @@ -5,6 +5,7 @@ #include +#include "core/macro.h" #include "outcome-experimental.hpp" #if MMDEPLOY_STATUS_USE_SOURCE_LOCATION #include "utils/source_location.h" @@ -71,7 +72,7 @@ inline const char *to_string(ErrorCode code) { } } -struct Status { +struct MMDEPLOY_API Status { ErrorCode ec{}; Status() = default; SYSTEM_ERROR2_NAMESPACE::status_code_domain::string_ref message() const; @@ -94,7 +95,7 @@ class StatusDomain; using StatusCode = SYSTEM_ERROR2_NAMESPACE::status_code; -class StatusDomain : public SYSTEM_ERROR2_NAMESPACE::status_code_domain { +class MMDEPLOY_API StatusDomain : public SYSTEM_ERROR2_NAMESPACE::status_code_domain { using 
_base = status_code_domain; public: diff --git a/csrc/core/tensor.cpp b/csrc/core/tensor.cpp index aed5d6c3e0..6a040ce046 100644 --- a/csrc/core/tensor.cpp +++ b/csrc/core/tensor.cpp @@ -87,16 +87,16 @@ void Tensor::Reshape(const TensorShape& shape) { Result Tensor::CopyFrom(const Tensor& tensor, Stream stream) { if (desc_.shape.empty() || tensor.desc().shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return Status(eInvalidArgument); } if (!(desc_.shape == tensor.desc().shape)) { - ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), - shape_string(tensor.desc().shape)); + MMDEPLOY_ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), + shape_string(tensor.desc().shape)); return Status(eShapeMismatch); } if (desc_.data_type != tensor.desc().data_type) { - ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); + MMDEPLOY_ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); return Status(eShapeMismatch); } Allocate(); @@ -112,17 +112,17 @@ Result Tensor::CopyFrom(const Tensor& tensor, Stream stream) { Result Tensor::CopyTo(Tensor& tensor, Stream stream) const { if (desc_.shape.empty() || tensor.desc().shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return Status(eInvalidArgument); } if (!(desc_.shape == tensor.desc().shape)) { - ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), - shape_string(tensor.desc().shape)); + MMDEPLOY_ERROR("mismatched shape {} vs {}", shape_string(desc_.shape), + shape_string(tensor.desc().shape)); return Status(eShapeMismatch); } if (desc_.data_type != tensor.desc().data_type) { - ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); + MMDEPLOY_ERROR("mismatched data type {} vs {}", desc_.data_type, tensor.desc().data_type); return Status(eShapeMismatch); } tensor.Allocate(); @@ -140,7 +140,7 @@ Result Tensor::CopyFrom(void* host_ptr, Stream stream) { return Status(eInvalidArgument); } if (desc_.shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return Status(eInvalidArgument); } Allocate(); @@ -157,7 +157,7 @@ Result Tensor::CopyTo(void* host_ptr, Stream stream) const { return Status(eInvalidArgument); } if (desc_.shape.empty()) { - ERROR("uninitialized tensor"); + MMDEPLOY_ERROR("uninitialized tensor"); return Status(eInvalidArgument); } if (!stream) { diff --git a/csrc/core/tensor.h b/csrc/core/tensor.h index 264c6d7b84..78be82ad39 100644 --- a/csrc/core/tensor.h +++ b/csrc/core/tensor.h @@ -19,7 +19,7 @@ struct TensorDesc { std::string name; }; -class Tensor { +class MMDEPLOY_API Tensor { public: Tensor() = default; Tensor(const Tensor&) = default; diff --git a/csrc/core/utils/device_utils.h b/csrc/core/utils/device_utils.h index 81621efecc..65422664e8 100644 --- a/csrc/core/utils/device_utils.h +++ b/csrc/core/utils/device_utils.h @@ -14,7 +14,8 @@ namespace mmdeploy { * @param stream * @return */ -Result MakeAvailableOnDevice(const Mat& src, const Device& device, Stream& stream); +MMDEPLOY_API Result MakeAvailableOnDevice(const Mat& src, const Device& device, + Stream& stream); /** * @@ -23,7 +24,8 @@ Result MakeAvailableOnDevice(const Mat& src, const Device& device, Stream& * @param stream * @return */ -Result MakeAvailableOnDevice(const Tensor& src, const Device& device, Stream& stream); +MMDEPLOY_API Result MakeAvailableOnDevice(const Tensor& src, const Device& device, + Stream& stream); } // namespace mmdeploy #endif 
// MMDEPLOY_TRANSFORM_UTILS_H diff --git a/csrc/core/utils/filesystem.h b/csrc/core/utils/filesystem.h new file mode 100644 index 0000000000..7aca6a8d8e --- /dev/null +++ b/csrc/core/utils/filesystem.h @@ -0,0 +1,15 @@ +// Copyright (c) OpenMMLab. All rights reserved. + +#ifndef MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_ +#define MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_ + +// TODO: what about clang? +#if __GNUC__ >= 8 || _MSC_VER +#include +namespace fs = std::filesystem; +#else +#include +namespace fs = std::experimental::filesystem; +#endif + +#endif // MMDEPLOY_CSRC_CORE_UTILS_FILESYSTEM_H_ diff --git a/csrc/core/utils/formatter.h b/csrc/core/utils/formatter.h index 14075b3866..af28f8c9c0 100644 --- a/csrc/core/utils/formatter.h +++ b/csrc/core/utils/formatter.h @@ -13,7 +13,7 @@ namespace mmdeploy { class Value; -std::string format_value(const Value& value); +MMDEPLOY_API std::string format_value(const Value& value); } // namespace mmdeploy diff --git a/csrc/core/utils/source_location.h b/csrc/core/utils/source_location.h index b7362cc12b..f0d579b76b 100644 --- a/csrc/core/utils/source_location.h +++ b/csrc/core/utils/source_location.h @@ -3,7 +3,7 @@ #ifndef MMDEPLOY_SRC_UTILS_SOURCE_LOCATION_H_ #define MMDEPLOY_SRC_UTILS_SOURCE_LOCATION_H_ -#if __has_include() +#if __has_include() && !_MSC_VER #include namespace mmdeploy { using SourceLocation = std::source_location; diff --git a/csrc/core/utils/stacktrace.h b/csrc/core/utils/stacktrace.h index 1c1cacbb7d..53b1a44b99 100644 --- a/csrc/core/utils/stacktrace.h +++ b/csrc/core/utils/stacktrace.h @@ -4,6 +4,7 @@ #define MMDEPLOY_SRC_CORE_STACKTRACE_H_ #include +#include namespace mmdeploy { diff --git a/csrc/core/value.h b/csrc/core/value.h index b73fba4302..3241330565 100644 --- a/csrc/core/value.h +++ b/csrc/core/value.h @@ -650,22 +650,22 @@ class Value { template bool contains(Key&& key) const { - return _unwrap().template _contains(std::forward(key)); + return _unwrap()._contains(std::forward(key)); } template iterator find(Key&& key) { - return _unwrap().template _find(std::forward(key)); + return _unwrap()._find(std::forward(key)); } template const_iterator find(Key&& key) const { - return _unwrap().template _find(std::forward(key)); + return _unwrap()._find(std::forward(key)); } template T value(const typename Object::key_type& key, const T& default_value) const { - return _unwrap().template _value(key, default_value); + return _unwrap()._value(key, default_value); } iterator begin() { return _unwrap()._begin(); } diff --git a/csrc/device/cpu/CMakeLists.txt b/csrc/device/cpu/CMakeLists.txt index 226d0894bf..f7e7c46117 100644 --- a/csrc/device/cpu/CMakeLists.txt +++ b/csrc/device/cpu/CMakeLists.txt @@ -2,10 +2,14 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_cpu_device) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) file(GLOB_RECURSE SRCS "*.cpp") -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PUBLIC pthread PRIVATE mmdeploy::core) + +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") + +set(THREADS_PREFER_PTHREAD_FLAG ON) +find_package(Threads REQUIRED) +target_link_libraries(${PROJECT_NAME} PRIVATE Threads::Threads) + add_library(mmdeploy::device::cpu ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/device/cuda/CMakeLists.txt b/csrc/device/cuda/CMakeLists.txt index 6a36e513bd..1ac67bb8b8 100644 --- a/csrc/device/cuda/CMakeLists.txt +++ b/csrc/device/cuda/CMakeLists.txt @@ -9,17 +9,13 @@ if (${CMAKE_VERSION} 
VERSION_GREATER_EQUAL "3.18.0") cmake_policy(SET CMP0104 OLD) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -set_targets(${PROJECT_NAME} CUDA_DEVICE_OBJ CUDA_DEVICE_STATIC CUDA_DEVICE_SHARED) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS cuda_device.cpp cuda_builtin_kernels.cu) -build_target(${PROJECT_NAME} "${SRCS}") +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") target_include_directories(${PROJECT_NAME} PUBLIC ${CUDA_INCLUDE_DIRS}) target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64) -target_link_libraries(${PROJECT_NAME} - PRIVATE mmdeploy::core - PUBLIC cudart cuda) +target_link_libraries(${PROJECT_NAME} PRIVATE cudart cuda) add_library(mmdeploy::device::cuda ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/device/cuda/buddy_allocator.h b/csrc/device/cuda/buddy_allocator.h index f94ec1c046..3e26a0edff 100644 --- a/csrc/device/cuda/buddy_allocator.h +++ b/csrc/device/cuda/buddy_allocator.h @@ -25,7 +25,7 @@ class BuddyAllocator { block_count_ = size / block_size_; if (!IsPowerOfTwo(block_count_)) { block_count_ = RoundToPowerOfTwo(block_count_); - WARN("Rounding up block_count to next power of 2 {}", block_count_); + MMDEPLOY_WARN("Rounding up block_count to next power of 2 {}", block_count_); } base_ = LogPowerOfTwo(block_count_); size_ = block_size_ * block_count_; @@ -34,17 +34,18 @@ class BuddyAllocator { free_.resize(base_ + 1); Build(1, 0); Add(1, 0); - ERROR("size = {}, block_size = {}, block_count = {}", size_, block_size_, block_count_); + MMDEPLOY_ERROR("size = {}, block_size = {}, block_count = {}", size_, block_size_, + block_count_); size = size_; for (int i = 0; i <= base_; ++i) { - ERROR("level {}, size = {}", i, size); + MMDEPLOY_ERROR("level {}, size = {}", i, size); size /= 2; } } ~BuddyAllocator() { for (int i = 0; i < free_.size(); ++i) { - ERROR("free_[{}].size(): {}", i, free_[i].size()); + MMDEPLOY_ERROR("free_[{}].size(): {}", i, free_[i].size()); } gDefaultAllocator().Deallocate(memory_, size_); } @@ -62,7 +63,7 @@ class BuddyAllocator { } } if (level < 0) { - WARN("failed to allocate memory size = {} bytes", n); + MMDEPLOY_WARN("failed to allocate memory size = {} bytes", n); return nullptr; } for (; level < n_level; ++level) { @@ -80,7 +81,7 @@ class BuddyAllocator { std::lock_guard lock{mutex_}; auto offset = static_cast(p) - static_cast(memory_); if (offset < 0 || offset % block_size_) { - ERROR("invalid address: {}", p); + MMDEPLOY_ERROR("invalid address: {}", p); } offset /= static_cast(block_size_); auto level = GetLevel(n); diff --git a/csrc/device/cuda/cuda_builtin_kernels.cu b/csrc/device/cuda/cuda_builtin_kernels.cu index c2cf5460cf..463da81369 100644 --- a/csrc/device/cuda/cuda_builtin_kernels.cu +++ b/csrc/device/cuda/cuda_builtin_kernels.cu @@ -1,5 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. 
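A note on `buddy_allocator.h` above: it rounds `block_count_` up to a power of two before building its free lists, but the helpers it calls are outside this diff. Plausible definitions, assumed here for illustration only:

```cpp
#include <cstddef>

// Assumed contracts of the helpers used by BuddyAllocator above; these
// bodies are illustrative, not the ones in the repository.
constexpr bool IsPowerOfTwo(std::size_t n) { return n != 0 && (n & (n - 1)) == 0; }

// Smallest power of two that is >= n.
inline std::size_t RoundToPowerOfTwo(std::size_t n) {
  std::size_t p = 1;
  while (p < n) p <<= 1;
  return p;
}

// k such that n == 2^k (n must be a power of two).
inline int LogPowerOfTwo(std::size_t n) {
  int k = 0;
  while (n >>= 1) ++k;
  return k;
}
```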
+#include + namespace mmdeploy { namespace cuda { @@ -17,8 +19,8 @@ __global__ void FillKernel(void* dst, size_t dst_size, const void* pattern, size int Fill(void* dst, size_t dst_size, const void* pattern, size_t pattern_size, cudaStream_t stream) { - const uint n_threads = 256; - const uint n_blocks = (dst_size + n_threads - 1) / n_threads; + const unsigned int n_threads = 256; + const unsigned int n_blocks = (dst_size + n_threads - 1) / n_threads; FillKernel<<>>(dst, dst_size, pattern, pattern_size); diff --git a/csrc/device/cuda/cuda_device.cpp b/csrc/device/cuda/cuda_device.cpp index 768a267d90..9825182e9e 100644 --- a/csrc/device/cuda/cuda_device.cpp +++ b/csrc/device/cuda/cuda_device.cpp @@ -79,9 +79,9 @@ Allocator CreateDefaultAllocator() { using namespace device_allocator; AllocatorImplPtr allocator = std::make_shared(); allocator = std::make_shared(allocator, "cudaMalloc"); - allocator = std::make_shared(allocator, -1, .0); + allocator = std::make_shared(allocator, -1, .5); allocator = std::make_shared(allocator, "Tree"); - INFO("Default CUDA allocator initialized"); + MMDEPLOY_INFO("Default CUDA allocator initialized"); return Access::create(allocator); } @@ -265,7 +265,7 @@ void CudaPlatformImpl::PerDeviceData::init() { CudaPlatformImpl::CudaPlatformImpl() { int count{}; if (auto err = cudaGetDeviceCount(&count); err != cudaSuccess) { - ERROR("error getting device count: {}", cudaGetErrorString(err)); + MMDEPLOY_ERROR("error getting device count: {}", cudaGetErrorString(err)); throw_exception(eFail); } per_device_data_storage_.reserve(count); diff --git a/csrc/device/cuda/cuda_device.h b/csrc/device/cuda/cuda_device.h index 71623d42b6..d4588d2fd9 100644 --- a/csrc/device/cuda/cuda_device.h +++ b/csrc/device/cuda/cuda_device.h @@ -16,6 +16,16 @@ class CudaPlatformImpl : public PlatformImpl { public: CudaPlatformImpl(); + ~CudaPlatformImpl() override { + // The CUDA driver may have already shutdown before the platform dtor is called. 
+ // As a workaround, simply leak per device resources and let the driver handle it + // FIXME: maybe a pair of global mmdeploy_init/deinit function would be a + // better solution + for (auto& data : per_device_data_storage_) { + data.release(); + } + } + const char* GetPlatformName() const noexcept override { return "cuda"; } shared_ptr CreateBuffer(Device device) override; diff --git a/csrc/device/cuda/default_allocator.h b/csrc/device/cuda/default_allocator.h index ca4d794e9d..a8b2177ccc 100644 --- a/csrc/device/cuda/default_allocator.h +++ b/csrc/device/cuda/default_allocator.h @@ -16,11 +16,11 @@ class DefaultAllocator { public: DefaultAllocator() = default; ~DefaultAllocator() { - ERROR("=== CUDA Default Allocator ==="); - ERROR(" Allocation: count={}, size={}MB, time={}ms", alloc_count_, - alloc_size_ / (1024 * 1024.f), alloc_time_ / 1000000.f); - ERROR("Deallocation: count={}, size={}MB, time={}ms", dealloc_count_, - dealloc_size_ / (1024 * 1024.f), dealloc_time_ / 1000000.f); + MMDEPLOY_ERROR("=== CUDA Default Allocator ==="); + MMDEPLOY_ERROR(" Allocation: count={}, size={}MB, time={}ms", alloc_count_, + alloc_size_ / (1024 * 1024.f), alloc_time_ / 1000000.f); + MMDEPLOY_ERROR("Deallocation: count={}, size={}MB, time={}ms", dealloc_count_, + dealloc_size_ / (1024 * 1024.f), dealloc_time_ / 1000000.f); } [[nodiscard]] void* Allocate(std::size_t n) { void* p{}; @@ -29,7 +29,7 @@ class DefaultAllocator { auto t1 = std::chrono::high_resolution_clock::now(); alloc_time_ += (int64_t)std::chrono::duration(t1 - t0).count(); if (ret != cudaSuccess) { - ERROR("error allocating cuda memory: {}", cudaGetErrorString(ret)); + MMDEPLOY_ERROR("error allocating cuda memory: {}", cudaGetErrorString(ret)); return nullptr; } alloc_count_ += 1; @@ -43,7 +43,7 @@ class DefaultAllocator { auto t1 = std::chrono::high_resolution_clock::now(); dealloc_time_ += (int64_t)std::chrono::duration(t1 - t0).count(); if (ret != cudaSuccess) { - ERROR("error deallocating cuda memory: {}", cudaGetErrorString(ret)); + MMDEPLOY_ERROR("error deallocating cuda memory: {}", cudaGetErrorString(ret)); return; } dealloc_count_ += 1; diff --git a/csrc/device/cuda/linear_allocator.h b/csrc/device/cuda/linear_allocator.h index 15be01bc36..59133e9332 100644 --- a/csrc/device/cuda/linear_allocator.h +++ b/csrc/device/cuda/linear_allocator.h @@ -25,11 +25,11 @@ class LinearAllocator { std::size_t space = base_ + size_ - ptr_; if (std::align(16, n, ptr, space)) { - ERROR("success n={}, total={}, count={}", n, total_, count_); + MMDEPLOY_ERROR("success n={}, total={}, count={}", n, total_, count_); ptr_ = static_cast(ptr) + n; return ptr; } - ERROR("fallback {}, total={}, count={}", n, total_, count_); + MMDEPLOY_ERROR("fallback {}, total={}, count={}", n, total_, count_); return gDefaultAllocator().Allocate(n); } void Deallocate(void* _p, std::size_t n) { @@ -43,7 +43,7 @@ class LinearAllocator { } total_ -= n; --count_; - ERROR("deallocate total={}, count={}", total_, count_); + MMDEPLOY_ERROR("deallocate total={}, count={}", total_, count_); if (total_ == 0) { assert(count_ == 0); ptr_ = base_; diff --git a/csrc/device/device_allocator.h b/csrc/device/device_allocator.h index 4539e12d4e..06bb5730df 100644 --- a/csrc/device/device_allocator.h +++ b/csrc/device/device_allocator.h @@ -162,12 +162,14 @@ class Stats : public AllocatorImpl { : allocator_(std::move(allocator)), name_(std::move(name)) {} ~Stats() override { - INFO("=== {} ===", name_); - INFO(" Allocation: count={}, size={}MB, time={}ms", data_.allocation_count, - 
data_.allocated_bytes / (1024 * 1024.f), static_cast(data_.allocation_time)); - INFO("Deallocation: count={}, size={}MB, time={}ms", data_.deallocation_count, - data_.deallocated_bytes / (1024 * 1024.f), static_cast(data_.deallocation_time)); - INFO("Peak memory usage: size={}MB", data_.peak / (1024 * 1024.f)); + MMDEPLOY_INFO("=== {} ===", name_); + MMDEPLOY_INFO(" Allocation: count={}, size={}MB, time={}ms", data_.allocation_count, + data_.allocated_bytes / (1024 * 1024.f), + static_cast(data_.allocation_time)); + MMDEPLOY_INFO("Deallocation: count={}, size={}MB, time={}ms", data_.deallocation_count, + data_.deallocated_bytes / (1024 * 1024.f), + static_cast(data_.deallocation_time)); + MMDEPLOY_INFO("Peak memory usage: size={}MB", data_.peak / (1024 * 1024.f)); } Block Allocate(size_t size) noexcept override { @@ -281,10 +283,10 @@ class Bucketizer : public AllocatorImpl { Bucketizer(const AllocatorCreator& creator, size_t min_size, size_t max_size, size_t step_size) : min_size_(min_size), max_size_(max_size), step_size_(step_size) { for (auto base = min_size_; base < max_size_; base += step_size_) { - // ERROR("{}, {}", base, base + step_size - 1); + // MMDEPLOY_ERROR("{}, {}", base, base + step_size - 1); allocator_.push_back(creator(base, base + step_size - 1)); } - // ERROR("{}", allocator_.size()); + // MMDEPLOY_ERROR("{}", allocator_.size()); } Block Allocate(size_t size) noexcept override { diff --git a/csrc/experimental/collection.h b/csrc/experimental/collection.h index e26be6da99..a65de2ec65 100644 --- a/csrc/experimental/collection.h +++ b/csrc/experimental/collection.h @@ -1,93 +1,93 @@ -// Copyright (c) OpenMMLab. All rights reserved. - -#ifndef MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ -#define MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ - -#include "token.h" - -namespace mmdeploy { - -class Collection { - public: - template - friend Collection& operator<<(Collection& c, const Token& value) { - c.put(value); - return c; - } - - template - friend const Collection& operator>>(const Collection& c, Token& value) { - c.get(value); - return c; - } - - template - Result maybe() const { - T token; - if (get(token)) { - return token; - } - return Status(eFail); - } - - private: - std::vector keys_; - std::vector> values_; - - template - void put(const Token& value) { - keys_.push_back(Token::key()); - values_.push_back(std::make_shared>(value)); - } - - template - bool get(Token& value) const { - for (int i = 0; i < keys_.size(); ++i) { - if (keys_[i] == Token::key()) { - value = *static_cast*>(values_[i].get()); - return true; - } - } - return false; - } -}; - -namespace detail { - -template -struct function_traits { - template - static std::tuple get_args(std::function); - - template - static R get_ret(std::function); - - using args_t = decltype(get_args(std::function{std::declval()})); - using ret_t = decltype(get_ret(std::function{std::declval()})); -}; - -// TODO: obtain first error -// TODO: combine all errors -template > -Result Apply(F&& f, const Result&... args) { - if ((... && args)) { - return std::invoke(std::forward(f), args.value()...); - } - return Status(eFail); -} - -template > -Result ApplyImpl(F&& f, const Collection& c, std::tuple*) { - return Apply(std::forward(f), c.maybe()...); -} - -} // namespace detail - -template ::args_t> -decltype(auto) Apply(F&& f, const Collection& c) { - return detail::ApplyImpl(std::forward(f), c, std::add_pointer_t{}); -} - -} // namespace mmdeploy - -#endif // MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ +//// Copyright (c) OpenMMLab. 
All rights reserved. +// +//#ifndef MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ +//#define MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ +// +//#include "token.h" +// +// namespace mmdeploy { +// +// class Collection { +// public: +// template +// friend Collection& operator<<(Collection& c, const Token& value) { +// c.put(value); +// return c; +// } +// +// template +// friend const Collection& operator>>(const Collection& c, Token& value) { +// c.get(value); +// return c; +// } +// +// template +// Result maybe() const { +// T token; +// if (get(token)) { +// return token; +// } +// return Status(eFail); +// } +// +// private: +// std::vector keys_; +// std::vector> values_; +// +// template +// void put(const Token& value) { +// keys_.push_back(Token::key()); +// values_.push_back(std::make_shared>(value)); +// } +// +// template +// bool get(Token& value) const { +// for (int i = 0; i < keys_.size(); ++i) { +// if (keys_[i] == Token::key()) { +// value = *static_cast*>(values_[i].get()); +// return true; +// } +// } +// return false; +// } +//}; +// +// namespace detail { +// +// template +// struct function_traits { +// template +// static std::tuple get_args(std::function); +// +// template +// static R get_ret(std::function); +// +// using args_t = decltype(get_args(std::function{std::declval()})); +// using ret_t = decltype(get_ret(std::function{std::declval()})); +//}; +// +//// TODO: obtain first error +//// TODO: combine all errors +// template > +// Result Apply(F&& f, const Result&... args) { +// if ((... && args)) { +// return std::invoke(std::forward(f), args.value()...); +// } +// return Status(eFail); +// } +// +// template > +// Result ApplyImpl(F&& f, const Collection& c, std::tuple*) { +// return Apply(std::forward(f), c.maybe()...); +// } +// +// } // namespace detail +// +// template ::args_t> +// decltype(auto) Apply(F&& f, const Collection& c) { +// return detail::ApplyImpl(std::forward(f), c, std::add_pointer_t{}); +// } +// +// } // namespace mmdeploy +// +//#endif // MMDEPLOY_SRC_EXPERIMENTAL_COLLECTION_H_ diff --git a/csrc/experimental/module_adapter.h b/csrc/experimental/module_adapter.h index 8d652cfb4f..581e7f2bea 100644 --- a/csrc/experimental/module_adapter.h +++ b/csrc/experimental/module_adapter.h @@ -31,7 +31,7 @@ struct InvokeImpl { std::forward(ts)...); return make_ret_val(std::move(ret)); } catch (const std::exception& e) { - ERROR("unhandled exception: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception: {}", e.what()); return Status(eFail); } catch (...) { return Status(eFail); diff --git a/csrc/experimental/token.h b/csrc/experimental/token.h index e1c951352e..6d6ae7f884 100644 --- a/csrc/experimental/token.h +++ b/csrc/experimental/token.h @@ -1,72 +1,72 @@ -// Copyright (c) OpenMMLab. All rights reserved. - -#ifndef MMDEPLOY_SRC_TOKEN_TOKEN_H_ -#define MMDEPLOY_SRC_TOKEN_TOKEN_H_ - -#include -#include -#include -#include -#include -#include - -#include "core/status_code.h" - -namespace mmdeploy { - -namespace token { - -template -using String = std::integer_sequence; - -// this is a GCC only extension -template -constexpr String operator""_ts() { - return {}; -} - -template -const char* c_str(String) { - static constexpr const char str[sizeof...(cs) + 1] = {cs..., '\0'}; - return str; -} - -} // namespace token - -// template -// static void* signature() { -// static char id = 0; -// return &id; +//// Copyright (c) OpenMMLab. All rights reserved. 
+// +//#ifndef MMDEPLOY_SRC_TOKEN_TOKEN_H_ +//#define MMDEPLOY_SRC_TOKEN_TOKEN_H_ +// +//#include +//#include +//#include +//#include +//#include +//#include +// +//#include "core/status_code.h" +// +// namespace mmdeploy { +// +// namespace token { +// +// template +// using String = std::integer_sequence; +// +//// this is a GCC only extension +// template +// constexpr String operator""_ts() { +// return {}; // } // -// using signature_t = decltype(signature()); - -template -struct Token { - using signature_t = void*; - using value_type = T; - - Token(T value = {}) : value_(value) {} // NOLINT - - operator T() const { return value_; } // NOLINT - static const char* key() { return token::c_str(Key{}); } - - T& operator*() { return value_; } - T* operator->() { return &value_; } - - private: - T value_; -}; - -template -class Identifier { - public: - constexpr explicit Identifier(const char* key) : key_(key) {} - const char* key_; -}; - -constexpr inline Identifier batch_size{"batch_size"}; - -} // namespace mmdeploy - -#endif // MMDEPLOY_SRC_TOKEN_TOKEN_H_ +// template +// const char* c_str(String) { +// static constexpr const char str[sizeof...(cs) + 1] = {cs..., '\0'}; +// return str; +// } +// +// } // namespace token +// +//// template +//// static void* signature() { +//// static char id = 0; +//// return &id; +//// } +//// +//// using signature_t = decltype(signature()); +// +// template +// struct Token { +// using signature_t = void*; +// using value_type = T; +// +// Token(T value = {}) : value_(value) {} // NOLINT +// +// operator T() const { return value_; } // NOLINT +// static const char* key() { return token::c_str(Key{}); } +// +// T& operator*() { return value_; } +// T* operator->() { return &value_; } +// +// private: +// T value_; +//}; +// +// template +// class Identifier { +// public: +// constexpr explicit Identifier(const char* key) : key_(key) {} +// const char* key_; +//}; +// +// constexpr inline Identifier batch_size{"batch_size"}; +// +//} // namespace mmdeploy +// +//#endif // MMDEPLOY_SRC_TOKEN_TOKEN_H_ diff --git a/csrc/graph/CMakeLists.txt b/csrc/graph/CMakeLists.txt index a0c09946e1..e39fbcf5c2 100644 --- a/csrc/graph/CMakeLists.txt +++ b/csrc/graph/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_graph) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS common.cpp inference.cpp @@ -10,7 +10,5 @@ set(SRCS task.cpp flatten.cpp unflatten.cpp) -build_target(${PROJECT_NAME} "${SRCS}") -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") add_library(mmdeploy::graph ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/graph/common.cpp b/csrc/graph/common.cpp index 7881b91f81..fba4b23619 100644 --- a/csrc/graph/common.cpp +++ b/csrc/graph/common.cpp @@ -10,7 +10,7 @@ mmdeploy::graph::BaseNode::BaseNode(const mmdeploy::Value& cfg) { from_value(cfg["output"], outputs_); name_ = cfg.value("name", ""); } catch (...) 
{ - ERROR("error parsing config: {}", cfg); + MMDEPLOY_ERROR("error parsing config: {}", cfg); throw; } } diff --git a/csrc/graph/common.h b/csrc/graph/common.h index 95e8c2d08c..a94d8c5fff 100644 --- a/csrc/graph/common.h +++ b/csrc/graph/common.h @@ -12,15 +12,16 @@ namespace mmdeploy::graph { template ::ReturnType> inline Result CreateFromRegistry(const Value& config, const char* key = "type") { - INFO("config: {}", config); + MMDEPLOY_INFO("config: {}", config); auto type = config[key].get(); auto creator = Registry::Get().GetCreator(type); if (!creator) { + MMDEPLOY_ERROR("failed to find module creator: {}", type); return Status(eEntryNotFound); } auto inst = creator->Create(config); if (!inst) { - ERROR("failed to create module: {}", type); + MMDEPLOY_ERROR("failed to create module: {}", type); return Status(eFail); } return std::move(inst); diff --git a/csrc/graph/flatten.cpp b/csrc/graph/flatten.cpp index 3153e586b5..d180470eb6 100644 --- a/csrc/graph/flatten.cpp +++ b/csrc/graph/flatten.cpp @@ -20,7 +20,7 @@ void FlattenNode::Build(TaskGraph& graph) { if (idxs.empty()) { idxs = std::move(idx); } else if (idx != idxs) { - ERROR("args does not have same structure"); + MMDEPLOY_ERROR("args does not have same structure"); return Status(eInvalidArgument); } rets.push_back(std::move(ret)); diff --git a/csrc/graph/inference.cpp b/csrc/graph/inference.cpp index 34d9c8a374..15c11d581a 100644 --- a/csrc/graph/inference.cpp +++ b/csrc/graph/inference.cpp @@ -17,7 +17,7 @@ Inference::Inference(const Value& cfg) : BaseNode(cfg) { auto model_path = model_value.get(); model_ = Model(model_path); } else { - ERROR("unsupported model specification"); + MMDEPLOY_ERROR("unsupported model specification"); throw_exception(eInvalidArgument); } @@ -31,7 +31,7 @@ Inference::Inference(const Value& cfg) : BaseNode(cfg) { value["context"] = context; pipeline_ = std::make_unique(value); if (!pipeline_) { - ERROR("failed to create pipeline"); + MMDEPLOY_ERROR("failed to create pipeline"); throw_exception(eFail); } } diff --git a/csrc/graph/pipeline.cpp b/csrc/graph/pipeline.cpp index ef77a242be..4d668c249a 100644 --- a/csrc/graph/pipeline.cpp +++ b/csrc/graph/pipeline.cpp @@ -21,7 +21,7 @@ Pipeline::Pipeline(const Value& cfg) : BaseNode(cfg["pipeline"]) { node_input_idx_.push_back(UpdateBindings(nodes_.back()->inputs(), kRead)); node_output_idx_.push_back(UpdateBindings(nodes_.back()->outputs(), kWrite)); } else { - ERROR("could not create {}:{}", name, type); + MMDEPLOY_ERROR("could not create {}:{}", name, type); throw_exception(eFail); } } @@ -57,7 +57,7 @@ std::vector Pipeline::UpdateBindings(const vector& names, Bind auto it = binding_name_to_idx_.lower_bound(name); if (it == binding_name_to_idx_.end() || it->first != name) { if (type == kRead) { - ERROR("unknown binding name: {}", name); + MMDEPLOY_ERROR("unknown binding name: {}", name); throw_exception(eEntryNotFound); } else { auto index = static_cast(binding_name_to_idx_.size()); diff --git a/csrc/graph/task.cpp b/csrc/graph/task.cpp index 0791a84a04..ee1f0ebf84 100644 --- a/csrc/graph/task.cpp +++ b/csrc/graph/task.cpp @@ -36,7 +36,7 @@ static size_t GetBatchSize(const Value& args) { Task::Task(const Value& cfg) : BaseNode(cfg) { auto module = CreateFromRegistry(cfg, "module"); if (!module) { - ERROR("failed to create task: {}", cfg); + MMDEPLOY_ERROR("failed to create task: {}", cfg); throw_exception(eFail); } module_ = std::move(module).value(); @@ -50,7 +50,8 @@ void Task::Build(TaskGraph& graph) { auto args = ctx.pop().array(); auto rets = 
Value::Array{}; auto batch_size = GetBatchSize(args); - // ERROR("name: {}, is_batched: {}, INPUT batch_size: {}", name_, is_batched_, batch_size); + // MMDEPLOY_ERROR("name: {}, is_batched: {}, INPUT batch_size: {}", name_, is_batched_, + // batch_size); if (!is_batched_ && batch_size) { rets.resize(outputs_.size(), Value::kArray); if (!is_thread_safe_) { @@ -86,7 +87,7 @@ void Task::Build(TaskGraph& graph) { rets = std::move(tmp).array(); } ctx.push(std::move(rets)); - // ERROR("name: {}, is_batched: {}, OUTPUT batch_size: {}", name_, is_batched_, + // MMDEPLOY_ERROR("name: {}, is_batched: {}, OUTPUT batch_size: {}", name_, is_batched_, // GetBatchSize(rets)); return success(); }); diff --git a/csrc/model/CMakeLists.txt b/csrc/model/CMakeLists.txt index fbeaaf6050..ebfbf2167e 100644 --- a/csrc/model/CMakeLists.txt +++ b/csrc/model/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(model) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(MODEL_NAMES "directory_model") if (${MMDEPLOY_ZIP_MODEL}) @@ -11,18 +11,17 @@ endif () foreach (MODEL_NAME ${MODEL_NAMES}) set(TARGET_MODEL_NAME mmdeploy_${MODEL_NAME}) - build_target(${TARGET_MODEL_NAME} ${MODEL_NAME}_impl.cpp) - target_link_libraries(${TARGET_MODEL_NAME} - PRIVATE mmdeploy::core - PUBLIC stdc++fs) + mmdeploy_add_module(${TARGET_MODEL_NAME} ${MODEL_NAME}_impl.cpp) + if (NOT MSVC) + target_link_libraries(${TARGET_MODEL_NAME} PUBLIC stdc++fs) + endif () if (${MODEL_NAME} STREQUAL "zip_model") find_package(libzip QUIET) if (libzip_FOUND) target_link_libraries(${TARGET_MODEL_NAME} PUBLIC libzip::zip) - else() + else () target_link_libraries(${TARGET_MODEL_NAME} PUBLIC zip) - endif() + endif () endif () add_library(mmdeploy::${MODEL_NAME} ALIAS ${TARGET_MODEL_NAME}) - export_module(${TARGET_MODEL_NAME}) endforeach () diff --git a/csrc/model/directory_model_impl.cpp b/csrc/model/directory_model_impl.cpp index 2de9d5ec23..202eafb12f 100644 --- a/csrc/model/directory_model_impl.cpp +++ b/csrc/model/directory_model_impl.cpp @@ -5,17 +5,10 @@ #include "archive/json_archive.h" #include "core/model.h" #include "core/model_impl.h" +#include "core/utils/filesystem.h" using nlohmann::json; -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif - namespace mmdeploy { class DirectoryModelImpl : public ModelImpl { @@ -52,7 +45,7 @@ class DirectoryModelImpl : public ModelImpl { from_json(json::parse(deploy_json), meta); return meta; } catch (std::exception& e) { - ERROR("exception happened: {}", e.what()); + MMDEPLOY_ERROR("exception happened: {}", e.what()); return Status(eFail); } } diff --git a/csrc/model/zip_model_impl.cpp b/csrc/model/zip_model_impl.cpp index 0f1479f64c..54545860f8 100644 --- a/csrc/model/zip_model_impl.cpp +++ b/csrc/model/zip_model_impl.cpp @@ -7,14 +7,8 @@ #include "core/logger.h" #include "core/model.h" #include "core/model_impl.h" +#include "core/utils/filesystem.h" #include "zip.h" -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif using nlohmann::json; @@ -40,10 +34,10 @@ class ZipModelImpl : public ModelImpl { int ret = 0; zip_ = zip_open(model_path.c_str(), 0, &ret); if (ret != 0) { - INFO("open zip file {} failed, ret {}", model_path.c_str(), ret); + MMDEPLOY_INFO("open zip file {} failed, ret {}", model_path.c_str(), ret); return Status(eInvalidArgument); } - INFO("open sdk model file {} 
successfully", model_path.c_str()); + MMDEPLOY_INFO("open sdk model file {} successfully", model_path.c_str()); return InitZip(); } @@ -70,24 +64,25 @@ class ZipModelImpl : public ModelImpl { auto iter = file_index_.find(file_path); if (iter == file_index_.end()) { - ERROR("cannot find file {} under dir {}", file_path.c_str(), root_dir_.c_str()); + MMDEPLOY_ERROR("cannot find file {} under dir {}", file_path.c_str(), root_dir_.c_str()); return Status(eFail); } index = iter->second; struct zip_file* pzip = zip_fopen_index(zip_, index, 0); if (nullptr == pzip) { - ERROR("read file {} in zip file failed, whose index is {}", file_path.c_str(), index); + MMDEPLOY_ERROR("read file {} in zip file failed, whose index is {}", file_path.c_str(), + index); return Status(eFail); } struct zip_stat stat {}; if ((ret = zip_stat_index(zip_, index, 0, &stat)) < 0) { - ERROR("get stat of file {} error, ret {}", file_path.c_str(), ret); + MMDEPLOY_ERROR("get stat of file {} error, ret {}", file_path.c_str(), ret); return Status(eFail); } - DEBUG("file size {}", (int)stat.size); + MMDEPLOY_DEBUG("file size {}", (int)stat.size); std::vector buf(stat.size); if ((ret = zip_fread(pzip, buf.data(), stat.size)) < 0) { - ERROR("read data of file {} error, ret {}", file_path.c_str(), ret); + MMDEPLOY_ERROR("read data of file {} error, ret {}", file_path.c_str(), ret); return Status(eFail); } return std::string(buf.begin(), buf.end()); @@ -100,7 +95,7 @@ class ZipModelImpl : public ModelImpl { from_json(json::parse(deploy_json), meta); return meta; } catch (std::exception& e) { - ERROR("exception happened: {}", e.what()); + MMDEPLOY_ERROR("exception happened: {}", e.what()); return Status(eFail); } } @@ -108,7 +103,7 @@ class ZipModelImpl : public ModelImpl { private: Result InitZip() { int files = zip_get_num_files(zip_); - INFO("there are {} files in sdk model file", files); + MMDEPLOY_INFO("there are {} files in sdk model file", files); if (files == 0) { return Status(eFail); } @@ -119,9 +114,9 @@ class ZipModelImpl : public ModelImpl { fs::path path(stat.name); auto file_name = path.filename().string(); if (file_name == ".") { - DEBUG("{}-th file name is: {}, which is a directory", i, stat.name); + MMDEPLOY_DEBUG("{}-th file name is: {}, which is a directory", i, stat.name); } else { - DEBUG("{}-th file name is: {}, which is a file", i, stat.name); + MMDEPLOY_DEBUG("{}-th file name is: {}, which is a file", i, stat.name); file_index_[file_name] = i; } } diff --git a/csrc/net/CMakeLists.txt b/csrc/net/CMakeLists.txt index f339801c04..f411abe5e4 100644 --- a/csrc/net/CMakeLists.txt +++ b/csrc/net/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_net_module) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) if ("trt" IN_LIST MMDEPLOY_TARGET_BACKENDS) add_subdirectory(trt) @@ -24,7 +24,5 @@ if ("openvino" IN_LIST MMDEPLOY_TARGET_BACKENDS) add_subdirectory(openvino) endif () -build_target(${PROJECT_NAME} net_module.cpp) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +mmdeploy_add_module(${PROJECT_NAME} net_module.cpp) add_library(mmdeploy::net_module ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/net/ncnn/CMakeLists.txt b/csrc/net/ncnn/CMakeLists.txt index 5e83abbf53..3c0e1ff6e7 100644 --- a/csrc/net/ncnn/CMakeLists.txt +++ b/csrc/net/ncnn/CMakeLists.txt @@ -2,19 +2,17 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_ncnn_net) -if("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) - 
include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) + include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) - find_package(ncnn REQUIRED) - add_library(${PROJECT_NAME} SHARED ncnn_net.cpp) - target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core ncnn) - target_link_libraries( - ${PROJECT_NAME} PRIVATE -Wl,--whole-archive mmdeploy::ncnn_ops::static - -Wl,--no-whole-archive) - add_library(mmdeploy::ncnn_net ALIAS ${PROJECT_NAME}) - export_module(${PROJECT_NAME}) -else() - message( - ERROR - "'ncnn_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") -endif() + find_package(ncnn REQUIRED) + + mmdeploy_add_module(${PROJECT_NAME} ncnn_net.cpp) + target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_ncnn_ops_obj) + target_link_libraries(${PROJECT_NAME} PRIVATE ncnn) + add_library(mmdeploy::ncnn_net ALIAS ${PROJECT_NAME}) +else () + message( + ERROR + "'ncnn_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") +endif () diff --git a/csrc/net/ncnn/ncnn_net.cpp b/csrc/net/ncnn/ncnn_net.cpp index 29b37242e3..041580d343 100644 --- a/csrc/net/ncnn/ncnn_net.cpp +++ b/csrc/net/ncnn/ncnn_net.cpp @@ -5,6 +5,7 @@ #include "core/logger.h" #include "core/model.h" #include "core/utils/formatter.h" +#include "ncnn_ops_register.h" namespace mmdeploy { @@ -33,6 +34,8 @@ Result NCNNNet::Init(const Value& args) { OUTCOME_TRY(params_, model.ReadFile(config.net)); OUTCOME_TRY(weights_, model.ReadFile(config.weights)); + register_mmdeploy_custom_layers(net_); + OUTCOME_TRY(ncnn_status(net_.load_param_mem(params_.c_str()))); net_.load_model(reinterpret_cast(weights_.data())); @@ -107,7 +110,7 @@ class NCNNNetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating NCNNNet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating NCNNNet: {}", r.error().message().c_str()); return nullptr; } } diff --git a/csrc/net/net_module.cpp b/csrc/net/net_module.cpp index 7ebd78df94..1216d4d7ee 100644 --- a/csrc/net/net_module.cpp +++ b/csrc/net/net_module.cpp @@ -24,7 +24,7 @@ struct NetModule::Impl { using Output = std::map; explicit Impl(const Value& args) { - DEBUG("Net Module cfg: {}", args); + MMDEPLOY_DEBUG("Net Module cfg: {}", args); auto init = [&]() -> Result { auto name = args["name"].get(); auto& context = args["context"]; @@ -34,7 +34,7 @@ struct NetModule::Impl { stream_ = context.value("stream", Stream::GetDefault(device_)); auto creator = Registry::Get().GetCreator(config.backend); if (!creator) { - ERROR("Net backend not found: {}", config.backend); + MMDEPLOY_ERROR("Net backend not found: {}", config.backend); return Status(eEntryNotFound); } auto net_cfg = args; @@ -82,13 +82,13 @@ struct NetModule::Impl { return shape; } if (shape[0] != 1) { - ERROR("unsupported shape for batch assemble: {}", shape); + MMDEPLOY_ERROR("unsupported shape for batch assemble: {}", shape); return Status(eNotSupported); } for (int i = 1; i < input.size(); ++i) { auto& sample = input[i]; if (sample.shape() != shape) { - ERROR("shapes are not consistent across the batch"); + MMDEPLOY_ERROR("shapes are not consistent across the batch"); return Status(eNotSupported); } } @@ -122,7 +122,7 @@ struct NetModule::Impl { if (auto it = sample.find(name); it != sample.end()) { tmp.push_back(it->second); } else { - ERROR("sample {} missing key {}", i, name); + MMDEPLOY_ERROR("sample {} missing key {}", i, name); return Status(eInvalidArgument); } } @@ -140,7 +140,7 @@ struct NetModule::Impl { auto& src = 
input_samples[i]; auto& dst = inputs_[i]; if (dst.shape() != input_shapes[i]) { - ERROR("inconsistent input shape, expect {}, got {}", input_shapes[i], dst.shape()); + MMDEPLOY_ERROR("inconsistent input shape, expect {}, got {}", input_shapes[i], dst.shape()); return Status(eFail); } if (src.size() > 1) { @@ -165,7 +165,7 @@ struct NetModule::Impl { if (tmp.size()) { OUTCOME_TRY(t.CopyTo(tmp, stream_)); } else { - WARN("copy skipped due to zero sized tensor"); + MMDEPLOY_WARN("copy skipped due to zero sized tensor"); } if (output.size() > 1) { for (int i = 0; i < output.size(); ++i) { diff --git a/csrc/net/openvino/CMakeLists.txt b/csrc/net/openvino/CMakeLists.txt index 14542aa94c..6963739e21 100644 --- a/csrc/net/openvino/CMakeLists.txt +++ b/csrc/net/openvino/CMakeLists.txt @@ -3,15 +3,13 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_openvino_net) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) - include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) + include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) find_package(InferenceEngine REQUIRED) - add_library(${PROJECT_NAME} SHARED openvino_net.cpp) + mmdeploy_add_module(${PROJECT_NAME} openvino_net.cpp) target_link_libraries(${PROJECT_NAME} PRIVATE - mmdeploy::core ${InferenceEngine_LIBRARIES}) add_library(mmdeploy::openvino_net ALIAS ${PROJECT_NAME}) - export_module(${PROJECT_NAME}) else () message(ERROR "'openvino_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") endif () diff --git a/csrc/net/openvino/openvino_net.cpp b/csrc/net/openvino/openvino_net.cpp index af20899a6b..1176967c9a 100644 --- a/csrc/net/openvino/openvino_net.cpp +++ b/csrc/net/openvino/openvino_net.cpp @@ -3,17 +3,11 @@ #include -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else -#include -namespace fs = std::experimental::filesystem; -#endif #include #include "core/logger.h" #include "core/model.h" +#include "core/utils/filesystem.h" #include "core/utils/formatter.h" namespace mmdeploy { @@ -40,7 +34,7 @@ static Result ConvertElementType(InferenceEngine::Precision prec) { case InferenceEngine::Precision::ePrecision::I64: return DataType::kINT64; default: - ERROR("unsupported InferenceEngine Precision: {}", static_cast(type)); + MMDEPLOY_ERROR("unsupported InferenceEngine Precision: {}", static_cast(type)); return Status(eNotSupported); } } @@ -58,7 +52,7 @@ static Result ConvertPrecision(DataType case DataType::kINT64: return InferenceEngine::Precision::ePrecision::I64; default: - ERROR("unsupported DataType: {}", static_cast(type)); + MMDEPLOY_ERROR("unsupported DataType: {}", static_cast(type)); return Status(eNotSupported); } } @@ -99,7 +93,7 @@ Result OpenVINONet::Init(const Value& args) { bin_out << raw_bin; bin_out.close(); } catch (const std::exception& e) { - ERROR("unhandled exception when creating tmp xml/bin: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating tmp xml/bin: {}", e.what()); return Status(eFail); } @@ -116,8 +110,7 @@ Result OpenVINONet::Init(const Value& args) { OUTCOME_TRY(auto data_type, ConvertElementType(input_data->getPrecision())); const auto& size_vector = input_data->getTensorDesc().getDims(); TensorShape shape{size_vector.begin(), size_vector.end()}; - input_tensors_.emplace_back(TensorDesc{ - .device = device_, .data_type = data_type, .shape = shape, .name = input_name}); + input_tensors_.emplace_back(TensorDesc{device_, data_type, shape, input_name}); } // set output tensor @@ -128,8 +121,7 @@ Result OpenVINONet::Init(const Value& args) { OUTCOME_TRY(auto data_type, 
ConvertElementType(output_data->getPrecision())); const auto& size_vector = output_data->getDims(); TensorShape shape{size_vector.begin(), size_vector.end()}; - output_tensors_.emplace_back(TensorDesc{ - .device = device_, .data_type = data_type, .shape = shape, .name = output_name}); + output_tensors_.emplace_back(TensorDesc{device_, data_type, shape, output_name}); } // create request @@ -141,7 +133,7 @@ Result OpenVINONet::Init(const Value& args) { request_ = executable_network.CreateInferRequest(); } catch (const std::exception& e) { - ERROR("unhandled exception when creating OpenVINO: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating OpenVINO: {}", e.what()); return Status(eFail); } return success(); @@ -190,7 +182,7 @@ static Result SetBlob(InferenceEngine::InferRequest& request, Tensor& tens InferenceEngine::make_shared_blob(ie_desc, tensor.data())); break; default: - ERROR("unsupported DataType: {}", static_cast(desc.data_type)); + MMDEPLOY_ERROR("unsupported DataType: {}", static_cast(desc.data_type)); return Status(eNotSupported); } return success(); @@ -211,9 +203,7 @@ static Result GetBlob(InferenceEngine::InferRequest& request, Tensor& tens auto moutputHolder = moutput->rmap(); std::shared_ptr data(const_cast(moutputHolder.as()), [](void*) {}); - Tensor blob_tensor = { - TensorDesc{.device = device, .data_type = data_type, .shape = shape, .name = output_name}, - data}; + Tensor blob_tensor = {TensorDesc{device, data_type, shape, output_name}, data}; if (!std::equal(blob_tensor.shape().begin(), blob_tensor.shape().end(), tensor.shape().begin())) tensor.Reshape(shape); OUTCOME_TRY(tensor.CopyFrom(blob_tensor, stream)); @@ -272,11 +262,11 @@ class OpenVINONetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating OpenVINONet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating OpenVINONet: {}", r.error().message().c_str()); return nullptr; } } catch (const std::exception& e) { - ERROR("unhandled exception when creating OpenVINONet: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating OpenVINONet: {}", e.what()); return nullptr; } } diff --git a/csrc/net/ort/CMakeLists.txt b/csrc/net/ort/CMakeLists.txt index 4b7af7aa52..b4b78eff47 100644 --- a/csrc/net/ort/CMakeLists.txt +++ b/csrc/net/ort/CMakeLists.txt @@ -3,18 +3,12 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_ort_net) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) - include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) - add_library(${PROJECT_NAME} SHARED ort_net.cpp) - target_include_directories(${PROJECT_NAME} PUBLIC ${ONNXRUNTIME_DIR}/include) + include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) + mmdeploy_add_module(${PROJECT_NAME} ort_net.cpp) + target_include_directories(${PROJECT_NAME} PRIVATE ${ONNXRUNTIME_DIR}/include) target_link_directories(${PROJECT_NAME} PUBLIC ${ONNXRUNTIME_DIR}/lib) - target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core - PUBLIC onnxruntime) - target_link_libraries(${PROJECT_NAME} PRIVATE - -Wl,--whole-archive - mmdeploy::onnxruntime::ops::static - -Wl,--no-whole-archive) + target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_onnxruntime_ops_obj) add_library(mmdeploy::ort_net ALIAS ${PROJECT_NAME}) - export_module(${PROJECT_NAME}) else () message(ERROR "'ort_net' is NOT supported in target devices: ${MMDEPLOY_TARGET_DEVICES}") endif () diff --git a/csrc/net/ort/ort_net.cpp b/csrc/net/ort/ort_net.cpp index 5a2fb2e637..10ab9f6e1f 100644 --- a/csrc/net/ort/ort_net.cpp +++ 
b/csrc/net/ort/ort_net.cpp @@ -1,9 +1,13 @@ // Copyright (c) OpenMMLab. All rights reserved. + #include "ort_net.h" +#include + #include "core/logger.h" #include "core/model.h" #include "core/utils/formatter.h" +#include "onnxruntime_register.h" namespace mmdeploy { @@ -25,7 +29,7 @@ static Result ConvertElementType(ONNXTensorElementDataType type) { case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: return DataType::kINT64; default: - ERROR("unsupported ONNXTensorElementDataType: {}", static_cast(type)); + MMDEPLOY_ERROR("unsupported ONNXTensorElementDataType: {}", static_cast(type)); return Status(eNotSupported); } } @@ -45,6 +49,9 @@ Result OrtNet::Init(const Value& args) { Ort::SessionOptions options; options.SetLogSeverityLevel(3); + + RegisterCustomOps(options, OrtGetApiBase()); + if (device_.is_device()) { OrtCUDAProviderOptions cuda_options{}; cuda_options.device_id = device_.device_id(); @@ -69,12 +76,11 @@ Result OrtNet::Init(const Value& args) { auto input_name = session_.GetInputName(i, allocator); auto type_info = session_.GetInputTypeInfo(i); auto shape = to_shape(type_info); - INFO("input {}, shape = {}", i, shape); + MMDEPLOY_INFO("input {}, shape = {}", i, shape); filter_shape(shape); OUTCOME_TRY(auto data_type, ConvertElementType(type_info.GetTensorTypeAndShapeInfo().GetElementType())); - input_tensors_.emplace_back( - TensorDesc{.device = device_, .data_type = data_type, .shape = shape, .name = input_name}); + input_tensors_.emplace_back(TensorDesc{device_, data_type, shape, input_name}); allocator.Free(input_name); } @@ -84,12 +90,11 @@ Result OrtNet::Init(const Value& args) { auto output_name = session_.GetOutputName(i, allocator); auto type_info = session_.GetOutputTypeInfo(i); auto shape = to_shape(type_info); - INFO("output {}, shape = {}", i, shape); + MMDEPLOY_INFO("output {}, shape = {}", i, shape); filter_shape(shape); OUTCOME_TRY(auto data_type, ConvertElementType(type_info.GetTensorTypeAndShapeInfo().GetElementType())); - output_tensors_.emplace_back( - TensorDesc{.device = device_, .data_type = data_type, .shape = shape, .name = output_name}); + output_tensors_.emplace_back(TensorDesc{device_, data_type, shape, output_name}); allocator.Free(output_name); } @@ -166,7 +171,7 @@ Result OrtNet::Forward() { OUTCOME_TRY(stream_.Wait()); } catch (const std::exception& e) { - ERROR(e.what()); + MMDEPLOY_ERROR(e.what()); return Status(eFail); } return success(); @@ -182,11 +187,11 @@ class OrtNetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating OrtNet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating OrtNet: {}", r.error().message().c_str()); return nullptr; } } catch (const std::exception& e) { - ERROR("unhandled exception when creating ORTNet: {}", e.what()); + MMDEPLOY_ERROR("unhandled exception when creating ORTNet: {}", e.what()); return nullptr; } } diff --git a/csrc/net/ppl/CMakeLists.txt b/csrc/net/ppl/CMakeLists.txt index cb6c0fb31c..dd859f8e29 100644 --- a/csrc/net/ppl/CMakeLists.txt +++ b/csrc/net/ppl/CMakeLists.txt @@ -2,10 +2,10 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_pplnn_net) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) find_package(pplnn REQUIRED) -add_library(${PROJECT_NAME} SHARED ppl_net.cpp) +mmdeploy_add_module(${PROJECT_NAME} ppl_net.cpp) target_include_directories(${PROJECT_NAME} PUBLIC $) if ("cpu" IN_LIST MMDEPLOY_TARGET_DEVICES) @@ -17,7 +17,5 @@ if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) 
target_link_directories(${PROJECT_NAME} PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/lib64) endif () target_link_libraries(${PROJECT_NAME} - PRIVATE mmdeploy::core ${PPLNN_LIBRARIES} - PUBLIC nvrtc) + PRIVATE ${PPLNN_LIBRARIES} nvrtc) add_library(mmdeploy::pplnn_net ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/net/ppl/ppl_net.cpp b/csrc/net/ppl/ppl_net.cpp index 72e8f99ac6..f0d4b16030 100644 --- a/csrc/net/ppl/ppl_net.cpp +++ b/csrc/net/ppl/ppl_net.cpp @@ -22,7 +22,7 @@ Result ppl_try(int code) { if (code == 0) { return success(); } - ERROR("ppl error: {}", ppl::common::GetRetCodeStr(code)); + MMDEPLOY_ERROR("ppl error: {}", ppl::common::GetRetCodeStr(code)); return Status(eFail); } @@ -86,9 +86,9 @@ Result PPLNet::Init(const Value& args) { /// debug only auto& desc = inputs_internal_[i]->GetShape(); std::vector shape_(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); - DEBUG("input {}: datatype = {}, dataformat = {}, shape = {}", i, - ppl::common::GetDataTypeStr(desc.GetDataType()), - ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); + MMDEPLOY_DEBUG("input {}: datatype = {}, dataformat = {}, shape = {}", i, + ppl::common::GetDataTypeStr(desc.GetDataType()), + ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); } for (int i = 0; i < runtime->GetOutputCount(); ++i) { @@ -98,9 +98,9 @@ Result PPLNet::Init(const Value& args) { auto desc = outputs_internal_[i]->GetShape(); std::vector shape_(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); - DEBUG("output {}: datatype = {}, dataformat = {}, shape = {}", i, - ppl::common::GetDataTypeStr(desc.GetDataType()), - ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); + MMDEPLOY_DEBUG("output {}: datatype = {}, dataformat = {}, shape = {}", i, + ppl::common::GetDataTypeStr(desc.GetDataType()), + ppl::common::GetDataFormatStr(desc.GetDataFormat()), shape_); TensorShape shape(desc.GetDims(), desc.GetDims() + desc.GetDimCount()); } @@ -176,8 +176,8 @@ Result PPLNet::Forward() { auto& internal = *outputs_internal_[i]; auto format = internal.GetShape().GetDataFormat(); if (format != ppl::common::DATAFORMAT_NDARRAY) { - ERROR("output {}'s format is {}, only NDARRAY is currently supported", i, - ppl::common::GetDataFormatStr(format)); + MMDEPLOY_ERROR("output {}'s format is {}, only NDARRAY is currently supported", i, + ppl::common::GetDataFormatStr(format)); return Status(eNotSupported); } auto& external = outputs_external_[i]; @@ -200,7 +200,8 @@ Result PPLNet::Forward() { if (external.size() > 0) { OUTCOME_TRY(Tensor(external.desc(), data).CopyTo(external, stream_)); } else { - WARN("copy skipped due to zero sized tensor: {} {}", external.name(), external.shape()); + MMDEPLOY_WARN("copy skipped due to zero sized tensor: {} {}", external.name(), + external.shape()); } } } @@ -235,7 +236,7 @@ Result PPLNet::Reshape(Span input_shapes) { if (can_infer_output_shapes_) { OUTCOME_TRY(auto output_shapes, InferOutputShapes(input_shapes, prev_in_shapes, prev_out_shapes)); - // ERROR("inferred output shapes: {}", output_shapes); + // MMDEPLOY_ERROR("inferred output shapes: {}", output_shapes); for (int i = 0; i < outputs_external_.size(); ++i) { auto& output = outputs_external_[i]; output.Reshape(output_shapes[i]); @@ -304,7 +305,7 @@ class PPLNetCreator : public Creator { if (auto r = p->Init(args)) { return p; } else { - ERROR("error creating PPLNet: {}", r.error().message().c_str()); + MMDEPLOY_ERROR("error creating PPLNet: {}", r.error().message().c_str()); return nullptr; } } diff --git 
a/csrc/net/trt/CMakeLists.txt b/csrc/net/trt/CMakeLists.txt index 1368e93352..94f08070b0 100644 --- a/csrc/net/trt/CMakeLists.txt +++ b/csrc/net/trt/CMakeLists.txt @@ -2,24 +2,16 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_trt_net) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) include(${CMAKE_SOURCE_DIR}/cmake/tensorrt.cmake) -add_library(${PROJECT_NAME} SHARED trt_net.cpp) +mmdeploy_add_module(${PROJECT_NAME} trt_net.cpp) target_include_directories(${PROJECT_NAME} PRIVATE ${TENSORRT_INCLUDE_DIR}) target_include_directories(${PROJECT_NAME} PRIVATE ${CUDNN_DIR}/include) target_include_directories(${PROJECT_NAME} PRIVATE ${CUDA_TOOLKIT_ROOT_DIR}/include) -target_link_directories(${PROJECT_NAME} PUBLIC ${CUDNN_DIR}/lib64) +target_link_directories(${PROJECT_NAME} PUBLIC ${CUDNN_DIR}/lib64 ${CUDNN_DIR}/lib/x64) +target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy_tensorrt_ops_obj) target_link_libraries(${PROJECT_NAME} PUBLIC ${TENSORRT_LIBRARY} cudnn) -target_link_libraries(${PROJECT_NAME} - PRIVATE mmdeploy::core - ) -target_link_libraries(${PROJECT_NAME} - PRIVATE -Wl,--whole-archive - mmdeploy::tensorrt_ops::static - -Wl,--no-whole-archive - ) add_library(mmdeploy::trt_net ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/net/trt/trt_net.cpp b/csrc/net/trt/trt_net.cpp index 6f4cb940a1..9300aad10e 100644 --- a/csrc/net/trt/trt_net.cpp +++ b/csrc/net/trt/trt_net.cpp @@ -18,14 +18,14 @@ class TRTLogger : public nvinfer1::ILogger { void log(Severity severity, const char* msg) noexcept override { switch (severity) { case Severity::kINFO: - // INFO("TRTNet: {}", msg); + // MMDEPLOY_INFO("TRTNet: {}", msg); break; case Severity::kWARNING: - WARN("TRTNet: {}", msg); + MMDEPLOY_WARN("TRTNet: {}", msg); break; case Severity::kERROR: case Severity::kINTERNAL_ERROR: - ERROR("TRTNet: {}", msg); + MMDEPLOY_ERROR("TRTNet: {}", msg); break; default: break; @@ -72,7 +72,7 @@ static inline Result trt_try(bool code, const char* msg = nullptr, Status return success(); } if (msg) { - ERROR("{}", msg); + MMDEPLOY_ERROR("{}", msg); } return e; } @@ -102,7 +102,7 @@ Result TRTNet::Init(const Value& args) { auto& context = args["context"]; device_ = context["device"].get(); if (device_.is_host()) { - ERROR("TRTNet: device must be a GPU!"); + MMDEPLOY_ERROR("TRTNet: device must be a GPU!"); return Status(eNotSupported); } stream_ = context["stream"].get(); @@ -129,19 +129,18 @@ Result TRTNet::Init(const Value& args) { auto binding_name = engine_->getBindingName(i); auto dims = engine_->getBindingDimensions(i); if (engine_->isShapeBinding(i)) { - ERROR("shape binding is not supported."); + MMDEPLOY_ERROR("shape binding is not supported."); return Status(eNotSupported); } OUTCOME_TRY(auto dtype, MapDataType(engine_->getBindingDataType(i))); - TensorDesc desc{ - .device = device_, .data_type = dtype, .shape = to_shape(dims), .name = binding_name}; + TensorDesc desc{device_, dtype, to_shape(dims), binding_name}; if (engine_->bindingIsInput(i)) { - DEBUG("input binding {} {} {}", i, binding_name, to_string(dims)); + MMDEPLOY_DEBUG("input binding {} {} {}", i, binding_name, to_string(dims)); input_ids_.push_back(i); input_names_.emplace_back(binding_name); input_tensors_.emplace_back(desc, Buffer()); } else { - DEBUG("output binding {} {} {}", i, binding_name, to_string(dims)); + MMDEPLOY_DEBUG("output binding {} {} {}", i, binding_name, to_string(dims)); output_ids_.push_back(i); output_names_.emplace_back(binding_name); 
output_tensors_.emplace_back(desc, Buffer()); @@ -169,17 +168,17 @@ Result TRTNet::Reshape(Span input_shapes) { } for (int i = 0; i < input_tensors_.size(); ++i) { auto dims = to_dims(input_shapes[i]); - // ERROR("input shape: {}", to_string(dims)); + // MMDEPLOY_ERROR("input shape: {}", to_string(dims)); TRT_TRY(context_->setBindingDimensions(input_ids_[i], dims)); input_tensors_[i].Reshape(input_shapes[i]); } if (!context_->allInputDimensionsSpecified()) { - ERROR("not all input dimensions specified"); + MMDEPLOY_ERROR("not all input dimensions specified"); return Status(eFail); } for (int i = 0; i < output_tensors_.size(); ++i) { auto dims = context_->getBindingDimensions(output_ids_[i]); - // ERROR("output shape: {}", to_string(dims)); + // MMDEPLOY_ERROR("output shape: {}", to_string(dims)); output_tensors_[i].Reshape(to_shape(dims)); } return success(); diff --git a/csrc/preprocess/CMakeLists.txt b/csrc/preprocess/CMakeLists.txt index ab1084198e..503ead8f8d 100644 --- a/csrc/preprocess/CMakeLists.txt +++ b/csrc/preprocess/CMakeLists.txt @@ -8,8 +8,7 @@ if ("cuda" IN_LIST MMDEPLOY_TARGET_DEVICES) add_subdirectory(cuda) endif () -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) -build_target(${PROJECT_NAME} transform_module.cpp) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) +mmdeploy_add_module(${PROJECT_NAME} transform_module.cpp) +target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::transform) add_library(mmdeploy::transform_module ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cpu/CMakeLists.txt b/csrc/preprocess/cpu/CMakeLists.txt index 00c87cd7ee..d2a75b10e8 100644 --- a/csrc/preprocess/cpu/CMakeLists.txt +++ b/csrc/preprocess/cpu/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_cpu_transform_impl) include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS collect_impl.cpp @@ -11,13 +11,10 @@ set(SRCS image2tensor_impl.cpp load_impl.cpp normalize_impl.cpp - opencv_utils.cpp - opencv_utils.h pad_impl.cpp resize_impl.cpp) -build_target(${PROJECT_NAME} "${SRCS}") +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") target_link_libraries(${PROJECT_NAME} - PUBLIC opencv_imgproc opencv_core - PRIVATE mmdeploy::core) + PRIVATE mmdeploy::transform + mmdeploy_opencv_utils) add_library(mmdeploy::transform_impl::cpu ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cpu/pad_impl.cpp b/csrc/preprocess/cpu/pad_impl.cpp index 3a139d70ad..c75ba4139b 100644 --- a/csrc/preprocess/cpu/pad_impl.cpp +++ b/csrc/preprocess/cpu/pad_impl.cpp @@ -17,7 +17,7 @@ class PadImpl : public ::mmdeploy::PadImpl { {"reflect", cv::BORDER_REFLECT_101}, {"symmetric", cv::BORDER_REFLECT}}; if (border_map.find(arg_.padding_mode) == border_map.end()) { - ERROR("unsupported padding_mode '{}'", arg_.padding_mode); + MMDEPLOY_ERROR("unsupported padding_mode '{}'", arg_.padding_mode); throw std::invalid_argument("unsupported padding_mode"); } border_type_ = border_map[arg_.padding_mode]; diff --git a/csrc/preprocess/cuda/CMakeLists.txt b/csrc/preprocess/cuda/CMakeLists.txt index ac4def77d3..76caeb214b 100644 --- a/csrc/preprocess/cuda/CMakeLists.txt +++ b/csrc/preprocess/cuda/CMakeLists.txt @@ -2,29 +2,29 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_cuda_transform_impl CUDA CXX) -if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.18.0") - # suppress 
'CMAKE_CUDA_ARCHITECTURES' warning - cmake_policy(SET CMP0104 OLD) -endif() +if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.18.0") + # suppress 'CMAKE_CUDA_ARCHITECTURES' warning + cmake_policy(SET CMP0104 OLD) +endif () find_package(pplcv REQUIRED) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS - crop_impl.cpp - image2tensor_impl.cpp - load_impl.cpp - normalize_impl.cpp - pad_impl.cpp - resize_impl.cpp - cast.cu - crop.cu - normalize.cu - transpose.cu) -build_target(${PROJECT_NAME} "${SRCS}") + crop_impl.cpp + image2tensor_impl.cpp + load_impl.cpp + normalize_impl.cpp + pad_impl.cpp + resize_impl.cpp + cast.cu + crop.cu + normalize.cu + transpose.cu) +mmdeploy_add_module(${PROJECT_NAME} "${SRCS}") +target_link_libraries(${PROJECT_NAME} PRIVATE + mmdeploy::transform ${PPLCV_LIBRARIES}) target_include_directories(${PROJECT_NAME} - PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include) -target_link_libraries(${PROJECT_NAME} PRIVATE ${PPLCV_LIBRARIES} mmdeploy::core) + PUBLIC ${CUDA_TOOLKIT_ROOT_DIR}/include ${PPLCV_INCLUDE_DIRS}) add_library(mmdeploy::transform_impl::cuda ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cuda/crop_impl.cpp b/csrc/preprocess/cuda/crop_impl.cpp index 0808e8c733..eb6f64f835 100644 --- a/csrc/preprocess/cuda/crop_impl.cpp +++ b/csrc/preprocess/cuda/crop_impl.cpp @@ -43,7 +43,7 @@ class CenterCropImpl : public ::mmdeploy::CenterCropImpl { } else if (1 == c) { Crop(input, desc.shape[2], output, h, w, top, left, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else if (DataType::kFLOAT == type) { @@ -54,11 +54,11 @@ class CenterCropImpl : public ::mmdeploy::CenterCropImpl { } else if (1 == c) { Crop(input, desc.shape[2], output, h, w, top, left, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } return dst_tensor; diff --git a/csrc/preprocess/cuda/load_impl.cpp b/csrc/preprocess/cuda/load_impl.cpp index 2d8df26b87..e7ffe506d2 100644 --- a/csrc/preprocess/cuda/load_impl.cpp +++ b/csrc/preprocess/cuda/load_impl.cpp @@ -70,11 +70,11 @@ class PrepareImageImpl : public ::mmdeploy::PrepareImageImpl { BGRA2BGR(stream, src_h, src_w, src_stride, src_ptr, dst_stride, dst_ptr); break; default: - ERROR("src type: unknown type {}", img.pixel_format()); + MMDEPLOY_ERROR("src type: unknown type {}", img.pixel_format()); return Status(eNotSupported); } if (ret != 0) { - ERROR("color transfer from {} to BGR failed, ret {}", img.pixel_format(), ret); + MMDEPLOY_ERROR("color transfer from {} to BGR failed, ret {}", img.pixel_format(), ret); return Status(eFail); } if (arg_.to_float32) { @@ -140,11 +140,11 @@ class PrepareImageImpl : public ::mmdeploy::PrepareImageImpl { BGRA2GRAY(stream, src_h, src_w, src_stride, src_ptr, dst_stride, dst_ptr); break; default: - ERROR("src type: unknown type {}", img.pixel_format()); + MMDEPLOY_ERROR("src type: unknown type {}", img.pixel_format()); throw Status(eNotSupported); } if (ret != 0) { - ERROR("color transfer from {} to Gray failed", img.pixel_format()); + MMDEPLOY_ERROR("color transfer from {} to Gray failed", img.pixel_format()); throw Status(eFail); } if (arg_.to_float32) { diff --git a/csrc/preprocess/cuda/normalize.cu b/csrc/preprocess/cuda/normalize.cu index 
696abcc7d3..9536ecd054 100644 --- a/csrc/preprocess/cuda/normalize.cu +++ b/csrc/preprocess/cuda/normalize.cu @@ -1,5 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. +#include + #include #include @@ -12,7 +14,7 @@ __global__ void normalize(const T* src, int height, int width, int stride, float int x = (int)(blockIdx.x * blockDim.x + threadIdx.x); int y = (int)(blockIdx.y * blockDim.y + threadIdx.y); - if (x >= width or y >= height) { + if (x >= width || y >= height) { return; } diff --git a/csrc/preprocess/cuda/normalize_impl.cpp b/csrc/preprocess/cuda/normalize_impl.cpp index 639f31aa98..48e6647990 100644 --- a/csrc/preprocess/cuda/normalize_impl.cpp +++ b/csrc/preprocess/cuda/normalize_impl.cpp @@ -41,7 +41,7 @@ class NormalizeImpl : public ::mmdeploy::NormalizeImpl { Normalize(input, h, w, stride, output, arg_.mean.data(), arg_.std.data(), arg_.to_rgb, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else if (DataType::kFLOAT == src_desc.data_type) { @@ -53,11 +53,11 @@ class NormalizeImpl : public ::mmdeploy::NormalizeImpl { Normalize(input, h, w, stride, output, arg_.mean.data(), arg_.std.data(), arg_.to_rgb, stream); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } } else { - ERROR("unsupported data type {}", src_desc.data_type); + MMDEPLOY_ERROR("unsupported data type {}", src_desc.data_type); assert(0); return Status(eNotSupported); } diff --git a/csrc/preprocess/cuda/pad_impl.cpp b/csrc/preprocess/cuda/pad_impl.cpp index ae567cb092..77781c0485 100644 --- a/csrc/preprocess/cuda/pad_impl.cpp +++ b/csrc/preprocess/cuda/pad_impl.cpp @@ -14,12 +14,20 @@ namespace cuda { class PadImpl : public ::mmdeploy::PadImpl { public: explicit PadImpl(const Value& args) : ::mmdeploy::PadImpl(args) { +#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2 + map border_map{{"constant", ppl::cv::BORDER_CONSTANT}, + {"edge", ppl::cv::BORDER_REPLICATE}, + {"reflect", ppl::cv::BORDER_REFLECT_101}, + { "symmetric", + ppl::cv::BORDER_REFLECT }}; +#else map border_map{{"constant", ppl::cv::BORDER_TYPE_CONSTANT}, {"edge", ppl::cv::BORDER_TYPE_REPLICATE}, {"reflect", ppl::cv::BORDER_TYPE_REFLECT_101}, {"symmetric", ppl::cv::BORDER_TYPE_REFLECT}}; +#endif if (border_map.find(arg_.padding_mode) == border_map.end()) { - ERROR("unsupported padding_mode '{}'", arg_.padding_mode); + MMDEPLOY_ERROR("unsupported padding_mode '{}'", arg_.padding_mode); throw_exception(eNotSupported); } padding_mode_ = border_map[arg_.padding_mode]; @@ -55,7 +63,7 @@ class PadImpl : public ::mmdeploy::PadImpl { dst_buffer, padding[1], padding[3], padding[0], padding[2], padding_mode_, arg_.pad_val); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); assert(0); return Status(eNotSupported); } @@ -71,17 +79,17 @@ class PadImpl : public ::mmdeploy::PadImpl { stream, height, width, width * c, src_buffer, dst_width * c, dst_buffer, padding[1], padding[3], padding[0], padding[2], padding_mode_, (ppl::cv::uchar)arg_.pad_val); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); assert(0); return Status(eNotSupported); } } else { - ERROR("unsupported data type {}", desc.data_type); + MMDEPLOY_ERROR("unsupported data type {}", desc.data_type); assert(0); return Status(eNotSupported); } if (ret != 0) { - ERROR("unexpected exception happened"); + 
MMDEPLOY_ERROR("unexpected exception happened"); assert(0); return Status(eNotSupported); } diff --git a/csrc/preprocess/cuda/resize_impl.cpp b/csrc/preprocess/cuda/resize_impl.cpp index ce0a891c3c..8a37664801 100644 --- a/csrc/preprocess/cuda/resize_impl.cpp +++ b/csrc/preprocess/cuda/resize_impl.cpp @@ -14,7 +14,7 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { public: explicit ResizeImpl(const Value& args) : ::mmdeploy::ResizeImpl(args) { if (arg_.interpolation != "bilinear" && arg_.interpolation != "nearest") { - ERROR("{} interpolation is not supported", arg_.interpolation); + MMDEPLOY_ERROR("{} interpolation is not supported", arg_.interpolation); throw_exception(eNotSupported); } } @@ -33,7 +33,7 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { } else if (tensor.data_type() == DataType::kFLOAT) { OUTCOME_TRY(ResizeDispatch(src_tensor, dst_tensor, stream)); } else { - ERROR("unsupported data type {}", tensor.data_type()); + MMDEPLOY_ERROR("unsupported data type {}", tensor.data_type()); return Status(eNotSupported); } return dst_tensor; @@ -42,23 +42,23 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { private: template ppl::common::RetCode DispatchImpl(Args&&... args) { -#ifdef PPLCV_VERSION_MAJOR +#if PPLCV_VERSION_MAJOR >= 0 && PPLCV_VERSION_MINOR >= 6 && PPLCV_VERSION_PATCH >= 2 if (arg_.interpolation == "bilinear") { return ppl::cv::cuda::Resize(std::forward(args)..., - ppl::cv::INTERPOLATION_TYPE_LINEAR); + ppl::cv::INTERPOLATION_LINEAR); } if (arg_.interpolation == "nearest") { return ppl::cv::cuda::Resize(std::forward(args)..., - ppl::cv::INTERPOLATION_TYPE_NEAREST_POINT); + ppl::cv::INTERPOLATION_NEAREST_POINT); } - #else -#warning "support for ppl.cv < 0.6 is deprecated and will be dropped in the future" if (arg_.interpolation == "bilinear") { - return ppl::cv::cuda::ResizeLinear(std::forward(args)...); + return ppl::cv::cuda::Resize(std::forward(args)..., + ppl::cv::INTERPOLATION_TYPE_LINEAR); } if (arg_.interpolation == "nearest") { - return ppl::cv::cuda::ResizeNearestPoint(std::forward(args)...); + return ppl::cv::cuda::Resize(std::forward(args)..., + ppl::cv::INTERPOLATION_TYPE_NEAREST_POINT); } #endif return ppl::common::RC_UNSUPPORTED; @@ -82,7 +82,7 @@ class ResizeImpl final : public ::mmdeploy::ResizeImpl { } else if (4 == c) { ret = DispatchImpl(stream, h, w, w * c, input, dst_h, dst_w, dst_w * c, output); } else { - ERROR("unsupported channels {}", c); + MMDEPLOY_ERROR("unsupported channels {}", c); return Status(eNotSupported); } return ret == 0 ? 
success() : Result(Status(eFail)); diff --git a/csrc/preprocess/transform/CMakeLists.txt b/csrc/preprocess/transform/CMakeLists.txt index e9a9c14026..8e13a67ae1 100644 --- a/csrc/preprocess/transform/CMakeLists.txt +++ b/csrc/preprocess/transform/CMakeLists.txt @@ -2,21 +2,19 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy_transform) -include(${CMAKE_SOURCE_DIR}/cmake/common.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) set(SRCS - collect.cpp - compose.cpp - crop.cpp - image2tensor.cpp - load.cpp - normalize.cpp - pad.cpp - resize.cpp - transform.cpp) -build_target(${PROJECT_NAME} "${SRCS}") + collect.cpp + compose.cpp + crop.cpp + image2tensor.cpp + load.cpp + normalize.cpp + pad.cpp + resize.cpp + transform.cpp) +mmdeploy_add_module(${PROJECT_NAME} LIBRARY "${SRCS}") target_include_directories( - ${PROJECT_NAME} PUBLIC $) -target_link_libraries(${PROJECT_NAME} PRIVATE mmdeploy::core) + ${PROJECT_NAME} PUBLIC $) add_library(mmdeploy::transform ALIAS ${PROJECT_NAME}) -export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/transform/collect.cpp b/csrc/preprocess/transform/collect.cpp index 673514c780..d01d1cf4b3 100644 --- a/csrc/preprocess/transform/collect.cpp +++ b/csrc/preprocess/transform/collect.cpp @@ -26,7 +26,7 @@ CollectImpl::CollectImpl(const Value &args) { } Result CollectImpl::Process(const Value &input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output; // collect 'ori_img' and 'attribute' from `input`, because those two fields @@ -45,7 +45,7 @@ Result CollectImpl::Process(const Value &input) { } for (auto &key : arg_.keys) { if (!input.contains(key)) { - ERROR("missed key '{}' in input", key); + MMDEPLOY_ERROR("missed key '{}' in input", key); // return Status(eInvalidArgument); return Status(eInvalidArgument); } else { @@ -53,7 +53,7 @@ Result CollectImpl::Process(const Value &input) { } } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } @@ -77,4 +77,6 @@ class CollectCreator : public Creator { REGISTER_MODULE(Transform, CollectCreator); +MMDEPLOY_DEFINE_REGISTRY(CollectImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/collect.h b/csrc/preprocess/transform/collect.h index 92439120f1..327c5191e4 100644 --- a/csrc/preprocess/transform/collect.h +++ b/csrc/preprocess/transform/collect.h @@ -6,7 +6,7 @@ #include "transform.h" namespace mmdeploy { -class CollectImpl : public Module { +class MMDEPLOY_API CollectImpl : public Module { public: explicit CollectImpl(const Value& args); ~CollectImpl() = default; @@ -24,7 +24,7 @@ class CollectImpl : public Module { ArgType arg_; }; -class Collect : public Transform { +class MMDEPLOY_API Collect : public Transform { public: explicit Collect(const Value& args, int version = 0); ~Collect() = default; @@ -35,6 +35,8 @@ class Collect : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(CollectImpl); + } // namespace mmdeploy #endif // MMDEPLOY_COLLECT_H diff --git a/csrc/preprocess/transform/compose.cpp b/csrc/preprocess/transform/compose.cpp index 57417eb480..a52b6848a1 100644 --- a/csrc/preprocess/transform/compose.cpp +++ b/csrc/preprocess/transform/compose.cpp @@ -17,15 +17,15 @@ Compose::Compose(const Value& args, int version) : Transform(args) { for (auto cfg : args["transforms"]) { cfg["context"] = context; auto type = cfg.value("type", std::string{}); - DEBUG("creating transform: {} with cfg: {}", type, 
mmdeploy::to_json(cfg).dump(2)); + MMDEPLOY_DEBUG("creating transform: {} with cfg: {}", type, mmdeploy::to_json(cfg).dump(2)); auto creator = Registry::Get().GetCreator(type, version); if (!creator) { - ERROR("unable to find creator: {}", type); + MMDEPLOY_ERROR("unable to find creator: {}", type); throw std::invalid_argument("unable to find creator"); } auto transform = creator->Create(cfg); if (!transform) { - ERROR("failed to create transform: {}", type); + MMDEPLOY_ERROR("failed to create transform: {}", type); throw std::invalid_argument("failed to create transform"); } transforms_.push_back(std::move(transform)); diff --git a/csrc/preprocess/transform/compose.h b/csrc/preprocess/transform/compose.h index 3472d3e209..41f170371c 100644 --- a/csrc/preprocess/transform/compose.h +++ b/csrc/preprocess/transform/compose.h @@ -7,7 +7,7 @@ namespace mmdeploy { -class Compose : public Transform { +class MMDEPLOY_API Compose : public Transform { public: explicit Compose(const Value& args, int version = 0); ~Compose() override = default; diff --git a/csrc/preprocess/transform/crop.cpp b/csrc/preprocess/transform/crop.cpp index d2b9977dc4..1ea8867cab 100644 --- a/csrc/preprocess/transform/crop.cpp +++ b/csrc/preprocess/transform/crop.cpp @@ -24,7 +24,7 @@ CenterCropImpl::CenterCropImpl(const Value& args) : TransformImpl(args) { } Result CenterCropImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); auto img_fields = GetImageFields(input); // copy input data, and update its properties @@ -63,14 +63,14 @@ Result CenterCropImpl::Process(const Value& input) { } } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } CenterCrop::CenterCrop(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'CenterCrop' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'CenterCrop' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Resize' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -87,4 +87,5 @@ class CenterCropCreator : public Creator { }; REGISTER_MODULE(Transform, CenterCropCreator); +MMDEPLOY_DEFINE_REGISTRY(CenterCropImpl); } // namespace mmdeploy diff --git a/csrc/preprocess/transform/crop.h b/csrc/preprocess/transform/crop.h index 46bd50737b..76c567271e 100644 --- a/csrc/preprocess/transform/crop.h +++ b/csrc/preprocess/transform/crop.h @@ -3,12 +3,14 @@ #ifndef MMDEPLOY_CROP_H #define MMDEPLOY_CROP_H +#include + #include "core/tensor.h" #include "transform.h" namespace mmdeploy { -class CenterCropImpl : public TransformImpl { +class MMDEPLOY_API CenterCropImpl : public TransformImpl { public: explicit CenterCropImpl(const Value& args); ~CenterCropImpl() = default; @@ -29,7 +31,7 @@ class CenterCropImpl : public TransformImpl { ArgType arg_; }; -class CenterCrop : public Transform { +class MMDEPLOY_API CenterCrop : public Transform { public: explicit CenterCrop(const Value& args, int version = 0); ~CenterCrop() = default; @@ -40,6 +42,8 @@ class CenterCrop : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(CenterCropImpl); + } // namespace mmdeploy #endif // MMDEPLOY_CROP_H diff --git a/csrc/preprocess/transform/image2tensor.cpp b/csrc/preprocess/transform/image2tensor.cpp index 2adf959904..e2ccd3bb5d 100644 --- 
a/csrc/preprocess/transform/image2tensor.cpp +++ b/csrc/preprocess/transform/image2tensor.cpp @@ -16,7 +16,7 @@ ImageToTensorImpl::ImageToTensorImpl(const Value& args) : TransformImpl(args) { } Result ImageToTensorImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output = input; for (auto& key : arg_.keys) { assert(input.contains(key)); @@ -28,14 +28,14 @@ Result ImageToTensorImpl::Process(const Value& input) { OUTCOME_TRY(output[key], HWC2CHW(src_tensor)); } // for key - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } ImageToTensor::ImageToTensor(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'ImageToTensor' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'ImageToTensor' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'ImageToTensor' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -53,4 +53,5 @@ class ImageToTensorCreator : public Creator { int version_{1}; }; REGISTER_MODULE(Transform, ImageToTensorCreator); +MMDEPLOY_DEFINE_REGISTRY(ImageToTensorImpl); } // namespace mmdeploy diff --git a/csrc/preprocess/transform/image2tensor.h b/csrc/preprocess/transform/image2tensor.h index cca2c5db7a..49eefd9f47 100644 --- a/csrc/preprocess/transform/image2tensor.h +++ b/csrc/preprocess/transform/image2tensor.h @@ -14,7 +14,7 @@ namespace mmdeploy { * it to (1, C, H, W). * */ -class ImageToTensorImpl : public TransformImpl { +class MMDEPLOY_API ImageToTensorImpl : public TransformImpl { public: ImageToTensorImpl(const Value& args); ~ImageToTensorImpl() = default; @@ -34,7 +34,7 @@ class ImageToTensorImpl : public TransformImpl { ArgType arg_; }; -class ImageToTensor : public Transform { +class MMDEPLOY_API ImageToTensor : public Transform { public: explicit ImageToTensor(const Value& args, int version = 0); ~ImageToTensor() = default; @@ -45,6 +45,8 @@ class ImageToTensor : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(ImageToTensorImpl); + } // namespace mmdeploy #endif // MMDEPLOY_IMAGE2TENSOR_H diff --git a/csrc/preprocess/transform/load.cpp b/csrc/preprocess/transform/load.cpp index 671948f2d8..462c70a837 100644 --- a/csrc/preprocess/transform/load.cpp +++ b/csrc/preprocess/transform/load.cpp @@ -31,7 +31,7 @@ PrepareImageImpl::PrepareImageImpl(const Value& args) : TransformImpl(args) { */ Result PrepareImageImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); assert(input.contains("ori_img")); // copy input data, and update its properties later @@ -50,7 +50,7 @@ Result PrepareImageImpl::Process(const Value& input) { } output["ori_shape"] = {1, src_mat.height(), src_mat.width(), src_mat.channel()}; output["img_fields"].push_back("img"); - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } @@ -58,7 +58,7 @@ Result PrepareImageImpl::Process(const Value& input) { PrepareImage::PrepareImage(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'PrepareImage' is not supported on '{}' platform", specified_platform_); + 
MMDEPLOY_ERROR("'PrepareImage' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'PrepareImage' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -80,4 +80,7 @@ class PrepareImageCreator : public Creator { }; REGISTER_MODULE(Transform, PrepareImageCreator); + +MMDEPLOY_DEFINE_REGISTRY(PrepareImageImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/load.h b/csrc/preprocess/transform/load.h index 32f0bdfdbd..a05d4c136f 100644 --- a/csrc/preprocess/transform/load.h +++ b/csrc/preprocess/transform/load.h @@ -8,7 +8,7 @@ #include "transform.h" namespace mmdeploy { -class PrepareImageImpl : public TransformImpl { +class MMDEPLOY_API PrepareImageImpl : public TransformImpl { public: explicit PrepareImageImpl(const Value& args); ~PrepareImageImpl() = default; @@ -29,7 +29,7 @@ class PrepareImageImpl : public TransformImpl { ArgType arg_; }; -class PrepareImage : public Transform { +class MMDEPLOY_API PrepareImage : public Transform { public: explicit PrepareImage(const Value& args, int version = 0); ~PrepareImage() = default; @@ -40,6 +40,8 @@ class PrepareImage : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(PrepareImageImpl); + } // namespace mmdeploy #endif // MMDEPLOY_LOAD_H diff --git a/csrc/preprocess/transform/normalize.cpp b/csrc/preprocess/transform/normalize.cpp index a0bc5f7ba2..7fc9c2ad31 100644 --- a/csrc/preprocess/transform/normalize.cpp +++ b/csrc/preprocess/transform/normalize.cpp @@ -10,9 +10,11 @@ using namespace std; namespace mmdeploy { +// MMDEPLOY_DEFINE_REGISTRY(NormalizeImpl); + NormalizeImpl::NormalizeImpl(const Value& args) : TransformImpl(args) { - if (!args.contains("mean") or !args.contains("std")) { - ERROR("no 'mean' or 'std' is configured"); + if (!args.contains("mean") || !args.contains("std")) { + MMDEPLOY_ERROR("no 'mean' or 'std' is configured"); throw std::invalid_argument("no 'mean' or 'std' is configured"); } for (auto& v : args["mean"]) { @@ -50,7 +52,7 @@ NormalizeImpl::NormalizeImpl(const Value& args) : TransformImpl(args) { */ Result NormalizeImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); // copy input data, and update its properties later Value output = input; @@ -73,14 +75,14 @@ Result NormalizeImpl::Process(const Value& input) { } output["img_norm_cfg"]["to_rgb"] = arg_.to_rgb; } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } Normalize::Normalize(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'Normalize' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'Normalize' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Normalize' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -98,4 +100,6 @@ class NormalizeCreator : public Creator { REGISTER_MODULE(Transform, NormalizeCreator); +MMDEPLOY_DEFINE_REGISTRY(NormalizeImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/normalize.h b/csrc/preprocess/transform/normalize.h index 14a4edb43e..fef8fd17c6 100644 --- a/csrc/preprocess/transform/normalize.h +++ b/csrc/preprocess/transform/normalize.h @@ -8,7 +8,7 @@ namespace mmdeploy { -class NormalizeImpl : public TransformImpl { +class MMDEPLOY_API NormalizeImpl 
: public TransformImpl { public: explicit NormalizeImpl(const Value& args); ~NormalizeImpl() = default; @@ -28,7 +28,7 @@ class NormalizeImpl : public TransformImpl { ArgType arg_; }; -class Normalize : public Transform { +class MMDEPLOY_API Normalize : public Transform { public: explicit Normalize(const Value& args, int version = 0); ~Normalize() = default; @@ -39,5 +39,7 @@ class Normalize : public Transform { std::unique_ptr impl_; }; +MMDEPLOY_DECLARE_REGISTRY(NormalizeImpl); + } // namespace mmdeploy #endif // MMDEPLOY_NORMALIZE_H diff --git a/csrc/preprocess/transform/pad.cpp b/csrc/preprocess/transform/pad.cpp index 4d9c6c69a2..9eb60748f0 100644 --- a/csrc/preprocess/transform/pad.cpp +++ b/csrc/preprocess/transform/pad.cpp @@ -28,7 +28,7 @@ PadImpl::PadImpl(const Value& args) : TransformImpl(args) { } Result PadImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output = input; auto img_fields = GetImageFields(input); @@ -38,7 +38,7 @@ Result PadImpl::Process(const Value& input) { assert(tensor.desc().shape.size() == 4); assert(tensor.desc().shape[0] == 1); - assert(tensor.desc().shape[3] == 3 or tensor.desc().shape[3] == 1); + assert(tensor.desc().shape[3] == 3 || tensor.desc().shape[3] == 1); int height = tensor.desc().shape[1]; int width = tensor.desc().shape[2]; @@ -75,14 +75,14 @@ Result PadImpl::Process(const Value& input) { } } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } Pad::Pad(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'Pad' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'Pad' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Pad' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -100,4 +100,6 @@ class PadCreator : public Creator { REGISTER_MODULE(Transform, PadCreator); +MMDEPLOY_DEFINE_REGISTRY(PadImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/pad.h b/csrc/preprocess/transform/pad.h index e684791a5c..1b5ccbcd5f 100644 --- a/csrc/preprocess/transform/pad.h +++ b/csrc/preprocess/transform/pad.h @@ -3,12 +3,14 @@ #ifndef MMDEPLOY_PAD_H #define MMDEPLOY_PAD_H +#include + #include "core/tensor.h" #include "transform.h" namespace mmdeploy { -class PadImpl : public TransformImpl { +class MMDEPLOY_API PadImpl : public TransformImpl { public: explicit PadImpl(const Value& args); ~PadImpl() override = default; @@ -33,7 +35,7 @@ class PadImpl : public TransformImpl { ArgType arg_; }; -class Pad : public Transform { +class MMDEPLOY_API Pad : public Transform { public: explicit Pad(const Value& args, int version = 0); ~Pad() override = default; @@ -43,6 +45,9 @@ class Pad : public Transform { protected: std::unique_ptr impl_; }; + +MMDEPLOY_DECLARE_REGISTRY(PadImpl); + } // namespace mmdeploy #endif // MMDEPLOY_PAD_H diff --git a/csrc/preprocess/transform/resize.cpp b/csrc/preprocess/transform/resize.cpp index 604103e6c6..98398e3dce 100644 --- a/csrc/preprocess/transform/resize.cpp +++ b/csrc/preprocess/transform/resize.cpp @@ -19,14 +19,14 @@ ResizeImpl::ResizeImpl(const Value& args) : TransformImpl(args) { arg_.img_scale = {size, size}; } else if (args["size"].is_array()) { if (args["size"].size() != 2) { - ERROR("'size' expects an array of size 2, but got {}", 
args["size"].size()); + MMDEPLOY_ERROR("'size' expects an array of size 2, but got {}", args["size"].size()); throw std::length_error("'size' expects an array of size 2"); } auto height = args["size"][0].get(); auto width = args["size"][1].get(); arg_.img_scale = {height, width}; } else { - ERROR("'size' is expected to be an integer or and array of size 2"); + MMDEPLOY_ERROR("'size' is expected to be an integer or and array of size 2"); throw std::domain_error("'size' is expected to be an integer or and array of size 2"); } } @@ -35,13 +35,13 @@ ResizeImpl::ResizeImpl(const Value& args) : TransformImpl(args) { vector interpolations{"nearest", "bilinear", "bicubic", "area", "lanczos"}; if (std::find(interpolations.begin(), interpolations.end(), arg_.interpolation) == interpolations.end()) { - ERROR("'{}' interpolation is not supported", arg_.interpolation); + MMDEPLOY_ERROR("'{}' interpolation is not supported", arg_.interpolation); throw std::invalid_argument("unexpected interpolation"); } } Result ResizeImpl::Process(const Value& input) { - DEBUG("input: {}", to_json(input).dump(2)); + MMDEPLOY_DEBUG("input: {}", to_json(input).dump(2)); Value output = input; auto img_fields = GetImageFields(input); @@ -66,7 +66,7 @@ Result ResizeImpl::Process(const Value& input) { dst_h = int(h * scale_factor + 0.5); dst_w = int(w * scale_factor + 0.5); } else if (!arg_.img_scale.empty()) { - DEBUG( + MMDEPLOY_WARN( "neither 'scale' or 'scale_factor' is provided in input value. " "'img_scale' will be used"); if (-1 == arg_.img_scale[1]) { @@ -82,7 +82,7 @@ Result ResizeImpl::Process(const Value& input) { dst_w = arg_.img_scale[1]; } } else { - ERROR("no resize related parameter is provided"); + MMDEPLOY_ERROR("no resize related parameter is provided"); return Status(eInvalidArgument); } if (arg_.keep_ratio) { @@ -111,14 +111,14 @@ Result ResizeImpl::Process(const Value& input) { output[key] = dst_img; } - DEBUG("output: {}", to_json(output).dump(2)); + MMDEPLOY_DEBUG("output: {}", to_json(output).dump(2)); return output; } Resize::Resize(const Value& args, int version) : Transform(args) { auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - ERROR("'Resize' is not supported on '{}' platform", specified_platform_); + MMDEPLOY_ERROR("'Resize' is not supported on '{}' platform", specified_platform_); throw std::domain_error("'Resize' is not supported on specified platform"); } impl_ = impl_creator->Create(args); @@ -136,4 +136,6 @@ class ResizeCreator : public Creator { REGISTER_MODULE(Transform, ResizeCreator); +MMDEPLOY_DEFINE_REGISTRY(ResizeImpl); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/resize.h b/csrc/preprocess/transform/resize.h index 1b9c10034d..54947bee48 100644 --- a/csrc/preprocess/transform/resize.h +++ b/csrc/preprocess/transform/resize.h @@ -3,11 +3,13 @@ #ifndef MMDEPLOY_RESIZE_H #define MMDEPLOY_RESIZE_H +#include + #include "core/tensor.h" #include "transform.h" namespace mmdeploy { -class ResizeImpl : public TransformImpl { +class MMDEPLOY_API ResizeImpl : public TransformImpl { public: explicit ResizeImpl(const Value& args); ~ResizeImpl() override = default; @@ -29,7 +31,7 @@ class ResizeImpl : public TransformImpl { ArgType arg_; }; -class Resize : public Transform { +class MMDEPLOY_API Resize : public Transform { public: explicit Resize(const Value& args, int version = 0); ~Resize() override = default; @@ -40,5 +42,8 @@ class Resize : public Transform { std::unique_ptr impl_; static const std::string name_; 
}; + +MMDEPLOY_DECLARE_REGISTRY(ResizeImpl); + } // namespace mmdeploy #endif // MMDEPLOY_RESIZE_H diff --git a/csrc/preprocess/transform/transform.cpp b/csrc/preprocess/transform/transform.cpp index d57cfa393d..f2be7519c4 100644 --- a/csrc/preprocess/transform/transform.cpp +++ b/csrc/preprocess/transform/transform.cpp @@ -48,4 +48,6 @@ Transform::Transform(const Value &args) { } } +MMDEPLOY_DEFINE_REGISTRY(Transform); + } // namespace mmdeploy diff --git a/csrc/preprocess/transform/transform.h b/csrc/preprocess/transform/transform.h index fa1a700ea2..ba96e91a14 100644 --- a/csrc/preprocess/transform/transform.h +++ b/csrc/preprocess/transform/transform.h @@ -9,7 +9,7 @@ namespace mmdeploy { -class TransformImpl : public Module { +class MMDEPLOY_API TransformImpl : public Module { public: TransformImpl() = default; explicit TransformImpl(const Value& args); @@ -23,41 +23,36 @@ class TransformImpl : public Module { Stream stream_; }; -class Transform : public Module { +class MMDEPLOY_API Transform : public Module { public: + ~Transform() override = default; + Transform() = default; explicit Transform(const Value& args); - ~Transform() override = default; + Transform(const Transform&) = delete; + Transform& operator=(const Transform&) = delete; const std::string& RuntimePlatform() const { return runtime_platform_; } protected: template - [[deprecated]] - /* - * We cannot LOG the error message, because WARN/INFO/ERROR causes - * redefinition when building UTs "catch2.hpp" used in UTs has the same LOG - * declaration - */ - std::unique_ptr - Instantiate(const char* transform_type, const Value& args, int version = 0) { + [[deprecated]] std::unique_ptr Instantiate(const char* transform_type, const Value& args, + int version = 0) { std::unique_ptr impl(nullptr); auto impl_creator = Registry::Get().GetCreator(specified_platform_, version); if (nullptr == impl_creator) { - // WARN("cannot find {} implementation on specific platform {} ", - // transform_type, specified_platform_); + MMDEPLOY_WARN("cannot find {} implementation on specific platform {} ", transform_type, + specified_platform_); for (auto& name : candidate_platforms_) { impl_creator = Registry::Get().GetCreator(name); if (impl_creator) { - // INFO("fallback {} implementation to platform {}", transform_type, - // name); + MMDEPLOY_INFO("fallback {} implementation to platform {}", transform_type, name); break; } } } if (nullptr == impl_creator) { - // ERROR("cannot find {} implementation on any registered platform ", - // transform_type); + MMDEPLOY_ERROR("cannot find {} implementation on any registered platform ", transform_type); return nullptr; } else { return impl_creator->Create(args); @@ -70,6 +65,8 @@ class Transform : public Module { std::vector candidate_platforms_; }; +MMDEPLOY_DECLARE_REGISTRY(Transform); + } // namespace mmdeploy #endif // MMDEPLOY_TRANSFORM_H diff --git a/csrc/preprocess/transform_module.cpp b/csrc/preprocess/transform_module.cpp index b769878212..9b7b2f01fd 100644 --- a/csrc/preprocess/transform_module.cpp +++ b/csrc/preprocess/transform_module.cpp @@ -3,6 +3,7 @@ #include "transform_module.h" #include "archive/value_archive.h" +#include "core/module.h" #include "core/utils/formatter.h" #include "experimental/module_adapter.h" #include "preprocess/transform/transform.h" @@ -15,12 +16,12 @@ TransformModule::TransformModule(const Value& args) { const auto type = "Compose"; auto creator = Registry::Get().GetCreator(type, 1); if (!creator) { - ERROR("unable to find creator: {}", type); + MMDEPLOY_ERROR("unable 
to find creator: {}", type); throw_exception(eEntryNotFound); } auto cfg = args; if (cfg.contains("device")) { - WARN("force using device: {}", cfg["device"].get()); + MMDEPLOY_WARN("force using device: {}", cfg["device"].get()); auto device = Device(cfg["device"].get()); cfg["context"]["device"] = device; cfg["context"]["stream"] = Stream::GetDefault(device); @@ -31,7 +32,7 @@ TransformModule::TransformModule(const Value& args) { Result TransformModule::operator()(const Value& input) { auto output = transform_->Process(input); if (!output) { - ERROR("error: {}", output.error().message().c_str()); + MMDEPLOY_ERROR("error: {}", output.error().message().c_str()); } auto& ret = output.value(); if (ret.is_object()) { @@ -39,13 +40,13 @@ Result TransformModule::operator()(const Value& input) { } else if (ret.is_array() && ret.size() == 1 && ret[0].is_object()) { ret = ret[0]; } else { - ERROR("unsupported return value: {}", ret); + MMDEPLOY_ERROR("unsupported return value: {}", ret); return Status(eNotSupported); } return ret; } -class TransformModuleCreator : public Creator { +class MMDEPLOY_API TransformModuleCreator : public Creator { public: const char* GetName() const override { return "Transform"; } int GetVersion() const override { return 0; } diff --git a/csrc/utils/CMakeLists.txt b/csrc/utils/CMakeLists.txt new file mode 100644 index 0000000000..1eef35940a --- /dev/null +++ b/csrc/utils/CMakeLists.txt @@ -0,0 +1,3 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +add_subdirectory(opencv) diff --git a/csrc/utils/opencv/CMakeLists.txt b/csrc/utils/opencv/CMakeLists.txt new file mode 100644 index 0000000000..6eb8bd2e91 --- /dev/null +++ b/csrc/utils/opencv/CMakeLists.txt @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. +cmake_minimum_required(VERSION 3.14) +project(mmdeploy_opencv_utils) + +include(${CMAKE_SOURCE_DIR}/cmake/opencv.cmake) +include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) + +mmdeploy_add_library(${PROJECT_NAME} opencv_utils.cpp) + +target_link_libraries(${PROJECT_NAME} + PRIVATE mmdeploy::core + PUBLIC ${OpenCV_LIBS}) + +target_include_directories(${PROJECT_NAME} + INTERFACE $) + +#export_module(${PROJECT_NAME}) diff --git a/csrc/preprocess/cpu/opencv_utils.cpp b/csrc/utils/opencv/opencv_utils.cpp similarity index 91% rename from csrc/preprocess/cpu/opencv_utils.cpp rename to csrc/utils/opencv/opencv_utils.cpp index d02d5571cd..ef05dbf2eb 100644 --- a/csrc/preprocess/cpu/opencv_utils.cpp +++ b/csrc/utils/opencv/opencv_utils.cpp @@ -42,7 +42,7 @@ cv::Mat Mat2CVMat(const Mat& mat) { {DataType::kINT32, CV_32S}}; auto type = CV_MAKETYPE(type_mapper[mat.type()], mat.channel()); auto format = mat.pixel_format(); - if (PixelFormat::kBGR == format or PixelFormat::kRGB == format) { + if (PixelFormat::kBGR == format || PixelFormat::kRGB == format) { return cv::Mat(mat.height(), mat.width(), type, mat.data()); } else if (PixelFormat::kGRAYSCALE == format) { return cv::Mat(mat.height(), mat.width(), type, mat.data()); @@ -59,7 +59,7 @@ cv::Mat Mat2CVMat(const Mat& mat) { } else if (PixelFormat::kBGRA == format) { return cv::Mat(mat.height(), mat.width(), type, mat.data()); } else { - ERROR("unsupported mat format {}", format); + MMDEPLOY_ERROR("unsupported mat format {}", format); return {}; } } @@ -78,7 +78,7 @@ cv::Mat Tensor2CVMat(const Tensor& tensor) { return {h, w, CV_32SC(c), const_cast(tensor.data())}; } else { assert(0); - ERROR("unsupported type: {}", desc.data_type); + MMDEPLOY_ERROR("unsupported type: {}", desc.data_type); return {}; } } @@ -95,7 
+95,7 @@ Tensor CVMat2Tensor(const cv::Mat& mat) { shape = {1, mat.rows, mat.cols, mat.channels()}; data_type = DataType::kINT32; } else { - ERROR("unsupported mat dat type {}", mat.type()); + MMDEPLOY_ERROR("unsupported mat dat type {}", mat.type()); assert(0); return {}; } @@ -118,7 +118,7 @@ cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, } else if (interpolation == "lanczos") { cv::resize(src, dst, dst.size(), 0, 0, cv::INTER_LANCZOS4); } else { - ERROR("{} interpolation is not supported", interpolation); + MMDEPLOY_ERROR("{} interpolation is not supported", interpolation); assert(0); } return dst; @@ -189,7 +189,7 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds cv::cvtColor(src, dst, cv::COLOR_BGRA2BGR); break; default: - ERROR("unsupported src mat's element type {}", src_format); + MMDEPLOY_ERROR("unsupported src mat's element type {}", src_format); assert(0); return {}; } @@ -214,7 +214,7 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds cv::cvtColor(src, dst, cv::COLOR_BGRA2RGB); break; default: - ERROR("unsupported src mat's element type {}", src_format); + MMDEPLOY_ERROR("unsupported src mat's element type {}", src_format); assert(0); return {}; } @@ -239,12 +239,12 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds cv::cvtColor(src, dst, cv::COLOR_BGRA2GRAY); break; default: - ERROR("unsupported src mat's element type {}", src_format); + MMDEPLOY_ERROR("unsupported src mat's element type {}", src_format); assert(0); return {}; } } else { - ERROR("unsupported target mat's element type {}", dst_format); + MMDEPLOY_ERROR("unsupported target mat's element type {}", dst_format); assert(0); return {}; } @@ -267,7 +267,7 @@ bool Compare(const cv::Mat& src1, const cv::Mat& src2) { cv::subtract(_src1, _src2, diff); diff = cv::abs(diff); auto sum = cv::sum(cv::sum(diff)); - DEBUG("sum: {}, average: {}", sum[0], sum[0] * 1.0 / (src1.rows * src1.cols)); + MMDEPLOY_DEBUG("sum: {}, average: {}", sum[0], sum[0] * 1.0 / (src1.rows * src1.cols)); return sum[0] / (src1.rows * src1.cols) < 0.5f; } diff --git a/csrc/preprocess/cpu/opencv_utils.h b/csrc/utils/opencv/opencv_utils.h similarity index 69% rename from csrc/preprocess/cpu/opencv_utils.h rename to csrc/utils/opencv/opencv_utils.h index 45aa360ff3..05f8405eb4 100644 --- a/csrc/preprocess/cpu/opencv_utils.h +++ b/csrc/utils/opencv/opencv_utils.h @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. 
-#ifndef MMDEPLOY_OPENCV_UTILS_H -#define MMDEPLOY_OPENCV_UTILS_H +#ifndef MMDEPLOY_CSRC_UTILS_OPENCV_OPENCV_UTILS_H_ +#define MMDEPLOY_CSRC_UTILS_OPENCV_OPENCV_UTILS_H_ #include "core/mat.h" #include "core/mpl/type_traits.h" @@ -12,11 +12,11 @@ namespace mmdeploy { namespace cpu { -cv::Mat Mat2CVMat(const Mat& mat); -cv::Mat Tensor2CVMat(const Tensor& tensor); +MMDEPLOY_API cv::Mat Mat2CVMat(const Mat& mat); +MMDEPLOY_API cv::Mat Tensor2CVMat(const Tensor& tensor); -Mat CVMat2Mat(const cv::Mat& mat, PixelFormat format); -Tensor CVMat2Tensor(const cv::Mat& mat); +MMDEPLOY_API Mat CVMat2Mat(const cv::Mat& mat, PixelFormat format); +MMDEPLOY_API Tensor CVMat2Tensor(const cv::Mat& mat); /** * @brief resize an image to specified size @@ -26,7 +26,8 @@ Tensor CVMat2Tensor(const cv::Mat& mat); * @param dst_width output image's width * @return output image if success, error code otherwise */ -cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, const std::string& interpolation); +MMDEPLOY_API cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, + const std::string& interpolation); /** * @brief crop an image @@ -38,7 +39,7 @@ cv::Mat Resize(const cv::Mat& src, int dst_height, int dst_width, const std::str * @param right * @return cv::Mat */ -cv::Mat Crop(const cv::Mat& src, int top, int left, int bottom, int right); +MMDEPLOY_API cv::Mat Crop(const cv::Mat& src, int top, int left, int bottom, int right); /** * @brief Do normalization to an image @@ -50,8 +51,8 @@ cv::Mat Crop(const cv::Mat& src, int top, int left, int bottom, int right); * @param inplace * @return cv::Mat */ -cv::Mat Normalize(cv::Mat& src, const std::vector& mean, const std::vector& std, - bool to_rgb, bool inplace = true); +MMDEPLOY_API cv::Mat Normalize(cv::Mat& src, const std::vector& mean, + const std::vector& std, bool to_rgb, bool inplace = true); /** * @brief tranpose an image, from {h, w, c} to {c, h, w} @@ -59,7 +60,7 @@ cv::Mat Normalize(cv::Mat& src, const std::vector& mean, const std::vecto * @param src input image * @return */ -cv::Mat Transpose(const cv::Mat& src); +MMDEPLOY_API cv::Mat Transpose(const cv::Mat& src); /** * @brief convert an image to another color space @@ -69,7 +70,8 @@ cv::Mat Transpose(const cv::Mat& src); * @param dst_format * @return */ -cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat dst_format); +MMDEPLOY_API cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, + PixelFormat dst_format); /** * @@ -82,8 +84,8 @@ cv::Mat ColorTransfer(const cv::Mat& src, PixelFormat src_format, PixelFormat ds * @param val * @return */ -cv::Mat Pad(const cv::Mat& src, int top, int left, int bottom, int right, int border_type, - float val); +MMDEPLOY_API cv::Mat Pad(const cv::Mat& src, int top, int left, int bottom, int right, + int border_type, float val); /** * @brief compare two images @@ -92,7 +94,7 @@ cv::Mat Pad(const cv::Mat& src, int top, int left, int bottom, int right, int bo * @param src2 the other input image * @return bool true means the images are the same */ -bool Compare(const cv::Mat& src1, const cv::Mat& src2); +MMDEPLOY_API bool Compare(const cv::Mat& src1, const cv::Mat& src2); } // namespace cpu @@ -104,8 +106,6 @@ struct IsCvPoint : std::false_type {}; template struct IsCvPoint<::cv::Point_> : std::true_type {}; -} // namespace detail - template >::value, int> = 0> void serialize(Archive&& archive, T&& p) { @@ -146,6 +146,8 @@ void load(Archive& archive, std::vector& v) { } } +} // namespace detail + } // namespace mmdeploy 
-#endif // MMDEPLOY_OPENCV_UTILS_H +#endif // MMDEPLOY_CSRC_UTILS_OPENCV_OPENCV_UTILS_H_ diff --git a/demo/csrc/CMakeLists.txt b/demo/csrc/CMakeLists.txt index 3da3b04b79..3e1bdcc6fb 100644 --- a/demo/csrc/CMakeLists.txt +++ b/demo/csrc/CMakeLists.txt @@ -2,13 +2,18 @@ cmake_minimum_required(VERSION 3.14) project(mmdeploy-example) -find_package(OpenCV REQUIRED) find_package(MMDeploy REQUIRED) function(add_example name) - add_executable(${name} ${name}.cpp) - target_link_libraries(${name} ${MMDeploy_LIBS} -Wl,--disable-new-dtags - opencv_imgcodecs opencv_imgproc opencv_core) + file(GLOB _SRCS ${name}.c*) + add_executable(${name} ${_SRCS}) + if (NOT MSVC) + # disable new dtags so that executables can run even without LD_LIBRARY_PATH set + target_link_libraries(${name} PRIVATE -Wl,--disable-new-dtags) + endif () + mmdeploy_load_static(${name} MMDeployStaticModules) + mmdeploy_load_dynamic(${name} MMDeployDynamicModules) + target_link_libraries(${name} PRIVATE MMDeployLibs ${OpenCV_LIBS}) endfunction() add_example(image_classification) diff --git a/demo/csrc/image_classification.cpp b/demo/csrc/image_classification.cpp index be618659be..18d1e0793e 100644 --- a/demo/csrc/image_classification.cpp +++ b/demo/csrc/image_classification.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include "classifier.h" diff --git a/demo/csrc/image_restorer.cpp b/demo/csrc/image_restorer.cpp index 4e462ce083..3984c88a8a 100644 --- a/demo/csrc/image_restorer.cpp +++ b/demo/csrc/image_restorer.cpp @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. #include -#include +#include #include #include diff --git a/demo/csrc/image_segmentation.cpp b/demo/csrc/image_segmentation.cpp index 71b3108152..8502ecec0c 100644 --- a/demo/csrc/image_segmentation.cpp +++ b/demo/csrc/image_segmentation.cpp @@ -1,7 +1,7 @@ // Copyright (c) OpenMMLab. All rights reserved. 
#include -#include +#include #include #include @@ -13,7 +13,7 @@ using namespace std; vector gen_palette(int num_classes) { std::mt19937 gen; - std::uniform_int_distribution uniform_dist(0, 255); + std::uniform_int_distribution uniform_dist(0, 255); vector palette; palette.reserve(num_classes); diff --git a/demo/csrc/object_detection.cpp b/demo/csrc/object_detection.cpp index a57b4f41f6..1843407532 100644 --- a/demo/csrc/object_detection.cpp +++ b/demo/csrc/object_detection.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/demo/csrc/ocr.cpp b/demo/csrc/ocr.cpp index f82d1eca67..1bb8d43ef2 100644 --- a/demo/csrc/ocr.cpp +++ b/demo/csrc/ocr.cpp @@ -1,5 +1,5 @@ #include -#include +#include #include #include diff --git a/docs/en/build/linux.md b/docs/en/build/linux.md new file mode 100644 index 0000000000..1333ed77b7 --- /dev/null +++ b/docs/en/build/linux.md @@ -0,0 +1 @@ +TODO diff --git a/docs/en/build/windows.md b/docs/en/build/windows.md new file mode 100644 index 0000000000..1333ed77b7 --- /dev/null +++ b/docs/en/build/windows.md @@ -0,0 +1 @@ +TODO diff --git a/docs/zh_cn/build/linux.md b/docs/zh_cn/build/linux.md new file mode 100644 index 0000000000..1333ed77b7 --- /dev/null +++ b/docs/zh_cn/build/linux.md @@ -0,0 +1 @@ +TODO diff --git a/docs/zh_cn/build/windows.md b/docs/zh_cn/build/windows.md new file mode 100644 index 0000000000..e0bcdfbc37 --- /dev/null +++ b/docs/zh_cn/build/windows.md @@ -0,0 +1,336 @@
+- [Build on Windows](#build-on-windows)
+  - [Install from Source](#install-from-source)
+    - [Install Build and Compilation Toolchains](#install-build-and-compilation-toolchains)
+    - [Install Dependencies](#install-dependencies)
+      - [Install Dependencies for MMDeploy Converter](#install-dependencies-for-mmdeploy-converter)
+      - [Install Dependencies for MMDeploy SDK](#install-dependencies-for-mmdeploy-sdk)
+      - [Install Inference Engines](#install-inference-engines)
+    - [Build MMDeploy](#build-mmdeploy)
+      - [Build and Install Model Converter](#build-and-install-model-converter)
+        - [Build Custom Ops](#build-custom-ops)
+        - [Install Model Converter](#install-model-converter)
+      - [Build SDK](#build-sdk)
+        - [Build Options](#build-options)
+        - [Build Examples](#build-examples)
+        - [Build SDK Demo](#build-sdk-demo)
+    - [Notes](#notes)
+
+---
+# Build on Windows
+
+Currently, MMDeploy on Windows can only be installed by building from source. Prebuilt packages will be provided in future releases.
+
+## Install from Source
+
+All the instructions below are verified on **Windows 10**.
+
+### Install Build and Compilation Toolchains
+
+1. Download and install [Visual Studio 2019](https://visualstudio.microsoft.com). During installation, select the "Desktop development with C++" workload and the "Windows 10 SDK" component.
+2. Add the cmake directory to the PATH environment variable, e.g. "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin"
+3. If an NVIDIA GPU is installed in your system, download and install the CUDA toolkit by following the official guide.
+
+### Install Dependencies
+
+#### Install Dependencies for MMDeploy Converter
+
+- **conda**
+
+  We strongly recommend installing conda, or miniconda, e.g.
+  https://repo.anaconda.com/miniconda/Miniconda3-py37_4.11.0-Windows-x86_64.exe
+
+  After installation, open the Windows start menu, type "prompt", and launch "anaconda powershell prompt". All the installation commands below are verified in anaconda powershell.
+
+- **pytorch (>=1.8.0)**
+
+  Following the pytorch official website, install the prebuilt package that matches your environment, e.g.
+
+  ```powershell
+  pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
+  ```
+
+- **mmcv-full**
+
+  Following the mmcv official website, install the prebuilt package that matches your environment, e.g.
+
+  ```powershell
+  $env:cu_version="cu111"
+  $env:torch_version="torch1.8.0"
+  pip install mmcv-full==1.4.0 -f https://download.openmmlab.com/mmcv/dist/$env:cu_version/$env:torch_version/index.html
+  ```
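+
+Before moving on, a quick sanity check can confirm that the toolchain and the packages above are visible from the same anaconda powershell session. This is only a minimal sketch; the CUDA-related lines apply only to machines with an NVIDIA GPU:
+
+```powershell
+# Is the build toolchain reachable from this shell? Expect cmake 3.14 or later.
+cmake --version
+# Was pytorch installed with the expected CUDA build? Prints the version and True/False.
+python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
+# Is mmcv-full importable?
+python -c "import mmcv; print(mmcv.__version__)"
+```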
+
+#### Install Dependencies for MMDeploy SDK
+
+- **spdlog**
+
+  spdlog is a compact logging library. Install it as follows:
+
+  1. Download https://github.com/gabime/spdlog/archive/refs/tags/v1.9.2.zip
+  2. Extract it and enter the folder spdlog-v1.9.2
+  3. Build and install it:
+
+  ```powershell
+  mkdir build
+  cd build
+  cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_INSTALL_PREFIX=install -DCMAKE_BUILD_TYPE=Release
+  cmake --build . --target install -j --config Release
+  ```
+
+- **OpenCV**
+
+  1. Download and install the prebuilt OpenCV package for Windows: https://github.com/opencv/opencv/releases/download/4.5.5/opencv-4.5.5-vc14_vc15.exe
+  2. Add the path of the OpenCV libraries to the PATH environment variable
+
+- **pplcv**
+
+  pplcv is a high-performance image processing library for the x86 and CUDA platforms. This dependency is optional and is required only on the CUDA platform. Moreover, v0.6.2 must be used for now, and the source has to be fetched with git clone and built as follows:
+
+  ```powershell
+  git clone --recursive git@github.com:openppl-public/ppl.cv.git
+  cd ppl.cv
+  git checkout tags/v0.6.2 -b v0.6.2
+  ./build.bat -G "Visual Studio 16 2019" -T v142 -A x64 -DHPCC_USE_CUDA=ON -DHPCC_MSVC_MD=ON
+  ```
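+
+The SDK build steps later pass the spdlog and OpenCV locations to cmake, so it may help to record them right away. The following is only a sketch with placeholder paths; substitute the directories actually used on your machine:
+
+```powershell
+# Placeholder paths; adjust to where spdlog was installed and OpenCV was extracted.
+$env:SPDLOG_DIR = "D:\deps\spdlog-1.9.2\build\install"
+$env:OPENCV_DIR = "D:\deps\opencv\build"
+# The prebuilt OpenCV package keeps its runtime DLLs under x64\vc15\bin.
+$env:path = "$env:OPENCV_DIR\x64\vc15\bin;" + $env:path
+```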
+
+#### Install Inference Engines
+
+Currently, MMDeploy supports two inference engines on the Windows platform: ONNXRuntime and TensorRT. Other inference engines have not been verified yet, or have not passed verification; support for them will be added gradually in later releases.
+
+- **ONNXRuntime** (package: onnxruntime)
+
+  1. Download the binary package: https://github.com/microsoft/onnxruntime/releases/download/v1.8.0/onnxruntime-win-x64-1.8.0.zip
+  2. Extract it to a target directory, referred to as onnxruntime_dir below
+  3. Add the onnxruntime libs directory to PATH:
+
+  ```powershell
+  $env:path = "{onnxruntime_dir}/lib;" + $env:path
+  ```
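+
+  A one-line import test can confirm the runtime is usable from the current shell. This assumes the onnxruntime python wheel is installed as well (e.g. pip install onnxruntime==1.8.0), which is separate from the binary package above:
+
+  ```powershell
+  # Prints the available execution providers, e.g. ['CPUExecutionProvider'].
+  python -c "import onnxruntime; print(onnxruntime.get_available_providers())"
+  ```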
+- **TensorRT** (packages: TensorRT, cudnn)
+
+  TensorRT:
+
+  1. Download the binary package from the NVIDIA official website, e.g. https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/8.2.3.0/zip/TensorRT-8.2.3.0.Windows10.x86_64.cuda-11.4.cudnn8.2.zip
+  2. Extract the binary package to a target directory, referred to as tensorrt_dir below
+  3. Install the tensorrt python package
+  4. Add the tensorrt libs directory to PATH:
+
+  ```powershell
+  pip install {tensorrt_dir}/python/tensorrt-8.2.3.0-cp37-none-win_amd64.whl
+  $env:path = "{tensorrt_dir}/lib;" + $env:path
+  ```
+
+  cudnn:
+
+  1. Download the binary package from the NVIDIA official website, e.g. https://developer.nvidia.com/compute/machine-learning/cudnn/secure/8.2.1.32/11.3_06072021/cudnn-11.3-windows-x64-v8.2.1.32.zip
+  2. Extract the binary package to a target directory, referred to as cudnn_dir below
+  3. Add the cudnn libs directory to PATH:
+
+  ```powershell
+  $env:path = "{cudnn_dir}/bin;" + $env:path
+  ```
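+
+  With the wheel installed and PATH updated as above, the import below fails fast when the TensorRT or cudnn DLLs cannot be found, which is the most common setup error. A minimal check, assuming the CUDA toolkit is installed:
+
+  ```powershell
+  # Raises on missing TensorRT/cudnn DLLs; prints e.g. 8.2.3.0 on success.
+  python -c "import tensorrt; print(tensorrt.__version__)"
+  ```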
+- **PPL.NN** (package: ppl.nn): TODO
+- **OpenVINO** (package: openvino): TODO
+- **ncnn** (package: ncnn): TODO
+
+### Build MMDeploy
+
+#### Build and Install Model Converter
+
+##### Build Custom Ops
+
+- **ONNXRuntime** custom ops
+
+```powershell
+mkdir build
+cd build
+cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="ort" -DONNXRUNTIME_DIR={onnxruntime_dir}
+cmake --build . --config Release -- /maxcpucount:4
+```
+
+- **TensorRT** custom ops
+
+```powershell
+mkdir build
+cd build
+cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 -DMMDEPLOY_TARGET_BACKENDS="trt" -DTENSORRT_DIR={tensorrt_dir} -DCUDNN_DIR={cudnn_dir}
+cmake --build . --config Release -- /maxcpucount:4
+```
+
+- **ncnn** custom ops
+
+  TODO
+
+##### Install Model Converter
+
+```powershell
+cd root/path/of/MMDeploy
+pip install -e .
+```
+
+#### Build SDK
+
+##### Build Options
+- **MMDEPLOY_BUILD_SDK**: {ON, OFF}; default: OFF. Switch to build the MMDeploy SDK.
+- **MMDEPLOY_BUILD_SDK_PYTHON_API**: {ON, OFF}; default: OFF. Switch to build the MMDeploy SDK python package.
+- **MMDEPLOY_BUILD_TEST**: {ON, OFF}; default: OFF. Switch to build the MMDeploy SDK test programs.
+- **MMDEPLOY_TARGET_DEVICES**: {"cpu", "cuda"}; default: cpu. Sets the target devices. When there are several devices, separate their names with semicolons, e.g. -DMMDEPLOY_TARGET_DEVICES="cpu;cuda".
+- **MMDEPLOY_TARGET_BACKENDS**: {"trt", "ort", "pplnn", "ncnn", "openvino"}; default: N/A. By default the SDK enables no backend, since the choice depends heavily on the application scenario. When several backends are selected, separate them with semicolons, e.g. -DMMDEPLOY_TARGET_BACKENDS="trt;ort;pplnn;ncnn;openvino". At build time, almost every backend needs some variables set so that its dependencies can be located:
+  1. trt: TensorRT. Requires TENSORRT_DIR and CUDNN_DIR, e.g. -DTENSORRT_DIR={tensorrt_dir} -DCUDNN_DIR={cudnn_dir}
+  2. ort: ONNXRuntime. Requires ONNXRUNTIME_DIR, e.g. -DONNXRUNTIME_DIR={onnxruntime_dir}
+  3. pplnn: PPL.NN. Requires pplnn_DIR. Not verified in the current release
+  4. ncnn: requires ncnn_DIR. Not verified in the current release
+  5. openvino: OpenVINO. Requires InferenceEngine_DIR. Has not passed verification in the current release
+- **MMDEPLOY_CODEBASES**: {"mmcls", "mmdet", "mmseg", "mmedit", "mmocr", "all"}; default: N/A. Sets the SDK postprocessing components, which load the postprocessing logic of the OpenMMLab codebases. The supported codebases are 'mmcls', 'mmdet', 'mmedit', 'mmseg' and 'mmocr'. When several codebases are selected, separate them with semicolons, e.g. -DMMDEPLOY_CODEBASES="mmcls;mmdet"; alternatively, pass -DMMDEPLOY_CODEBASES=all to load all of them.
+- **BUILD_SHARED_LIBS**: {ON, OFF}; default: ON. Switch between shared and static libraries. When set to OFF, static libraries are built.
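+
+Because CMake caches option values between configure runs, it is easy to end up building with stale settings. One quick way to see what a configure run actually recorded, sketched here under the assumption that the build directory is named build, is:
+
+```powershell
+# List the cached MMDeploy-related options from the previous configure run.
+Select-String -Path build\CMakeCache.txt -Pattern "MMDEPLOY_"
+```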
+
+##### Build Examples
+
+The following shows two examples of building the SDK, each for a different runtime environment.
+
+- cpu + ONNXRuntime
+
+  ```PowerShell
+  mkdir build
+  cd build
+  cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
+    -DMMDEPLOY_BUILD_SDK=ON `
+    -DMMDEPLOY_TARGET_DEVICES="cpu" `
+    -DMMDEPLOY_TARGET_BACKENDS="ort" `
+    -DMMDEPLOY_CODEBASES="all" `
+    -DONNXRUNTIME_DIR={onnxruntime_dir} `
+    -Dspdlog_DIR={spdlog_dir}/build/install/lib/cmake/spdlog `
+    -DOpenCV_DIR={opencv_dir}/build
+  cmake --build . --config Release -- /maxcpucount:4
+  cmake --install . --config Release
+  ```
+
+- cuda + TensorRT
+
+  ```PowerShell
+  mkdir build
+  cd build
+  cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
+    -DMMDEPLOY_BUILD_SDK=ON `
+    -DMMDEPLOY_TARGET_DEVICES="cuda" `
+    -DMMDEPLOY_TARGET_BACKENDS="trt" `
+    -DMMDEPLOY_CODEBASES="all" `
+    -Dpplcv_DIR={pplcv_dir}/pplcv-build/install/lib/cmake/ppl `
+    -DTENSORRT_DIR={tensorrt_dir} `
+    -DCUDNN_DIR={cudnn_dir} `
+    -Dspdlog_DIR={spdlog_dir}/build/install/lib/cmake/spdlog `
+    -DOpenCV_DIR={opencv_dir}/build
+  cmake --build . --config Release -- /maxcpucount:4
+  cmake --install . --config Release
+  ```
+
+- Others
+
+  Refer to the two examples above, together with the SDK build options described earlier, to build the SDK for other runtime environments.
+
+##### Build SDK Demo
+
+```PowerShell
+cd install/example
+mkdir build
+cd build
+cmake .. -G "Visual Studio 16 2019" -A x64 -T v142 `
+  -DMMDeploy_DIR={mmdeploy_dir}/build/install/lib/cmake/MMDeploy `
+  -Dspdlog_DIR={spdlog_dir}/build/install/lib/cmake/spdlog `
+  -DOpenCV_DIR={opencv_dir}/build
+cmake --build . --config Release -- /maxcpucount:4
+
+$env:path = "${mmdeploy_dir}/build/install/bin;" + $env:path
+```
+
+### Notes
+
+1. Release and Debug libraries must not be mixed. If MMDeploy is built as a Debug version, all third-party dependencies have to be Debug versions as well.
diff --git a/mmdeploy/backend/ncnn/init_plugins.py b/mmdeploy/backend/ncnn/init_plugins.py index 97667defd4..80cd871c53 100644 --- a/mmdeploy/backend/ncnn/init_plugins.py +++ b/mmdeploy/backend/ncnn/init_plugins.py @@ -1,7 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. -import glob import os +from mmdeploy.utils import get_file_path + def get_ops_path() -> str: """Get NCNN custom ops library path. @@ -9,14 +10,11 @@ Returns: str: The library path of NCNN custom ops. """ - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '../../../build/lib/libmmdeploy_ncnn_ops.so')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/lib/libmmdeploy_ncnn_ops.so', + '../../../build/bin/*/mmdeploy_ncnn_ops.pyd' + ] + return get_file_path(os.path.dirname(__file__), candidates) def get_onnx2ncnn_path() -> str: @@ -25,10 +23,7 @@ Returns: str: A path of onnx2ncnn tool. """ - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), '../../../build/bin/onnx2ncnn')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/bin/onnx2ncnn', '../../../build/bin/*/onnx2ncnn' + ] + return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/onnxruntime/init_plugins.py b/mmdeploy/backend/onnxruntime/init_plugins.py index 06cd001502..e8622eedf3 100644 --- a/mmdeploy/backend/onnxruntime/init_plugins.py +++ b/mmdeploy/backend/onnxruntime/init_plugins.py @@ -1,7 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. -import glob import os +from mmdeploy.utils import get_file_path + def get_ops_path() -> str: """Get the library path of onnxruntime custom ops. 
@@ -9,11 +10,8 @@ def get_ops_path() -> str: Returns: str: The library path to onnxruntime custom ops. """ - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '../../../build/lib/libmmdeploy_onnxruntime_ops.so')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/lib/libmmdeploy_onnxruntime_ops.so', + '../../../build/bin/*/mmdeploy_onnxruntime_ops.dll', + ] + return get_file_path(os.path.dirname(__file__), candidates) diff --git a/mmdeploy/backend/sdk/__init__.py b/mmdeploy/backend/sdk/__init__.py index 95a9548505..ef648c4d5b 100644 --- a/mmdeploy/backend/sdk/__init__.py +++ b/mmdeploy/backend/sdk/__init__.py @@ -3,14 +3,24 @@ import os import sys -lib_dir = os.path.abspath( - os.path.join(os.path.dirname(__file__), '../../../build/lib')) - -sys.path.insert(0, lib_dir) +from mmdeploy.utils import get_file_path _is_available = False -if importlib.util.find_spec('mmdeploy_python') is not None: +module_name = 'mmdeploy_python' + +candidates = [ + f'../../../build/lib/{module_name}.*.so', + f'../../../build/bin/*/{module_name}.*.pyd' +] + +lib_path = get_file_path(os.path.dirname(__file__), candidates) + +if lib_path: + lib_dir = os.path.dirname(lib_path) + sys.path.insert(0, lib_dir) + +if importlib.util.find_spec(module_name) is not None: from .wrapper import SDKWrapper __all__ = ['SDKWrapper'] _is_available = True diff --git a/mmdeploy/backend/tensorrt/init_plugins.py b/mmdeploy/backend/tensorrt/init_plugins.py index 9bb0da7f43..80c6eea4d7 100644 --- a/mmdeploy/backend/tensorrt/init_plugins.py +++ b/mmdeploy/backend/tensorrt/init_plugins.py @@ -1,9 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. import ctypes -import glob import os -from mmdeploy.utils import get_root_logger +from mmdeploy.utils import get_file_path, get_root_logger def get_ops_path() -> str: @@ -12,14 +11,11 @@ def get_ops_path() -> str: Returns: str: A path of the TensorRT plugin library. """ - wildcard = os.path.abspath( - os.path.join( - os.path.dirname(__file__), - '../../../build/lib/libmmdeploy_tensorrt_ops.so')) - - paths = glob.glob(wildcard) - lib_path = paths[0] if len(paths) > 0 else '' - return lib_path + candidates = [ + '../../../build/lib/libmmdeploy_tensorrt_ops.so', + '../../../build/bin/*/mmdeploy_tensorrt_ops.dll' + ] + return get_file_path(os.path.dirname(__file__), candidates) def load_tensorrt_plugin() -> bool: diff --git a/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py b/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py index 903899b6ee..e00ea7ef8c 100644 --- a/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py +++ b/mmdeploy/codebase/mmedit/deploy/super_resolution_model.py @@ -204,11 +204,7 @@ def forward(self, output = self.wrapper.invoke([img])[0] if test_mode: output = torch.from_numpy(output) - output = torch.permute(output, ( - 2, - 0, - 1, - )) + output = output.permute(2, 0, 1) output = output / 255. 
results = self.test_post_process([output], lq, gt) return results diff --git a/mmdeploy/utils/__init__.py b/mmdeploy/utils/__init__.py index 03543f9d5f..b4b05bd070 100644 --- a/mmdeploy/utils/__init__.py +++ b/mmdeploy/utils/__init__.py @@ -8,7 +8,7 @@ is_dynamic_batch, is_dynamic_shape, load_config) from .constants import SDK_TASK_MAP, Backend, Codebase, Task from .device import parse_cuda_device_id, parse_device_id -from .utils import get_root_logger, target_wrapper +from .utils import get_file_path, get_root_logger, target_wrapper __all__ = [ 'is_dynamic_batch', 'is_dynamic_shape', 'get_task_type', 'get_codebase', @@ -18,5 +18,5 @@ 'get_model_inputs', 'cfg_apply_marks', 'get_input_shape', 'parse_device_id', 'parse_cuda_device_id', 'get_codebase_config', 'get_backend_config', 'get_root_logger', 'get_dynamic_axes', - 'target_wrapper', 'SDK_TASK_MAP' + 'target_wrapper', 'SDK_TASK_MAP', 'get_file_path' ] diff --git a/mmdeploy/utils/utils.py b/mmdeploy/utils/utils.py index 9917dd4770..47a5a18c5b 100644 --- a/mmdeploy/utils/utils.py +++ b/mmdeploy/utils/utils.py @@ -1,5 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. +import glob import logging +import os import sys import traceback from typing import Callable, Optional @@ -56,3 +58,21 @@ def get_root_logger(log_file=None, log_level=logging.INFO) -> logging.Logger: name='mmdeploy', log_file=log_file, log_level=log_level) return logger + + +def get_file_path(prefix, candidates) -> str: + """Search for file in candidates. + + Args: + prefix (str): Prefix of the paths. + cancidates (str): Candidate paths + Returns: + str: file path or '' if not found + """ + for candidate in candidates: + wildcard = os.path.abspath(os.path.join(prefix, candidate)) + paths = glob.glob(wildcard) + if paths: + lib_path = paths[0] + return lib_path + return '' diff --git a/tests/test_csrc/CMakeLists.txt b/tests/test_csrc/CMakeLists.txt index 3ff7e2d155..34cc0349dd 100644 --- a/tests/test_csrc/CMakeLists.txt +++ b/tests/test_csrc/CMakeLists.txt @@ -14,60 +14,63 @@ aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/net NET_TC) aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/model MODEL_TC) set(DEVICE_TC) -foreach(DEVICE IN LISTS MMDEPLOY_TARGET_DEVICES) - list(APPEND DEVICE_TC - ${CMAKE_CURRENT_SOURCE_DIR}/device/test_${DEVICE}_device.cpp) -endforeach() +foreach (DEVICE IN LISTS MMDEPLOY_TARGET_DEVICES) + list(APPEND DEVICE_TC + ${CMAKE_CURRENT_SOURCE_DIR}/device/test_${DEVICE}_device.cpp) +endforeach () set(CAPI_TC) -if("all" IN_LIST MMDEPLOY_CODEBASES) - set(TASK_LIST - "classifier;detector;segmentor;text_detector;text_recognizer;restorer;model" - ) - set(CODEBASES "mmcls;mmdet;mmseg;mmedit;mmocr") -else() - set(TASK_LIST "model") - set(CODEBASES "${MMDEPLOY_CODEBASES}") - if("mmcls" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "classifier") - endif() - if("mmdet" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "detector") - endif() - if("mmseg" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "segmentor") - endif() - if("mmedit" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "restorer") - endif() - if("mmocr" IN_LIST MMDEPLOY_CODEBASES) - list(APPEND TASK_LIST "text_detector") - list(APPEND TASK_LIST "text_recognizer") - endif() -endif() -foreach(TASK ${TASK_LIST}) - list(APPEND CAPI_TC ${CMAKE_CURRENT_SOURCE_DIR}/capi/test_${TASK}.cpp) -endforeach() +if ("all" IN_LIST MMDEPLOY_CODEBASES) + set(TASK_LIST + "classifier;detector;segmentor;text_detector;text_recognizer;restorer;model" + ) + set(CODEBASES "mmcls;mmdet;mmseg;mmedit;mmocr") 
+else () + set(TASK_LIST "model") + set(CODEBASES "${MMDEPLOY_CODEBASES}") + if ("mmcls" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "classifier") + endif () + if ("mmdet" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "detector") + endif () + if ("mmseg" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "segmentor") + endif () + if ("mmedit" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "restorer") + endif () + if ("mmocr" IN_LIST MMDEPLOY_CODEBASES) + list(APPEND TASK_LIST "text_detector") + list(APPEND TASK_LIST "text_recognizer") + endif () +endif () +foreach (TASK ${TASK_LIST}) + list(APPEND CAPI_TC ${CMAKE_CURRENT_SOURCE_DIR}/capi/test_${TASK}.cpp) +endforeach () # generate the header file configure_file(config/test_define.h.in - ${CMAKE_CURRENT_SOURCE_DIR}/test_define.h) + ${CMAKE_CURRENT_SOURCE_DIR}/test_define.h) set(TC_SRCS - ${TC_SRCS} - ${ARCHIVE_TC} - ${CORE_TC} - ${TRANSFORM_TC} - ${MODEL_TC} - ${NET_TC} - ${DEVICE_TC} - ${CAPI_TC}) + ${TC_SRCS} + ${ARCHIVE_TC} + ${CORE_TC} + ${TRANSFORM_TC} + ${MODEL_TC} + ${NET_TC} + ${DEVICE_TC} + ${CAPI_TC}) add_executable(mmdeploy_tests ${TC_SRCS}) target_include_directories(mmdeploy_tests - PRIVATE ${CMAKE_SOURCE_DIR}/third_party/catch2) + PRIVATE ${CMAKE_SOURCE_DIR}/third_party/catch2) target_include_directories(mmdeploy_tests PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries( - mmdeploy_tests PRIVATE ${MMDEPLOY_LIBS} ${OpenCV_LIBS} - -Wl,--no-as-needed ${MMDEPLOY_DYNAMIC_MODULES} -Wl,--as-need - -Wl,--whole-archive ${MMDEPLOY_STATIC_MODULES} -Wl,--no-whole-archive) + +mmdeploy_load_static(mmdeploy_tests MMDeployStaticModules) +mmdeploy_load_dynamic(mmdeploy_tests MMDeployDynamicModules) +target_link_libraries(mmdeploy_tests PRIVATE + MMDeployLibs + mmdeploy_transform + mmdeploy_opencv_utils) diff --git a/tests/test_csrc/archive/test_value_archive.cpp b/tests/test_csrc/archive/test_value_archive.cpp index 9e53f12269..f46316e355 100644 --- a/tests/test_csrc/archive/test_value_archive.cpp +++ b/tests/test_csrc/archive/test_value_archive.cpp @@ -1,5 +1,12 @@ // Copyright (c) OpenMMLab. All rights reserved. 
+// clang-format off + +#include "catch.hpp" + +// clang-format on + +#include #include #include #include @@ -8,10 +15,9 @@ #include #include #include -#include "core/utils/formatter.h" #include "archive/value_archive.h" -#include "catch.hpp" +#include "core/utils/formatter.h" // clang-format off @@ -41,8 +47,8 @@ TEMPLATE_LIST_TEST_CASE("test array-like for value", "[value]", ArrayLikeTypes) } TEST_CASE("test native array for value archive", "[value1]") { - const int a[10] = {0,1,2,3,4,5,6,7,8,9}; - int b[10] = {0,0,0,0,0,0,0,0,0,0}; + const int a[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + int b[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; mmdeploy::Value value; mmdeploy::ValueOutputArchive oa(value); oa(a); diff --git a/tests/test_csrc/capi/test_classifier.cpp b/tests/test_csrc/capi/test_classifier.cpp index 5e0b0f4136..1b4ff1bbf0 100644 --- a/tests/test_csrc/capi/test_classifier.cpp +++ b/tests/test_csrc/capi/test_classifier.cpp @@ -33,11 +33,11 @@ TEST_CASE("test classifier's c api", "[classifier]") { ret = mmdeploy_classifier_apply(handle, mats.data(), (int)mats.size(), &results, &result_count); REQUIRE(ret == MM_SUCCESS); auto result_ptr = results; - INFO("model_path: {}", model_path); + MMDEPLOY_INFO("model_path: {}", model_path); for (auto i = 0; i < (int)mats.size(); ++i) { - INFO("the {}-th classification result: ", i); + MMDEPLOY_INFO("the {}-th classification result: ", i); for (int j = 0; j < *result_count; ++j, ++result_ptr) { - INFO("\t label: {}, score: {}", result_ptr->label_id, result_ptr->score); + MMDEPLOY_INFO("\t label: {}, score: {}", result_ptr->label_id, result_ptr->score); } } @@ -46,12 +46,12 @@ TEST_CASE("test classifier's c api", "[classifier]") { }; auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmcls/images"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmcls"} / "images"); REQUIRE(!img_lists.empty()); for (auto& backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmcls/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmcls/"} / backend); REQUIRE(!model_list.empty()); for (auto& model_path : model_list) { for (auto& device_name : gResources.device_names(backend)) { diff --git a/tests/test_csrc/capi/test_detector.cpp b/tests/test_csrc/capi/test_detector.cpp index f7a72e5410..a801d352a1 100644 --- a/tests/test_csrc/capi/test_detector.cpp +++ b/tests/test_csrc/capi/test_detector.cpp @@ -6,12 +6,13 @@ #include "apis/c/detector.h" #include "core/logger.h" +#include "core/utils/formatter.h" #include "opencv2/opencv.hpp" #include "test_resource.h" - using namespace std; TEST_CASE("test detector's c api", "[detector]") { + MMDEPLOY_INFO("test detector"); auto test = [](const string &device, const string &model_path, const vector &img_list) { mm_handle_t handle{nullptr}; auto ret = mmdeploy_detector_create_by_path(model_path.c_str(), device.c_str(), 0, &handle); @@ -32,26 +33,30 @@ TEST_CASE("test detector's c api", "[detector]") { REQUIRE(ret == MM_SUCCESS); auto result_ptr = results; for (auto i = 0; i < mats.size(); ++i) { - INFO("the '{}-th' image has '{}' objects", i, result_count[i]); + MMDEPLOY_INFO("the '{}-th' image has '{}' objects", i, result_count[i]); for (auto j = 0; j < result_count[i]; ++j, ++result_ptr) { auto &bbox = result_ptr->bbox; - INFO(" >> bbox[{}, {}, {}, {}], label_id {}, score {}", bbox.left, bbox.top, bbox.right, - bbox.bottom, result_ptr->label_id, result_ptr->score); + 
MMDEPLOY_INFO(" >> bbox[{}, {}, {}, {}], label_id {}, score {}", bbox.left, bbox.top, + bbox.right, bbox.bottom, result_ptr->label_id, result_ptr->score); } } mmdeploy_detector_release_result(results, result_count, (int)mats.size()); mmdeploy_detector_destroy(handle); }; - - auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmdet/images"); + MMDEPLOY_INFO("get test resources"); + auto &gResources = MMDeployTestResources::Get(); + MMDEPLOY_INFO("locate image resources"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmdet"} / "images"); + MMDEPLOY_INFO("{}", img_lists.size()); REQUIRE(!img_lists.empty()); for (auto &backend : gResources.backends()) { + MMDEPLOY_INFO("backend: {}", backend); DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmdet/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmdet"} / backend); REQUIRE(!model_list.empty()); for (auto &model_path : model_list) { + MMDEPLOY_INFO("model: {}", model_path); for (auto &device_name : gResources.device_names(backend)) { test(device_name, model_path, img_lists); } diff --git a/tests/test_csrc/capi/test_model.cpp b/tests/test_csrc/capi/test_model.cpp index af0a983628..d9bab881f3 100644 --- a/tests/test_csrc/capi/test_model.cpp +++ b/tests/test_csrc/capi/test_model.cpp @@ -12,7 +12,7 @@ TEST_CASE("test model c capi", "[model]") { std::string model_path; for (auto const &codebase : gResource.codebases()) { for (auto const &backend : gResource.backends()) { - if (auto _model_list = gResource.LocateModelResources(codebase + "/" + backend); + if (auto _model_list = gResource.LocateModelResources(fs::path{codebase} / backend); !_model_list.empty()) { model_path = _model_list.front(); break; diff --git a/tests/test_csrc/capi/test_restorer.cpp b/tests/test_csrc/capi/test_restorer.cpp index 502d377021..4e56537174 100644 --- a/tests/test_csrc/capi/test_restorer.cpp +++ b/tests/test_csrc/capi/test_restorer.cpp @@ -40,12 +40,12 @@ TEST_CASE("test restorer's c api", "[restorer]") { }; auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmedit/images"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmedit"} / "images"); REQUIRE(!img_lists.empty()); for (auto &backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmedit/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmedit"} / backend); REQUIRE(!model_list.empty()); for (auto &model_path : model_list) { for (auto &device_name : gResources.device_names(backend)) { diff --git a/tests/test_csrc/capi/test_segmentor.cpp b/tests/test_csrc/capi/test_segmentor.cpp index b042d793c5..6de6150bf1 100644 --- a/tests/test_csrc/capi/test_segmentor.cpp +++ b/tests/test_csrc/capi/test_segmentor.cpp @@ -43,12 +43,12 @@ TEST_CASE("test segmentor's c api", "[segmentor]") { }; auto gResources = MMDeployTestResources::Get(); - auto img_lists = gResources.LocateImageResources("mmseg/images"); + auto img_lists = gResources.LocateImageResources(fs::path{"mmseg"} / "images"); REQUIRE(!img_lists.empty()); for (auto &backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto model_list = gResources.LocateModelResources("mmseg/" + backend); + auto model_list = gResources.LocateModelResources(fs::path{"mmseg"} / backend); REQUIRE(!model_list.empty()); for (auto &model_path : model_list) 
{
        for (auto &device_name : gResources.device_names(backend)) {
diff --git a/tests/test_csrc/capi/test_text_detector.cpp b/tests/test_csrc/capi/test_text_detector.cpp
index a2bdd84493..af12d14e09 100644
--- a/tests/test_csrc/capi/test_text_detector.cpp
+++ b/tests/test_csrc/capi/test_text_detector.cpp
@@ -34,12 +34,12 @@ TEST_CASE("test text detector's c api", "[text-detector]") {
   auto result_ptr = results;
   for (auto i = 0; i < mats.size(); ++i) {
-    INFO("the {}-th image has '{}' objects", i, result_count[i]);
+    MMDEPLOY_INFO("the {}-th image has '{}' objects", i, result_count[i]);
     for (auto j = 0; j < result_count[i]; ++j, ++result_ptr) {
       auto& bbox = result_ptr->bbox;
-      INFO(">> bbox[{}].score: {}, coordinate: ", i, result_ptr->score);
+      MMDEPLOY_INFO(">> bbox[{}].score: {}, coordinate: ", i, result_ptr->score);
       for (auto& _bbox : result_ptr->bbox) {
-        INFO(">> >> ({}, {})", _bbox.x, _bbox.y);
+        MMDEPLOY_INFO(">> >> ({}, {})", _bbox.x, _bbox.y);
       }
     }
   }
@@ -49,12 +49,12 @@
   };
   auto& gResources = MMDeployTestResources::Get();
-  auto img_list = gResources.LocateImageResources("mmocr/images");
+  auto img_list = gResources.LocateImageResources(fs::path{"mmocr"} / "images");
   REQUIRE(!img_list.empty());
   for (auto& backend : gResources.backends()) {
     DYNAMIC_SECTION("loop backend: " << backend) {
-      auto model_list = gResources.LocateModelResources("mmocr/textdet/" + backend);
+      auto model_list = gResources.LocateModelResources(fs::path{"mmocr"} / "textdet" / backend);
       REQUIRE(!model_list.empty());
       for (auto& model_path : model_list) {
         for (auto& device_name : gResources.device_names(backend)) {
diff --git a/tests/test_csrc/capi/test_text_recognizer.cpp b/tests/test_csrc/capi/test_text_recognizer.cpp
index 94f01063dc..3265c4b0ec 100644
--- a/tests/test_csrc/capi/test_text_recognizer.cpp
+++ b/tests/test_csrc/capi/test_text_recognizer.cpp
@@ -35,7 +35,7 @@ TEST_CASE("test text recognizer's c api", "[text-recognizer]") {
   for (auto i = 0; i < mats.size(); ++i) {
     std::vector<float> score(results[i].score, results[i].score + results[i].length);
-    INFO("image {}, text = {}, score = {}", i, results[i].text, score);
+    MMDEPLOY_INFO("image {}, text = {}, score = {}", i, results[i].text, score);
   }
 
   mmdeploy_text_recognizer_release_result(results, (int)mats.size());
@@ -43,12 +43,12 @@
   };
   auto& gResources = MMDeployTestResources::Get();
-  auto img_list = gResources.LocateImageResources("mmocr/images");
+  auto img_list = gResources.LocateImageResources(fs::path{"mmocr"} / "images");
   REQUIRE(!img_list.empty());
   for (auto& backend : gResources.backends()) {
     DYNAMIC_SECTION("loop backend: " << backend) {
-      auto model_list = gResources.LocateModelResources("mmocr/textreg/" + backend);
+      auto model_list = gResources.LocateModelResources(fs::path{"mmocr"} / "textreg" / backend);
       REQUIRE(!model_list.empty());
       for (auto& model_path : model_list) {
         for (auto& device_name : gResources.device_names(backend)) {
@@ -93,7 +93,7 @@ TEST_CASE("test text detector-recognizer combo", "[text-detector-recognizer]") {
     for (int j = 0; j < bbox_count[i]; ++j) {
       auto& text = texts[offset + j];
       std::vector<float> score(text.score, text.score + text.length);
-      INFO("image {}, text = {}, score = {}", i, text.text, score);
+      MMDEPLOY_INFO("image {}, text = {}, score = {}", i, text.text, score);
     }
     offset += bbox_count[i];
   }
@@ -106,13 +106,15 @@
   };
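The recurring change across these C API tests is replacing string concatenation such as `"mmocr/textdet/" + backend` with `fs::path` composition, so resource lookups work with either path-separator convention. Below is a minimal standalone sketch of that pattern, not part of the patch; the `"trt"` backend name is only an assumed example value:

```cpp
// Sketch of the fs::path composition pattern adopted by the tests; C++17.
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

int main() {
  std::string backend = "trt";  // assumed example value, not from the test resources
  // operator/ inserts the platform-preferred separator, so the same expression
  // yields "mmocr/textdet/trt" on POSIX and "mmocr\textdet\trt" on Windows.
  fs::path model_dir = fs::path{"mmocr"} / "textdet" / backend;
  std::cout << model_dir.string() << '\n';
}
```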
auto& gResources = MMDeployTestResources::Get(); - auto img_list = gResources.LocateImageResources("mmocr/images"); + auto img_list = gResources.LocateImageResources(fs::path{"mmocr"} / "images"); REQUIRE(!img_list.empty()); for (auto& backend : gResources.backends()) { DYNAMIC_SECTION("loop backend: " << backend) { - auto det_model_list = gResources.LocateModelResources("/mmocr/textdet/" + backend); - auto reg_model_list = gResources.LocateModelResources("/mmocr/textreg/" + backend); + auto det_model_list = + gResources.LocateModelResources(fs::path{"mmocr"} / "textdet" / backend); + auto reg_model_list = + gResources.LocateModelResources(fs::path{"mmocr"} / "textreg" / backend); REQUIRE(!det_model_list.empty()); REQUIRE(!reg_model_list.empty()); auto det_model_path = det_model_list.front(); diff --git a/tests/test_csrc/core/test_mat.cpp b/tests/test_csrc/core/test_mat.cpp index b1ae27cb35..bb3e1a8842 100644 --- a/tests/test_csrc/core/test_mat.cpp +++ b/tests/test_csrc/core/test_mat.cpp @@ -1,6 +1,8 @@ // Copyright (c) OpenMMLab. All rights reserved. +#include #include +#include #include "catch.hpp" #include "core/logger.h" diff --git a/tests/test_csrc/core/test_status_code.cpp b/tests/test_csrc/core/test_status_code.cpp index 33059862d4..1316a07952 100644 --- a/tests/test_csrc/core/test_status_code.cpp +++ b/tests/test_csrc/core/test_status_code.cpp @@ -26,13 +26,13 @@ TEST_CASE("test status_code", "[status_code]") { sqrt_of_negative().value(); } catch (const Exception& e) { REQUIRE(e.code() == eInvalidArgument); - INFO("{}", e.what()); + MMDEPLOY_INFO("{}", e.what()); } auto r = sqrt_of_negative(); REQUIRE(!r); REQUIRE(r.error() == eInvalidArgument); - INFO("{}", r.error().message().c_str()); + MMDEPLOY_INFO("{}", r.error().message().c_str()); } } // namespace mmdeploy diff --git a/tests/test_csrc/core/test_token.cpp b/tests/test_csrc/core/test_token.cpp deleted file mode 100644 index de2e0f0cb2..0000000000 --- a/tests/test_csrc/core/test_token.cpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) OpenMMLab. All rights reserved. 
- -#include - -#include "catch.hpp" -#include "experimental/collection.h" - -namespace token { - -using namespace mmdeploy::token; - -using batch_size = mmdeploy::Token; -using type = mmdeploy::Token; -using name = mmdeploy::Token; - -} // namespace token - -TEST_CASE("test token", "[token]") { - using namespace mmdeploy::token; - using mmdeploy::Collection; - - auto produce = [] { - Collection c; - c << token::batch_size{64} << token::type{"Resize"} << token::name("resize1"); - return c; - }; - - auto c = produce(); - - auto consume = [](token::batch_size b, token::type t) { - std::cout << b.key() << ": " << *b << "\n" << t.key() << ": " << *t << "\n"; - return std::string{"success"}; - }; - - (void)Apply(consume, c); -} diff --git a/tests/test_csrc/core/test_value.cpp b/tests/test_csrc/core/test_value.cpp index 07bfe6d7ff..0ecc1c629b 100644 --- a/tests/test_csrc/core/test_value.cpp +++ b/tests/test_csrc/core/test_value.cpp @@ -219,7 +219,7 @@ TEST_CASE("test pointer of Value", "[value]") { REQUIRE(p["object"].is_object()); REQUIRE(p["array"].is_array()); REQUIRE(p["array"].is_array()); - INFO("{}", p); + MMDEPLOY_INFO("{}", p); } TEST_CASE("test null Value", "[value]") { @@ -332,7 +332,7 @@ TEST_CASE("test speed of value", "[value]") { } auto t1 = std::chrono::high_resolution_clock::now(); auto dt = std::chrono::duration(t1 - t0).count(); - INFO("time = {}ms", (float)dt); + MMDEPLOY_INFO("time = {}ms", (float)dt); } TEST_CASE("test ctor of value", "[value]") { diff --git a/tests/test_csrc/device/test_cpu_device.cpp b/tests/test_csrc/device/test_cpu_device.cpp index 88164014cf..3109f6cd4b 100644 --- a/tests/test_csrc/device/test_cpu_device.cpp +++ b/tests/test_csrc/device/test_cpu_device.cpp @@ -10,93 +10,6 @@ using namespace mmdeploy; using namespace std::string_literals; -namespace mmdeploy { -Kernel CreateCpuKernel(std::function task); -} - -TEST_CASE("basic device", "[device]") { - Platform platform("cpu"); - REQUIRE(platform.GetPlatformName() == "cpu"s); - REQUIRE(platform.GetPlatformId() == 0); - - const Device host("cpu"); - Stream stream(host); - // REQUIRE(platform.CreateStream("cpu", &stream) == 0); - REQUIRE(stream); - - SECTION("basic stream") { - bool set_me{}; - auto kernel = CreateCpuKernel([&] { set_me = true; }); - REQUIRE(kernel); - REQUIRE(stream.Submit(kernel)); - REQUIRE(stream.Wait()); - REQUIRE(set_me); - } - - SECTION("recursive task") { - auto outer_loop = CreateCpuKernel([&] { - for (int i = 0; i < 10; ++i) { - auto inner_loop = CreateCpuKernel([&, i] { - for (int j = 0; j < 10; ++j) { - std::cerr << "(" << i << ", " << j << ") "; - } - std::cerr << "\n"; - }); - REQUIRE(stream.Submit(inner_loop)); - } - }); - REQUIRE(stream.Submit(outer_loop)); - REQUIRE(stream.Wait()); - } - - SECTION("basic event") { - Event event(host); - // REQUIRE(platform.CreateEvent("cpu", &event) == 0); - REQUIRE(event); - auto sleeping = CreateCpuKernel([&] { - std::cerr << "start sleeping\n"; - for (int i = 0; i < 5; ++i) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - std::cerr << "0.1 second passed.\n"; - } - std::cerr << "time's up, waking up.\n"; - }); - for (int i = 0; i < 2; ++i) { - REQUIRE(stream.Submit(sleeping)); - REQUIRE(event.Record(stream)); - REQUIRE(event.Wait()); - std::cerr << "waked up.\n"; - } - } - - SECTION("event on stream") { - const int N = 10; - std::vector streams; - streams.reserve(N); - for (int i = 0; i < N; ++i) { - streams.emplace_back(host); - } - std::vector events; - events.reserve(N); - for (int i = 0; i < N; ++i) { - 
events.emplace_back(host); - } - for (int i = 0; i < N; ++i) { - auto kernel = CreateCpuKernel([&, i] { - std::cerr << "greatings from stream " << i << ".\n"; - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - std::cerr << "0.1 second passed, goodbye.\n"; - }); - if (i) { - REQUIRE(streams[i].DependsOn(events[i - 1])); - } - REQUIRE(streams[i].Submit(kernel)); - REQUIRE(events[i].Record(streams[i])); - } - REQUIRE(events.back().Wait()); - } -} - TEST_CASE("test buffer", "[buffer]") { using namespace mmdeploy; Device device{"cpu"}; diff --git a/tests/test_csrc/model/test_directory_model.cpp b/tests/test_csrc/model/test_directory_model.cpp index 6ea1bacc99..50091383ae 100644 --- a/tests/test_csrc/model/test_directory_model.cpp +++ b/tests/test_csrc/model/test_directory_model.cpp @@ -24,14 +24,14 @@ TEST_CASE("test directory model", "[model]") { REQUIRE(!directory_model_list.empty()); auto model_dir = "sdk_models/good_model"; REQUIRE(gResource.IsDir(model_dir)); - auto model_path = gResource.resource_root_path() + "/" + model_dir; - REQUIRE(!model_impl->Init(model_path).has_error()); + auto model_path = gResource.resource_root_path() / model_dir; + REQUIRE(!model_impl->Init(model_path.string()).has_error()); REQUIRE(!model_impl->ReadFile("deploy.json").has_error()); REQUIRE(model_impl->ReadFile("not-existing-file").has_error()); model_dir = "sdk_models/bad_model"; REQUIRE(gResource.IsDir(model_dir)); - model_path = gResource.resource_root_path() + "/" + model_dir; - REQUIRE(!model_impl->Init(model_path).has_error()); + model_path = gResource.resource_root_path() / model_dir; + REQUIRE(!model_impl->Init(model_path.string()).has_error()); REQUIRE(model_impl->ReadMeta().has_error()); } diff --git a/tests/test_csrc/model/test_model.cpp b/tests/test_csrc/model/test_model.cpp index b00f8c2b5f..34bd4c9841 100644 --- a/tests/test_csrc/model/test_model.cpp +++ b/tests/test_csrc/model/test_model.cpp @@ -24,7 +24,8 @@ TEST_CASE("model constructor", "[model]") { TEST_CASE("model init", "[model]") { auto& gResource = MMDeployTestResources::Get(); for (auto& codebase : gResource.codebases()) { - if (auto img_list = gResource.LocateImageResources(codebase + "/images"); !img_list.empty()) { + if (auto img_list = gResource.LocateImageResources(fs::path{codebase} / "images"); + !img_list.empty()) { Model model; REQUIRE(model.Init(img_list.front()).has_error()); break; @@ -32,7 +33,7 @@ TEST_CASE("model init", "[model]") { } for (auto& codebase : gResource.codebases()) { for (auto& backend : gResource.backends()) { - if (auto model_list = gResource.LocateModelResources(codebase + "/" + backend); + if (auto model_list = gResource.LocateModelResources(fs::path{codebase} / backend); !model_list.empty()) { Model model; REQUIRE(!model.Init(model_list.front()).has_error()); diff --git a/tests/test_csrc/model/test_zip_model.cpp b/tests/test_csrc/model/test_zip_model.cpp index 48f787bdea..8d5cb9ca76 100644 --- a/tests/test_csrc/model/test_zip_model.cpp +++ b/tests/test_csrc/model/test_zip_model.cpp @@ -25,10 +25,10 @@ TEST_CASE("test zip model", "[zip_model]") { auto& gResource = MMDeployTestResources::Get(); SECTION("bad sdk model") { - auto zip_model_path = "sdk_models/not_zip_file"; + auto zip_model_path = fs::path{"sdk_models"} / "not_zip_file"; REQUIRE(gResource.IsFile(zip_model_path)); - auto model_path = gResource.resource_root_path() + "/" + zip_model_path; - REQUIRE(model_impl->Init(model_path).has_error()); + auto model_path = gResource.resource_root_path() / zip_model_path; + 
REQUIRE(model_impl->Init(model_path.string()).has_error()); } SECTION("bad zip buffer") { std::vector buffer(100); @@ -36,10 +36,10 @@ TEST_CASE("test zip model", "[zip_model]") { } SECTION("good sdk model") { - auto zip_model_path = "sdk_models/good_model.zip"; + auto zip_model_path = fs::path{"sdk_models"} / "good_model.zip"; REQUIRE(gResource.IsFile(zip_model_path)); - auto model_path = gResource.resource_root_path() + "/" + zip_model_path; - REQUIRE(!model_impl->Init(model_path).has_error()); + auto model_path = gResource.resource_root_path() / zip_model_path; + REQUIRE(!model_impl->Init(model_path.string()).has_error()); REQUIRE(!model_impl->ReadFile("deploy.json").has_error()); REQUIRE(model_impl->ReadFile("not-exist-file").has_error()); REQUIRE(!model_impl->ReadMeta().has_error()); diff --git a/tests/test_csrc/net/test_ncnn_net.cpp b/tests/test_csrc/net/test_ncnn_net.cpp index 98b348c19b..b55280c041 100644 --- a/tests/test_csrc/net/test_ncnn_net.cpp +++ b/tests/test_csrc/net/test_ncnn_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test ncnn net", "[ncnn_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/ncnn"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "ncnn"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_openvino_net.cpp b/tests/test_csrc/net/test_openvino_net.cpp index f4a2f683f3..c3f82eb61e 100644 --- a/tests/test_csrc/net/test_openvino_net.cpp +++ b/tests/test_csrc/net/test_openvino_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test openvino net", "[openvino_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/openvino"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "openvino"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_ort_net.cpp b/tests/test_csrc/net/test_ort_net.cpp index 506fbaf199..1162210009 100644 --- a/tests/test_csrc/net/test_ort_net.cpp +++ b/tests/test_csrc/net/test_ort_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test ort net", "[ort_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/ort"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "ort"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_ppl_net.cpp b/tests/test_csrc/net/test_ppl_net.cpp index 64a6a478a1..b5d34a8ab5 100644 --- a/tests/test_csrc/net/test_ppl_net.cpp +++ b/tests/test_csrc/net/test_ppl_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test pplnn net", "[ppl_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/pplnn"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "pplnn"); REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/net/test_trt_net.cpp b/tests/test_csrc/net/test_trt_net.cpp index 2b2841d772..c1c579b2c4 100644 --- a/tests/test_csrc/net/test_trt_net.cpp +++ b/tests/test_csrc/net/test_trt_net.cpp @@ -12,7 +12,7 @@ using namespace mmdeploy; TEST_CASE("test trt net", "[trt_net]") { auto& gResource = MMDeployTestResources::Get(); - auto model_list = gResource.LocateModelResources("mmcls/trt"); + auto model_list = gResource.LocateModelResources(fs::path{"mmcls"} / "trt"); 
REQUIRE(!model_list.empty()); Model model(model_list.front()); diff --git a/tests/test_csrc/preprocess/test_compose.cpp b/tests/test_csrc/preprocess/test_compose.cpp index 9b7cd4d8d1..97e8ea452d 100644 --- a/tests/test_csrc/preprocess/test_compose.cpp +++ b/tests/test_csrc/preprocess/test_compose.cpp @@ -11,7 +11,7 @@ #include "core/registry.h" #include "core/utils/formatter.h" #include "json.hpp" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_crop.cpp b/tests/test_csrc/preprocess/test_crop.cpp index 836e527260..b5958b4218 100644 --- a/tests/test_csrc/preprocess/test_crop.cpp +++ b/tests/test_csrc/preprocess/test_crop.cpp @@ -4,7 +4,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_image2tensor.cpp b/tests/test_csrc/preprocess/test_image2tensor.cpp index 13de3e3414..16939a09b2 100644 --- a/tests/test_csrc/preprocess/test_image2tensor.cpp +++ b/tests/test_csrc/preprocess/test_image2tensor.cpp @@ -2,7 +2,7 @@ #include "catch.hpp" #include "core/tensor.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_load.cpp b/tests/test_csrc/preprocess/test_load.cpp index fa7ef5867b..47abe91b3b 100644 --- a/tests/test_csrc/preprocess/test_load.cpp +++ b/tests/test_csrc/preprocess/test_load.cpp @@ -4,7 +4,7 @@ #include "core/mat.h" #include "core/tensor.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_normalize.cpp b/tests/test_csrc/preprocess/test_normalize.cpp index 647203c02c..bf96f55b23 100644 --- a/tests/test_csrc/preprocess/test_normalize.cpp +++ b/tests/test_csrc/preprocess/test_normalize.cpp @@ -3,7 +3,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_pad.cpp b/tests/test_csrc/preprocess/test_pad.cpp index 3f1608b3b2..338be4bbaf 100644 --- a/tests/test_csrc/preprocess/test_pad.cpp +++ b/tests/test_csrc/preprocess/test_pad.cpp @@ -3,7 +3,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/preprocess/test_resize.cpp b/tests/test_csrc/preprocess/test_resize.cpp index 8c63d5a19f..e5143f3091 100644 --- a/tests/test_csrc/preprocess/test_resize.cpp +++ b/tests/test_csrc/preprocess/test_resize.cpp @@ -3,7 +3,7 @@ #include "catch.hpp" #include "core/mat.h" #include "core/utils/device_utils.h" -#include "preprocess/cpu/opencv_utils.h" +#include "opencv_utils.h" #include "preprocess/transform/transform.h" #include "test_resource.h" #include "test_utils.h" diff --git a/tests/test_csrc/test_resource.h 
b/tests/test_csrc/test_resource.h index 11fbd034e2..f59c79bf52 100644 --- a/tests/test_csrc/test_resource.h +++ b/tests/test_csrc/test_resource.h @@ -5,21 +5,13 @@ #include #include #include +#include #include #include +#include "core/utils/filesystem.h" #include "test_define.h" -#if __GNUC__ >= 8 -#include -namespace fs = std::filesystem; -#else - -#include - -namespace fs = std::experimental::filesystem; -#endif - using namespace std; class MMDeployTestResources { @@ -35,51 +27,51 @@ class MMDeployTestResources { } const std::vector &backends() const { return backends_; } const std::vector &codebases() const { return codebases_; } - const std::string &resource_root_path() const { return resource_root_path_; } + const fs::path &resource_root_path() const { return resource_root_path_; } bool HasDevice(const std::string &name) const { return std::any_of(devices_.begin(), devices_.end(), [&](const std::string &device_name) { return device_name == name; }); } - bool IsDir(const std::string &dir_name) const { - fs::path path{resource_root_path_ + "/" + dir_name}; + bool IsDir(const fs::path &dir_name) const { + auto path = resource_root_path_ / dir_name; return fs::is_directory(path); } - bool IsFile(const std::string &file_name) const { - fs::path path{resource_root_path_ + "/" + file_name}; + bool IsFile(const fs::path &file_name) const { + auto path = resource_root_path_ / file_name; return fs::is_regular_file(path); } public: - std::vector LocateModelResources(const std::string &sdk_model_zoo_dir) { + std::vector LocateModelResources(const fs::path &sdk_model_zoo_dir) { std::vector sdk_model_list; if (resource_root_path_.empty()) { return sdk_model_list; } - fs::path path{resource_root_path_ + "/" + sdk_model_zoo_dir}; + auto path = resource_root_path_ / sdk_model_zoo_dir; if (!fs::is_directory(path)) { return sdk_model_list; } for (auto const &dir_entry : fs::directory_iterator{path}) { fs::directory_entry entry{dir_entry.path()}; if (auto const &_path = dir_entry.path(); fs::is_directory(_path)) { - sdk_model_list.push_back(dir_entry.path()); + sdk_model_list.push_back(dir_entry.path().string()); } } return sdk_model_list; } - std::vector LocateImageResources(const std::string &img_dir) { + std::vector LocateImageResources(const fs::path &img_dir) { std::vector img_list; if (resource_root_path_.empty()) { return img_list; } - fs::path path{resource_root_path_ + "/" + img_dir}; + auto path = resource_root_path_ / img_dir; if (!fs::is_directory(path)) { return img_list; } @@ -122,15 +114,17 @@ class MMDeployTestResources { return result; } - std::string LocateResourceRootPath(const fs::path &cur_path, int max_depth) { + fs::path LocateResourceRootPath(const fs::path &cur_path, int max_depth) { if (max_depth < 0) { return ""; } for (auto const &dir_entry : fs::directory_iterator{cur_path}) { fs::directory_entry entry{dir_entry.path()}; auto const &_path = dir_entry.path(); - if (fs::is_directory(_path) && _path.filename() == "mmdeploy_test_resources") { - return _path.string(); + // filename must be checked before fs::is_directory, the latter will throw + // when _path points to a system file on Windows + if (_path.filename() == "mmdeploy_test_resources" && fs::is_directory(_path)) { + return _path; } } // Didn't find 'mmdeploy_test_resources' in current directory. 
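The reordering in `LocateResourceRootPath` above is deliberate: `fs::path::filename()` is pure string manipulation, while `fs::is_directory` stats the entry and can throw `fs::filesystem_error` when the OS denies access, as the new comment notes for Windows system files. Checking the cheap predicate first lets `&&` short-circuit before the throwing call can run. A standalone illustrative sketch of the same ordering (only the `mmdeploy_test_resources` name is taken from the code above):

```cpp
// Sketch of the check ordering used in LocateResourceRootPath; C++17.
#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

int main() {
  for (const auto& dir_entry : fs::directory_iterator{fs::current_path()}) {
    const auto& path = dir_entry.path();
    // filename() never touches the filesystem, while is_directory() stats the
    // entry and may throw fs::filesystem_error on inaccessible system files;
    // the name test therefore runs first and short-circuits the &&.
    if (path.filename() == "mmdeploy_test_resources" && fs::is_directory(path)) {
      std::cout << "found: " << path.string() << '\n';
    }
  }
}
```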
@@ -143,7 +137,8 @@ class MMDeployTestResources { std::vector backends_; std::vector codebases_; std::map> backend_devices_; - std::string resource_root_path_; + fs::path resource_root_path_; + // std::string resource_root_path_; }; #endif // MMDEPLOY_TEST_RESOURCE_H From 2df085c59472032aee52f50648ef49156e5bdd2e Mon Sep 17 00:00:00 2001 From: AllentDan <41138331+AllentDan@users.noreply.github.com> Date: Mon, 28 Feb 2022 11:25:13 +0800 Subject: [PATCH 14/17] [Fix]use float in scale factor (#185) * use float * add ut --- mmdeploy/pytorch/functions/interpolate.py | 3 ++- tests/test_ops/test_ops.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/mmdeploy/pytorch/functions/interpolate.py b/mmdeploy/pytorch/functions/interpolate.py index 2d9632ea30..2b619b9535 100644 --- a/mmdeploy/pytorch/functions/interpolate.py +++ b/mmdeploy/pytorch/functions/interpolate.py @@ -90,7 +90,8 @@ def forward(g, input, scale_factor, align_corners): 'tensor. Which is not available for custom ops. Computed scale' '_factor might be the right way to get final shape.') scale_factor = [ - s_out / s_in for s_out, s_in in zip(size, input_size[2:]) + float(s_out / s_in) + for s_out, s_in in zip(size, input_size[2:]) ] return BicubicInterpolate.apply(input, scale_factor, align_corners) else: diff --git a/tests/test_ops/test_ops.py b/tests/test_ops/test_ops.py index eeb86425fd..60143d878f 100644 --- a/tests/test_ops/test_ops.py +++ b/tests/test_ops/test_ops.py @@ -105,12 +105,14 @@ def wrapped_function(inputs, grid): @pytest.mark.parametrize('dynamic_export', [True, False]) @pytest.mark.parametrize('mode', ['bicubic', 'nearest']) @pytest.mark.parametrize('align_corners', [True, False]) -@pytest.mark.parametrize('scale_factor', [2, 4]) +@pytest.mark.parametrize('output_size', [[10, 20], None]) +@pytest.mark.parametrize('scale_factor', [2]) @pytest.mark.parametrize('n, c, h, w', [(2, 3, 5, 10)]) def test_bicubic_interpolate(backend, dynamic_export, mode, align_corners, + output_size, scale_factor, n, c, @@ -140,8 +142,12 @@ def test_bicubic_interpolate(backend, if mode == 'nearest': align_corners = None - resize = nn.Upsample( - scale_factor=scale_factor, mode=mode, align_corners=align_corners) + if output_size is None: + resize = nn.Upsample( + scale_factor=scale_factor, mode=mode, align_corners=align_corners) + else: + resize = nn.Upsample( + size=output_size, mode=mode, align_corners=align_corners) expected_result = resize(input).cuda() wrapped_model = WrapFunction(resize).eval() From 5c596e1fd492605283b298017e0bd2b1516c554b Mon Sep 17 00:00:00 2001 From: lzhangzz Date: Mon, 28 Feb 2022 15:32:51 +0800 Subject: [PATCH 15/17] fix ncnn (#187) --- csrc/backend_ops/ncnn/ops/CMakeLists.txt | 4 ++-- csrc/backend_ops/ncnn/ops/ncnn_ops_register.h | 2 -- .../backend_ops/ncnn/pyncnn_ext/CMakeLists.txt | 8 +++++--- csrc/net/ncnn/ncnn_net.cpp | 18 ++++++++---------- mmdeploy/backend/ncnn/init_plugins.py | 4 ++-- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/csrc/backend_ops/ncnn/ops/CMakeLists.txt b/csrc/backend_ops/ncnn/ops/CMakeLists.txt index 4a4334518c..461301211a 100755 --- a/csrc/backend_ops/ncnn/ops/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/ops/CMakeLists.txt @@ -7,9 +7,9 @@ include(${CMAKE_SOURCE_DIR}/cmake/MMDeploy.cmake) # add plugin source file(GLOB_RECURSE NCNN_OPS_SRCS *.cpp) add_library(${PROJECT_NAME}_obj OBJECT "${NCNN_OPS_SRCS}") +target_compile_definitions(${PROJECT_NAME}_obj PRIVATE -DMMDEPLOY_API_EXPORTS=1) set_target_properties(${PROJECT_NAME}_obj PROPERTIES 
POSITION_INDEPENDENT_CODE 1) -target_include_directories(${PROJECT_NAME}_obj PUBLIC - $) +target_link_libraries(${PROJECT_NAME}_obj PRIVATE ncnn) set(_COMMON_INCLUDE_DIRS $ $) diff --git a/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h b/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h index b265f6d4af..333f174e79 100755 --- a/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h +++ b/csrc/backend_ops/ncnn/ops/ncnn_ops_register.h @@ -8,11 +8,9 @@ #include "core/macro.h" #include "net.h" -extern "C" { MMDEPLOY_API std::map& get_mmdeploy_layer_creator(); MMDEPLOY_API std::map& get_mmdeploy_layer_destroyer(); MMDEPLOY_API int register_mmdeploy_custom_layers(ncnn::Net& net); -} #endif diff --git a/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt b/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt index b60c91006d..c55acb488f 100755 --- a/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt +++ b/csrc/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt @@ -10,6 +10,8 @@ endif () pybind11_add_module(ncnn_ext ncnn_ext.cpp) target_link_libraries(ncnn_ext PUBLIC mmdeploy_ncnn_ops ncnn) -set_target_properties( - ncnn_ext PROPERTIES LIBRARY_OUTPUT_DIRECTORY - ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn) +set(_NCNN_EXT_DIR ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn) +set_target_properties(ncnn_ext PROPERTIES + LIBRARY_OUTPUT_DIRECTORY ${_NCNN_EXT_DIR} + LIBRARY_OUTPUT_DIRECTORY_DEBUG ${_NCNN_EXT_DIR} + LIBRARY_OUTPUT_DIRECTORY_RELEASE ${_NCNN_EXT_DIR}) diff --git a/csrc/net/ncnn/ncnn_net.cpp b/csrc/net/ncnn/ncnn_net.cpp index 041580d343..1a958ccfe8 100644 --- a/csrc/net/ncnn/ncnn_net.cpp +++ b/csrc/net/ncnn/ncnn_net.cpp @@ -41,22 +41,20 @@ Result NCNNNet::Init(const Value& args) { input_indices_ = net_.input_indexes(); for (const auto& x : net_.input_names()) { - // input_names_.emplace_back(x); input_tensors_.emplace_back(TensorDesc{ - .device = Device("cpu"), - .data_type = DataType::kFLOAT, - .shape = {}, - .name = x, + Device("cpu"), + DataType::kFLOAT, + {}, + x, }); } output_indices_ = net_.output_indexes(); for (const auto& x : net_.output_names()) { - // output_names_.emplace_back(x); output_tensors_.emplace_back(TensorDesc{ - .device = Device("cpu"), - .data_type = DataType::kFLOAT, - .shape = {}, - .name = x, + Device("cpu"), + DataType::kFLOAT, + {}, + x, }); } diff --git a/mmdeploy/backend/ncnn/init_plugins.py b/mmdeploy/backend/ncnn/init_plugins.py index 80cd871c53..10f9ca2655 100644 --- a/mmdeploy/backend/ncnn/init_plugins.py +++ b/mmdeploy/backend/ncnn/init_plugins.py @@ -12,7 +12,7 @@ def get_ops_path() -> str: """ candidates = [ '../../../build/lib/libmmdeploy_ncnn_ops.so', - '../../../build/bin/*/mmdeploy_ncnn_ops.pyd' + '../../../build/bin/*/mmdeploy_ncnn_ops.dll' ] return get_file_path(os.path.dirname(__file__), candidates) @@ -24,6 +24,6 @@ def get_onnx2ncnn_path() -> str: str: A path of onnx2ncnn tool. 
""" candidates = [ - '../../../build/bin/onnx2ncnn', '../../../build/bin/*/onnx2ncnn' + '../../../build/bin/onnx2ncnn', '../../../build/bin/*/onnx2ncnn.exe' ] return get_file_path(os.path.dirname(__file__), candidates) From e89becd178fe4c807c2a75916169dc198d650dc8 Mon Sep 17 00:00:00 2001 From: lvhan028 Date: Mon, 28 Feb 2022 15:38:31 +0800 Subject: [PATCH 16/17] [enhancement] Reorganizing OpenMMLab projects in readme (#184) * [enhancement] Reorganizing OpenMMLab projects in readme * add MMPose in supported codebase list * add MMPose in supported codebase list --- README.md | 21 +++++++++++---------- README_zh-CN.md | 17 +++++++++-------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 503588d833..37a10deff3 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,7 @@ a part of the [OpenMMLab](https://openmmlab.com/) project. - [x] MMSegmentation - [x] MMEditing - [x] MMOCR + - [x] MMPose - **Multiple inference backends are available** @@ -111,21 +112,21 @@ If you find this project useful in your research, please consider cite: ## Projects in OpenMMLab - [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. -- [MIM](https://github.com/open-mmlab/mim): MIM Installs OpenMMLab Packages. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. - [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark. - [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. - [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. - [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. - [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. - [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. -- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. - [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox. -- [MMOCR](https://github.com/open-mmlab/mmocr): A Comprehensive Toolbox for Text Detection, Recognition and Understanding. - [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. -- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. 
-- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab FewShot Learning Toolbox and Benchmark. -- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab Human Pose and Shape Estimation Toolbox and Benchmark. -- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning Toolbox and Benchmark. -- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab Model Compression Toolbox and Benchmark. -- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework. -- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. diff --git a/README_zh-CN.md b/README_zh-CN.md index 0ed95fa110..12a6c4c5e3 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -46,6 +46,7 @@ MMDeploy 是一个开源深度学习模型部署工具箱,它是 [OpenMMLab](h - [x] MMSegmentation - [x] MMEditing - [x] MMOCR + - [x] MMPose - **支持多种推理后端** @@ -114,20 +115,20 @@ MMDeploy 是一个开源深度学习模型部署工具箱,它是 [OpenMMLab](h - [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱 - [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱 - [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台 +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 - [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱 -- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 -- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 -- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 -- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱 - [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包 -- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱 -- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 -- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱 - [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准 - [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准 - [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准 +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准 +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱 +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台 +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准 +- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱 +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱 - [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架 -- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准 ## 欢迎加入 OpenMMLab 社区 From ba5351e20948591effc75e644014923282c1fa10 Mon Sep 17 00:00:00 2001 From: Richard-mei Date: Mon, 28 Feb 2022 16:28:00 +0800 Subject: [PATCH 17/17] add gfl_trt (#124) * add gfl_trt * add gfl_head.py * add batch_integral * lint code * add gfl unit test * fix unit test * add gfl benchmark * fix unit test bug * Update gfl_head.py * Update __init__.py remove '**_forward_single' * fix lint error and ut error * fix docs and benchmark Co-authored-by: VVsssssk --- docs/en/benchmark.md | 
14 ++ docs/en/codebases/mmdet.md | 1 + docs/en/supported_models.md | 1 + .../mmdet/models/dense_heads/__init__.py | 4 +- .../mmdet/models/dense_heads/gfl_head.py | 185 ++++++++++++++++++ .../test_mmdet/test_mmdet_models.py | 107 ++++++++++ 6 files changed, 311 insertions(+), 1 deletion(-) create mode 100644 mmdeploy/codebase/mmdet/models/dense_heads/gfl_head.py diff --git a/docs/en/benchmark.md b/docs/en/benchmark.md index 8a5035de5e..41c748c9d2 100644 --- a/docs/en/benchmark.md +++ b/docs/en/benchmark.md @@ -996,6 +996,20 @@ Users can directly test the performance through [how_to_evaluate_a_model.md](tut - $MMDET_DIR/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py + + GFL + Object Detection + COCO2017 + box AP + 40.2 + - + 40.2 + 40.2 + 40.0 + - + - + $MMDET_DIR/configs/gfl/gfl_r50_fpn_1x_coco.py + Mask R-CNN Instance Segmentation diff --git a/docs/en/codebases/mmdet.md b/docs/en/codebases/mmdet.md index e5b4f5409d..f03bf7c60f 100644 --- a/docs/en/codebases/mmdet.md +++ b/docs/en/codebases/mmdet.md @@ -22,6 +22,7 @@ Please refer to [get_started.md](https://github.com/open-mmlab/mmdetection/blob/ | Cascade R-CNN | ObjectDetection | Y | Y | N | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | | Faster R-CNN | ObjectDetection | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) | | Faster R-CNN + DCN | ObjectDetection | Y | Y | Y | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) | +| GFL | ObjectDetection | Y | Y | N | ? | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl) | | Cascade Mask R-CNN | InstanceSegmentation | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | | Mask R-CNN | InstanceSegmentation | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) | diff --git a/docs/en/supported_models.md b/docs/en/supported_models.md index 8f13fe93df..2c865bfa20 100644 --- a/docs/en/supported_models.md +++ b/docs/en/supported_models.md @@ -14,6 +14,7 @@ The table below lists the models that are guaranteed to be exportable to other b | SSD[*](#note) | MMDetection | Y | Y | Y | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd) | | FoveaBox | MMDetection | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/foveabox) | | ATSS | MMDetection | Y | Y | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss) | +| GFL | MMDetection | Y | Y | N | ? 
| Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl) | | Cascade R-CNN | MMDetection | Y | Y | N | Y | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | | Cascade Mask R-CNN | MMDetection | Y | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn) | | VFNet | MMDetection | N | N | N | N | Y | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/vfnet) | diff --git a/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py b/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py index 080fe6c7ba..9043d44264 100644 --- a/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py @@ -2,6 +2,7 @@ from .base_dense_head import (base_dense_head__get_bbox, base_dense_head__get_bboxes__ncnn) from .fovea_head import fovea_head__get_bboxes +from .gfl_head import gfl_head__get_bbox from .rpn_head import rpn_head__get_bboxes, rpn_head__get_bboxes__ncnn from .ssd_head import ssd_head__get_bboxes__ncnn from .yolo_head import yolov3_head__get_bboxes, yolov3_head__get_bboxes__ncnn @@ -12,5 +13,6 @@ 'yolov3_head__get_bboxes', 'yolov3_head__get_bboxes__ncnn', 'yolox_head__get_bboxes', 'base_dense_head__get_bbox', 'fovea_head__get_bboxes', 'base_dense_head__get_bboxes__ncnn', - 'ssd_head__get_bboxes__ncnn', 'yolox_head__get_bboxes__ncnn' + 'ssd_head__get_bboxes__ncnn', 'yolox_head__get_bboxes__ncnn', + 'gfl_head__get_bbox' ] diff --git a/mmdeploy/codebase/mmdet/models/dense_heads/gfl_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/gfl_head.py new file mode 100644 index 0000000000..8dba8b5666 --- /dev/null +++ b/mmdeploy/codebase/mmdet/models/dense_heads/gfl_head.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F + +from mmdeploy.codebase.mmdet import (get_post_processing_params, + multiclass_nms, pad_with_value) +from mmdeploy.core import FUNCTION_REWRITER +from mmdeploy.utils import Backend, get_backend, is_dynamic_shape + + +@FUNCTION_REWRITER.register_rewriter( + func_name='mmdet.models.dense_heads.gfl_head.' + 'GFLHead.get_bboxes') +def gfl_head__get_bbox(ctx, + self, + cls_scores, + bbox_preds, + score_factors=None, + img_metas=None, + cfg=None, + rescale=False, + with_nms=True, + **kwargs): + """Rewrite `get_bboxes` of `GFLHead` for default backend. + + Rewrite this function to deploy model, transform network output for a + batch into bbox predictions. + + Args: + ctx (ContextCaller): The context with additional information. + self: The instance of the original class. + cls_scores (list[Tensor]): Classification scores for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for all + scale levels, each is a 4D-tensor, has shape + (batch_size, num_priors * 4, H, W). + score_factors (list[Tensor], Optional): Score factor for + all scale level, each is a 4D-tensor, has shape + (batch_size, num_priors * 1, H, W). Default None. + img_metas (list[dict], Optional): Image meta info. Default None. + cfg (mmcv.Config, Optional): Test / postprocessing configuration, + if None, test_cfg would be used. Default None. + rescale (bool): If True, return boxes in original image space. + Default False. + with_nms (bool): If True, do nms before return boxes. + Default True. 
+
+    Returns:
+        If with_nms == True:
+            tuple[Tensor, Tensor]: (dets, labels),
+                `dets` of shape [N, num_det, 5] and `labels` of shape
+                [N, num_det].
+        Else:
+            tuple[Tensor, Tensor, Tensor]: batch_mlvl_bboxes,
+                batch_mlvl_scores, batch_mlvl_centerness
+    """
+    deploy_cfg = ctx.cfg
+    is_dynamic_flag = is_dynamic_shape(deploy_cfg)
+    backend = get_backend(deploy_cfg)
+    num_levels = len(cls_scores)
+
+    featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+    mlvl_priors = self.prior_generator.grid_priors(
+        featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device)
+
+    mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
+    mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
+    if score_factors is None:
+        with_score_factors = False
+        mlvl_score_factor = [None for _ in range(num_levels)]
+    else:
+        with_score_factors = True
+        mlvl_score_factor = [
+            score_factors[i].detach() for i in range(num_levels)
+        ]
+        mlvl_score_factors = []
+    assert img_metas is not None
+    img_shape = img_metas[0]['img_shape']
+
+    assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)
+    batch_size = cls_scores[0].shape[0]
+    cfg = self.test_cfg
+    pre_topk = cfg.get('nms_pre', -1)
+
+    mlvl_valid_bboxes = []
+    mlvl_valid_scores = []
+    mlvl_valid_priors = []
+
+    for cls_score, bbox_pred, score_factors, priors, stride in zip(
+            mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor, mlvl_priors,
+            self.prior_generator.strides):
+        assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+        assert stride[0] == stride[1]
+
+        scores = cls_score.permute(0, 2, 3, 1).reshape(batch_size, -1,
+                                                       self.cls_out_channels)
+        if self.use_sigmoid_cls:
+            scores = scores.sigmoid()
+            nms_pre_score = scores
+        else:
+            scores = scores.softmax(-1)
+            nms_pre_score = scores
+        if with_score_factors:
+            score_factors = score_factors.permute(0, 2, 3,
+                                                  1).reshape(batch_size,
+                                                             -1).sigmoid()
+            score_factors = score_factors.unsqueeze(2)
+        bbox_pred = batched_integral(self.integral,
+                                     bbox_pred.permute(0, 2, 3, 1)) * stride[0]
+        if not is_dynamic_flag:
+            priors = priors.data
+        priors = priors.expand(batch_size, -1, priors.size(-1))
+        if pre_topk > 0:
+            if with_score_factors:
+                nms_pre_score = nms_pre_score * score_factors
+            if backend == Backend.TENSORRT:
+                priors = pad_with_value(priors, 1, pre_topk)
+                bbox_pred = pad_with_value(bbox_pred, 1, pre_topk)
+                scores = pad_with_value(scores, 1, pre_topk, 0.)
+                nms_pre_score = pad_with_value(nms_pre_score, 1, pre_topk, 0.)
+                if with_score_factors:
+                    score_factors = pad_with_value(score_factors, 1, pre_topk,
+                                                   0.)
+
+            # Get maximum scores for foreground classes.
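+            # With sigmoid heads every channel is a foreground class; with
+            # softmax heads the last channel is the background class, so it
+            # is dropped below before ranking candidates for topk.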
+            if self.use_sigmoid_cls:
+                max_scores, _ = nms_pre_score.max(-1)
+            else:
+                max_scores, _ = nms_pre_score[..., :-1].max(-1)
+            _, topk_inds = max_scores.topk(pre_topk)
+            batch_inds = torch.arange(
+                batch_size,
+                device=bbox_pred.device).view(-1, 1).expand_as(topk_inds)
+            priors = priors[batch_inds, topk_inds, :]
+            bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+            scores = scores[batch_inds, topk_inds, :]
+            if with_score_factors:
+                score_factors = score_factors[batch_inds, topk_inds, :]
+
+        mlvl_valid_bboxes.append(bbox_pred)
+        mlvl_valid_scores.append(scores)
+        priors = self.anchor_center(priors)
+        mlvl_valid_priors.append(priors)
+        if with_score_factors:
+            mlvl_score_factors.append(score_factors)
+
+    batch_mlvl_bboxes_pred = torch.cat(mlvl_valid_bboxes, dim=1)
+    batch_scores = torch.cat(mlvl_valid_scores, dim=1)
+    batch_priors = torch.cat(mlvl_valid_priors, dim=1)
+    batch_bboxes = self.bbox_coder.decode(
+        batch_priors, batch_mlvl_bboxes_pred, max_shape=img_shape)
+    if with_score_factors:
+        batch_score_factors = torch.cat(mlvl_score_factors, dim=1)
+
+    if not self.use_sigmoid_cls:
+        batch_scores = batch_scores[..., :self.num_classes]
+
+    if with_score_factors:
+        batch_scores = batch_scores * batch_score_factors
+    if not with_nms:
+        return batch_bboxes, batch_scores
+    post_params = get_post_processing_params(deploy_cfg)
+    max_output_boxes_per_class = post_params.max_output_boxes_per_class
+    iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
+    score_threshold = cfg.get('score_thr', post_params.score_threshold)
+    pre_top_k = post_params.pre_top_k
+    keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
+    return multiclass_nms(
+        batch_bboxes,
+        batch_scores,
+        max_output_boxes_per_class,
+        iou_threshold=iou_threshold,
+        score_threshold=score_threshold,
+        pre_top_k=pre_top_k,
+        keep_top_k=keep_top_k)
+
+
+def batched_integral(integral, x):
+    batch_size = x.size(0)
+    x = F.softmax(x.reshape(batch_size, -1, integral.reg_max + 1), dim=2)
+    x = F.linear(x,
+                 integral.project.type_as(x).unsqueeze(0)).reshape(
+                     batch_size, -1, 4)
+    return x
diff --git a/tests/test_codebase/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py
index 9cfe6e83d9..192bf72198 100644
--- a/tests/test_codebase/test_mmdet/test_mmdet_models.py
+++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py
@@ -157,6 +157,31 @@ def get_single_roi_extractor():
     return model
 
 
+def get_gfl_head_model():
+    test_cfg = mmcv.Config(
+        dict(
+            nms_pre=1000,
+            min_bbox_size=0,
+            score_thr=0.05,
+            nms=dict(type='nms', iou_threshold=0.6),
+            max_per_img=100))
+    anchor_generator = dict(
+        type='AnchorGenerator',
+        scales_per_octave=1,
+        octave_base_scale=8,
+        ratios=[1.0],
+        strides=[8, 16, 32, 64, 128])
+    from mmdet.models.dense_heads import GFLHead
+    model = GFLHead(
+        num_classes=3,
+        in_channels=256,
+        reg_max=3,
+        test_cfg=test_cfg,
+        anchor_generator=anchor_generator)
+    model.requires_grad_(False)
+    return model
+
+
 def test_focus_forward_ncnn():
     backend_type = Backend.NCNN
     check_backend(backend_type)
@@ -349,6 +374,88 @@ def test_get_bboxes_of_rpn_head(backend_type: Backend):
     assert rewrite_outputs is not None
 
 
+@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME])
+def test_get_bboxes_of_gfl_head(backend_type):
+    check_backend(backend_type)
+    head = get_gfl_head_model()
+    head.cpu().eval()
+    s = 4
+    img_metas = [{
+        'scale_factor': np.ones(4),
+        'pad_shape': (s, s, 3),
+        'img_shape': (s, s, 3)
+    }]
+    output_names = ['dets']
+    deploy_cfg = mmcv.Config(
+        dict(
+
backend_config=dict(type=backend_type.value), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + model_type='ncnn_end2end', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + )))) + + seed_everything(1234) + cls_score = [ + torch.rand(1, 3, pow(2, i), pow(2, i)) for i in range(5, 0, -1) + ] + seed_everything(5678) + bboxes = [torch.rand(1, 16, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + + # to get outputs of onnx model after rewrite + img_metas[0]['img_shape'] = torch.Tensor([s, s]) + wrapped_model = WrapModel( + head, 'get_bboxes', img_metas=img_metas, with_nms=True) + rewrite_inputs = { + 'cls_scores': cls_score, + 'bbox_preds': bboxes, + } + # do not run with ncnn backend + run_with_backend = False if backend_type in [Backend.NCNN] else True + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg, + run_with_backend=run_with_backend) + assert rewrite_outputs is not None + + +@pytest.mark.parametrize('backend_type', [Backend.ONNXRUNTIME]) +def test_forward_of_gfl_head(backend_type): + check_backend(backend_type) + head = get_gfl_head_model() + head.cpu().eval() + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type.value), + onnx_config=dict(input_shape=None))) + feats = [torch.rand(1, 256, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + model_outputs = [head.forward(feats)] + wrapped_model = WrapModel(head, 'forward') + rewrite_inputs = { + 'feats': feats, + } + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + model_outputs[0] = [*model_outputs[0][0], *model_outputs[0][1]] + for model_output, rewrite_output in zip(model_outputs[0], + rewrite_outputs[0]): + model_output = model_output.squeeze().cpu().numpy() + rewrite_output = rewrite_output.squeeze() + assert np.allclose( + model_output, rewrite_output, rtol=1e-03, atol=1e-05) + + def _replace_r50_with_r18(model): """Replace ResNet50 with ResNet18 in config.""" model = copy.deepcopy(model)
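A closing note on the GFL patch: the `batched_integral` helper above is a batch-friendly version of GFL's `Integral` module, which decodes each box edge from a discrete distribution over `reg_max + 1` bins. Since the projection weights are the bin indices `0, 1, ..., reg_max` (in mmdet, `integral.project` is built with `linspace(0, reg_max, reg_max + 1)`), the decoded offset is the expectation of that distribution,

$$\hat{d} \;=\; \sum_{i=0}^{\text{reg\_max}} i \cdot \mathrm{softmax}(x)_i,$$

computed once per edge (hence the reshape to `(batch_size, -1, 4)`) and then scaled by the level stride inside `gfl_head__get_bbox`.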