From 2371b331316e2b2893d45c376f74620bc530ef44 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 Jan 2022 11:19:02 +0800 Subject: [PATCH 1/5] update docs --- contrib/Matting/README.md | 2 +- contrib/Matting/README_CN.md | 2 +- .../Matting/deploy/human_matting_android_demo/README_CN.md | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/Matting/README.md b/contrib/Matting/README.md index e123249b4c..42f93c51a9 100644 --- a/contrib/Matting/README.md +++ b/contrib/Matting/README.md @@ -231,7 +231,7 @@ python export.py --help python deploy/python/infer.py \ --config output/export/deploy.yaml \ --image_path data/PPM-100/val/fg/ \ - --save_dir ouput/results + --save_dir output/results ``` If the model requires trimap information, pass the trimap path through '--trimap_path'. diff --git a/contrib/Matting/README_CN.md b/contrib/Matting/README_CN.md index 92e87c367c..028c1724dd 100644 --- a/contrib/Matting/README_CN.md +++ b/contrib/Matting/README_CN.md @@ -235,7 +235,7 @@ python export.py --help python deploy/python/infer.py \ --config output/export/deploy.yaml \ --image_path data/PPM-100/val/fg/ \ - --save_dir ouput/results + --save_dir output/results ``` 如模型需要trimap信息,需要通过`--trimap_path`传入trimap路径。 diff --git a/contrib/Matting/deploy/human_matting_android_demo/README_CN.md b/contrib/Matting/deploy/human_matting_android_demo/README_CN.md index 48142462e1..69e851606b 100644 --- a/contrib/Matting/deploy/human_matting_android_demo/README_CN.md +++ b/contrib/Matting/deploy/human_matting_android_demo/README_CN.md @@ -26,7 +26,7 @@ *注:此安卓demo基于[Paddle-Lite](https://paddlelite.paddlepaddle.org.cn/)开发,PaddleLite版本为2.8.0。* ### 2.3 预测 -* 在人像抠图Demo中,默认会载入一张人像图像,并会在图像下方给出CPU的预测结果和预测时延; +* 在人像抠图Demo中,默认会载入一张人像图像,并会在图像下方给出CPU的预测结果和预测延时; * 在人像抠图Demo中,你还可以通过右上角的"打开本地相册"和"打开摄像头拍照"按钮分别从相册或相机中加载测试图像然后进行预测推理; *注意:demo中拍照时照片会自动压缩,想测试拍照原图效果,可使用手机相机拍照后从相册中打开进行预测。* @@ -80,7 +80,7 @@ val_dataset: mode: val get_trimap: False ``` -上述修改中尤其注意short_size: 
256这个字段,这个值直接决定我们最终的推理图像采用的尺寸大小。这个字段值设置太小会影响预测精度,设置太大会影响手机推理速度(甚至造成手机因性能问题无法完成推理而奔溃)。经过实际测试,对于hrnet18,该字段设置为256较好。 +上述修改中尤其注意short_size: 256这个字段,这个值直接决定我们最终的推理图像采用的尺寸大小。这个字段值设置太小会影响预测精度,设置太大会影响手机推理速度(甚至造成手机因性能问题无法完成推理而崩溃)。经过实际测试,对于hrnet18,该字段设置为256较好。 完成配置文件修改后,采用下面的命令进行静态图导出: ``` shell From 8b9914cab6b644df14af704a97e7e6f4835745cd Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 Jan 2022 11:47:57 +0800 Subject: [PATCH 2/5] update url of pretrained model --- .../Matting/configs/human_matting/human_matting_resnet34_vd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/Matting/configs/human_matting/human_matting_resnet34_vd.yml b/contrib/Matting/configs/human_matting/human_matting_resnet34_vd.yml index 56b033a6c7..7426164788 100644 --- a/contrib/Matting/configs/human_matting/human_matting_resnet34_vd.yml +++ b/contrib/Matting/configs/human_matting/human_matting_resnet34_vd.yml @@ -39,7 +39,7 @@ model: type: HumanMatting backbone: type: ResNet34_vd - pretrained: https://bj.bcebos.com/paddleseg/dygraph/ResNet34_vd_pretrained.pdparams + pretrained: https://paddleseg.bj.bcebos.com/matting/models/ResNet34_vd_pretrained/model.pdparams pretrained: Null if_refine: True From dc30a153b70b6bb7e8faea8e9abd4d217198f17d Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 Jan 2022 15:52:28 +0800 Subject: [PATCH 3/5] update shape for exporting and update docs --- contrib/Matting/README.md | 4 +++- contrib/Matting/README_CN.md | 2 ++ contrib/Matting/model/human_matting.py | 14 ++++++++------ 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/contrib/Matting/README.md b/contrib/Matting/README.md index 42f93c51a9..a024a9dc2d 100644 --- a/contrib/Matting/README.md +++ b/contrib/Matting/README.md @@ -200,10 +200,12 @@ python bg_replace.py \ --bg_path path/to/your/background/image \ --save_dir ./output/results ``` -If the model requires trimap information, pass the trimap path through '--trimap_path'. 
+If the model requires trimap information, pass the trimap path through `--trimap_path`.
 
 If `--bg_path` is not provided, green background is used。
 
+**note:** `--image_path` must be an image path.
+
 You can directly download the provided model for background replacement.
 
 Run the following command to view more parameters.
diff --git a/contrib/Matting/README_CN.md b/contrib/Matting/README_CN.md
index 028c1724dd..fed9516b5a 100644
--- a/contrib/Matting/README_CN.md
+++ b/contrib/Matting/README_CN.md
@@ -208,6 +208,8 @@ python bg_replace.py \
 
 若不提供`--bg_path`, 则采用绿色作为背景。
 
+**注意:** `--image_path`必须是一张图片的具体路径。
+
 你可以直接下载我们提供的模型进行背景替换。
 
 更多参数信息请运行如下命令进行查看:
diff --git a/contrib/Matting/model/human_matting.py b/contrib/Matting/model/human_matting.py
index dac8802c25..d16207dfd8 100644
--- a/contrib/Matting/model/human_matting.py
+++ b/contrib/Matting/model/human_matting.py
@@ -162,12 +162,14 @@ def __init__(self,
 
     def forward(self, data):
         src = data['img']
-        src_h, src_w = src.shape[2:]
+        src_h, src_w = paddle.shape(src)[2:]
         if self.if_refine:
-            if (src_h % 4 != 0) or (src_w % 4) != 0:
-                raise ValueError(
-                    'The input image must have width and height that are divisible by 4'
-                )
+            # It is not needed when exporting.
+            if isinstance(src_h, paddle.Tensor):
+                if (src_h % 4 != 0) or (src_w % 4) != 0:
+                    raise ValueError(
+                        'The input image must have width and height that are divisible by 4'
+                    )
 
         # Downsample src for backbone
         src_sm = F.interpolate(
@@ -366,7 +368,7 @@ def forward(self, src, pha, err, hid, tri):
             hid: (B, 32, Hc, Hc) coarse hidden encoding.
             tri: (B, 1, Hc, Hc) trimap prediction.
''' - h_full, w_full = src.shape[2:] + h_full, w_full = paddle.shape(src)[2:] h_half, w_half = h_full // 2, w_full // 2 h_quat, w_quat = h_full // 4, w_full // 4 From 974b5798f3eaa64ab26a10acfa98c4c6e867ce8f Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 Jan 2022 16:13:03 +0800 Subject: [PATCH 4/5] update README.md --- contrib/Matting/deploy/human_matting_android_demo/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/Matting/deploy/human_matting_android_demo/README.md b/contrib/Matting/deploy/human_matting_android_demo/README.md index af958cb73f..6f9aa7f143 100644 --- a/contrib/Matting/deploy/human_matting_android_demo/README.md +++ b/contrib/Matting/deploy/human_matting_android_demo/README.md @@ -1,7 +1,7 @@ English | [简体中文](README_CN.md) # Human Matting Android Demo -Based on [PaddleSeg](https://github.com/paddlepaddle/paddleseg/tree/develop)[MODNet](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/Matting) algorithm to realise human matting(Android demo). +Based on [PaddleSeg](https://github.com/paddlepaddle/paddleseg/tree/develop) [MODNet](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/Matting) algorithm to realise human matting(Android demo). 
You can directly download and install the example project [apk](https://paddleseg.bj.bcebos.com/matting/models/deploy/app-debug.apk) to experience。 From 8f69dd45c74ee80a2d85d08dfb32398ce51a0d1e Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 Jan 2022 17:41:43 +0800 Subject: [PATCH 5/5] update human_matting.py --- contrib/Matting/model/human_matting.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/contrib/Matting/model/human_matting.py b/contrib/Matting/model/human_matting.py index d16207dfd8..31a8930610 100644 --- a/contrib/Matting/model/human_matting.py +++ b/contrib/Matting/model/human_matting.py @@ -374,9 +374,15 @@ def forward(self, src, pha, err, hid, tri): x = paddle.concat([hid, pha, tri], axis=1) x = F.interpolate( - x, (h_half, w_half), mode='bilinear', align_corners=False) + x, + paddle.concat((h_half, w_half)), + mode='bilinear', + align_corners=False) y = F.interpolate( - src, (h_half, w_half), mode='bilinear', align_corners=False) + src, + paddle.concat((h_half, w_half)), + mode='bilinear', + align_corners=False) if self.kernel_size == 3: x = F.pad(x, [3, 3, 3, 3]) @@ -386,10 +392,11 @@ def forward(self, src, pha, err, hid, tri): x = self.conv2(x) if self.kernel_size == 3: - x = F.interpolate(x, (h_full + 4, w_full + 4)) + x = F.interpolate(x, paddle.concat((h_full + 4, w_full + 4))) y = F.pad(src, [2, 2, 2, 2]) else: - x = F.interpolate(x, (h_full, w_full), mode='nearest') + x = F.interpolate( + x, paddle.concat((h_full, w_full)), mode='nearest') y = src x = self.conv3(paddle.concat([x, y], axis=1))