From 02d29aa0c33835d900c0e706cc78906222f75d38 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Tue, 12 Jul 2022 02:51:41 +0000
Subject: [PATCH 01/19] update .gitignore

---
 .gitignore | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/.gitignore b/.gitignore
index b2602607a9..b268868c02 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,12 @@
 fastdeploy/libs/lib*
+build
+cmake-build-debug
+cmake-build-release
+.vscode
+FastDeploy.cmake
+fastdeploy/core/config.h
+build-debug.sh
+*dist
+fastdeploy.egg-info
+.setuptools-cmake-build
+fastdeploy/version.py
\ No newline at end of file

From afa81147a6df3a7072912231cdd9cbd1c02282e4 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Tue, 12 Jul 2022 06:41:55 +0000
Subject: [PATCH 02/19] Added checking for cmake include dir

---
 CMakeLists.txt | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8bbc8139b9..2ff2b85f01 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -172,13 +172,15 @@ install(
   TARGETS fastdeploy
   LIBRARY DESTINATION lib
 )
-install(
-  DIRECTORY ${PROJECT_SOURCE_DIR}/fastdeploy
-  DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
-  FILES_MATCHING
-  PATTERN "*.h"
-  PATTERN "${PROJECT_SOURCE_DIR}/fastdeploy/backends/*/*.h"
-)
+if (DEFINED CMAKE_INSTALL_INCLUDEDIR)
+  install(
+    DIRECTORY ${PROJECT_SOURCE_DIR}/fastdeploy
+    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+    FILES_MATCHING
+    PATTERN "*.h"
+    PATTERN "${PROJECT_SOURCE_DIR}/fastdeploy/backends/*/*.h"
+  )
+endif()
 install(
   DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install
   DESTINATION ${CMAKE_INSTALL_PREFIX}/third_libs

From 659c14c5b90028fba4440af02ef942796f03a5a3 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Tue, 12 Jul 2022 06:44:21 +0000
Subject: [PATCH 03/19] fixed missing trt_backend option bug when init from trt

---
 fastdeploy/backends/tensorrt/trt_backend.cc | 3 ++-
 fastdeploy/backends/tensorrt/trt_backend.h  | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/fastdeploy/backends/tensorrt/trt_backend.cc b/fastdeploy/backends/tensorrt/trt_backend.cc
index dfc2840c2b..5dbb61ffe8 100644
--- a/fastdeploy/backends/tensorrt/trt_backend.cc
+++ b/fastdeploy/backends/tensorrt/trt_backend.cc
@@ -52,7 +52,8 @@ std::vector<int> toVec(const nvinfer1::Dims& dim) {
   return out;
 }
 
-bool TrtBackend::InitFromTrt(const std::string& trt_engine_file) {
+bool TrtBackend::InitFromTrt(const std::string& trt_engine_file,
+                             const TrtBackendOption& option) {
   if (initialized_) {
     FDERROR << "TrtBackend is already initialized, cannot initialize again."
             << std::endl;

diff --git a/fastdeploy/backends/tensorrt/trt_backend.h b/fastdeploy/backends/tensorrt/trt_backend.h
index 3b77c8bc24..e3f848a012 100644
--- a/fastdeploy/backends/tensorrt/trt_backend.h
+++ b/fastdeploy/backends/tensorrt/trt_backend.h
@@ -69,7 +69,8 @@ class TrtBackend : public BaseBackend {
   bool InitFromOnnx(const std::string& model_file,
                     const TrtBackendOption& option = TrtBackendOption(),
                     bool from_memory_buffer = false);
-  bool InitFromTrt(const std::string& trt_engine_file);
+  bool InitFromTrt(const std::string& trt_engine_file,
+                   const TrtBackendOption& option = TrtBackendOption());
 
   bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);
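For context: with this overload, backend options survive when loading an already serialized engine. A minimal caller sketch follows; the TrtBackendOption fields used below (gpu_id, enable_fp16) are assumptions for illustration, only the two-argument signature itself is established by the patch.

#include "fastdeploy/backends/tensorrt/trt_backend.h"

// Minimal caller sketch for the new InitFromTrt(file, option) overload.
// NOTE: the option fields below are assumed for illustration only.
int main() {
  fastdeploy::TrtBackendOption option;
  option.gpu_id = 0;          // assumed field: CUDA device to run on
  option.enable_fp16 = true;  // assumed field: allow FP16 kernels

  fastdeploy::TrtBackend backend;
  // Before this patch, loading a serialized engine took no option,
  // so settings like the device id were silently dropped.
  if (!backend.InitFromTrt("model.trt", option)) {
    return -1;
  }
  return 0;
}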
From 17a43cebd0507ee02ee3234958ce49cc52ad8c16 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Tue, 12 Jul 2022 06:46:38 +0000
Subject: [PATCH 04/19] remove unneeded data layout check and add pre-check
 for dtype

---
 fastdeploy/vision/common/processors/cast.cc | 34 +++++++++++++--------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/fastdeploy/vision/common/processors/cast.cc b/fastdeploy/vision/common/processors/cast.cc
index 2f8a0993ec..77a1b249ae 100644
--- a/fastdeploy/vision/common/processors/cast.cc
+++ b/fastdeploy/vision/common/processors/cast.cc
@@ -18,30 +18,40 @@ namespace fastdeploy {
 namespace vision {
 
 bool Cast::CpuRun(Mat* mat) {
-  if (mat->layout != Layout::CHW) {
-    FDERROR << "Cast: The input data must be Layout::HWC format!" << std::endl;
-    return false;
-  }
   cv::Mat* im = mat->GetCpuMat();
+  int c = im->channels();
   if (dtype_ == "float") {
-    im->convertTo(*im, CV_32FC(im->channels()));
+    if (im->type() != CV_32FC(c)) {
+      im->convertTo(*im, CV_32FC(c));
+    }
   } else if (dtype_ == "double") {
-    im->convertTo(*im, CV_64FC(im->channels()));
+    if (im->type() != CV_64FC(c)) {
+      im->convertTo(*im, CV_64FC(c));
+    }
+  } else {
+    FDLogger() << "[WARN] Cast not supported for " << dtype_
+               << ", will skip this operation."
+               << std::endl;
   }
   return true;
 }
 
 #ifdef ENABLE_OPENCV_CUDA
 bool Cast::GpuRun(Mat* mat) {
-  if (mat->layout != Layout::CHW) {
-    FDERROR << "Cast: The input data must be Layout::HWC format!" << std::endl;
-    return false;
-  }
   cv::cuda::GpuMat* im = mat->GetGpuMat();
+  int c = im->channels();
   if (dtype_ == "float") {
-    im->convertTo(*im, CV_32FC(im->channels()));
+    if (im->type() != CV_32FC(c)) {
+      im->convertTo(*im, CV_32FC(c));
+    }
   } else if (dtype_ == "double") {
-    im->convertTo(*im, CV_64FC(im->channels()));
+    if (im->type() != CV_64FC(c)) {
+      im->convertTo(*im, CV_64FC(c));
+    }
+  } else {
+    FDLogger() << "[WARN] Cast not supported for " << dtype_
+               << ", will skip this operation."
+               << std::endl;
   }
   return true;
 }

From 75948f831317bacfb77bc70a8be7289f55f093eb Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Tue, 12 Jul 2022 06:48:36 +0000
Subject: [PATCH 05/19] changed RGB2BGR to BGR2RGB in ppcls model

---
 fastdeploy/vision/ppcls/model.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fastdeploy/vision/ppcls/model.cc b/fastdeploy/vision/ppcls/model.cc
index ad894f3daf..915cb97512 100644
--- a/fastdeploy/vision/ppcls/model.cc
+++ b/fastdeploy/vision/ppcls/model.cc
@@ -44,7 +44,7 @@ bool Model::BuildPreprocessPipelineFromConfig() {
     return false;
   }
   auto preprocess_cfg = cfg["PreProcess"]["transform_ops"];
-  processors_.push_back(std::make_shared<RGB2BGR>());
+  processors_.push_back(std::make_shared<BGR2RGB>());
   for (const auto& op : preprocess_cfg) {
     FDASSERT(op.IsMap(),
              "Require the transform information in yaml be Map type.");

From f847490b978ff6b60d31c7825e2671ad8e97d7a9 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Thu, 14 Jul 2022 05:58:15 +0000
Subject: [PATCH 06/19] add model_zoo yolov6 c++/python demo

---
 fastdeploy/download.py                     |  1 +
 fastdeploy/fastdeploy_runtime.py           |  2 +-
 fastdeploy/vision/__init__.py              |  1 +
 fastdeploy/vision/vision_pybind.cc         |  4 ++
 model_zoo/.gitignore                       | 12 ++++
 model_zoo/vision/yolov5/api.md             |  8 +--
 model_zoo/vision/yolov6/README.md          | 45 ++++++++++++++
 model_zoo/vision/yolov6/api.md             | 71 ++++++++++++++++++++++
 model_zoo/vision/yolov6/cpp/CMakeLists.txt | 17 ++++++
 model_zoo/vision/yolov6/cpp/README.md      |  0
 model_zoo/vision/yolov6/cpp/yolov6.cc      | 40 ++++++++++++
 model_zoo/vision/yolov6/yolov6.py          | 24 ++++++++
 12 files changed, 220 insertions(+), 5 deletions(-)
 create mode 100644 model_zoo/.gitignore
 create mode 100644 model_zoo/vision/yolov6/README.md
 create mode 100644 model_zoo/vision/yolov6/api.md
 create mode 100644 model_zoo/vision/yolov6/cpp/CMakeLists.txt
 create mode 100644 model_zoo/vision/yolov6/cpp/README.md
 create mode 100644 model_zoo/vision/yolov6/cpp/yolov6.cc
 create mode 100644 model_zoo/vision/yolov6/yolov6.py

diff --git a/fastdeploy/download.py b/fastdeploy/download.py
index 805f63636e..e00af098df 100644
--- a/fastdeploy/download.py
+++ b/fastdeploy/download.py
@@ -18,6 +18,7 @@ import requests
 import time
 import zipfile
+import tarfile
 import hashlib
 import tqdm
 import logging
diff --git a/fastdeploy/fastdeploy_runtime.py b/fastdeploy/fastdeploy_runtime.py
index 3eef861f2f..4b7acb25ed 100644
--- a/fastdeploy/fastdeploy_runtime.py
+++ b/fastdeploy/fastdeploy_runtime.py
@@ -51,5 +51,5 @@ def runtime_option(self):
     @property
     def initialized(self):
         if self._model is None:
-            return false
+            return False
         return self._model.initialized()
diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py
index 810b23cd3d..81a1424727 100644
--- a/fastdeploy/vision/__init__.py
+++ b/fastdeploy/vision/__init__.py
@@ -16,4 +16,5 @@
 from . import evaluation
 from . import ppcls
 from . import ultralytics
+from . import meituan
 from . 
import visualize diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc index f3c3f0052d..604d808e07 100644 --- a/fastdeploy/vision/vision_pybind.cc +++ b/fastdeploy/vision/vision_pybind.cc @@ -18,6 +18,7 @@ namespace fastdeploy { void BindPpClsModel(pybind11::module& m); void BindUltralytics(pybind11::module& m); +void BindMeituan(pybind11::module& m); #ifdef ENABLE_VISION_VISUALIZE void BindVisualize(pybind11::module& m); #endif @@ -40,6 +41,9 @@ void BindVision(pybind11::module& m) { BindPpClsModel(m); BindUltralytics(m); + BindMeituan(m); +#ifdef ENABLE_VISION_VISUALIZE BindVisualize(m); +#endif } } // namespace fastdeploy diff --git a/model_zoo/.gitignore b/model_zoo/.gitignore new file mode 100644 index 0000000000..e3919c57f9 --- /dev/null +++ b/model_zoo/.gitignore @@ -0,0 +1,12 @@ +*.png +*.jpg +*.jpeg +*.onnx +*.zip +*.tar +*.pd* +*.engine +*.trt +*.nb +*.tgz +*.gz diff --git a/model_zoo/vision/yolov5/api.md b/model_zoo/vision/yolov5/api.md index 8c9d5675d0..66d6acdc77 100644 --- a/model_zoo/vision/yolov5/api.md +++ b/model_zoo/vision/yolov5/api.md @@ -23,7 +23,7 @@ YOLOv5模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只 > > **参数** > -> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,RGB格式 +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 > > * **conf_threshold**(float): 检测框置信度过滤阈值 > > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 @@ -49,9 +49,9 @@ YOLOv5模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 > * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 > * **model_format**(Frontend): 模型格式 -#### predict函数 +#### Predict函数 > ``` -> YOLOv5::predict(cv::Mat* im, DetectionResult* result, +> YOLOv5::Predict(cv::Mat* im, DetectionResult* result, > float conf_threshold = 0.25, > float nms_iou_threshold = 0.5) > ``` @@ -59,7 +59,7 @@ YOLOv5模型加载和初始化,当model_format为`Frontend::ONNX`时,只需 > > **参数** > -> > * **im**: 输入图像,注意需为HWC,RGB格式 +> > * **im**: 输入图像,注意需为HWC,BGR格式 > > * **result**: 检测结果,包括检测框,各个框的置信度 > > * **conf_threshold**: 检测框置信度过滤阈值 > > * **nms_iou_threshold**: NMS处理过程中iou阈值 diff --git a/model_zoo/vision/yolov6/README.md b/model_zoo/vision/yolov6/README.md new file mode 100644 index 0000000000..5fa3578bfc --- /dev/null +++ b/model_zoo/vision/yolov6/README.md @@ -0,0 +1,45 @@ +# YOLOv6部署示例 + +本文档说明如何进行[YOLOv6](https://github.com/meituan/YOLOv6)的快速部署推理。本目录结构如下 +``` +. 
+├── cpp # C++ 代码目录 +│   ├── CMakeLists.txt # C++ 代码编译CMakeLists文件 +│   ├── README.md # C++ 代码编译部署文档 +│   └── yolov6.cc # C++ 示例代码 +├── README.md # YOLOv6 部署文档 +└── yolov6.py # Python示例代码 +``` + +## 安装FastDeploy + +使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu` +``` +# 安装fastdeploy-python工具 +pip install fastdeploy-python + +# 安装vision-cpu模块 +fastdeploy install vision-cpu +``` + +## Python部署 + +执行如下代码即会自动下载YOLOv6模型和测试图片 +``` +python yolov6.py +``` + +执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +11.772949,229.269287, 792.933838, 748.294189, 0.954794, 5 +667.140381,396.185455, 807.701721, 881.810120, 0.900997, 0 +223.271011,405.105743, 345.740723, 859.328552, 0.898938, 0 +50.135777,405.863129, 245.485519, 904.153809, 0.888936, 0 +0.000000,549.002869, 77.864723, 869.455017, 0.614145, 0 +``` + +## 其它文档 + +- [C++部署](./cpp/README.md) +- [YOLOv6 API文档](./api.md) diff --git a/model_zoo/vision/yolov6/api.md b/model_zoo/vision/yolov6/api.md new file mode 100644 index 0000000000..eca89f06aa --- /dev/null +++ b/model_zoo/vision/yolov6/api.md @@ -0,0 +1,71 @@ +# YOLOv6 API说明 + +## Python API + +### YOLOv6类 +``` +fastdeploy.vision.meituan.YOLOv6(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX) +``` +YOLOv6模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只需提供model_file,如`yolov6s.onnx`;当model_format为`fd.Frontend.PADDLE`时,则需同时提供model_file和params_file。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式 + +#### predict函数 +> ``` +> YOLOv6.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * **nms_iou_threshold**(float): NMS处理过程中iou阈值 + +示例代码参考[yolov6.py](./yolov6.py) + + +## C++ API + +### YOLOv6类 +``` +fastdeploy::vision::meituan::YOLOv6( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` +YOLOv6模型加载和初始化,当model_format为`Frontend::ONNX`时,只需提供model_file,如`yolov6s.onnx`;当model_format为`Frontend::PADDLE`时,则需同时提供model_file和params_file。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式 + +#### Predict函数 +> ``` +> YOLOv6::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度 +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +示例代码参考[cpp/yolov6.cc](cpp/yolov6.cc) + +## 其它API使用 + +- [模型部署RuntimeOption配置](../../../docs/api/runtime_option.md) diff --git a/model_zoo/vision/yolov6/cpp/CMakeLists.txt b/model_zoo/vision/yolov6/cpp/CMakeLists.txt new file mode 100644 index 0000000000..28987f7f75 --- /dev/null +++ b/model_zoo/vision/yolov6/cpp/CMakeLists.txt @@ -0,0 +1,17 @@ +PROJECT(yolov6_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.16) + +# 在低版本ABI环境中,通过如下代码进行兼容性编译 +# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) + +# 指定下载解压后的fastdeploy库路径 +set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.0.3/) + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 
添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(yolov6_demo ${PROJECT_SOURCE_DIR}/yolov6.cc) +# 添加FastDeploy库依赖 +target_link_libraries(yolov6_demo ${FASTDEPLOY_LIBS}) diff --git a/model_zoo/vision/yolov6/cpp/README.md b/model_zoo/vision/yolov6/cpp/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/model_zoo/vision/yolov6/cpp/yolov6.cc b/model_zoo/vision/yolov6/cpp/yolov6.cc new file mode 100644 index 0000000000..62d2fa0be3 --- /dev/null +++ b/model_zoo/vision/yolov6/cpp/yolov6.cc @@ -0,0 +1,40 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + auto model = vis::meituan::YOLOv6("yolov6s.onnx"); + if (!model.Initialized()) { + std::cerr << "Init Failed." << std::endl; + return -1; + } + cv::Mat im = cv::imread("bus.jpg"); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite("vis_result.jpg", vis_im); + return 0; +} diff --git a/model_zoo/vision/yolov6/yolov6.py b/model_zoo/vision/yolov6/yolov6.py new file mode 100644 index 0000000000..5172679c97 --- /dev/null +++ b/model_zoo/vision/yolov6/yolov6.py @@ -0,0 +1,24 @@ +import fastdeploy as fd +import cv2 + +# 下载模型和测试图片 +model_url = "https://github.com/meituan/YOLOv6/releases/download/0.1.0/yolov6s.onnx" +test_jpg_url = "https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg" +fd.download(model_url, ".", show_progress=True) +fd.download(test_jpg_url, ".", show_progress=True) + +# 加载模型 +model = fd.vision.meituan.YOLOv6("yolov6s.onnx") +print(model.is_dynamic_shape()) + +# 预测图片 +im = cv2.imread("bus.jpg") +result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5) + +# 可视化结果 +fd.vision.visualize.vis_detection(im, result) +cv2.imwrite("vis_result.jpg", im) + +# 输出预测结果 +print(result) +print(model.runtime_option) From c56fdc37d0b9019ccd53c7c8db406a36b9566909 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Thu, 14 Jul 2022 06:04:37 +0000 Subject: [PATCH 07/19] fixed CMakeLists.txt typos --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 654d28624c..6bb2638b2c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,8 +38,8 @@ option(ENABLE_VISION_VISUALIZE "if to enable visualize vision model result toolb option(ENABLE_OPENCV_CUDA "if to enable opencv with cuda, this will allow process image with GPU." OFF) option(ENABLE_DEBUG "if to enable print debug information, this may reduce performance." OFF) -# Wheter to build fastdeply with vision/text/... examples, only for testings. 
-option(WTIH_VISION_EXAMPLES "wheter to build fastdeply with vision examples" ON) +# Whether to build fastdeply with vision/text/... examples, only for testings. +option(WTIH_VISION_EXAMPLES "Whether to build fastdeply with vision examples" ON) if(ENABLE_DEBUG) add_definitions(-DFASTDEPLOY_DEBUG) From 2300f57969f33579e6e78e985a7585a216077627 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Thu, 14 Jul 2022 06:25:57 +0000 Subject: [PATCH 08/19] update yolov6 cpp/README.md --- model_zoo/vision/yolov6/cpp/README.md | 30 +++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/model_zoo/vision/yolov6/cpp/README.md b/model_zoo/vision/yolov6/cpp/README.md index e69de29bb2..c7b4d4d7ab 100644 --- a/model_zoo/vision/yolov6/cpp/README.md +++ b/model_zoo/vision/yolov6/cpp/README.md @@ -0,0 +1,30 @@ +# 编译YOLOv6示例 + + +``` +# 下载和解压预测库 +wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz +tar xvf fastdeploy-linux-x64-0.0.3.tgz + +# 编译示例代码 +mkdir build & cd build +cmake .. +make -j + +# 下载模型和图片 +wget https://github.com/meituan/YOLOv6/releases/download/0.1.0/yolov6s.onnx +wget https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg + +# 执行 +./yolov6_demo +``` + +执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +11.772949,229.269287, 792.933838, 748.294189, 0.954794, 5 +667.140381,396.185455, 807.701721, 881.810120, 0.900997, 0 +223.271011,405.105743, 345.740723, 859.328552, 0.898938, 0 +50.135777,405.863129, 245.485519, 904.153809, 0.888936, 0 +0.000000,549.002869, 77.864723, 869.455017, 0.614145, 0 +``` From cb91b3c161bb856c3a8cff9c47a18a55ddbb49a1 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 18 Jul 2022 06:01:54 +0000 Subject: [PATCH 09/19] add yolox c++/pybind and model_zoo demo --- examples/CMakeLists.txt | 3 +- examples/vision/megvii_yolox.cc | 52 ++++ examples/vision/meituan_yolov6.cc | 5 +- fastdeploy/vision/megvii/__init__.py | 96 ++++++ fastdeploy/vision/megvii/megvii_pybind.cc | 41 +++ fastdeploy/vision/megvii/yolox.cc | 341 ++++++++++++++++++++++ fastdeploy/vision/megvii/yolox.h | 105 +++++++ model_zoo/vision/yolox/README.md | 45 +++ model_zoo/vision/yolox/api.md | 71 +++++ model_zoo/vision/yolox/cpp/CMakeLists.txt | 17 ++ model_zoo/vision/yolox/cpp/README.md | 30 ++ model_zoo/vision/yolox/cpp/yolox.cc | 40 +++ model_zoo/vision/yolox/yolox.py | 23 ++ 13 files changed, 865 insertions(+), 4 deletions(-) create mode 100644 examples/vision/megvii_yolox.cc create mode 100644 fastdeploy/vision/megvii/__init__.py create mode 100644 fastdeploy/vision/megvii/megvii_pybind.cc create mode 100644 fastdeploy/vision/megvii/yolox.cc create mode 100644 fastdeploy/vision/megvii/yolox.h create mode 100644 model_zoo/vision/yolox/README.md create mode 100644 model_zoo/vision/yolox/api.md create mode 100644 model_zoo/vision/yolox/cpp/CMakeLists.txt create mode 100644 model_zoo/vision/yolox/cpp/README.md create mode 100644 model_zoo/vision/yolox/cpp/yolox.cc create mode 100644 model_zoo/vision/yolox/yolox.py diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 4228a3e01f..7dd3e0e25e 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -14,9 +14,10 @@ function(add_fastdeploy_executable field url model) endfunction() # vision examples -if (WTIH_VISION_EXAMPLES) +if (WITH_VISION_EXAMPLES) add_fastdeploy_executable(vision ultralytics yolov5) add_fastdeploy_executable(vision meituan yolov6) + add_fastdeploy_executable(vision megvii yolox) endif() # 
other examples ... \ No newline at end of file diff --git a/examples/vision/megvii_yolox.cc b/examples/vision/megvii_yolox.cc new file mode 100644 index 0000000000..340694b54f --- /dev/null +++ b/examples/vision/megvii_yolox.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + + std::string model_file = "../resources/models/yolox_s.onnx"; + std::string img_path = "../resources/images/bus.jpg"; + std::string vis_path = "../resources/outputs/megvii_yolox_vis_result.jpg"; + + auto model = vis::megvii::YOLOX(model_file); + if (!model.Initialized()) { + std::cerr << "Init Failed! Model: " << model_file << std::endl; + return -1; + } else { + std::cout << "Init Done! Model:" << model_file << std::endl; + } + model.EnableDebug(); + + cv::Mat im = cv::imread(img_path); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } else { + std::cout << "Prediction Done!" << std::endl; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite(vis_path, vis_im); + std::cout << "Detect Done! Saved: " << vis_path << std::endl; + return 0; +} diff --git a/examples/vision/meituan_yolov6.cc b/examples/vision/meituan_yolov6.cc index b92abcd429..7bdd78e5dc 100644 --- a/examples/vision/meituan_yolov6.cc +++ b/examples/vision/meituan_yolov6.cc @@ -23,11 +23,10 @@ int main() { auto model = vis::meituan::YOLOv6(model_file); if (!model.Initialized()) { - std::cerr << "Init Failed." << std::endl; + std::cerr << "Init Failed! Model: " << model_file << std::endl; return -1; } else { - std::cout << "Init Done! Dynamic Mode: " - << model.IsDynamicShape() << std::endl; + std::cout << "Init Done! Model:" << model_file << std::endl; } model.EnableDebug(); diff --git a/fastdeploy/vision/megvii/__init__.py b/fastdeploy/vision/megvii/__init__.py new file mode 100644 index 0000000000..67096e4fc8 --- /dev/null +++ b/fastdeploy/vision/megvii/__init__.py @@ -0,0 +1,96 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import logging +from ... import FastDeployModel, Frontend +from ... 
import fastdeploy_main as C
+
+
+class YOLOX(FastDeployModel):
+    def __init__(self,
+                 model_file,
+                 params_file="",
+                 runtime_option=None,
+                 model_format=Frontend.ONNX):
+        # Call the base class to initialize backend_option;
+        # the initialized option is kept in self._runtime_option
+        super(YOLOX, self).__init__(runtime_option)
+
+        self._model = C.vision.megvii.YOLOX(
+            model_file, params_file, self._runtime_option, model_format)
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "YOLOX initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        return self._model.predict(input_image, conf_threshold,
+                                   nms_iou_threshold)
+
+    # Property wrappers for YOLOX-specific parameters, mostly preprocess-related.
+    # For example, model.size = [1280, 1280] changes the resize target during
+    # preprocessing (provided the model supports that shape).
+    @property
+    def size(self):
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        return self._model.padding_value
+
+    @property
+    def is_decode_exported(self):
+        return self._model.is_decode_exported
+
+    @property
+    def downsample_strides(self):
+        return self._model.downsample_strides
+
+    @property
+    def max_wh(self):
+        return self._model.max_wh
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(wh, (list, tuple)),\
+            "The value to set `size` must be type of tuple or list."
+        assert len(wh) == 2,\
+            "The value to set `size` must contain 2 elements [width, height], but now it contains {} elements.".format(
+                len(wh))
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value,
+            list), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_decode_exported.setter
+    def is_decode_exported(self, value):
+        assert isinstance(
+            value,
+            bool), "The value to set `is_decode_exported` must be type of bool."
+        self._model.is_decode_exported = value
+
+    @downsample_strides.setter
+    def downsample_strides(self, value):
+        assert isinstance(
+            value,
+            list), "The value to set `downsample_strides` must be type of list."
+        self._model.downsample_strides = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
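Between the Python wrapper above and the C++ binding below, the glue is a predict lambda that views the incoming numpy array as a cv::Mat. A stripped-down, standalone sketch of that pattern follows; the module and helper names are illustrative, not the real FastDeploy registration (which uses PyArrayToCvMat).

#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <opencv2/core.hpp>

namespace py = pybind11;

// Illustrative only: view a contiguous HWC uint8 numpy array as a cv::Mat
// without copying pixels; assumes a 3-channel image.
cv::Mat NumpyToMat(py::array_t<uint8_t, py::array::c_style> arr) {
  auto buf = arr.request();
  return cv::Mat(static_cast<int>(buf.shape[0]),
                 static_cast<int>(buf.shape[1]), CV_8UC3, buf.ptr);
}

PYBIND11_MODULE(example_bindings, m) {
  m.def("image_shape", [](py::array_t<uint8_t, py::array::c_style> data) {
    cv::Mat mat = NumpyToMat(data);  // zero-copy view of the numpy buffer
    return py::make_tuple(mat.rows, mat.cols);
  });
}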
diff --git a/fastdeploy/vision/megvii/megvii_pybind.cc b/fastdeploy/vision/megvii/megvii_pybind.cc
new file mode 100644
index 0000000000..7e7fbc79aa
--- /dev/null
+++ b/fastdeploy/vision/megvii/megvii_pybind.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/pybind/main.h"
+
+namespace fastdeploy {
+void BindMegvii(pybind11::module& m) {
+  auto megvii_module =
+      m.def_submodule("megvii", "https://github.com/megvii/YOLOX");
+  pybind11::class_<vision::megvii::YOLOX, FastDeployModel>(
+      megvii_module, "YOLOX")
+      .def(pybind11::init<std::string, std::string, RuntimeOption, Frontend>())
+      .def("predict",
+           [](vision::megvii::YOLOX& self, pybind11::array& data,
+              float conf_threshold, float nms_iou_threshold) {
+             auto mat = PyArrayToCvMat(data);
+             vision::DetectionResult res;
+             self.Predict(&mat, &res, conf_threshold, nms_iou_threshold);
+             return res;
+           })
+      .def_readwrite("size", &vision::megvii::YOLOX::size)
+      .def_readwrite("padding_value",
+                     &vision::megvii::YOLOX::padding_value)
+      .def_readwrite("is_decode_exported",
+                     &vision::megvii::YOLOX::is_decode_exported)
+      .def_readwrite("downsample_strides",
+                     &vision::megvii::YOLOX::downsample_strides)
+      .def_readwrite("max_wh", &vision::megvii::YOLOX::max_wh);
+}
+}  // namespace fastdeploy
diff --git a/fastdeploy/vision/megvii/yolox.cc b/fastdeploy/vision/megvii/yolox.cc
new file mode 100644
index 0000000000..e308297050
--- /dev/null
+++ b/fastdeploy/vision/megvii/yolox.cc
@@ -0,0 +1,341 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision/megvii/yolox.h"
+#include "fastdeploy/utils/perf.h"
+#include "fastdeploy/vision/utils/utils.h"
+
+namespace fastdeploy {
+
+namespace vision {
+
+namespace megvii {
+
+struct YOLOXAnchor {
+  int grid0;
+  int grid1;
+  int stride;
+};
+
+void GenerateYOLOXAnchors(const std::vector<int>& size,
+                          const std::vector<int>& downsample_strides,
+                          std::vector<YOLOXAnchor>& anchors) {
+  // size: tuple of input (width, height)
+  // downsample_strides: downsample strides in YOLOX, e.g (8,16,32)
+  const int width = size[0];
+  const int height = size[1];
+  for (const auto& ds : downsample_strides) {
+    int num_grid_w = width / ds;
+    int num_grid_h = height / ds;
+    for (int g1 = 0; g1 < num_grid_h; ++g1) {
+      for (int g0 = 0; g0 < num_grid_w; ++g0) {
+        anchors.emplace_back(YOLOXAnchor{g0, g1, ds});
+      }
+    }
+  }
+}
+
+void PreProc(Mat* mat, std::vector<int> size, std::vector<float> color) {
+  // specific pre process for YOLOX, not the same as YOLOv5
+  // reference: YOLOX/yolox/data/data_augment.py#L142
+  float r = std::min(size[1] * 1.0f / static_cast<float>(mat->Height()),
+                     size[0] * 1.0f / static_cast<float>(mat->Width()));
+
+  int resize_h = int(round(static_cast<float>(mat->Height()) * r));
+  int resize_w = int(round(static_cast<float>(mat->Width()) * r));
+
+  if (resize_h != mat->Height() || resize_w != mat->Width()) {
+    Resize::Run(mat, resize_w, resize_h);
+  }
+
+  int pad_w = size[0] - resize_w;
+  int pad_h = size[1] - resize_h;
+  // right-bottom padding for YOLOX
+  if (pad_h > 0 || pad_w > 0) {
+    int top = 0;
+    int left = 0;
+    int right = pad_w;
+    int bottom = pad_h;
+    Pad::Run(mat, top, bottom, left, right, color);
+  }
+}
+
+YOLOX::YOLOX(const std::string& model_file, const std::string& params_file,
+             const RuntimeOption& custom_option,
+             const Frontend& model_format) {
+  if (model_format ==
Frontend::ONNX) { + valid_cpu_backends = {Backend::ORT}; // 指定可用的CPU后端 + valid_gpu_backends = {Backend::ORT, Backend::TRT}; // 指定可用的GPU后端 + } else { + valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; + } + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool YOLOX::Initialize() { + // parameters for preprocess + size = {640, 640}; + padding_value = {114.0, 114.0, 114.0}; + downsample_strides = {8, 16, 32}; + max_wh = 4096.0f; + is_decode_exported = false; + + if (!InitRuntime()) { + FDERROR << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + // Check if the input shape is dynamic after Runtime already initialized. + is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + return true; +} + +bool YOLOX::Preprocess(Mat* mat, FDTensor* output, + std::map>* im_info) { + // YOLOX ( >= v0.1.1) preprocess steps + // 1. preproc + // 2. HWC->CHW + // 3. NO!!! BRG2GRB and Normalize needed in YOLOX + PreProc(mat, size, padding_value); + // Record output shape of preprocessed image + (*im_info)["output_shape"] = {static_cast(mat->Height()), + static_cast(mat->Width())}; + + HWC2CHW::Run(mat); + Cast::Run(mat, "float"); + mat->ShareWithTensor(output); + output->shape.insert(output->shape.begin(), 1); // reshape to n, h, w, c + return true; +} + +bool YOLOX::Postprocess( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold) { + FDASSERT(infer_result.shape[0] == 1, "Only support batch =1 now."); + result->Clear(); + result->Reserve(infer_result.shape[1]); + if (infer_result.dtype != FDDataType::FP32) { + FDERROR << "Only support post process with float32 data." 
<< std::endl; + return false; + } + float* data = static_cast(infer_result.Data()); + for (size_t i = 0; i < infer_result.shape[1]; ++i) { + int s = i * infer_result.shape[2]; + float confidence = data[s + 4]; + float* max_class_score = + std::max_element(data + s + 5, data + s + infer_result.shape[2]); + confidence *= (*max_class_score); + // filter boxes by conf_threshold + if (confidence <= conf_threshold) { + continue; + } + int32_t label_id = std::distance(data + s + 5, max_class_score); + // convert from [x, y, w, h] to [x1, y1, x2, y2] + result->boxes.emplace_back(std::array{ + data[s] - data[s + 2] / 2.0f + label_id * max_wh, + data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh, + data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh, + data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh}); + result->label_ids.push_back(label_id); + result->scores.push_back(confidence); + } + utils::NMS(result, nms_iou_threshold); + + // scale the boxes to the origin image shape + auto iter_out = im_info.find("output_shape"); + auto iter_ipt = im_info.find("input_shape"); + FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(), + "Cannot find input_shape or output_shape from im_info."); + float out_h = iter_out->second[0]; + float out_w = iter_out->second[1]; + float ipt_h = iter_ipt->second[0]; + float ipt_w = iter_ipt->second[1]; + float r = std::min(out_h / ipt_h, out_w / ipt_w); + for (size_t i = 0; i < result->boxes.size(); ++i) { + int32_t label_id = (result->label_ids)[i]; + // clip box + result->boxes[i][0] = result->boxes[i][0] - max_wh * label_id; + result->boxes[i][1] = result->boxes[i][1] - max_wh * label_id; + result->boxes[i][2] = result->boxes[i][2] - max_wh * label_id; + result->boxes[i][3] = result->boxes[i][3] - max_wh * label_id; + result->boxes[i][0] = std::max(result->boxes[i][0] / r, 0.0f); + result->boxes[i][1] = std::max(result->boxes[i][1] / r, 0.0f); + result->boxes[i][2] = std::max(result->boxes[i][2] / r, 0.0f); + result->boxes[i][3] = std::max(result->boxes[i][3] / r, 0.0f); + result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w - 1.0f); + result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h - 1.0f); + result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w - 1.0f); + result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h - 1.0f); + } + return true; +} + +bool YOLOX::PostprocessWithDecode( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold) { + FDASSERT(infer_result.shape[0] == 1, "Only support batch =1 now."); + result->Clear(); + result->Reserve(infer_result.shape[1]); + if (infer_result.dtype != FDDataType::FP32) { + FDERROR << "Only support post process with float32 data." 
<< std::endl; + return false; + } + // generate anchors with dowmsample strides + std::vector anchors; + GenerateYOLOXAnchors(size, downsample_strides, anchors); + + // infer_result shape might look like (1,n,85=5+80) + float* data = static_cast(infer_result.Data()); + for (size_t i = 0; i < infer_result.shape[1]; ++i) { + int s = i * infer_result.shape[2]; + float confidence = data[s + 4]; + float* max_class_score = + std::max_element(data + s + 5, data + s + infer_result.shape[2]); + confidence *= (*max_class_score); + // filter boxes by conf_threshold + if (confidence <= conf_threshold) { + continue; + } + int32_t label_id = std::distance(data + s + 5, max_class_score); + // fetch i-th anchor + float grid0 = static_cast(anchors.at(i).grid0); + float grid1 = static_cast(anchors.at(i).grid1); + float downsample_stride = static_cast(anchors.at(i).stride); + // convert from offsets to [x, y, w, h] + float dx = data[s]; + float dy = data[s + 1]; + float dw = data[s + 2]; + float dh = data[s + 3]; + + float x = (dx + grid0) * downsample_stride; + float y = (dy + grid1) * downsample_stride; + float w = std::exp(dw) * downsample_stride; + float h = std::exp(dh) * downsample_stride; + + // convert from [x, y, w, h] to [x1, y1, x2, y2] + result->boxes.emplace_back(std::array{ + x - w / 2.0f + label_id * max_wh, + y - h / 2.0f + label_id * max_wh, + x + w / 2.0f + label_id * max_wh, + y + h / 2.0f + label_id * max_wh}); + // label_id * max_wh for multi classes NMS + result->label_ids.push_back(label_id); + result->scores.push_back(confidence); + } + utils::NMS(result, nms_iou_threshold); + + // scale the boxes to the origin image shape + auto iter_out = im_info.find("output_shape"); + auto iter_ipt = im_info.find("input_shape"); + FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(), + "Cannot find input_shape or output_shape from im_info."); + float out_h = iter_out->second[0]; + float out_w = iter_out->second[1]; + float ipt_h = iter_ipt->second[0]; + float ipt_w = iter_ipt->second[1]; + float r = std::min(out_h / ipt_h, out_w / ipt_w); + for (size_t i = 0; i < result->boxes.size(); ++i) { + int32_t label_id = (result->label_ids)[i]; + // clip box + result->boxes[i][0] = result->boxes[i][0] - max_wh * label_id; + result->boxes[i][1] = result->boxes[i][1] - max_wh * label_id; + result->boxes[i][2] = result->boxes[i][2] - max_wh * label_id; + result->boxes[i][3] = result->boxes[i][3] - max_wh * label_id; + result->boxes[i][0] = std::max(result->boxes[i][0] / r, 0.0f); + result->boxes[i][1] = std::max(result->boxes[i][1] / r, 0.0f); + result->boxes[i][2] = std::max(result->boxes[i][2] / r, 0.0f); + result->boxes[i][3] = std::max(result->boxes[i][3] / r, 0.0f); + result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w - 1.0f); + result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h - 1.0f); + result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w - 1.0f); + result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h - 1.0f); + } + return true; +} + +bool YOLOX::Predict(cv::Mat* im, DetectionResult* result, float conf_threshold, + float nms_iou_threshold) { +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_START(0) +#endif + + Mat mat(*im); + std::vector input_tensors(1); + + std::map> im_info; + + // Record the shape of image and the shape of preprocessed image + im_info["input_shape"] = {static_cast(mat.Height()), + static_cast(mat.Width())}; + im_info["output_shape"] = {static_cast(mat.Height()), + static_cast(mat.Width())}; + + if (!Preprocess(&mat, &input_tensors[0], &im_info)) { + FDERROR 
<< "Failed to preprocess input image." << std::endl; + return false; + } + +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(0, "Preprocess") + TIMERECORD_START(1) +#endif + + input_tensors[0].name = InputInfoOfRuntime(0).name; + std::vector output_tensors; + if (!Infer(input_tensors, &output_tensors)) { + FDERROR << "Failed to inference." << std::endl; + return false; + } +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(1, "Inference") + TIMERECORD_START(2) +#endif + + if (is_decode_exported) { + if (!Postprocess(output_tensors[0], result, im_info, conf_threshold, + nms_iou_threshold)) { + FDERROR << "Failed to post process." << std::endl; + return false; + } + } else { + if (!PostprocessWithDecode(output_tensors[0], result, im_info, conf_threshold, + nms_iou_threshold)) { + FDERROR << "Failed to post process." << std::endl; + return false; + } + } + +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(2, "Postprocess") +#endif + return true; +} + +} // namespace megvii +} // namespace vision +} // namespace fastdeploy \ No newline at end of file diff --git a/fastdeploy/vision/megvii/yolox.h b/fastdeploy/vision/megvii/yolox.h new file mode 100644 index 0000000000..7ff8edcf00 --- /dev/null +++ b/fastdeploy/vision/megvii/yolox.h @@ -0,0 +1,105 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "fastdeploy/fastdeploy_model.h" +#include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/common/result.h" + +namespace fastdeploy { + +namespace vision { + +namespace megvii { + +class FASTDEPLOY_DECL YOLOX : public FastDeployModel { + public: + // 当model_format为ONNX时,无需指定params_file + // 当model_format为Paddle时,则需同时指定model_file & params_file + YOLOX(const std::string& model_file, const std::string& params_file = "", + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX); + + // 定义模型的名称 + std::string ModelName() const { return "megvii/YOLOX"; } + + // 模型预测接口,即用户调用的接口 + // im 为用户的输入数据,目前对于CV均定义为cv::Mat + // result 为模型预测的输出结构体 + // conf_threshold 为后处理的参数 + // nms_iou_threshold 为后处理的参数 + virtual bool Predict(cv::Mat* im, DetectionResult* result, + float conf_threshold = 0.25, + float nms_iou_threshold = 0.5); + + // 以下为模型在预测时的一些参数,基本是前后处理所需 + // 用户在创建模型后,可根据模型的要求,以及自己的需求 + // 对参数进行修改 + // tuple of (width, height) + std::vector size; + // padding value, size should be same with Channels + std::vector padding_value; + // whether the model_file was exported with decode module. The official + // YOLOX/tools/export_onnx.py script will export ONNX file without + // decode module. Please set it 'true' manually if the model file + // was exported with decode module. + bool is_decode_exported; + // downsample strides for YOLOX to generate anchors, will take + // (8,16,32) as default values, might have stride=64. + std::vector downsample_strides; + // for offseting the boxes by classes when using NMS, default 4096. 
+ float max_wh; + + private: + // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 + bool Initialize(); + + // 输入图像预处理操作 + // Mat为FastDeploy定义的数据结构 + // FDTensor为预处理后的Tensor数据,传给后端进行推理 + // im_info为预处理过程保存的数据,在后处理中需要用到 + bool Preprocess(Mat* mat, FDTensor* outputs, + std::map>* im_info); + + // 后端推理结果后处理,输出给用户 + // infer_result 为后端推理后的输出Tensor + // result 为模型预测的结果 + // im_info 为预处理记录的信息,后处理用于还原box + // conf_threshold 后处理时过滤box的置信度阈值 + // nms_iou_threshold 后处理时NMS设定的iou阈值 + bool Postprocess( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold); + + // YOLOX的官方脚本默认导出不带decode模块的模型文件 需要在后处理进行decode + bool PostprocessWithDecode( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape or not.) + // megvii/YOLOX official 'export_onnx.py' script will export static ONNX by default. + // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This value will + // auto check by fastdeploy after the internal Runtime already initialized. + bool is_dynamic_input_; +}; + +} // namespace megvii +} // namespace vision +} // namespace fastdeploy diff --git a/model_zoo/vision/yolox/README.md b/model_zoo/vision/yolox/README.md new file mode 100644 index 0000000000..52fca9de72 --- /dev/null +++ b/model_zoo/vision/yolox/README.md @@ -0,0 +1,45 @@ +# YOLOX部署示例 + +本文档说明如何进行[YOLOX](https://github.com/Megvii-BaseDetection/YOLOX)的快速部署推理。本目录结构如下 +``` +. +├── cpp # C++ 代码目录 +│   ├── CMakeLists.txt # C++ 代码编译CMakeLists文件 +│   ├── README.md # C++ 代码编译部署文档 +│   └── yolox.cc # C++ 示例代码 +├── README.md # YOLOX 部署文档 +└── yolox.py # Python示例代码 +``` + +## 安装FastDeploy + +使用如下命令安装FastDeploy,注意到此处安装的是`vision-cpu`,也可根据需求安装`vision-gpu` +``` +# 安装fastdeploy-python工具 +pip install fastdeploy-python + +# 安装vision-cpu模块 +fastdeploy install vision-cpu +``` + +## Python部署 + +执行如下代码即会自动下载YOLOX模型和测试图片 +``` +python yolox.py +``` + +执行完成后会将可视化结果保存在本地`vis_result.jpg`,同时输出检测结果如下 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +17.151855,225.294434, 805.329712, 735.578613, 0.940478, 5 +671.162109,387.403961, 809.000000, 879.525513, 0.909566, 0 +54.373432,400.188110, 204.652756, 893.662537, 0.894507, 0 +221.339310,406.614960, 347.045593, 857.299927, 0.887144, 0 +0.083759,554.987305, 61.894527, 881.098816, 0.450202, 0 +``` + +## 其它文档 + +- [C++部署](./cpp/README.md) +- [YOLOX API文档](./api.md) diff --git a/model_zoo/vision/yolox/api.md b/model_zoo/vision/yolox/api.md new file mode 100644 index 0000000000..c7a6f254b1 --- /dev/null +++ b/model_zoo/vision/yolox/api.md @@ -0,0 +1,71 @@ +# YOLOX API说明 + +## Python API + +### YOLOX类 +``` +fastdeploy.vision.megvii.YOLOX(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX) +``` +YOLOX模型加载和初始化,当model_format为`fd.Frontend.ONNX`时,只需提供model_file,如`yolox_s.onnx`;当model_format为`fd.Frontend.PADDLE`时,则需同时提供model_file和params_file。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式 + +#### predict函数 +> ``` +> YOLOX.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5) +> ``` +> 模型预测结口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **image_data**(np.ndarray): 输入数据,注意需为HWC,BGR格式 +> > * **conf_threshold**(float): 检测框置信度过滤阈值 +> > * 
**nms_iou_threshold**(float): NMS处理过程中iou阈值 + +示例代码参考[yolox.py](./yolox.py) + + +## C++ API + +### YOLOX类 +``` +fastdeploy::vision::megvii::YOLOX( + const string& model_file, + const string& params_file = "", + const RuntimeOption& runtime_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX) +``` +YOLOX模型加载和初始化,当model_format为`Frontend::ONNX`时,只需提供model_file,如`yolox_s.onnx`;当model_format为`Frontend::PADDLE`时,则需同时提供model_file和params_file。 + +**参数** + +> * **model_file**(str): 模型文件路径 +> * **params_file**(str): 参数文件路径 +> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置 +> * **model_format**(Frontend): 模型格式 + +#### Predict函数 +> ``` +> YOLOX::Predict(cv::Mat* im, DetectionResult* result, +> float conf_threshold = 0.25, +> float nms_iou_threshold = 0.5) +> ``` +> 模型预测接口,输入图像直接输出检测结果。 +> +> **参数** +> +> > * **im**: 输入图像,注意需为HWC,BGR格式 +> > * **result**: 检测结果,包括检测框,各个框的置信度 +> > * **conf_threshold**: 检测框置信度过滤阈值 +> > * **nms_iou_threshold**: NMS处理过程中iou阈值 + +示例代码参考[cpp/yolox.cc](cpp/yolox.cc) + +## 其它API使用 + +- [模型部署RuntimeOption配置](../../../docs/api/runtime_option.md) diff --git a/model_zoo/vision/yolox/cpp/CMakeLists.txt b/model_zoo/vision/yolox/cpp/CMakeLists.txt new file mode 100644 index 0000000000..fe9668f6a0 --- /dev/null +++ b/model_zoo/vision/yolox/cpp/CMakeLists.txt @@ -0,0 +1,17 @@ +PROJECT(yolox_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.16) + +# 在低版本ABI环境中,通过如下代码进行兼容性编译 +# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) + +# 指定下载解压后的fastdeploy库路径 +set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.0.3/) + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(yolox_demo ${PROJECT_SOURCE_DIR}/yolox.cc) +# 添加FastDeploy库依赖 +target_link_libraries(yolox_demo ${FASTDEPLOY_LIBS}) diff --git a/model_zoo/vision/yolox/cpp/README.md b/model_zoo/vision/yolox/cpp/README.md new file mode 100644 index 0000000000..b63d93872b --- /dev/null +++ b/model_zoo/vision/yolox/cpp/README.md @@ -0,0 +1,30 @@ +# 编译YOLOX示例 + + +``` +# 下载和解压预测库 +wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz +tar xvf fastdeploy-linux-x64-0.0.3.tgz + +# 编译示例代码 +mkdir build & cd build +cmake .. +make -j + +# 下载模型和图片 +wget https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.onnx +wget https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg + +# 执行 +./yolox_demo +``` + +执行完后可视化的结果保存在本地`vis_result.jpg`,同时会将检测框输出在终端,如下所示 +``` +DetectionResult: [xmin, ymin, xmax, ymax, score, label_id] +17.151855,225.294434, 805.329712, 735.578613, 0.940478, 5 +671.162109,387.403961, 809.000000, 879.525513, 0.909566, 0 +54.373432,400.188110, 204.652756, 893.662537, 0.894507, 0 +221.339310,406.614960, 347.045593, 857.299927, 0.887144, 0 +0.083759,554.987305, 61.894527, 881.098816, 0.450202, 0 +``` diff --git a/model_zoo/vision/yolox/cpp/yolox.cc b/model_zoo/vision/yolox/cpp/yolox.cc new file mode 100644 index 0000000000..934a50bea8 --- /dev/null +++ b/model_zoo/vision/yolox/cpp/yolox.cc @@ -0,0 +1,40 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision.h" + +int main() { + namespace vis = fastdeploy::vision; + auto model = vis::megvii::YOLOX("yolox_s.onnx"); + if (!model.Initialized()) { + std::cerr << "Init Failed." << std::endl; + return -1; + } + cv::Mat im = cv::imread("bus.jpg"); + cv::Mat vis_im = im.clone(); + + vis::DetectionResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Prediction Failed." << std::endl; + return -1; + } + + // 输出预测框结果 + std::cout << res.Str() << std::endl; + + // 可视化预测结果 + vis::Visualize::VisDetection(&vis_im, res); + cv::imwrite("vis_result.jpg", vis_im); + return 0; +} diff --git a/model_zoo/vision/yolox/yolox.py b/model_zoo/vision/yolox/yolox.py new file mode 100644 index 0000000000..8fd1a8a021 --- /dev/null +++ b/model_zoo/vision/yolox/yolox.py @@ -0,0 +1,23 @@ +import fastdeploy as fd +import cv2 + +# 下载模型和测试图片 +model_url = "https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.onnx" +test_jpg_url = "https://raw.githubusercontent.com/ultralytics/yolov5/master/data/images/bus.jpg" +fd.download(model_url, ".", show_progress=True) +fd.download(test_jpg_url, ".", show_progress=True) + +# 加载模型 +model = fd.vision.megvii.YOLOX("yolox_s.onnx") + +# 预测图片 +im = cv2.imread("bus.jpg") +result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5) + +# 可视化结果 +fd.vision.visualize.vis_detection(im, result) +cv2.imwrite("vis_result.jpg", im) + +# 输出预测结果 +print(result) +print(model.runtime_option) From 9d7e9d97517ec9e9b5c57dde456162cb868e2e13 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 18 Jul 2022 06:04:12 +0000 Subject: [PATCH 10/19] move some helpers to private --- fastdeploy/vision.h | 1 + fastdeploy/vision/__init__.py | 1 + fastdeploy/vision/meituan/__init__.py | 4 -- fastdeploy/vision/meituan/meituan_pybind.cc | 4 -- fastdeploy/vision/meituan/yolov6.cc | 17 +++--- fastdeploy/vision/meituan/yolov6.h | 57 ++++++++++---------- fastdeploy/vision/ppcls/model.h | 8 +-- fastdeploy/vision/ultralytics/yolov5.cc | 15 ++++++ fastdeploy/vision/ultralytics/yolov5.h | 59 ++++++++++++--------- fastdeploy/vision/vision_pybind.cc | 2 + model_zoo/vision/yolov6/yolov6.py | 1 - 11 files changed, 96 insertions(+), 73 deletions(-) diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h index 1bcf9a26f9..d0e83ed030 100644 --- a/fastdeploy/vision.h +++ b/fastdeploy/vision.h @@ -18,6 +18,7 @@ #include "fastdeploy/vision/ppcls/model.h" #include "fastdeploy/vision/ultralytics/yolov5.h" #include "fastdeploy/vision/meituan/yolov6.h" +#include "fastdeploy/vision/megvii/yolox.h" #endif #include "fastdeploy/vision/visualize/visualize.h" diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py index 81a1424727..f2de6190b0 100644 --- a/fastdeploy/vision/__init__.py +++ b/fastdeploy/vision/__init__.py @@ -17,4 +17,5 @@ from . import ppcls from . import ultralytics from . import meituan +from . import megvii from . 
import visualize diff --git a/fastdeploy/vision/meituan/__init__.py b/fastdeploy/vision/meituan/__init__.py index 5ff45f0fce..7b6635dd36 100644 --- a/fastdeploy/vision/meituan/__init__.py +++ b/fastdeploy/vision/meituan/__init__.py @@ -37,10 +37,6 @@ def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5): return self._model.predict(input_image, conf_threshold, nms_iou_threshold) - # BOOL: 查看输入的模型是否为动态维度的 - def is_dynamic_shape(self): - return self._model.is_dynamic_shape() - # 一些跟YOLOv6模型有关的属性封装 # 多数是预处理相关,可通过修改如model.size = [1280, 1280]改变预处理时resize的大小(前提是模型支持) @property diff --git a/fastdeploy/vision/meituan/meituan_pybind.cc b/fastdeploy/vision/meituan/meituan_pybind.cc index 2667bf8bed..d1e81fa582 100644 --- a/fastdeploy/vision/meituan/meituan_pybind.cc +++ b/fastdeploy/vision/meituan/meituan_pybind.cc @@ -29,10 +29,6 @@ void BindMeituan(pybind11::module& m) { self.Predict(&mat, &res, conf_threshold, nms_iou_threshold); return res; }) - .def("is_dynamic_shape", - [](vision::meituan::YOLOv6& self) { - return self.IsDynamicShape(); - }) .def_readwrite("size", &vision::meituan::YOLOv6::size) .def_readwrite("padding_value", &vision::meituan::YOLOv6::padding_value) diff --git a/fastdeploy/vision/meituan/yolov6.cc b/fastdeploy/vision/meituan/yolov6.cc index 213f30b87f..8f37bf89c6 100644 --- a/fastdeploy/vision/meituan/yolov6.cc +++ b/fastdeploy/vision/meituan/yolov6.cc @@ -91,17 +91,18 @@ bool YOLOv6::Initialize() { return false; } // Check if the input shape is dynamic after Runtime already initialized, - // Note that, YOLOv6 has 1 input only. We need to force is_mini_pad - // 'false' to keep static shape after padding (LetterBox) - // when the is_dynamic_shape is 'false'. - is_dynamic_shape_ = false; + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. 
+ is_dynamic_input_ = false; auto shape = InputInfoOfRuntime(0).shape; - for (const auto &d: shape) { - if (d <= 0) { - is_dynamic_shape_ = true; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; } } - if (!is_dynamic_shape_) { + if (!is_dynamic_input_) { is_mini_pad = false; } return true; diff --git a/fastdeploy/vision/meituan/yolov6.h b/fastdeploy/vision/meituan/yolov6.h index 81215b2342..b2d6a062df 100644 --- a/fastdeploy/vision/meituan/yolov6.h +++ b/fastdeploy/vision/meituan/yolov6.h @@ -33,28 +33,7 @@ class FASTDEPLOY_DECL YOLOv6 : public FastDeployModel { const Frontend& model_format = Frontend::ONNX); // 定义模型的名称 - virtual std::string ModelName() const { return "meituan/YOLOv6"; } - - // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 - virtual bool Initialize(); - - // 输入图像预处理操作 - // Mat为FastDeploy定义的数据结构 - // FDTensor为预处理后的Tensor数据,传给后端进行推理 - // im_info为预处理过程保存的数据,在后处理中需要用到 - virtual bool Preprocess(Mat* mat, FDTensor* outputs, - std::map>* im_info); - - // 后端推理结果后处理,输出给用户 - // infer_result 为后端推理后的输出Tensor - // result 为模型预测的结果 - // im_info 为预处理记录的信息,后处理用于还原box - // conf_threshold 后处理时过滤box的置信度阈值 - // nms_iou_threshold 后处理时NMS设定的iou阈值 - virtual bool Postprocess( - FDTensor& infer_result, DetectionResult* result, - const std::map>& im_info, - float conf_threshold, float nms_iou_threshold); + std::string ModelName() const { return "meituan/YOLOv6"; } // 模型预测接口,即用户调用的接口 // im 为用户的输入数据,目前对于CV均定义为cv::Mat @@ -65,9 +44,6 @@ class FASTDEPLOY_DECL YOLOv6 : public FastDeployModel { float conf_threshold = 0.25, float nms_iou_threshold = 0.5); - // 用户可以通过该接口 查看输入的模型是否为动态维度 - virtual bool IsDynamicShape() const { return is_dynamic_shape_; } - // 以下为模型在预测时的一些参数,基本是前后处理所需 // 用户在创建模型后,可根据模型的要求,以及自己的需求 // 对参数进行修改 @@ -88,13 +64,38 @@ class FASTDEPLOY_DECL YOLOv6 : public FastDeployModel { // for offseting the boxes by classes when using NMS, default 4096 in meituan/YOLOv6 float max_wh; - protected: + private: + // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 + bool Initialize(); + + // 输入图像预处理操作 + // Mat为FastDeploy定义的数据结构 + // FDTensor为预处理后的Tensor数据,传给后端进行推理 + // im_info为预处理过程保存的数据,在后处理中需要用到 + bool Preprocess(Mat* mat, FDTensor* outputs, + std::map>* im_info); + + // 后端推理结果后处理,输出给用户 + // infer_result 为后端推理后的输出Tensor + // result 为模型预测的结果 + // im_info 为预处理记录的信息,后处理用于还原box + // conf_threshold 后处理时过滤box的置信度阈值 + // nms_iou_threshold 后处理时NMS设定的iou阈值 + bool Postprocess( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape or not.) // meituan/YOLOv6 official 'export_onnx.py' script will export static ONNX by default. - // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This value will + // while is_dynamic_input if 'false', is_mini_pad will force 'false'. This value will // auto check by fastdeploy after the internal Runtime already initialized. 
- bool is_dynamic_shape_; + bool is_dynamic_input_; }; + } // namespace meituan } // namespace vision } // namespace fastdeploy \ No newline at end of file diff --git a/fastdeploy/vision/ppcls/model.h b/fastdeploy/vision/ppcls/model.h index f649ca1977..36841d74c6 100644 --- a/fastdeploy/vision/ppcls/model.h +++ b/fastdeploy/vision/ppcls/model.h @@ -16,6 +16,10 @@ class FASTDEPLOY_DECL Model : public FastDeployModel { std::string ModelName() const { return "ppclas-classify"; } + // TODO(jiangjiajun) Batch is on the way + virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1); + + private: bool Initialize(); bool BuildPreprocessPipelineFromConfig(); @@ -25,10 +29,6 @@ class FASTDEPLOY_DECL Model : public FastDeployModel { bool Postprocess(const FDTensor& infer_result, ClassifyResult* result, int topk = 1); - // TODO(jiangjiajun) Batch is on the way - virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1); - - private: std::vector> processors_; std::string config_file_; }; diff --git a/fastdeploy/vision/ultralytics/yolov5.cc b/fastdeploy/vision/ultralytics/yolov5.cc index 372f6c060a..561e917d4a 100644 --- a/fastdeploy/vision/ultralytics/yolov5.cc +++ b/fastdeploy/vision/ultralytics/yolov5.cc @@ -73,6 +73,21 @@ bool YOLOv5::Initialize() { FDERROR << "Failed to initialize fastdeploy backend." << std::endl; return false; } + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_shape is 'false'. + is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + if (!is_dynamic_input_) { + is_mini_pad = false; + } return true; } diff --git a/fastdeploy/vision/ultralytics/yolov5.h b/fastdeploy/vision/ultralytics/yolov5.h index 9a8197e53b..573b0294f2 100644 --- a/fastdeploy/vision/ultralytics/yolov5.h +++ b/fastdeploy/vision/ultralytics/yolov5.h @@ -30,29 +30,7 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel { const Frontend& model_format = Frontend::ONNX); // 定义模型的名称 - virtual std::string ModelName() const { return "ultralytics/yolov5"; } - - // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 - virtual bool Initialize(); - - // 输入图像预处理操作 - // Mat为FastDeploy定义的数据结构 - // FDTensor为预处理后的Tensor数据,传给后端进行推理 - // im_info为预处理过程保存的数据,在后处理中需要用到 - virtual bool Preprocess(Mat* mat, FDTensor* outputs, - std::map>* im_info); - - // 后端推理结果后处理,输出给用户 - // infer_result 为后端推理后的输出Tensor - // result 为模型预测的结果 - // im_info 为预处理记录的信息,后处理用于还原box - // conf_threshold 后处理时过滤box的置信度阈值 - // nms_iou_threshold 后处理时NMS设定的iou阈值 - // multi_label 后处理时box选取是否采用多标签方式 - virtual bool Postprocess( - FDTensor& infer_result, DetectionResult* result, - const std::map>& im_info, - float conf_threshold, float nms_iou_threshold, bool multi_label); + std::string ModelName() const { return "ultralytics/yolov5"; } // 模型预测接口,即用户调用的接口 // im 为用户的输入数据,目前对于CV均定义为cv::Mat @@ -61,7 +39,7 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel { // nms_iou_threshold 为后处理的参数 virtual bool Predict(cv::Mat* im, DetectionResult* result, float conf_threshold = 0.25, - float nms_iou_threshold = 0.5); + float nms_iou_threshold = 0.5); // 以下为模型在预测时的一些参数,基本是前后处理所需 // 用户在创建模型后,可根据模型的要求,以及自己的需求 @@ -84,7 +62,40 @@ class FASTDEPLOY_DECL YOLOv5 : public FastDeployModel { float max_wh; // for different strategies to get boxes when 
postprocessing bool multi_label; + + private: + // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 + bool Initialize(); + + // 输入图像预处理操作 + // Mat为FastDeploy定义的数据结构 + // FDTensor为预处理后的Tensor数据,传给后端进行推理 + // im_info为预处理过程保存的数据,在后处理中需要用到 + bool Preprocess(Mat* mat, FDTensor* outputs, + std::map>* im_info); + + // 后端推理结果后处理,输出给用户 + // infer_result 为后端推理后的输出Tensor + // result 为模型预测的结果 + // im_info 为预处理记录的信息,后处理用于还原box + // conf_threshold 后处理时过滤box的置信度阈值 + // nms_iou_threshold 后处理时NMS设定的iou阈值 + // multi_label 后处理时box选取是否采用多标签方式 + bool Postprocess( + FDTensor& infer_result, DetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold, bool multi_label); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + // whether to inference with dynamic shape (e.g ONNX export with dynamic shape or not.) + // YOLOv5 official 'export_onnx.py' script will export dynamic ONNX by default. + // while is_dynamic_shape if 'false', is_mini_pad will force 'false'. This value will + // auto check by fastdeploy after the internal Runtime already initialized. + bool is_dynamic_input_; }; + } // namespace ultralytics } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc index 604d808e07..23d0e91e0e 100644 --- a/fastdeploy/vision/vision_pybind.cc +++ b/fastdeploy/vision/vision_pybind.cc @@ -19,6 +19,7 @@ namespace fastdeploy { void BindPpClsModel(pybind11::module& m); void BindUltralytics(pybind11::module& m); void BindMeituan(pybind11::module& m); +void BindMegvii(pybind11::module& m); #ifdef ENABLE_VISION_VISUALIZE void BindVisualize(pybind11::module& m); #endif @@ -42,6 +43,7 @@ void BindVision(pybind11::module& m) { BindPpClsModel(m); BindUltralytics(m); BindMeituan(m); + BindMegvii(m); #ifdef ENABLE_VISION_VISUALIZE BindVisualize(m); #endif diff --git a/model_zoo/vision/yolov6/yolov6.py b/model_zoo/vision/yolov6/yolov6.py index 5172679c97..fa8aca0740 100644 --- a/model_zoo/vision/yolov6/yolov6.py +++ b/model_zoo/vision/yolov6/yolov6.py @@ -9,7 +9,6 @@ # 加载模型 model = fd.vision.meituan.YOLOv6("yolov6s.onnx") -print(model.is_dynamic_shape()) # 预测图片 im = cv2.imread("bus.jpg") From d2e51a2094b2cb513394643869f36b86a5737011 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 18 Jul 2022 06:05:10 +0000 Subject: [PATCH 11/19] fixed CMakeLists.txt typos --- CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6bb2638b2c..a8d451e02f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,7 +39,7 @@ option(ENABLE_OPENCV_CUDA "if to enable opencv with cuda, this will allow proces option(ENABLE_DEBUG "if to enable print debug information, this may reduce performance." OFF) # Whether to build fastdeply with vision/text/... examples, only for testings. 
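The vision_pybind.cc hunk above registers the new Megvii bindings with the same pattern every vendor module uses: a free Bind*(module&) function that hangs a submodule off the main extension, forward-declared and called once from BindVision. A minimal sketch (the docstring and the commented class binding are illustrative, not taken from the patch):

```cpp
#include <pybind11/pybind11.h>

namespace fastdeploy {

// Each vendor directory ships one registration entry point like this;
// BindVision only needs the forward declaration plus a single call.
void BindMegvii(pybind11::module& m) {
  auto megvii_module =
      m.def_submodule("megvii", "Megvii-BaseDetection models.");
  // pybind11::class_<vision::megvii::YOLOX>(megvii_module, "YOLOX")
  //     .def(...) bindings are added here.
}

}  // namespace fastdeploy
```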
-option(WTIH_VISION_EXAMPLES "Whether to build fastdeply with vision examples" ON)
+option(WITH_VISION_EXAMPLES "Whether to build fastdeploy with vision examples" ON)
 
 if(ENABLE_DEBUG)
   add_definitions(-DFASTDEPLOY_DEBUG)
@@ -181,8 +181,8 @@ set_target_properties(fastdeploy PROPERTIES VERSION ${FASTDEPLOY_VERSION})
 target_link_libraries(fastdeploy ${DEPEND_LIBS})
 
 # add examples after prepare include paths for third-parties
-if (WTIH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
-  add_definitions(-DWTIH_VISION_EXAMPLES)
+if (WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
+  add_definitions(-DWITH_VISION_EXAMPLES)
   set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/examples/bin)
   add_subdirectory(examples)
 endif()

From df8b6a6634d95402360af7c1ae1c210342e3bf52 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Mon, 18 Jul 2022 11:00:36 +0000
Subject: [PATCH 12/19] add normalize with alpha and beta

---
 fastdeploy/vision/common/processors/normalize.cc | 12 ++++++++++++
 fastdeploy/vision/common/processors/normalize.h  |  8 ++++++++
 2 files changed, 20 insertions(+)

diff --git a/fastdeploy/vision/common/processors/normalize.cc b/fastdeploy/vision/common/processors/normalize.cc
index b75406070c..0e01d23e56 100644
--- a/fastdeploy/vision/common/processors/normalize.cc
+++ b/fastdeploy/vision/common/processors/normalize.cc
@@ -52,6 +52,11 @@ Normalize::Normalize(const std::vector<float>& mean,
   }
 }
 
+Normalize::Normalize(const float* alpha, const float* beta, size_t size) {
+  alpha_.assign(alpha, alpha + size);
+  beta_.assign(beta, beta + size);
+}
+
 bool Normalize::CpuRun(Mat* mat) {
   cv::Mat* im = mat->GetCpuMat();
   std::vector<cv::Mat> split_im;
@@ -84,5 +89,12 @@ bool Normalize::Run(Mat* mat, const std::vector<float>& mean,
   return n(mat, lib);
 }
 
+bool Normalize::Run(Mat* mat, const float* alpha,
+                    const float* beta, size_t size,
+                    ProcLib lib) {
+  auto n = Normalize(alpha, beta, size);
+  return n(mat, lib);
+}
+
 }  // namespace vision
 }  // namespace fastdeploy
diff --git a/fastdeploy/vision/common/processors/normalize.h b/fastdeploy/vision/common/processors/normalize.h
index eeb839d024..2f09e2d9ba 100644
--- a/fastdeploy/vision/common/processors/normalize.h
+++ b/fastdeploy/vision/common/processors/normalize.h
@@ -24,6 +24,9 @@ class Normalize : public Processor {
             bool is_scale = true,
             const std::vector<float>& min = std::vector<float>(),
             const std::vector<float>& max = std::vector<float>());
+
+  Normalize(const float* alpha, const float* beta, size_t size);
+
   bool CpuRun(Mat* mat);
 #ifdef ENABLE_OPENCV_CUDA
   bool GpuRun(Mat* mat);
@@ -46,6 +49,11 @@ class Normalize : public Processor {
             const std::vector<float>& max = std::vector<float>(),
             ProcLib lib = ProcLib::OPENCV_CPU);
 
+  // compute `result = mat * alpha + beta` directly
+  static bool Run(Mat* mat, const float* alpha,
+                  const float* beta, size_t size,
+                  ProcLib lib = ProcLib::OPENCV_CPU);
+
  private:
   std::vector<float> alpha_;
   std::vector<float> beta_;

From 87863845c395a287ee88b429c770d9d0d53c3a5a Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Mon, 18 Jul 2022 11:01:36 +0000
Subject: [PATCH 13/19] add version notes for yolov5/yolov6/yolox

---
 CMakeLists.txt                        | 2 +-
 model_zoo/vision/yolov5/README.md     | 2 ++
 model_zoo/vision/yolov5/cpp/README.md | 1 +
 model_zoo/vision/yolov6/README.md     | 2 ++
 model_zoo/vision/yolov6/cpp/README.md | 1 +
 model_zoo/vision/yolox/README.md      | 2 ++
 model_zoo/vision/yolox/cpp/README.md  | 1 +
 7 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index a8d451e02f..71aeb8165c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -53,7 +53,7 @@
option(BUILD_FASTDEPLOY_PYTHON "if build python lib for fastdeploy." OFF) include_directories(${PROJECT_SOURCE_DIR}) include_directories(${CMAKE_CURRENT_BINARY_DIR}) -if (WTIH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples) +if (WITH_VISION_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples) # ENABLE_VISION and ENABLE_VISION_VISUALIZE must be ON if enable vision examples. message(STATUS "Found WTIH_VISION_EXAMPLES ON, so, force ENABLE_VISION and ENABLE_VISION_VISUALIZE ON") set(ENABLE_VISION ON CACHE BOOL "force to enable vision models usage" FORCE) diff --git a/model_zoo/vision/yolov5/README.md b/model_zoo/vision/yolov5/README.md index efb5510759..03b19d44cc 100644 --- a/model_zoo/vision/yolov5/README.md +++ b/model_zoo/vision/yolov5/README.md @@ -1,5 +1,7 @@ # YOLOv5部署示例 +当前支持模型版本为:[YOLOv5 v6.0](https://github.com/ultralytics/yolov5/releases/download/v6.0) + 本文档说明如何进行[YOLOv5](https://github.com/ultralytics/yolov5)的快速部署推理。本目录结构如下 ``` . diff --git a/model_zoo/vision/yolov5/cpp/README.md b/model_zoo/vision/yolov5/cpp/README.md index dd740ff58a..a1f1bde49c 100644 --- a/model_zoo/vision/yolov5/cpp/README.md +++ b/model_zoo/vision/yolov5/cpp/README.md @@ -1,5 +1,6 @@ # 编译YOLOv5示例 +当前支持模型版本为:[YOLOv5 v6.0](https://github.com/ultralytics/yolov5/releases/download/v6.0) ``` # 下载和解压预测库 diff --git a/model_zoo/vision/yolov6/README.md b/model_zoo/vision/yolov6/README.md index 5fa3578bfc..accc6bdbb7 100644 --- a/model_zoo/vision/yolov6/README.md +++ b/model_zoo/vision/yolov6/README.md @@ -1,5 +1,7 @@ # YOLOv6部署示例 +当前支持模型版本为:[YOLOv6 v0.1.0](https://github.com/meituan/YOLOv6/releases/download/0.1.0) + 本文档说明如何进行[YOLOv6](https://github.com/meituan/YOLOv6)的快速部署推理。本目录结构如下 ``` . diff --git a/model_zoo/vision/yolov6/cpp/README.md b/model_zoo/vision/yolov6/cpp/README.md index c7b4d4d7ab..0e2c03dbfa 100644 --- a/model_zoo/vision/yolov6/cpp/README.md +++ b/model_zoo/vision/yolov6/cpp/README.md @@ -1,5 +1,6 @@ # 编译YOLOv6示例 +当前支持模型版本为:[YOLOv6 v0.1.0](https://github.com/meituan/YOLOv6/releases/download/0.1.0) ``` # 下载和解压预测库 diff --git a/model_zoo/vision/yolox/README.md b/model_zoo/vision/yolox/README.md index 52fca9de72..d64a2f0ffa 100644 --- a/model_zoo/vision/yolox/README.md +++ b/model_zoo/vision/yolox/README.md @@ -1,5 +1,7 @@ # YOLOX部署示例 +当前支持模型版本为:[YOLOX v0.1.1](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0) + 本文档说明如何进行[YOLOX](https://github.com/Megvii-BaseDetection/YOLOX)的快速部署推理。本目录结构如下 ``` . diff --git a/model_zoo/vision/yolox/cpp/README.md b/model_zoo/vision/yolox/cpp/README.md index b63d93872b..cc48878f60 100644 --- a/model_zoo/vision/yolox/cpp/README.md +++ b/model_zoo/vision/yolox/cpp/README.md @@ -1,5 +1,6 @@ # 编译YOLOX示例 +当前支持模型版本为:[YOLOX v0.1.1](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0) ``` # 下载和解压预测库 From fed29537eaddbf9ad76f3ce78b91677207fae1b7 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 18 Jul 2022 11:16:55 +0000 Subject: [PATCH 14/19] add copyright to yolov5.cc --- fastdeploy/vision/ultralytics/yolov5.cc | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/fastdeploy/vision/ultralytics/yolov5.cc b/fastdeploy/vision/ultralytics/yolov5.cc index 561e917d4a..193cfe9794 100644 --- a/fastdeploy/vision/ultralytics/yolov5.cc +++ b/fastdeploy/vision/ultralytics/yolov5.cc @@ -1,3 +1,17 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + #include "fastdeploy/vision/ultralytics/yolov5.h" #include "fastdeploy/utils/perf.h" #include "fastdeploy/vision/utils/utils.h" From 6ec3bd54e60afcf357d97d488b8a73249a795ecf Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 18 Jul 2022 11:26:11 +0000 Subject: [PATCH 15/19] revert normalize --- fastdeploy/vision/common/processors/normalize.cc | 12 ------------ fastdeploy/vision/common/processors/normalize.h | 9 --------- 2 files changed, 21 deletions(-) diff --git a/fastdeploy/vision/common/processors/normalize.cc b/fastdeploy/vision/common/processors/normalize.cc index 0e01d23e56..b75406070c 100644 --- a/fastdeploy/vision/common/processors/normalize.cc +++ b/fastdeploy/vision/common/processors/normalize.cc @@ -52,11 +52,6 @@ Normalize::Normalize(const std::vector& mean, } } -Normalize::Normalize(const float* alpha, const float* beta, size_t size) { - alpha_.assign(alpha, alpha + size); - beta_.assign(beta, beta + size); -} - bool Normalize::CpuRun(Mat* mat) { cv::Mat* im = mat->GetCpuMat(); std::vector split_im; @@ -89,12 +84,5 @@ bool Normalize::Run(Mat* mat, const std::vector& mean, return n(mat, lib); } -bool Normalize::Run(Mat* mat, const float* alpha, - const float* beta, size_t size, - ProcLib lib) { - auto n = Normalize(alpha, beta, size); - return n(mat, lib); -} - } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/common/processors/normalize.h b/fastdeploy/vision/common/processors/normalize.h index 2f09e2d9ba..b8a66e945a 100644 --- a/fastdeploy/vision/common/processors/normalize.h +++ b/fastdeploy/vision/common/processors/normalize.h @@ -24,9 +24,6 @@ class Normalize : public Processor { bool is_scale = true, const std::vector& min = std::vector(), const std::vector& max = std::vector()); - - Normalize(const float* alpha, const float* beta, size_t size); - bool CpuRun(Mat* mat); #ifdef ENABLE_OPENCV_CUDA bool GpuRun(Mat* mat); @@ -48,12 +45,6 @@ class Normalize : public Processor { const std::vector& min = std::vector(), const std::vector& max = std::vector(), ProcLib lib = ProcLib::OPENCV_CPU); - - // compute `result = mat * alpha + beta` directly - static bool Run(Mat* mat, const float* alpha, - const float* beta, size_t size, - ProcLib lib = ProcLib::OPENCV_CPU); - private: std::vector alpha_; std::vector beta_; From 367dad00d81e97f3f276cc3990434a89ad059d6e Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 18 Jul 2022 12:25:46 +0000 Subject: [PATCH 16/19] fixed some bugs in yolox --- fastdeploy/vision/megvii/yolox.cc | 50 +++++++++++++++---------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/fastdeploy/vision/megvii/yolox.cc b/fastdeploy/vision/megvii/yolox.cc index e308297050..f2cb5d1c34 100644 --- a/fastdeploy/vision/megvii/yolox.cc +++ b/fastdeploy/vision/megvii/yolox.cc @@ -28,32 +28,33 @@ struct YOLOXAnchor { int stride; }; -void GenerateYOLOXAnchors(const std::vector &size, - const std::vector &downsample_strides, - std::vector &anchors) { - // size: tuple of input (width, height) +void GenerateYOLOXAnchors(const std::vector& size, + const std::vector& downsample_strides, + std::vector* anchors) { + 
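GenerateYOLOXAnchors, whose body continues below, now writes through an output pointer instead of a mutable reference. What it computes is one anchor per grid cell per downsample stride; in isolation, assuming the default 640x640 input and strides {8, 16, 32}:

```cpp
#include <cstdio>
#include <vector>

// Standalone sketch of the grid enumeration: each stride ds covers the
// input with a (width/ds) x (height/ds) grid, one anchor per cell.
struct GridAnchor {
  int grid0;
  int grid1;
  int stride;
};

int main() {
  const int width = 640, height = 640;
  std::vector<GridAnchor> anchors;
  for (int ds : {8, 16, 32}) {
    for (int g1 = 0; g1 < height / ds; ++g1) {
      for (int g0 = 0; g0 < width / ds; ++g0) {
        anchors.push_back({g0, g1, ds});
      }
    }
  }
  // 80*80 + 40*40 + 20*20 = 8400 anchors for a 640x640 input
  std::printf("total anchors: %zu\n", anchors.size());
  return 0;
}
```

That total (8400 here) matches the n dimension of the (1, n, 85) output tensor that PostprocessWithDecode walks later in this hunk.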
// size: tuple of input (width, height) // downsample_strides: downsample strides in YOLOX, e.g (8,16,32) const int width = size[0]; const int height = size[1]; - for (const auto &ds: downsample_strides) { + for (const auto& ds : downsample_strides) { int num_grid_w = width / ds; int num_grid_h = height / ds; for (int g1 = 0; g1 < num_grid_h; ++g1) { for (int g0 = 0; g0 < num_grid_w; ++g0) { - anchors.emplace_back(YOLOXAnchor{g0, g1, ds}); - } + (*anchors).emplace_back(YOLOXAnchor{g0, g1, ds}); + } } } } -void PreProc(Mat* mat, std::vector size, std::vector color) { +void LetterBoxWithRightBottomPad(Mat* mat, std::vector size, + std::vector color) { // specific pre process for YOLOX, not the same as YOLOv5 // reference: YOLOX/yolox/data/data_augment.py#L142 - float r = std::min(size[1] * 1.0f / static_cast(mat->Height()), - size[0] * 1.0f / static_cast(mat->Width())); - + float r = std::min(size[1] * 1.0f / static_cast(mat->Height()), + size[0] * 1.0f / static_cast(mat->Width())); + int resize_h = int(round(static_cast(mat->Height()) * r)); - int resize_w = int(round(static_cast(mat->Width()) * r)); + int resize_w = int(round(static_cast(mat->Width()) * r)); if (resize_h != mat->Height() || resize_w != mat->Width()) { Resize::Run(mat, resize_w, resize_h); @@ -72,8 +73,7 @@ void PreProc(Mat* mat, std::vector size, std::vector color) { } YOLOX::YOLOX(const std::string& model_file, const std::string& params_file, - const RuntimeOption& custom_option, - const Frontend& model_format) { + const RuntimeOption& custom_option, const Frontend& model_format) { if (model_format == Frontend::ONNX) { valid_cpu_backends = {Backend::ORT}; // 指定可用的CPU后端 valid_gpu_backends = {Backend::ORT, Backend::TRT}; // 指定可用的GPU后端 @@ -119,7 +119,7 @@ bool YOLOX::Preprocess(Mat* mat, FDTensor* output, // 1. preproc // 2. HWC->CHW // 3. NO!!! BRG2GRB and Normalize needed in YOLOX - PreProc(mat, size, padding_value); + LetterBoxWithRightBottomPad(mat, size, padding_value); // Record output shape of preprocessed image (*im_info)["output_shape"] = {static_cast(mat->Height()), static_cast(mat->Width())}; @@ -207,7 +207,7 @@ bool YOLOX::PostprocessWithDecode( } // generate anchors with dowmsample strides std::vector anchors; - GenerateYOLOXAnchors(size, downsample_strides, anchors); + GenerateYOLOXAnchors(size, downsample_strides, &anchors); // infer_result shape might look like (1,n,85=5+80) float* data = static_cast(infer_result.Data()); @@ -239,11 +239,9 @@ bool YOLOX::PostprocessWithDecode( // convert from [x, y, w, h] to [x1, y1, x2, y2] result->boxes.emplace_back(std::array{ - x - w / 2.0f + label_id * max_wh, - y - h / 2.0f + label_id * max_wh, - x + w / 2.0f + label_id * max_wh, - y + h / 2.0f + label_id * max_wh}); - // label_id * max_wh for multi classes NMS + x - w / 2.0f + label_id * max_wh, y - h / 2.0f + label_id * max_wh, + x + w / 2.0f + label_id * max_wh, y + h / 2.0f + label_id * max_wh}); + // label_id * max_wh for multi classes NMS result->label_ids.push_back(label_id); result->scores.push_back(confidence); } @@ -299,7 +297,7 @@ bool YOLOX::Predict(cv::Mat* im, DetectionResult* result, float conf_threshold, FDERROR << "Failed to preprocess input image." 
<< std::endl; return false; } - + #ifdef FASTDEPLOY_DEBUG TIMERECORD_END(0, "Preprocess") TIMERECORD_START(1) @@ -315,7 +313,7 @@ bool YOLOX::Predict(cv::Mat* im, DetectionResult* result, float conf_threshold, TIMERECORD_END(1, "Inference") TIMERECORD_START(2) #endif - + if (is_decode_exported) { if (!Postprocess(output_tensors[0], result, im_info, conf_threshold, nms_iou_threshold)) { @@ -323,13 +321,13 @@ bool YOLOX::Predict(cv::Mat* im, DetectionResult* result, float conf_threshold, return false; } } else { - if (!PostprocessWithDecode(output_tensors[0], result, im_info, conf_threshold, - nms_iou_threshold)) { + if (!PostprocessWithDecode(output_tensors[0], result, im_info, + conf_threshold, nms_iou_threshold)) { FDERROR << "Failed to post process." << std::endl; return false; } } - + #ifdef FASTDEPLOY_DEBUG TIMERECORD_END(2, "Postprocess") #endif From eab499ec8a0c0f24eb5adba0700b96a36ebaca75 Mon Sep 17 00:00:00 2001 From: DefTruth Date: Mon, 25 Jul 2022 06:11:55 +0000 Subject: [PATCH 17/19] Add YOLOv5Face Model support --- examples/vision/deepcam_yolov5face.cc | 53 ++++ fastdeploy/vision.h | 1 + fastdeploy/vision/__init__.py | 1 + fastdeploy/vision/common/result.cc | 67 ++++ fastdeploy/vision/common/result.h | 28 +- fastdeploy/vision/deepcam/__init__.py | 117 +++++++ fastdeploy/vision/deepcam/deepcam_pybind.cc | 43 +++ fastdeploy/vision/deepcam/yolov5face.cc | 292 ++++++++++++++++++ fastdeploy/vision/deepcam/yolov5face.h | 97 ++++++ fastdeploy/vision/utils/nms.cc | 56 ++++ fastdeploy/vision/utils/sort_face_det_res.cc | 69 +++++ fastdeploy/vision/utils/utils.h | 4 + fastdeploy/vision/vision_pybind.cc | 12 + fastdeploy/vision/visualize/__init__.py | 5 + fastdeploy/vision/visualize/face_detection.cc | 81 +++++ fastdeploy/vision/visualize/visualize.h | 6 +- .../vision/visualize/visualize_pybind.cc | 17 +- model_zoo/vision/yolov5face/README.md | 78 +++++ model_zoo/vision/yolov5face/api.md | 71 +++++ .../vision/yolov5face/cpp/CMakeLists.txt | 17 + model_zoo/vision/yolov5face/cpp/README.md | 60 ++++ model_zoo/vision/yolov5face/cpp/yolov5face.cc | 40 +++ model_zoo/vision/yolov5face/yolov5face.py | 17 + 23 files changed, 1224 insertions(+), 8 deletions(-) create mode 100644 examples/vision/deepcam_yolov5face.cc create mode 100644 fastdeploy/vision/deepcam/__init__.py create mode 100644 fastdeploy/vision/deepcam/deepcam_pybind.cc create mode 100644 fastdeploy/vision/deepcam/yolov5face.cc create mode 100644 fastdeploy/vision/deepcam/yolov5face.h create mode 100644 fastdeploy/vision/utils/sort_face_det_res.cc create mode 100644 fastdeploy/vision/visualize/face_detection.cc create mode 100644 model_zoo/vision/yolov5face/README.md create mode 100644 model_zoo/vision/yolov5face/api.md create mode 100644 model_zoo/vision/yolov5face/cpp/CMakeLists.txt create mode 100644 model_zoo/vision/yolov5face/cpp/README.md create mode 100644 model_zoo/vision/yolov5face/cpp/yolov5face.cc create mode 100644 model_zoo/vision/yolov5face/yolov5face.py diff --git a/examples/vision/deepcam_yolov5face.cc b/examples/vision/deepcam_yolov5face.cc new file mode 100644 index 0000000000..8f55740d6b --- /dev/null +++ b/examples/vision/deepcam_yolov5face.cc @@ -0,0 +1,53 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+
+  std::string model_file = "../resources/models/yolov5s-face.onnx";
+  std::string img_path = "../resources/images/test_face_det.jpg";
+  std::string vis_path =
+      "../resources/outputs/deepcam_yolov5face_vis_result.jpg";
+
+  auto model = vis::deepcam::YOLOv5Face(model_file);
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed! Model: " << model_file << std::endl;
+    return -1;
+  } else {
+    std::cout << "Init Done! Model: " << model_file << std::endl;
+  }
+  model.EnableDebug();
+
+  cv::Mat im = cv::imread(img_path);
+  cv::Mat vis_im = im.clone();
+
+  vis::FaceDetectionResult res;
+  if (!model.Predict(&im, &res, 0.1f, 0.3f)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  } else {
+    std::cout << "Prediction Done!" << std::endl;
+  }
+
+  // Print the predicted boxes
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction
+  vis::Visualize::VisFaceDetection(&vis_im, res, 2, 0.3f);
+  cv::imwrite(vis_path, vis_im);
+  std::cout << "Detect Done! Saved: " << vis_path << std::endl;
+  return 0;
+}
diff --git a/fastdeploy/vision.h b/fastdeploy/vision.h
index 68c0881cac..4f47f5fc43 100644
--- a/fastdeploy/vision.h
+++ b/fastdeploy/vision.h
@@ -15,6 +15,7 @@
 #include "fastdeploy/core/config.h"
 #ifdef ENABLE_VISION
+#include "fastdeploy/vision/deepcam/yolov5face.h"
 #include "fastdeploy/vision/megvii/yolox.h"
 #include "fastdeploy/vision/meituan/yolov6.h"
 #include "fastdeploy/vision/ppcls/model.h"
diff --git a/fastdeploy/vision/__init__.py b/fastdeploy/vision/__init__.py
index 6acbf0c376..0b627c12da 100644
--- a/fastdeploy/vision/__init__.py
+++ b/fastdeploy/vision/__init__.py
@@ -21,3 +21,4 @@
 from . import megvii
 from . import visualize
 from . import wongkinyiu
+from .
import deepcam diff --git a/fastdeploy/vision/common/result.cc b/fastdeploy/vision/common/result.cc index ece0973c0c..d1a1f10f11 100644 --- a/fastdeploy/vision/common/result.cc +++ b/fastdeploy/vision/common/result.cc @@ -72,5 +72,72 @@ std::string DetectionResult::Str() { return out; } +FaceDetectionResult::FaceDetectionResult(const FaceDetectionResult& res) { + boxes.assign(res.boxes.begin(), res.boxes.end()); + landmarks.assign(res.landmarks.begin(), res.landmarks.end()); + scores.assign(res.scores.begin(), res.scores.end()); + landmarks_per_face = res.landmarks_per_face; +} + +void FaceDetectionResult::Clear() { + std::vector>().swap(boxes); + std::vector().swap(scores); + std::vector>().swap(landmarks); + landmarks_per_face = 0; +} + +void FaceDetectionResult::Reserve(int size) { + boxes.reserve(size); + scores.reserve(size); + if (landmarks_per_face > 0) { + landmarks.reserve(size * landmarks_per_face); + } +} + +void FaceDetectionResult::Resize(int size) { + boxes.resize(size); + scores.resize(size); + if (landmarks_per_face > 0) { + landmarks.resize(size * landmarks_per_face); + } +} + +std::string FaceDetectionResult::Str() { + std::string out; + // format without landmarks + if (landmarks_per_face <= 0) { + out = "FaceDetectionResult: [xmin, ymin, xmax, ymax, score]\n"; + for (size_t i = 0; i < boxes.size(); ++i) { + out = out + std::to_string(boxes[i][0]) + "," + + std::to_string(boxes[i][1]) + ", " + std::to_string(boxes[i][2]) + + ", " + std::to_string(boxes[i][3]) + ", " + + std::to_string(scores[i]) + "\n"; + } + return out; + } + // format with landmarks + FDASSERT((landmarks.size() == boxes.size() * landmarks_per_face), + "The size of landmarks != boxes.size * landmarks_per_face."); + out = "FaceDetectionResult: [xmin, ymin, xmax, ymax, score, (x, y) x " + + std::to_string(landmarks_per_face) + "]\n"; + for (size_t i = 0; i < boxes.size(); ++i) { + out = out + std::to_string(boxes[i][0]) + "," + + std::to_string(boxes[i][1]) + ", " + std::to_string(boxes[i][2]) + + ", " + std::to_string(boxes[i][3]) + ", " + + std::to_string(scores[i]) + ", "; + for (size_t j = 0; j < landmarks_per_face; ++j) { + out = out + "(" + + std::to_string(landmarks[i * landmarks_per_face + j][0]) + "," + + std::to_string(landmarks[i * landmarks_per_face + j][1]); + if (j < landmarks_per_face - 1) { + out = out + "), "; + } else { + out = out + ")\n"; + } + } + } + return out; +} + } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/common/result.h b/fastdeploy/vision/common/result.h index 22227a26cb..70b2d6bdc4 100644 --- a/fastdeploy/vision/common/result.h +++ b/fastdeploy/vision/common/result.h @@ -21,7 +21,8 @@ enum FASTDEPLOY_DECL ResultType { UNKNOWN_RESULT, CLASSIFY, DETECTION, - SEGMENTATION + SEGMENTATION, + FACE_DETECTION }; struct FASTDEPLOY_DECL BaseResult { @@ -56,5 +57,30 @@ struct FASTDEPLOY_DECL DetectionResult : public BaseResult { std::string Str(); }; +struct FASTDEPLOY_DECL FaceDetectionResult : public BaseResult { + // box: xmin, ymin, xmax, ymax + std::vector> boxes; + // landmark: x, y, landmarks may empty if the + // model don't detect face with landmarks. + // Note, one face might have multiple landmarks, + // such as 5/19/21/68/98/..., etc. + std::vector> landmarks; + std::vector scores; + ResultType type = ResultType::FACE_DETECTION; + // set landmarks_per_face manually in your post processes. 
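FaceDetectionResult, completed just below, stores landmarks in one flat vector rather than per-face arrays: face i's j-th point lives at index i * landmarks_per_face + j, which is the invariant Str() asserts above. A sketch of consuming it, using a hypothetical mirror struct in place of the real type:

```cpp
#include <array>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for FaceDetectionResult, reduced to the fields
// involved in the flat landmark layout.
struct FaceResultView {
  std::vector<std::array<float, 4>> boxes;      // xmin, ymin, xmax, ymax
  std::vector<std::array<float, 2>> landmarks;  // boxes.size() * landmarks_per_face entries
  std::vector<float> scores;
  int landmarks_per_face = 5;
};

void PrintFaces(const FaceResultView& res) {
  for (size_t i = 0; i < res.boxes.size(); ++i) {
    std::printf("face %zu, score %.3f\n", i, res.scores[i]);
    for (int j = 0; j < res.landmarks_per_face; ++j) {
      const auto& p = res.landmarks[i * res.landmarks_per_face + j];
      std::printf("  landmark %d: (%.1f, %.1f)\n", j, p[0], p[1]);
    }
  }
}
```

Keeping the vector flat is what lets Reserve() and Resize() above size it as size * landmarks_per_face in one step.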
+ int landmarks_per_face; + + FaceDetectionResult() { landmarks_per_face = 0; } + FaceDetectionResult(const FaceDetectionResult& res); + + void Clear(); + + void Reserve(int size); + + void Resize(int size); + + std::string Str(); +}; + } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/deepcam/__init__.py b/fastdeploy/vision/deepcam/__init__.py new file mode 100644 index 0000000000..6b1af4328b --- /dev/null +++ b/fastdeploy/vision/deepcam/__init__.py @@ -0,0 +1,117 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import logging +from ... import FastDeployModel, Frontend +from ... import fastdeploy_main as C + + +class YOLOv5Face(FastDeployModel): + def __init__(self, + model_file, + params_file="", + runtime_option=None, + model_format=Frontend.ONNX): + # 调用基函数进行backend_option的初始化 + # 初始化后的option保存在self._runtime_option + super(YOLOv5Face, self).__init__(runtime_option) + + self._model = C.vision.deepcam.YOLOv5Face( + model_file, params_file, self._runtime_option, model_format) + # 通过self.initialized判断整个模型的初始化是否成功 + assert self.initialized, "YOLOv5Face initialize failed." + + def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5): + return self._model.predict(input_image, conf_threshold, + nms_iou_threshold) + + # 一些跟YOLOv5Face模型有关的属性封装 + # 多数是预处理相关,可通过修改如model.size = [1280, 1280]改变预处理时resize的大小(前提是模型支持) + @property + def size(self): + return self._model.size + + @property + def padding_value(self): + return self._model.padding_value + + @property + def is_no_pad(self): + return self._model.is_no_pad + + @property + def is_mini_pad(self): + return self._model.is_mini_pad + + @property + def is_scale_up(self): + return self._model.is_scale_up + + @property + def stride(self): + return self._model.stride + + @property + def landmarks_per_face(self): + return self._model.landmarks_per_face + + @size.setter + def size(self, wh): + assert isinstance(wh, [list, tuple]),\ + "The value to set `size` must be type of tuple or list." + assert len(wh) == 2,\ + "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format( + len(wh)) + self._model.size = wh + + @padding_value.setter + def padding_value(self, value): + assert isinstance( + value, + list), "The value to set `padding_value` must be type of list." + self._model.padding_value = value + + @is_no_pad.setter + def is_no_pad(self, value): + assert isinstance( + value, bool), "The value to set `is_no_pad` must be type of bool." + self._model.is_no_pad = value + + @is_mini_pad.setter + def is_mini_pad(self, value): + assert isinstance( + value, + bool), "The value to set `is_mini_pad` must be type of bool." + self._model.is_mini_pad = value + + @is_scale_up.setter + def is_scale_up(self, value): + assert isinstance( + value, + bool), "The value to set `is_scale_up` must be type of bool." 
+ self._model.is_scale_up = value + + @stride.setter + def stride(self, value): + assert isinstance( + value, int), "The value to set `stride` must be type of int." + self._model.stride = value + + @landmarks_per_face.setter + def landmarks_per_face(self, value): + assert isinstance( + value, + int), "The value to set `landmarks_per_face` must be type of int." + self._model.landmarks_per_face = value diff --git a/fastdeploy/vision/deepcam/deepcam_pybind.cc b/fastdeploy/vision/deepcam/deepcam_pybind.cc new file mode 100644 index 0000000000..3ac741bbcd --- /dev/null +++ b/fastdeploy/vision/deepcam/deepcam_pybind.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/pybind/main.h" + +namespace fastdeploy { +void BindDeepCam(pybind11::module& m) { + auto deepcam_module = + m.def_submodule("deepcam", "https://github.com/deepcam-cn/yolov5-face"); + pybind11::class_(deepcam_module, + "YOLOv5Face") + .def(pybind11::init()) + .def("predict", + [](vision::deepcam::YOLOv5Face& self, pybind11::array& data, + float conf_threshold, float nms_iou_threshold) { + auto mat = PyArrayToCvMat(data); + vision::FaceDetectionResult res; + self.Predict(&mat, &res, conf_threshold, nms_iou_threshold); + return res; + }) + .def_readwrite("size", &vision::deepcam::YOLOv5Face::size) + .def_readwrite("padding_value", + &vision::deepcam::YOLOv5Face::padding_value) + .def_readwrite("is_mini_pad", &vision::deepcam::YOLOv5Face::is_mini_pad) + .def_readwrite("is_no_pad", &vision::deepcam::YOLOv5Face::is_no_pad) + .def_readwrite("is_scale_up", &vision::deepcam::YOLOv5Face::is_scale_up) + .def_readwrite("stride", &vision::deepcam::YOLOv5Face::stride) + .def_readwrite("landmarks_per_face", + &vision::deepcam::YOLOv5Face::landmarks_per_face); +} + +} // namespace fastdeploy diff --git a/fastdeploy/vision/deepcam/yolov5face.cc b/fastdeploy/vision/deepcam/yolov5face.cc new file mode 100644 index 0000000000..5b2c77af9d --- /dev/null +++ b/fastdeploy/vision/deepcam/yolov5face.cc @@ -0,0 +1,292 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
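The yolov5face.cc implementation that follows defines its own LetterBox helper: scale by the limiting edge, optionally refuse to upscale, then split the leftover padding between the two sides. The arithmetic on its own, with assumed source and target sizes and no OpenCV:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // Assumed example: 1280x720 source, 640x640 target, scale-up allowed.
  const int src_w = 1280, src_h = 720;
  const int dst_w = 640, dst_h = 640;
  float scale = std::min(dst_h * 1.0f / src_h, dst_w * 1.0f / src_w);  // 0.5
  int resize_w = static_cast<int>(std::round(src_w * scale));  // 640
  int resize_h = static_cast<int>(std::round(src_h * scale));  // 360
  int pad_w = dst_w - resize_w;  // 0
  int pad_h = dst_h - resize_h;  // 280
  // The round(half -/+ 0.1) trick from the patch assigns the extra
  // pixel to the bottom/right side whenever the pad is odd.
  int top = static_cast<int>(std::round(pad_h / 2.0f - 0.1f));     // 140
  int bottom = static_cast<int>(std::round(pad_h / 2.0f + 0.1f));  // 140
  std::printf("resize %dx%d, pad top %d bottom %d left/right %d\n",
              resize_w, resize_h, top, bottom, pad_w / 2);
  return 0;
}
```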
+ +#include "fastdeploy/vision/deepcam/yolov5face.h" +#include "fastdeploy/utils/perf.h" +#include "fastdeploy/vision/utils/utils.h" + +namespace fastdeploy { + +namespace vision { + +namespace deepcam { + +void LetterBox(Mat* mat, std::vector size, std::vector color, + bool _auto, bool scale_fill = false, bool scale_up = true, + int stride = 32) { + float scale = + std::min(size[1] * 1.0 / mat->Height(), size[0] * 1.0 / mat->Width()); + if (!scale_up) { + scale = std::min(scale, 1.0f); + } + + int resize_h = int(round(mat->Height() * scale)); + int resize_w = int(round(mat->Width() * scale)); + + int pad_w = size[0] - resize_w; + int pad_h = size[1] - resize_h; + if (_auto) { + pad_h = pad_h % stride; + pad_w = pad_w % stride; + } else if (scale_fill) { + pad_h = 0; + pad_w = 0; + resize_h = size[1]; + resize_w = size[0]; + } + if (resize_h != mat->Height() || resize_w != mat->Width()) { + Resize::Run(mat, resize_w, resize_h); + } + if (pad_h > 0 || pad_w > 0) { + float half_h = pad_h * 1.0 / 2; + int top = int(round(half_h - 0.1)); + int bottom = int(round(half_h + 0.1)); + float half_w = pad_w * 1.0 / 2; + int left = int(round(half_w - 0.1)); + int right = int(round(half_w + 0.1)); + Pad::Run(mat, top, bottom, left, right, color); + } +} + +YOLOv5Face::YOLOv5Face(const std::string& model_file, + const std::string& params_file, + const RuntimeOption& custom_option, + const Frontend& model_format) { + if (model_format == Frontend::ONNX) { + valid_cpu_backends = {Backend::ORT}; // 指定可用的CPU后端 + valid_gpu_backends = {Backend::ORT, Backend::TRT}; // 指定可用的GPU后端 + } else { + valid_cpu_backends = {Backend::PDINFER, Backend::ORT}; + valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT}; + } + runtime_option = custom_option; + runtime_option.model_format = model_format; + runtime_option.model_file = model_file; + runtime_option.params_file = params_file; + initialized = Initialize(); +} + +bool YOLOv5Face::Initialize() { + // parameters for preprocess + size = {640, 640}; + padding_value = {114.0, 114.0, 114.0}; + is_mini_pad = false; + is_no_pad = false; + is_scale_up = false; + stride = 32; + landmarks_per_face = 5; + + if (!InitRuntime()) { + FDERROR << "Failed to initialize fastdeploy backend." << std::endl; + return false; + } + // Check if the input shape is dynamic after Runtime already initialized, + // Note that, We need to force is_mini_pad 'false' to keep static + // shape after padding (LetterBox) when the is_dynamic_input_ is 'false'. + is_dynamic_input_ = false; + auto shape = InputInfoOfRuntime(0).shape; + for (int i = 0; i < shape.size(); ++i) { + // if height or width is dynamic + if (i >= 2 && shape[i] <= 0) { + is_dynamic_input_ = true; + break; + } + } + if (!is_dynamic_input_) { + is_mini_pad = false; + } + return true; +} + +bool YOLOv5Face::Preprocess( + Mat* mat, FDTensor* output, + std::map>* im_info) { + // process after image load + float ratio = std::min(size[1] * 1.0f / static_cast(mat->Height()), + size[0] * 1.0f / static_cast(mat->Width())); + if (ratio != 1.0) { // always true + int interp = cv::INTER_AREA; + if (ratio > 1.0) { + interp = cv::INTER_LINEAR; + } + int resize_h = int(round(static_cast(mat->Height()) * ratio)); + int resize_w = int(round(static_cast(mat->Width()) * ratio)); + Resize::Run(mat, resize_w, resize_h, -1, -1, interp); + } + // yolov5face's preprocess steps + // 1. letterbox + // 2. BGR->RGB + // 3. 
HWC->CHW + LetterBox(mat, size, padding_value, is_mini_pad, is_no_pad, is_scale_up, + stride); + BGR2RGB::Run(mat); + // Normalize::Run(mat, std::vector(mat->Channels(), 0.0), + // std::vector(mat->Channels(), 1.0)); + // Compute `result = mat * alpha + beta` directly by channel + std::vector alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f}; + std::vector beta = {0.0f, 0.0f, 0.0f}; + Convert::Run(mat, alpha, beta); + + // Record output shape of preprocessed image + (*im_info)["output_shape"] = {static_cast(mat->Height()), + static_cast(mat->Width())}; + + HWC2CHW::Run(mat); + Cast::Run(mat, "float"); + mat->ShareWithTensor(output); + output->shape.insert(output->shape.begin(), 1); // reshape to n, h, w, c + return true; +} + +bool YOLOv5Face::Postprocess( + FDTensor& infer_result, FaceDetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold) { + // infer_result: (1,n,16) 16=4+1+10+1 + FDASSERT(infer_result.shape[0] == 1, "Only support batch =1 now."); + result->Clear(); + // must be setup landmarks_per_face before reserve + result->landmarks_per_face = landmarks_per_face; + result->Reserve(infer_result.shape[1]); + if (infer_result.dtype != FDDataType::FP32) { + FDERROR << "Only support post process with float32 data." << std::endl; + return false; + } + float* data = static_cast(infer_result.Data()); + for (size_t i = 0; i < infer_result.shape[1]; ++i) { + float* reg_cls_ptr = data + (i * infer_result.shape[2]); + float obj_conf = reg_cls_ptr[4]; + float cls_conf = reg_cls_ptr[15]; + float confidence = obj_conf * cls_conf; + // filter boxes by conf_threshold + if (confidence <= conf_threshold) { + continue; + } + float x = reg_cls_ptr[0]; + float y = reg_cls_ptr[1]; + float w = reg_cls_ptr[2]; + float h = reg_cls_ptr[3]; + + // convert from [x, y, w, h] to [x1, y1, x2, y2] + result->boxes.emplace_back(std::array{ + (x - w / 2.f), (y - h / 2.f), (x + w / 2.f), (y + h / 2.f)}); + result->scores.push_back(confidence); + // decode landmarks (default 5 landmarks) + if (landmarks_per_face > 0) { + float* landmarks_ptr = reg_cls_ptr + 5; + for (size_t j = 0; j < landmarks_per_face * 2; j += 2) { + result->landmarks.emplace_back( + std::array{landmarks_ptr[j], landmarks_ptr[j + 1]}); + } + } + } + + if (result->boxes.size() == 0) { + return true; + } + + utils::NMS(result, nms_iou_threshold); + + // scale the boxes to the origin image shape + auto iter_out = im_info.find("output_shape"); + auto iter_ipt = im_info.find("input_shape"); + FDASSERT(iter_out != im_info.end() && iter_ipt != im_info.end(), + "Cannot find input_shape or output_shape from im_info."); + float out_h = iter_out->second[0]; + float out_w = iter_out->second[1]; + float ipt_h = iter_ipt->second[0]; + float ipt_w = iter_ipt->second[1]; + float scale = std::min(out_h / ipt_h, out_w / ipt_w); + float pad_h = (out_h - ipt_h * scale) / 2.f; + float pad_w = (out_w - ipt_w * scale) / 2.f; + if (is_mini_pad) { + pad_h = static_cast(static_cast(pad_h) % stride); + pad_w = static_cast(static_cast(pad_w) % stride); + } + // scale and clip box + for (size_t i = 0; i < result->boxes.size(); ++i) { + result->boxes[i][0] = std::max((result->boxes[i][0] - pad_w) / scale, 0.0f); + result->boxes[i][1] = std::max((result->boxes[i][1] - pad_h) / scale, 0.0f); + result->boxes[i][2] = std::max((result->boxes[i][2] - pad_w) / scale, 0.0f); + result->boxes[i][3] = std::max((result->boxes[i][3] - pad_h) / scale, 0.0f); + result->boxes[i][0] = std::min(result->boxes[i][0], ipt_w - 1.0f); + 
result->boxes[i][1] = std::min(result->boxes[i][1], ipt_h - 1.0f); + result->boxes[i][2] = std::min(result->boxes[i][2], ipt_w - 1.0f); + result->boxes[i][3] = std::min(result->boxes[i][3], ipt_h - 1.0f); + } + // scale and clip landmarks + for (size_t i = 0; i < result->landmarks.size(); ++i) { + result->landmarks[i][0] = + std::max((result->landmarks[i][0] - pad_w) / scale, 0.0f); + result->landmarks[i][1] = + std::max((result->landmarks[i][1] - pad_h) / scale, 0.0f); + result->landmarks[i][0] = std::min(result->landmarks[i][0], ipt_w - 1.0f); + result->landmarks[i][1] = std::min(result->landmarks[i][1], ipt_h - 1.0f); + } + return true; +} + +bool YOLOv5Face::Predict(cv::Mat* im, FaceDetectionResult* result, + float conf_threshold, float nms_iou_threshold) { +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_START(0) +#endif + + Mat mat(*im); + std::vector input_tensors(1); + + std::map> im_info; + + // Record the shape of image and the shape of preprocessed image + im_info["input_shape"] = {static_cast(mat.Height()), + static_cast(mat.Width())}; + im_info["output_shape"] = {static_cast(mat.Height()), + static_cast(mat.Width())}; + + if (!Preprocess(&mat, &input_tensors[0], &im_info)) { + FDERROR << "Failed to preprocess input image." << std::endl; + return false; + } + +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(0, "Preprocess") + TIMERECORD_START(1) +#endif + + input_tensors[0].name = InputInfoOfRuntime(0).name; + std::vector output_tensors; + if (!Infer(input_tensors, &output_tensors)) { + FDERROR << "Failed to inference." << std::endl; + return false; + } +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(1, "Inference") + TIMERECORD_START(2) +#endif + + if (!Postprocess(output_tensors[0], result, im_info, conf_threshold, + nms_iou_threshold)) { + FDERROR << "Failed to post process." << std::endl; + return false; + } + +#ifdef FASTDEPLOY_DEBUG + TIMERECORD_END(2, "Postprocess") +#endif + return true; +} + +} // namespace deepcam +} // namespace vision +} // namespace fastdeploy \ No newline at end of file diff --git a/fastdeploy/vision/deepcam/yolov5face.h b/fastdeploy/vision/deepcam/yolov5face.h new file mode 100644 index 0000000000..74a6f9c699 --- /dev/null +++ b/fastdeploy/vision/deepcam/yolov5face.h @@ -0,0 +1,97 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
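The Postprocess routine earlier in this hunk maps boxes and landmarks back to the source image by undoing the letterbox: subtract the padding, divide by the scale, then clamp into [0, ipt_w - 1] x [0, ipt_h - 1]. Isolated, with out/ipt shapes assumed to follow the same im_info convention:

```cpp
#include <algorithm>
#include <cstdio>

int main() {
  const float out_w = 640.f, out_h = 640.f;   // network (letterboxed) size
  const float ipt_w = 1280.f, ipt_h = 720.f;  // original image size
  const float scale = std::min(out_h / ipt_h, out_w / ipt_w);  // 0.5
  const float pad_w = (out_w - ipt_w * scale) / 2.f;           // 0
  const float pad_h = (out_h - ipt_h * scale) / 2.f;           // 140
  // One corner point in letterboxed coordinates:
  float x = 100.f, y = 150.f;
  x = std::min(std::max((x - pad_w) / scale, 0.0f), ipt_w - 1.0f);
  y = std::min(std::max((y - pad_h) / scale, 0.0f), ipt_h - 1.0f);
  std::printf("(%.1f, %.1f) in the original image\n", x, y);  // (200.0, 20.0)
  return 0;
}
```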
+ +#pragma once +#include "fastdeploy/fastdeploy_model.h" +#include "fastdeploy/vision/common/processors/transform.h" +#include "fastdeploy/vision/common/result.h" + +namespace fastdeploy { + +namespace vision { + +namespace deepcam { + +class FASTDEPLOY_DECL YOLOv5Face : public FastDeployModel { + public: + // 当model_format为ONNX时,无需指定params_file + // 当model_format为Paddle时,则需同时指定model_file & params_file + YOLOv5Face(const std::string& model_file, const std::string& params_file = "", + const RuntimeOption& custom_option = RuntimeOption(), + const Frontend& model_format = Frontend::ONNX); + + // 定义模型的名称 + std::string ModelName() const { return "deepcam-cn/yolov5-face"; } + + // 模型预测接口,即用户调用的接口 + // im 为用户的输入数据,目前对于CV均定义为cv::Mat + // result 为模型预测的输出结构体 + // conf_threshold 为后处理的参数 + // nms_iou_threshold 为后处理的参数 + virtual bool Predict(cv::Mat* im, FaceDetectionResult* result, + float conf_threshold = 0.25, + float nms_iou_threshold = 0.5); + + // 以下为模型在预测时的一些参数,基本是前后处理所需 + // 用户在创建模型后,可根据模型的要求,以及自己的需求 + // 对参数进行修改 + // tuple of (width, height) + std::vector size; + // padding value, size should be same with Channels + std::vector padding_value; + // only pad to the minimum rectange which height and width is times of stride + bool is_mini_pad; + // while is_mini_pad = false and is_no_pad = true, will resize the image to + // the set size + bool is_no_pad; + // if is_scale_up is false, the input image only can be zoom out, the maximum + // resize scale cannot exceed 1.0 + bool is_scale_up; + // padding stride, for is_mini_pad + int stride; + // setup the number of landmarks for per face (if have), default 5 in + // official yolov5face note that, the outupt tensor's shape must be: + // (1,n,4+1+2*landmarks_per_face+1=box+obj+landmarks+cls) + int landmarks_per_face; + + private: + // 初始化函数,包括初始化后端,以及其它模型推理需要涉及的操作 + bool Initialize(); + + // 输入图像预处理操作 + // Mat为FastDeploy定义的数据结构 + // FDTensor为预处理后的Tensor数据,传给后端进行推理 + // im_info为预处理过程保存的数据,在后处理中需要用到 + bool Preprocess(Mat* mat, FDTensor* outputs, + std::map>* im_info); + + // 后端推理结果后处理,输出给用户 + // infer_result 为后端推理后的输出Tensor + // result 为模型预测的结果 + // im_info 为预处理记录的信息,后处理用于还原box + // conf_threshold 后处理时过滤box的置信度阈值 + // nms_iou_threshold 后处理时NMS设定的iou阈值 + bool Postprocess(FDTensor& infer_result, FaceDetectionResult* result, + const std::map>& im_info, + float conf_threshold, float nms_iou_threshold); + + // 查看输入是否为动态维度的 不建议直接使用 不同模型的逻辑可能不一致 + bool IsDynamicInput() const { return is_dynamic_input_; } + + bool is_dynamic_input_; +}; + +} // namespace deepcam +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/utils/nms.cc b/fastdeploy/vision/utils/nms.cc index d0cd1d59ef..900acf84da 100644 --- a/fastdeploy/vision/utils/nms.cc +++ b/fastdeploy/vision/utils/nms.cc @@ -66,6 +66,62 @@ void NMS(DetectionResult* result, float iou_threshold) { } } +void NMS(FaceDetectionResult* result, float iou_threshold) { + utils::SortDetectionResult(result); + + std::vector area_of_boxes(result->boxes.size()); + std::vector suppressed(result->boxes.size(), 0); + for (size_t i = 0; i < result->boxes.size(); ++i) { + area_of_boxes[i] = (result->boxes[i][2] - result->boxes[i][0]) * + (result->boxes[i][3] - result->boxes[i][1]); + } + + for (size_t i = 0; i < result->boxes.size(); ++i) { + if (suppressed[i] == 1) { + continue; + } + for (size_t j = i + 1; j < result->boxes.size(); ++j) { + if (suppressed[j] == 1) { + continue; + } + float xmin = std::max(result->boxes[i][0], result->boxes[j][0]); + float ymin = std::max(result->boxes[i][1], 
result->boxes[j][1]); + float xmax = std::min(result->boxes[i][2], result->boxes[j][2]); + float ymax = std::min(result->boxes[i][3], result->boxes[j][3]); + float overlap_w = std::max(0.0f, xmax - xmin); + float overlap_h = std::max(0.0f, ymax - ymin); + float overlap_area = overlap_w * overlap_h; + float overlap_ratio = + overlap_area / (area_of_boxes[i] + area_of_boxes[j] - overlap_area); + if (overlap_ratio > iou_threshold) { + suppressed[j] = 1; + } + } + } + FaceDetectionResult backup(*result); + int landmarks_per_face = result->landmarks_per_face; + + result->Clear(); + // don't forget to reset the landmarks_per_face + // before apply Reserve method. + result->landmarks_per_face = landmarks_per_face; + result->Reserve(suppressed.size()); + for (size_t i = 0; i < suppressed.size(); ++i) { + if (suppressed[i] == 1) { + continue; + } + result->boxes.emplace_back(backup.boxes[i]); + result->scores.push_back(backup.scores[i]); + // landmarks (if have) + if (result->landmarks_per_face > 0) { + for (size_t j = 0; j < result->landmarks_per_face; ++j) { + result->landmarks.emplace_back( + backup.landmarks[i * result->landmarks_per_face + j]); + } + } + } +} + } // namespace utils } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/utils/sort_face_det_res.cc b/fastdeploy/vision/utils/sort_face_det_res.cc new file mode 100644 index 0000000000..34150f9ace --- /dev/null +++ b/fastdeploy/vision/utils/sort_face_det_res.cc @@ -0,0 +1,69 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fastdeploy/vision/utils/utils.h" + +namespace fastdeploy { +namespace vision { +namespace utils { + +void SortDetectionResult(FaceDetectionResult* result) { + // sort face detection results with landmarks or not. + if (result->boxes.size() == 0) { + return; + } + int landmarks_per_face = result->landmarks_per_face; + if (landmarks_per_face > 0) { + FDASSERT( + (result->landmarks.size() == result->boxes.size() * landmarks_per_face), + "The size of landmarks != boxes.size * landmarks_per_face."); + } + + // argsort for scores. + std::vector indices; + indices.resize(result->boxes.size()); + for (size_t i = 0; i < result->boxes.size(); ++i) { + indices[i] = i; + } + std::vector& scores = result->scores; + std::sort(indices.begin(), indices.end(), + [&scores](size_t a, size_t b) { return scores[a] > scores[b]; }); + + // reorder boxes, scores, landmarks (if have). + FaceDetectionResult backup(*result); + result->Clear(); + // don't forget to reset the landmarks_per_face + // before apply Reserve method. 
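SortDetectionResult argsorts index positions by score above and rebuilds the parallel vectors below, instead of trying to sort boxes, scores, and landmarks in lock-step. The core pattern in isolation:

```cpp
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  std::vector<float> scores = {0.4f, 0.9f, 0.7f};
  std::vector<size_t> indices(scores.size());
  std::iota(indices.begin(), indices.end(), 0);
  // Descending by score, exactly like the comparator in the patch.
  std::sort(indices.begin(), indices.end(),
            [&scores](size_t a, size_t b) { return scores[a] > scores[b]; });
  // Rebuild any parallel vector by walking indices: prints 1 2 0.
  for (size_t idx : indices) std::printf("%zu ", idx);
  std::printf("\n");
  return 0;
}
```

The same backup-and-rebuild step appears in the NMS overload above, where landmarks_per_face must be restored on the cleared result before Reserve() so the landmark slots are sized correctly.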
+ result->landmarks_per_face = landmarks_per_face; + result->Reserve(indices.size()); + if (landmarks_per_face > 0) { + for (size_t i = 0; i < indices.size(); ++i) { + result->boxes.emplace_back(backup.boxes[indices[i]]); + result->scores.push_back(backup.scores[indices[i]]); + for (size_t j = 0; j < landmarks_per_face; ++j) { + result->landmarks.emplace_back( + backup.landmarks[indices[i] * landmarks_per_face + j]); + } + } + } else { + for (size_t i = 0; i < indices.size(); ++i) { + result->boxes.emplace_back(backup.boxes[indices[i]]); + result->scores.push_back(backup.scores[indices[i]]); + } + } +} + +} // namespace utils +} // namespace vision +} // namespace fastdeploy diff --git a/fastdeploy/vision/utils/utils.h b/fastdeploy/vision/utils/utils.h index 79ece458c8..e95e7e10b5 100644 --- a/fastdeploy/vision/utils/utils.h +++ b/fastdeploy/vision/utils/utils.h @@ -53,9 +53,13 @@ std::vector TopKIndices(const T* array, int array_size, int topk) { void NMS(DetectionResult* output, float iou_threshold = 0.5); +void NMS(FaceDetectionResult* result, float iou_threshold = 0.5); + // MergeSort void SortDetectionResult(DetectionResult* output); +void SortDetectionResult(FaceDetectionResult* result); + } // namespace utils } // namespace vision } // namespace fastdeploy diff --git a/fastdeploy/vision/vision_pybind.cc b/fastdeploy/vision/vision_pybind.cc index 0334303ce6..09b072090e 100644 --- a/fastdeploy/vision/vision_pybind.cc +++ b/fastdeploy/vision/vision_pybind.cc @@ -22,6 +22,7 @@ void BindWongkinyiu(pybind11::module& m); void BindUltralytics(pybind11::module& m); void BindMeituan(pybind11::module& m); void BindMegvii(pybind11::module& m); +void BindDeepCam(pybind11::module& m); #ifdef ENABLE_VISION_VISUALIZE void BindVisualize(pybind11::module& m); #endif @@ -42,12 +43,23 @@ void BindVision(pybind11::module& m) { .def("__repr__", &vision::DetectionResult::Str) .def("__str__", &vision::DetectionResult::Str); + pybind11::class_(m, "FaceDetectionResult") + .def(pybind11::init()) + .def_readwrite("boxes", &vision::FaceDetectionResult::boxes) + .def_readwrite("scores", &vision::FaceDetectionResult::scores) + .def_readwrite("landmarks", &vision::FaceDetectionResult::landmarks) + .def_readwrite("landmarks_per_face", + &vision::FaceDetectionResult::landmarks_per_face) + .def("__repr__", &vision::FaceDetectionResult::Str) + .def("__str__", &vision::FaceDetectionResult::Str); + BindPPCls(m); BindPPDet(m); BindUltralytics(m); BindWongkinyiu(m); BindMeituan(m); BindMegvii(m); + BindDeepCam(m); #ifdef ENABLE_VISION_VISUALIZE BindVisualize(m); #endif diff --git a/fastdeploy/vision/visualize/__init__.py b/fastdeploy/vision/visualize/__init__.py index 384ec2768f..fc0ae706d8 100644 --- a/fastdeploy/vision/visualize/__init__.py +++ b/fastdeploy/vision/visualize/__init__.py @@ -19,3 +19,8 @@ def vis_detection(im_data, det_result, line_size=1, font_size=0.5): C.vision.Visualize.vis_detection(im_data, det_result, line_size, font_size) + + +def vis_face_detection(im_data, face_det_result, line_size=1, font_size=0.5): + C.vision.Visualize.vis_face_detection(im_data, face_det_result, line_size, + font_size) diff --git a/fastdeploy/vision/visualize/face_detection.cc b/fastdeploy/vision/visualize/face_detection.cc new file mode 100644 index 0000000000..8a95a1ad7e --- /dev/null +++ b/fastdeploy/vision/visualize/face_detection.cc @@ -0,0 +1,81 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifdef ENABLE_VISION_VISUALIZE + +#include "fastdeploy/vision/visualize/visualize.h" +#include "opencv2/imgproc/imgproc.hpp" + +namespace fastdeploy { + +namespace vision { + +// Default only support visualize num_classes <= 1000 +// If need to visualize num_classes > 1000 +// Please call Visualize::GetColorMap(num_classes) first +void Visualize::VisFaceDetection(cv::Mat* im, const FaceDetectionResult& result, + int line_size, float font_size) { + auto color_map = GetColorMap(); + int h = im->rows; + int w = im->cols; + + bool vis_landmarks = false; + if ((result.landmarks_per_face > 0) && + (result.boxes.size() * result.landmarks_per_face == + result.landmarks.size())) { + vis_landmarks = true; + } + for (size_t i = 0; i < result.boxes.size(); ++i) { + cv::Rect rect(result.boxes[i][0], result.boxes[i][1], + result.boxes[i][2] - result.boxes[i][0], + result.boxes[i][3] - result.boxes[i][1]); + int color_id = i % 333; + int c0 = color_map[3 * color_id + 0]; + int c1 = color_map[3 * color_id + 1]; + int c2 = color_map[3 * color_id + 2]; + cv::Scalar rect_color = cv::Scalar(c0, c1, c2); + std::string text = std::to_string(result.scores[i]); + if (text.size() > 4) { + text = text.substr(0, 4); + } + int font = cv::FONT_HERSHEY_SIMPLEX; + cv::Size text_size = cv::getTextSize(text, font, font_size, 1, nullptr); + cv::Point origin; + origin.x = rect.x; + origin.y = rect.y; + cv::Rect text_background = + cv::Rect(result.boxes[i][0], result.boxes[i][1] - text_size.height, + text_size.width, text_size.height); + cv::rectangle(*im, rect, rect_color, line_size); + cv::putText(*im, text, origin, font, font_size, cv::Scalar(255, 255, 255), + 1); + // vis landmarks (if have) + if (vis_landmarks) { + cv::Scalar landmark_color = rect_color; + for (size_t j = 0; j < result.landmarks_per_face; ++j) { + cv::Point landmark; + landmark.x = static_cast( + result.landmarks[i * result.landmarks_per_face + j][0]); + landmark.y = static_cast( + result.landmarks[i * result.landmarks_per_face + j][1]); + cv::circle(*im, landmark, line_size, landmark_color, -1); + } + } + } +} + +} // namespace vision +} // namespace fastdeploy + +#endif \ No newline at end of file diff --git a/fastdeploy/vision/visualize/visualize.h b/fastdeploy/vision/visualize/visualize.h index 6fffa521a6..034fe6cc43 100644 --- a/fastdeploy/vision/visualize/visualize.h +++ b/fastdeploy/vision/visualize/visualize.h @@ -27,8 +27,10 @@ class FASTDEPLOY_DECL Visualize { static const std::vector& GetColorMap(int num_classes = 1000); static void VisDetection(cv::Mat* im, const DetectionResult& result, int line_size = 2, float font_size = 0.5f); + static void VisFaceDetection(cv::Mat* im, const FaceDetectionResult& result, + int line_size = 2, float font_size = 0.5f); }; -} // namespace vision -} // namespace fastdeploy +} // namespace vision +} // namespace fastdeploy #endif diff --git a/fastdeploy/vision/visualize/visualize_pybind.cc b/fastdeploy/vision/visualize/visualize_pybind.cc index 
diff --git a/fastdeploy/vision/visualize/visualize_pybind.cc b/fastdeploy/vision/visualize/visualize_pybind.cc
index 66ffc74f9f..853b64ddcd 100644
--- a/fastdeploy/vision/visualize/visualize_pybind.cc
+++ b/fastdeploy/vision/visualize/visualize_pybind.cc
@@ -18,11 +18,18 @@ namespace fastdeploy {
 void BindVisualize(pybind11::module& m) {
   pybind11::class_<vision::Visualize>(m, "Visualize")
       .def(pybind11::init<>())
-      .def_static("vis_detection", [](pybind11::array& im_data,
-                                      vision::DetectionResult& result,
-                                      int line_size, float font_size) {
+      .def_static("vis_detection",
+                  [](pybind11::array& im_data, vision::DetectionResult& result,
+                     int line_size, float font_size) {
+                    auto im = PyArrayToCvMat(im_data);
+                    vision::Visualize::VisDetection(&im, result, line_size,
+                                                    font_size);
+                  })
+      .def_static("vis_face_detection", [](pybind11::array& im_data,
+                                           vision::FaceDetectionResult& result,
+                                           int line_size, float font_size) {
         auto im = PyArrayToCvMat(im_data);
-        vision::Visualize::VisDetection(&im, result, line_size, font_size);
+        vision::Visualize::VisFaceDetection(&im, result, line_size, font_size);
       });
 }
-} // namespace fastdeploy
+}  // namespace fastdeploy
diff --git a/model_zoo/vision/yolov5face/README.md b/model_zoo/vision/yolov5face/README.md
new file mode 100644
index 0000000000..e1713e67d7
--- /dev/null
+++ b/model_zoo/vision/yolov5face/README.md
@@ -0,0 +1,78 @@
+# YOLOv5Face Deployment Example
+
+Currently supported model version: [YOLOv5Face CommitID:4fd1ead](https://github.com/deepcam-cn/yolov5-face/commit/4fd1ead)
+
+This document explains how to quickly deploy and run inference with [YOLOv5Face](https://github.com/deepcam-cn/yolov5-face). The directory is laid out as follows:
+
+```
+.
+├── cpp                  # C++ code directory
+│   ├── CMakeLists.txt   # CMakeLists file for building the C++ code
+│   ├── README.md        # C++ build and deployment document
+│   └── yolov5face.cc    # C++ example code
+├── api.md               # API reference
+├── README.md            # YOLOv5Face deployment document
+└── yolov5face.py        # Python example code
+```
+
+## Get the ONNX file
+
+Visit the official [YOLOv5Face](https://github.com/deepcam-cn/yolov5-face) GitHub repository, follow its instructions to download and install, download the `yolov5s-face.pt` model, and use `export.py` to produce the `onnx` file.
+
+* Download the yolov5face model file
+  ```
+  Link: https://pan.baidu.com/s/1fyzLxZYx7Ja1_PCIWRhxbw  Extraction code: eq0q
+  https://drive.google.com/file/d/1zxaHeLDyID9YU4-hqK7KNepXIwbTkRIO/view?usp=sharing
+  ```
+
+* Export the onnx file
+  ```bash
+  PYTHONPATH=. python export.py --weights weights/yolov5s-face.pt --img_size 640 640 --batch_size 1
+  ```
+* Simplify the onnx model (optional)
+  ```bash
+  onnxsim yolov5s-face.onnx yolov5s-face.onnx
+  ```
+* Move the onnx file into the model_zoo/vision/yolov5face directory
+  ```bash
+  cp PATH/TO/yolov5s-face.onnx PATH/TO/model_zoo/vision/yolov5face/
+  ```
+
+
+
+## Prepare a test image
+Prepare a test image containing faces, name it test.jpg, and copy it to the directory of the executable.
+
+## Install FastDeploy
+
+Install FastDeploy with the commands below. Note that `vision-cpu` is installed here; install `vision-gpu` instead if needed.
+```bash
+# Install the fastdeploy-python tool
+pip install fastdeploy-python
+
+# Install the vision-cpu module
+fastdeploy install vision-cpu
+```
+
+## Python deployment
+
+Running the code below will automatically download the YOLOv5Face model and test image
+```bash
+python yolov5face.py
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detection results are printed as follows
+```
+FaceDetectionResult: [xmin, ymin, xmax, ymax, score, (x, y) x 5]
+749.575256,375.122162, 775.008850, 407.858215, 0.851824, (756.933838,388.423157), (767.810974,387.932922), (762.617065,394.212341), (758.053101,399.073639), (767.370300,398.769470)
+897.833862,380.372864, 924.725281, 409.566803, 0.847505, (903.757202,390.221741), (914.575867,389.495911), (908.998901,395.983307), (905.803223,400.871429), (914.674438,400.268066)
+281.558197,367.739349, 305.474701, 397.860535, 0.840915, (287.018768,379.771088), (297.285004,378.755280), (292.057831,385.207367), (289.110962,390.010437), (297.535339,389.412048)
+132.922104,368.507263, 159.098541, 402.777283, 0.840232, (140.632492,382.361633), (151.900864,380.966156), (146.869186,388.505066), (141.930420,393.724670), (151.734604,392.808197)
+699.379700,306.743256, 723.219421, 336.533295, 0.840228, (705.688843,319.133301), (715.784668,318.449524), (711.107300,324.416016), (707.236633,328.671936), (716.088623,328.151794)
+# ...
+```
+
+## Other documents
+
+- [C++ deployment](./cpp/README.md)
+- [YOLOv5Face API reference](./api.md)
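When adapting the Python example, `predict` also exposes the score and NMS thresholds documented in the api.md below — a short sketch, with placeholder paths and illustrative threshold values:

```python
import cv2
import fastdeploy as fd

model = fd.vision.deepcam.YOLOv5Face("yolov5s-face.onnx")
im = cv2.imread("test.jpg")  # HWC, BGR -- the layout predict expects

# Defaults are conf_threshold=0.25 and nms_iou_threshold=0.5; lowering the
# confidence threshold keeps more (possibly noisier) detections.
result = model.predict(im, conf_threshold=0.3, nms_iou_threshold=0.45)
print(result)
```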
diff --git a/model_zoo/vision/yolov5face/api.md b/model_zoo/vision/yolov5face/api.md
new file mode 100644
index 0000000000..384ef23d31
--- /dev/null
+++ b/model_zoo/vision/yolov5face/api.md
@@ -0,0 +1,71 @@
+# YOLOv5Face API Reference
+
+## Python API
+
+### YOLOv5Face class
+```
+fastdeploy.vision.deepcam.YOLOv5Face(model_file, params_file=None, runtime_option=None, model_format=fd.Frontend.ONNX)
+```
+Loads and initializes a YOLOv5Face model. When model_format is `fd.Frontend.ONNX`, only model_file is required (e.g. `yolov5s-face.onnx`); when model_format is `fd.Frontend.PADDLE`, both model_file and params_file must be provided.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file
+> * **runtime_option**(RuntimeOption): backend inference configuration; defaults to None, which means the default configuration is used
+> * **model_format**(Frontend): model format
+
+#### predict function
+> ```
+> YOLOv5Face.predict(image_data, conf_threshold=0.25, nms_iou_threshold=0.5)
+> ```
+> Model prediction interface: takes an image and directly returns the detection results.
+>
+> **Parameters**
+>
+> > * **image_data**(np.ndarray): input data; note it must be in HWC, BGR format
+> > * **conf_threshold**(float): confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**(float): IoU threshold used during NMS
+
+See [yolov5face.py](./yolov5face.py) for example code.
+
+
+## C++ API
+
+### YOLOv5Face class
+```
+fastdeploy::vision::deepcam::YOLOv5Face(
+    const string& model_file,
+    const string& params_file = "",
+    const RuntimeOption& runtime_option = RuntimeOption(),
+    const Frontend& model_format = Frontend::ONNX)
+```
+Loads and initializes a YOLOv5Face model. When model_format is `Frontend::ONNX`, only model_file is required (e.g. `yolov5s-face.onnx`); when model_format is `Frontend::PADDLE`, both model_file and params_file must be provided.
+
+**Parameters**
+
+> * **model_file**(str): path to the model file
+> * **params_file**(str): path to the parameters file
+> * **runtime_option**(RuntimeOption): backend inference configuration; the default configuration is used if not set
+> * **model_format**(Frontend): model format
+
+#### Predict function
+> ```
+> YOLOv5Face::Predict(cv::Mat* im, FaceDetectionResult* result,
+>                     float conf_threshold = 0.25,
+>                     float nms_iou_threshold = 0.5)
+> ```
+> Model prediction interface: takes an image and directly returns the detection results.
+>
+> **Parameters**
+>
+> > * **im**: input image; note it must be in HWC, BGR format
+> > * **result**: detection results, including the detection boxes and the confidence of each box
+> > * **conf_threshold**: confidence threshold for filtering detection boxes
+> > * **nms_iou_threshold**: IoU threshold used during NMS
+
+See [cpp/yolov5face.cc](cpp/yolov5face.cc) for example code.
+
+## Other APIs
+
+- [RuntimeOption configuration for model deployment](../../../docs/api/runtime_option.md)
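For readers unfamiliar with the `nms_iou_threshold` parameter documented above, greedy NMS keeps the highest-scoring box and discards any remaining box whose IoU with a kept box exceeds the threshold. An illustrative pure-Python sketch of that idea — not FastDeploy's actual C++ implementation:

```python
def iou(a, b):
    """IoU of two [xmin, ymin, xmax, ymax] boxes."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / max(area_a + area_b - inter, 1e-12)

def nms(boxes, scores, iou_threshold=0.5):
    """Greedy NMS over score-sorted boxes; returns indices of kept boxes."""
    order = sorted(range(len(boxes)), key=lambda i: scores[i], reverse=True)
    keep = []
    for i in order:
        if all(iou(boxes[i], boxes[j]) < iou_threshold for j in keep):
            keep.append(i)
    return keep
```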
diff --git a/model_zoo/vision/yolov5face/cpp/CMakeLists.txt b/model_zoo/vision/yolov5face/cpp/CMakeLists.txt
new file mode 100644
index 0000000000..23878ac2c9
--- /dev/null
+++ b/model_zoo/vision/yolov5face/cpp/CMakeLists.txt
@@ -0,0 +1,17 @@
+PROJECT(yolov5face_demo C CXX)
+CMAKE_MINIMUM_REQUIRED (VERSION 3.16)
+
+# On older-ABI environments, uncomment the line below for compatibility
+# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+
+# Path to the downloaded and extracted fastdeploy library
+set(FASTDEPLOY_INSTALL_DIR ${PROJECT_SOURCE_DIR}/fastdeploy-linux-x64-0.0.3/)
+
+include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)
+
+# Add the FastDeploy header directories
+include_directories(${FASTDEPLOY_INCS})
+
+add_executable(yolov5face_demo ${PROJECT_SOURCE_DIR}/yolov5face.cc)
+# Link the FastDeploy libraries
+target_link_libraries(yolov5face_demo ${FASTDEPLOY_LIBS})
diff --git a/model_zoo/vision/yolov5face/cpp/README.md b/model_zoo/vision/yolov5face/cpp/README.md
new file mode 100644
index 0000000000..4f5788458f
--- /dev/null
+++ b/model_zoo/vision/yolov5face/cpp/README.md
@@ -0,0 +1,60 @@
+# Building the YOLOv5Face example
+
+Currently supported model version: [YOLOv5Face CommitID:4fd1ead](https://github.com/deepcam-cn/yolov5-face/commit/4fd1ead)
+
+## Download and extract the inference library
+```bash
+wget https://bj.bcebos.com/paddle2onnx/fastdeploy/fastdeploy-linux-x64-0.0.3.tgz
+tar xvf fastdeploy-linux-x64-0.0.3.tgz
+```
+
+## Build the example code
+```bash
+mkdir build && cd build
+cmake ..
+make -j
+```
+
+## Get the ONNX file
+
+Visit the official [YOLOv5Face](https://github.com/deepcam-cn/yolov5-face) GitHub repository, follow its instructions to download and install, download the `yolov5s-face.pt` model, and use `export.py` to produce the `onnx` file.
+
+* Download the yolov5face model file
+  ```
+  Link: https://pan.baidu.com/s/1fyzLxZYx7Ja1_PCIWRhxbw  Extraction code: eq0q
+  https://drive.google.com/file/d/1zxaHeLDyID9YU4-hqK7KNepXIwbTkRIO/view?usp=sharing
+  ```
+
+* Export the onnx file
+  ```bash
+  PYTHONPATH=. python export.py --weights weights/yolov5s-face.pt --img_size 640 640 --batch_size 1
+  ```
+* Simplify the onnx model (optional)
+  ```bash
+  onnxsim yolov5s-face.onnx yolov5s-face.onnx
+  ```
+* Move the onnx file into the directory of the executable
+  ```bash
+  cp PATH/TO/yolov5s-face.onnx PATH/TO/model_zoo/vision/yolov5face/cpp/build
+  ```
+
+
+
+## Prepare a test image
+Prepare a test image containing faces, name it test.jpg, and copy it to the directory of the executable.
+
+## Run
+```bash
+./yolov5face_demo
+```
+
+After it finishes, the visualized result is saved locally as `vis_result.jpg`, and the detection boxes are printed to the terminal as follows
+```
+FaceDetectionResult: [xmin, ymin, xmax, ymax, score, (x, y) x 5]
+749.575256,375.122162, 775.008850, 407.858215, 0.851824, (756.933838,388.423157), (767.810974,387.932922), (762.617065,394.212341), (758.053101,399.073639), (767.370300,398.769470)
+897.833862,380.372864, 924.725281, 409.566803, 0.847505, (903.757202,390.221741), (914.575867,389.495911), (908.998901,395.983307), (905.803223,400.871429), (914.674438,400.268066)
+281.558197,367.739349, 305.474701, 397.860535, 0.840915, (287.018768,379.771088), (297.285004,378.755280), (292.057831,385.207367), (289.110962,390.010437), (297.535339,389.412048)
+132.922104,368.507263, 159.098541, 402.777283, 0.840232, (140.632492,382.361633), (151.900864,380.966156), (146.869186,388.505066), (141.930420,393.724670), (151.734604,392.808197)
+699.379700,306.743256, 723.219421, 336.533295, 0.840228, (705.688843,319.133301), (715.784668,318.449524), (711.107300,324.416016), (707.236633,328.671936), (716.088623,328.151794)
+# ...
+```
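Before wiring the exported model into the C++ demo, it can help to sanity-check the ONNX file from Python. A hedged sketch using onnxruntime, which is not part of this patch:

```python
import onnxruntime as ort  # pip install onnxruntime

sess = ort.InferenceSession("yolov5s-face.onnx",
                            providers=["CPUExecutionProvider"])
inp = sess.get_inputs()[0]
# The export command above uses --img_size 640 640 --batch_size 1,
# so something like [1, 3, 640, 640] is expected here.
print(inp.name, inp.shape)
```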
diff --git a/model_zoo/vision/yolov5face/cpp/yolov5face.cc b/model_zoo/vision/yolov5face/cpp/yolov5face.cc
new file mode 100644
index 0000000000..baa0bb7c0f
--- /dev/null
+++ b/model_zoo/vision/yolov5face/cpp/yolov5face.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "fastdeploy/vision.h"
+
+int main() {
+  namespace vis = fastdeploy::vision;
+  auto model = vis::deepcam::YOLOv5Face("yolov5s-face.onnx");
+  if (!model.Initialized()) {
+    std::cerr << "Init Failed." << std::endl;
+    return -1;
+  }
+  cv::Mat im = cv::imread("test.jpg");
+  cv::Mat vis_im = im.clone();
+
+  vis::FaceDetectionResult res;
+  if (!model.Predict(&im, &res, 0.1f, 0.3f)) {
+    std::cerr << "Prediction Failed." << std::endl;
+    return -1;
+  }
+
+  // Print the detection results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  vis::Visualize::VisFaceDetection(&vis_im, res, 2, 0.3f);
+  cv::imwrite("vis_result.jpg", vis_im);
+  return 0;
+}
diff --git a/model_zoo/vision/yolov5face/yolov5face.py b/model_zoo/vision/yolov5face/yolov5face.py
new file mode 100644
index 0000000000..ff7ab1b770
--- /dev/null
+++ b/model_zoo/vision/yolov5face/yolov5face.py
@@ -0,0 +1,17 @@
+import fastdeploy as fd
+import cv2
+
+# Load the model
+model = fd.vision.deepcam.YOLOv5Face("yolov5s-face.onnx")
+
+# Predict on an image
+im = cv2.imread("test.jpg")
+result = model.predict(im, conf_threshold=0.1, nms_iou_threshold=0.3)
+
+# Visualize the results
+fd.vision.visualize.vis_face_detection(im, result)
+cv2.imwrite("vis_result.jpg", im)
+
+# Print the prediction results
+print(result)
+print(model.runtime_option)
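As a follow-on to the demo script, the boxes in a FaceDetectionResult are `[xmin, ymin, xmax, ymax]` floats, so cropping the detected faces takes only a few lines — a hedged sketch building on the `im` and `result` produced by yolov5face.py above:

```python
import cv2

h, w = im.shape[:2]
for k, (box, score) in enumerate(zip(result.boxes, result.scores)):
    if score < 0.5:  # keep only confident faces; threshold is illustrative
        continue
    x1, y1, x2, y2 = (int(v) for v in box)
    x1, y1 = max(x1, 0), max(y1, 0)  # clamp boxes to the image bounds
    x2, y2 = min(x2, w), min(y2, h)
    cv2.imwrite("face_%d.jpg" % k, im[y1:y2, x1:x2])
```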
From 0d3e929f885cae4f2947cf2a0cf7b1cf9a493313 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Mon, 25 Jul 2022 07:46:58 +0000
Subject: [PATCH 18/19] fixed examples/vision typos

---
 .gitignore                            | 2 ++
 examples/vision/deepcam_yolov5face.cc | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 51f2f2ed80..967c01a0d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,5 @@ fastdeploy.egg-info
 fastdeploy/version.py
 fastdeploy/LICENSE*
 fastdeploy/ThirdPartyNotices*
+*.so*
+fastdeploy/libs/third_libs
diff --git a/examples/vision/deepcam_yolov5face.cc b/examples/vision/deepcam_yolov5face.cc
index 8f55740d6b..c6e0083e07 100644
--- a/examples/vision/deepcam_yolov5face.cc
+++ b/examples/vision/deepcam_yolov5face.cc
@@ -17,7 +17,7 @@
 int main() {
   namespace vis = fastdeploy::vision;
-  std::string model_file = "../resources/models/yolov5s-face.onnx.onnx";
+  std::string model_file = "../resources/models/yolov5s-face.onnx";
   std::string img_path = "../resources/images/test_face_det.jpg";
   std::string vis_path =
       "../resources/outputs/deepcam_yolov5face_vis_result.jpg";

From 2c0b4cfb7a5a6d3c7a5462f1c46200e46b4aadb6 Mon Sep 17 00:00:00 2001
From: DefTruth
Date: Mon, 25 Jul 2022 11:53:16 +0000
Subject: [PATCH 19/19] fixed runtime_option print func bugs

---
 fastdeploy/__init__.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/fastdeploy/__init__.py b/fastdeploy/__init__.py
index 68006c1bed..948f988b8c 100644
--- a/fastdeploy/__init__.py
+++ b/fastdeploy/__init__.py
@@ -32,6 +32,8 @@ def RuntimeOptionStr(runtime_option):
     for attr in attrs:
         if attr.startswith("__"):
             continue
+        if hasattr(getattr(runtime_option, attr), "__call__"):
+            continue
         message += " {} : {}\t\n".format(attr, getattr(runtime_option, attr))
     message.strip("\n")
     message += ")"
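Patch 19 skips callable attributes so that bound methods no longer clutter the printed RuntimeOption. A standalone sketch of the resulting repr logic — with one extra fix worth noting: `str.strip` returns a new string, so its result must be reassigned, which the surrounding context line in the patched file still misses:

```python
def runtime_option_str(runtime_option):
    """Build a readable repr from an object's non-callable public attributes."""
    message = "RuntimeOption(\n"
    for attr in dir(runtime_option):
        if attr.startswith("__"):
            continue
        value = getattr(runtime_option, attr)
        if callable(value):  # equivalent to the hasattr(..., "__call__") check
            continue
        message += "  {} : {}\n".format(attr, value)
    message = message.strip("\n")  # note: strip() must be reassigned
    message += ")"
    return message


class _FakeOption:  # hypothetical stand-in for a real RuntimeOption
    backend = "ort"
    device = "cpu"

print(runtime_option_str(_FakeOption()))
```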