From 948070713ed7730123f84a0e140b410c3ddd1de9 Mon Sep 17 00:00:00 2001
From: neonhuang <87560459+neonhuang@users.noreply.github.com>
Date: Mon, 24 Jan 2022 15:59:49 +0800
Subject: [PATCH] [LaneSeg] Fix a bug in cpp project (#1741)

---
 contrib/LaneSeg/README.md                  |  2 +-
 contrib/LaneSeg/README_CN.md               |  2 +-
 contrib/LaneSeg/deploy/cpp/CMakeLists.txt  |  8 +---
 contrib/LaneSeg/deploy/cpp/README.md       | 46 ++++++++++++++++++++++
 contrib/LaneSeg/deploy/cpp/README_CN.md    | 44 +++++++++++++++++++++
 contrib/LaneSeg/deploy/cpp/run_seg_cpu.sh  | 35 ++++++++++++++++
 contrib/LaneSeg/deploy/cpp/run_seg_gpu.sh  | 34 ++++++++++++++++
 contrib/LaneSeg/deploy/cpp/src/test_seg.cc | 39 ++++++++++++------
 8 files changed, 191 insertions(+), 19 deletions(-)
 create mode 100644 contrib/LaneSeg/deploy/cpp/README.md
 create mode 100644 contrib/LaneSeg/deploy/cpp/README_CN.md
 create mode 100755 contrib/LaneSeg/deploy/cpp/run_seg_cpu.sh
 create mode 100755 contrib/LaneSeg/deploy/cpp/run_seg_gpu.sh

diff --git a/contrib/LaneSeg/README.md b/contrib/LaneSeg/README.md
index d8d8949c3a..17ea626d35 100644
--- a/contrib/LaneSeg/README.md
+++ b/contrib/LaneSeg/README.md
@@ -227,6 +227,6 @@ python deploy/python/infer.py --help
 ```
 
 #### Paddle Inference(C++)
-reference [Paddle Inference tutorial](../../deploy/cpp/)
+reference [Paddle Inference tutorial](./deploy/cpp/README.md)
 
 the C++ sources files of the project is in LaneSeg/deploy/cpp
diff --git a/contrib/LaneSeg/README_CN.md b/contrib/LaneSeg/README_CN.md
index b02ac9dc8a..8ce98ceac2 100644
--- a/contrib/LaneSeg/README_CN.md
+++ b/contrib/LaneSeg/README_CN.md
@@ -230,6 +230,6 @@ python deploy/python/infer.py --help
 ```
 
 #### Paddle Inference部署(C++)
-参见[Paddle Inference部署教程](../../deploy/cpp/)
+参见[Paddle Inference部署教程](./deploy/cpp/README_CN.md)
 
 本项目使用的C++源文件在LaneSeg/deploy/cpp目录下
diff --git a/contrib/LaneSeg/deploy/cpp/CMakeLists.txt b/contrib/LaneSeg/deploy/cpp/CMakeLists.txt
index f148d9d0bb..025581027b 100644
--- a/contrib/LaneSeg/deploy/cpp/CMakeLists.txt
+++ b/contrib/LaneSeg/deploy/cpp/CMakeLists.txt
@@ -1,16 +1,12 @@
 cmake_minimum_required(VERSION 3.0)
 project(cpp_inference_demo CXX C)
-option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." OFF)
+option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
 option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
 option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
 option(USE_TENSORRT "Compile demo with TensorRT." OFF)
 option(WITH_ROCM "Compile demo with rocm." OFF)
-
-set(PADDLE_LIB ${CMAKE_SOURCE_DIR}/paddle)
-set(DEMO_NAME test_seg)
-
 if(NOT WITH_STATIC_LIB)
   add_definitions("-DPADDLE_WITH_SHARED_LIB")
 else()
@@ -150,7 +146,7 @@ else()
 endif()
 
 if (NOT WIN32)
-  set(EXTERNAL_LIB "-ldl -lpthread")
+  set(EXTERNAL_LIB "-lrt -ldl -lpthread")
   set(DEPS ${DEPS}
       ${MATH_LIB} ${MKLDNN_LIB}
       glog gflags protobuf xxhash cryptopp
diff --git a/contrib/LaneSeg/deploy/cpp/README.md b/contrib/LaneSeg/deploy/cpp/README.md
new file mode 100644
index 0000000000..fc78a9c92a
--- /dev/null
+++ b/contrib/LaneSeg/deploy/cpp/README.md
@@ -0,0 +1,46 @@
+English | [简体中文](README_CN.md)
+
+## Deploy the PaddleSeg model using Paddle Inference C++
+
+### 1. Installation
+
+- Paddle Inference C++
+
+- OpenCV
+
+- Yaml
+
+For more installation information, please refer to the [tutorial](../../../../docs/deployment/inference/cpp_inference.md).
+
+### 2. Model and test image
+
+- Download the model
+
+Go to the `LaneSeg/` directory and run:
+```shell
+mkdir output # if not exists
+wget -P output https://paddleseg.bj.bcebos.com/lane_seg/bisenet/model.pdparams
+```
+- Export the model
+
+```shell
+python export.py \
+    --config configs/bisenetV2_tusimple_640x368_300k.yml \
+    --model_path output/model.pdparams \
+    --save_dir output/export
+```
+
+- The test image is `data/test_images/3.jpg`
+
+### 3. Compile and run
+
+Go to the `LaneSeg/deploy/cpp` directory.
+
+Run `sh run_seg_cpu.sh` to compile the demo and then run prediction on an x86 CPU.
+
+Run `sh run_seg_gpu.sh` to compile the demo and then run prediction on an Nvidia GPU.
+
+The results are saved as the images `out_img_seg.jpg` and `out_image_points.jpg`.
+
+- Note: the model and image paths can be changed in `run_seg_cpu.sh` and `run_seg_gpu.sh` as needed.
diff --git a/contrib/LaneSeg/deploy/cpp/README_CN.md b/contrib/LaneSeg/deploy/cpp/README_CN.md
new file mode 100644
index 0000000000..b92caad4e7
--- /dev/null
+++ b/contrib/LaneSeg/deploy/cpp/README_CN.md
@@ -0,0 +1,44 @@
+简体中文 | [English](README.md)
+
+## 使用Paddle Inference C++部署PaddleSeg模型
+
+### 1、安装
+
+- Paddle Inference C++
+
+- OpenCV
+
+- Yaml
+
+更多的安装信息,请参考[教程](../../../../docs/deployment/inference/cpp_inference_cn.md)。
+
+### 2、模型和图片
+
+- 下载模型
+
+进入`LaneSeg/`目录下,执行如下命令:
+```shell
+mkdir output # if not exists
+wget -P output https://paddleseg.bj.bcebos.com/lane_seg/bisenet/model.pdparams
+```
+- 导出模型
+```shell
+python export.py \
+    --config configs/bisenetV2_tusimple_640x368_300k.yml \
+    --model_path output/model.pdparams \
+    --save_dir output/export
+```
+
+- 图片使用 `data/test_images/3.jpg`
+
+### 3、编译、执行
+
+进入目录`LaneSeg/deploy/cpp`
+
+执行`sh run_seg_cpu.sh`,会进行编译,然后在X86 CPU上执行预测。
+
+执行`sh run_seg_gpu.sh`,会进行编译,然后在Nvidia GPU上执行预测。
+
+结果会保存在当前目录的`out_img_seg.jpg`和`out_image_points.jpg`图片。
+
+- 注意:对于模型和图片的路径,可以按需要对文件`run_seg_cpu.sh`和`run_seg_gpu.sh`进行修改。
diff --git a/contrib/LaneSeg/deploy/cpp/run_seg_cpu.sh b/contrib/LaneSeg/deploy/cpp/run_seg_cpu.sh
new file mode 100755
index 0000000000..21cd95814b
--- /dev/null
+++ b/contrib/LaneSeg/deploy/cpp/run_seg_cpu.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+set +x
+set -e
+
+WITH_MKL=ON
+WITH_GPU=OFF
+USE_TENSORRT=OFF
+DEMO_NAME=test_seg
+
+work_path=$(dirname $(readlink -f $0))
+LIB_DIR="${work_path}/paddle_inference"
+
+# compile
+mkdir -p build
+cd build
+rm -rf *
+
+cmake .. \
+  -DDEMO_NAME=${DEMO_NAME} \
+  -DWITH_MKL=${WITH_MKL} \
+  -DWITH_GPU=${WITH_GPU} \
+  -DUSE_TENSORRT=${USE_TENSORRT} \
+  -DWITH_STATIC_LIB=OFF \
+  -DPADDLE_LIB=${LIB_DIR}
+
+make -j
+
+# run
+cd ..
+
+# change model_dir and img_path according to your needs
+./build/test_seg \
+  --model_dir=../../output/export/ \
+  --img_path=../../data/test_images/3.jpg \
+  --use_cpu=true \
+  --use_mkldnn=true
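Note: `run_seg_cpu.sh` hard-codes the model and image paths. Because every input is an ordinary gflags flag, the built binary can also be invoked directly; a minimal sketch (the paths below are placeholders for your own exported model, whose directory must contain `deploy.yaml`):

```shell
# Illustrative direct invocation of the built demo; substitute real paths.
# Per the TODO in test_seg.cc below, --use_mkldnn currently only sets the
# CPU math-library thread count, since EnableMKLDNN() is commented out.
./build/test_seg \
    --model_dir=/path/to/export_model \
    --img_path=/path/to/image.jpg \
    --use_cpu=true \
    --use_mkldnn=false
```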
diff --git a/contrib/LaneSeg/deploy/cpp/run_seg_gpu.sh b/contrib/LaneSeg/deploy/cpp/run_seg_gpu.sh
new file mode 100755
index 0000000000..9570e00854
--- /dev/null
+++ b/contrib/LaneSeg/deploy/cpp/run_seg_gpu.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+set +x
+set -e
+
+WITH_MKL=ON
+WITH_GPU=ON
+USE_TENSORRT=OFF
+DEMO_NAME=test_seg
+
+work_path=$(dirname $(readlink -f $0))
+LIB_DIR="${work_path}/paddle_inference"
+
+# compile
+mkdir -p build
+cd build
+rm -rf *
+
+cmake .. \
+  -DDEMO_NAME=${DEMO_NAME} \
+  -DWITH_MKL=${WITH_MKL} \
+  -DWITH_GPU=${WITH_GPU} \
+  -DUSE_TENSORRT=${USE_TENSORRT} \
+  -DWITH_STATIC_LIB=OFF \
+  -DPADDLE_LIB=${LIB_DIR}
+
+make -j
+
+# run
+cd ..
+
+# change model_dir and img_path according to your needs
+./build/test_seg \
+  --model_dir=../../output/export/ \
+  --img_path=../../data/test_images/3.jpg \
+  --use_cpu=false
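Note: `run_seg_gpu.sh` keeps `USE_TENSORRT=OFF` and never passes the new `--use_trt` flag, so the `EnableTensorRtEngine` branch added to `test_seg.cc` below stays dormant by default. A sketch of how it could be exercised, assuming a `paddle_inference` package that was built with TensorRT support:

```shell
# In run_seg_gpu.sh: compile the demo against TensorRT
USE_TENSORRT=ON

# Then run in GPU mode with the new flag enabled
./build/test_seg \
    --model_dir=../../output/export/ \
    --img_path=../../data/test_images/3.jpg \
    --use_cpu=false \
    --use_trt=true
```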
diff --git a/contrib/LaneSeg/deploy/cpp/src/test_seg.cc b/contrib/LaneSeg/deploy/cpp/src/test_seg.cc
index 1613ba030c..3fdb9323bc 100644
--- a/contrib/LaneSeg/deploy/cpp/src/test_seg.cc
+++ b/contrib/LaneSeg/deploy/cpp/src/test_seg.cc
@@ -35,7 +35,8 @@ DEFINE_string(model_dir, "",
     "Directory of the inference model. "
     "It constains deploy.yaml and infer models");
 DEFINE_string(img_path, "", "Path of the test image.");
 DEFINE_bool(use_cpu, false, "Wether use CPU. Default: use GPU.");
-
+DEFINE_bool(use_trt, false, "Whether to enable TensorRT when using GPU. Default: false.");
+DEFINE_bool(use_mkldnn, false, "Whether to enable MKLDNN when using CPU. Default: false.");
 DEFINE_string(save_dir, "", "Directory of the output image.");
 
 typedef struct YamlConfig {
@@ -77,6 +78,22 @@ std::shared_ptr<paddle_infer::Predictor> create_predictor(
                               model_dir + "/" + yaml_config.params_file);
   infer_config.EnableMemoryOptim();
 
+  if (FLAGS_use_cpu) {
+    LOG(INFO) << "Use CPU";
+    if (FLAGS_use_mkldnn) {
+      // TODO(jc): fix the bug
+      // infer_config.EnableMKLDNN();
+      infer_config.SetCpuMathLibraryNumThreads(5);
+    }
+  } else {
+    LOG(INFO) << "Use GPU";
+    infer_config.EnableUseGpu(100, 0);
+    if (FLAGS_use_trt) {
+      infer_config.EnableTensorRtEngine(1 << 20, 1, 3,
+          paddle_infer::PrecisionType::kFloat32, false, false);
+    }
+  }
+
   auto predictor = paddle_infer::CreatePredictor(infer_config);
   return predictor;
 }
@@ -100,12 +117,13 @@ void process_image(const YamlConfig& yaml_config, cv::Mat& img) {
   }
 }
 
+
 int main(int argc, char *argv[]) {
   google::ParseCommandLineFlags(&argc, &argv, true);
   if (FLAGS_model_dir == "") {
-    LOG(FATAL) << "The model_dir should not be empty.";
+    LOG(FATAL) << "The model_dir should not be empty.";
   }
-
+
   // Load yaml
   std::string yaml_path = FLAGS_model_dir + "/deploy.yaml";
   YamlConfig yaml_config = load_yaml(yaml_path);
@@ -144,13 +162,13 @@ int main(int argc, char *argv[]) {
   auto output_t = predictor->GetOutputHandle(output_names[0]);
   std::vector<int> output_shape = output_t->shape();
   int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
-                                std::multiplies<int>());
+                                std::multiplies<int>());
   std::vector<float> out_data(out_num);
   output_t->CopyToCpu(out_data.data());
-
+
   cv::Size size = cv::Size(cols, rows);
   int skip_index = size.height * size.width;
-
+
   const int num_classes = 7;
   LanePostProcess* laneNet = new LanePostProcess(input_height, input_width, rows, cols, num_classes);
   auto lane_coords = laneNet->lane_process(out_data, cut_height);
@@ -170,10 +188,9 @@ int main(int argc, char *argv[]) {
     }
     lane_id++;
   }
-
-  cv::imshow("image lane", image_ori);
-  cv::waitKey();
-
+
+  cv::imwrite("out_image_points.jpg", image_ori);
+
   cv::Mat seg_planes[num_classes];
   for(int i = 0; i < num_classes; i++) {
     seg_planes[i].create(size, CV_32FC(1));
   }
@@ -200,7 +217,7 @@ int main(int argc, char *argv[]) {
   // Get pseudo image
   cv::Mat out_eq_img;
   cv::equalizeHist(binary_image, out_eq_img);
-  cv::imwrite("out_img.jpg", binary_image*255);
+  cv::imwrite("out_img_seg.jpg", binary_image*255);
 
   LOG(INFO) << "Finish";
 }
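Taken together, the files added by this patch give the end-to-end flow described in the new READMEs; a condensed walkthrough (assuming the Paddle Inference C++ library has already been unpacked to `deploy/cpp/paddle_inference`, which is where `LIB_DIR` in both scripts points):

```shell
# From the PaddleSeg repository root
cd contrib/LaneSeg

# Download the trained weights and export an inference model
mkdir -p output
wget -P output https://paddleseg.bj.bcebos.com/lane_seg/bisenet/model.pdparams
python export.py \
    --config configs/bisenetV2_tusimple_640x368_300k.yml \
    --model_path output/model.pdparams \
    --save_dir output/export

# Compile and run the CPU demo; the results are written to deploy/cpp as
# out_img_seg.jpg (segmentation mask) and out_image_points.jpg (lane points)
cd deploy/cpp
sh run_seg_cpu.sh
```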