-
Notifications
You must be signed in to change notification settings - Fork 10
/
model_infer.cpp
628 lines (566 loc) · 20.9 KB
/
model_infer.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <string>
#include <vector>
#include "model_deploy/common/include/paddle_deploy.h"
#include "model_deploy/common/include/model_infer.h"
#include <windows.h> // GetCurrentThreadId()
/*
 * Model initialization / registration interface.
 *
 * model_type: type of model to initialize: det, seg, clas, paddlex
 *
 * model_filename: path to the model file
 *
 * params_filename: path to the parameters file
 *
 * cfg_file: path to the configuration file
 *
 * use_gpu: whether to run on the GPU
 *
 * gpu_id: index of the GPU to use
 *
 * paddlex_model_type: when model_type is "paddlex", receives the concrete paddlex model type: det, seg, clas
 */
extern "C" __declspec(dllexport) PaddleDeploy::Model * InitModel(const char* model_type, const char* model_filename, const char* params_filename, const char* cfg_file, bool use_gpu, int gpu_id, char* paddlex_model_type)
{
    // Create the model object for the requested type (det/seg/clas/paddlex).
    PaddleDeploy::Model* model = PaddleDeploy::CreateModel(model_type);
    if (model == NULL)
    {
        // Bug fix: the original dereferenced a possibly-null pointer below.
        LOGC("ERR", "create model failed, unknown model_type=%s", model_type);
        return NULL;
    }
    // Parse the deploy configuration (preprocessing pipeline etc.).
    model->Init(cfg_file);
    // Initialize the Paddle inference engine.
    PaddleDeploy::PaddleEngineConfig engine_config;
    engine_config.model_filename = model_filename;
    engine_config.params_filename = params_filename;
    engine_config.use_gpu = use_gpu;
    engine_config.gpu_id = gpu_id;
    bool init = model->PaddleEngineInit(engine_config);
    if (!init)
    {
        LOGC("ERR", "init model failed");
    }
    else
    {
        LOGC("INFO", "init model successfully: use_gpu=%d, gpu_id=%d, model path=%s", use_gpu, gpu_id, model_filename);
    }
    // For a paddlex meta-model, report the concrete sub-type (det/seg/clas)
    // back to the caller. Guard against a NULL output buffer (bug fix: the
    // wrapper used to pass NULL here, which would crash strcpy).
    if (strcmp(model_type, "paddlex") == 0 && paddlex_model_type != NULL)
    {
        const std::string sub_type = model->yaml_config_["model_type"].as<std::string>();
        if (sub_type == "detector")
        {
            strcpy(paddlex_model_type, "det");
        }
        else if (sub_type == "segmenter")
        {
            strcpy(paddlex_model_type, "seg");
        }
        else if (sub_type == "classifier")
        {
            strcpy(paddlex_model_type, "clas");
        }
    }
    return model;
}
// Model initialization with TensorRT acceleration.
// [suliang] 2021-12-15 Added 5 parameters: min_input_shape, max_input_shape, optim_input_shape describe the allowed input size range; precision selects compute precision (0=fp32, 1=fp16, 2=int8); min_subgraph_size is the minimum subgraph size to optimize.
extern "C" __declspec(dllexport) PaddleDeploy::Model * InitModel_TRT(const char* model_type, const char* model_filename, const char* params_filename, const char* cfg_file, bool use_gpu, int gpu_id, char* paddlex_model_type,
    std::vector<int>min_input_shape, std::vector<int>max_input_shape, std::vector<int>optim_input_shape, int precision, int min_subgraph_size)
{
    // Create the model object for the requested type.
    PaddleDeploy::Model* model = PaddleDeploy::CreateModel(model_type);
    if (model == NULL)
    {
        LOGC("ERR", "create model failed, unknown model_type=%s", model_type);
        return NULL;
    }
    // Parse the deploy configuration.
    model->Init(cfg_file);
    // Inference engine configuration.
    PaddleDeploy::PaddleEngineConfig engine_config;
    engine_config.model_filename = model_filename;
    engine_config.params_filename = params_filename;
    engine_config.use_gpu = use_gpu;
    engine_config.gpu_id = gpu_id;
    // TensorRT requires the GPU, so force it on regardless of use_gpu.
    engine_config.use_gpu = true;
    engine_config.use_trt = true;
    // Precision: 0 = fp32 (default), 1 = fp16, 2 = int8.
    engine_config.precision = precision;
    // Minimum subgraph size to hand to TRT; larger values mean fewer
    // subgraphs get optimized (40+ reportedly runs but optimizes little).
    engine_config.min_subgraph_size = min_subgraph_size;
    engine_config.max_workspace_size = 1 << 30;
    // min/max/optim input shapes must be tuned to the actual model input.
    // The input tensor name differs per model type (inspect with netron):
    //   clas -> "inputs", det -> "image", seg -> "x".
    // Additional dynamic-shape nodes may need to be registered manually.
    if (strcmp("clas", model_type) == 0) {
        // Adjust shape according to the actual model
        engine_config.min_input_shape["inputs"] = min_input_shape;
        engine_config.max_input_shape["inputs"] = max_input_shape;
        engine_config.optim_input_shape["inputs"] = optim_input_shape;
    }
    else if (strcmp("det", model_type) == 0) {
        // Adjust shape according to the actual model
        engine_config.min_input_shape["image"] = min_input_shape;
        engine_config.max_input_shape["image"] = max_input_shape;
        engine_config.optim_input_shape["image"] = optim_input_shape;
    }
    else if (strcmp("seg", model_type) == 0) {
        // Additional nodes need to be added, pay attention to the output prompt
        engine_config.min_input_shape["x"] = min_input_shape;
        engine_config.max_input_shape["x"] = max_input_shape;
        engine_config.optim_input_shape["x"] = optim_input_shape;
    }
    bool init = model->PaddleEngineInit(engine_config);
    if (!init)
    {
        // Bug fix: failure was previously logged at INFO level.
        LOGC("ERR", "init model failed");
    }
    // For a paddlex meta-model, report the concrete sub-type back to the
    // caller; guard against a NULL output buffer.
    if (strcmp(model_type, "paddlex") == 0 && paddlex_model_type != NULL)
    {
        const std::string sub_type = model->yaml_config_["model_type"].as<std::string>();
        if (sub_type == "detector")
        {
            strcpy(paddlex_model_type, "det");
        }
        else if (sub_type == "segmenter")
        {
            strcpy(paddlex_model_type, "seg");
        }
        else if (sub_type == "classifier")
        {
            strcpy(paddlex_model_type, "clas");
        }
    }
    return model;
}
/*
* 检测推理接口
*
* img: input for predicting.
*
* nWidth: width of img.
*
* nHeight: height of img.
*
* nChannel: channel of img.
*
* output: prediction results; each box is (category_id, score, coordinate).
*
* nBoxesNum: number of boxes
*
* LabelList: label list of result
*/
extern "C" __declspec(dllexport) void Det_ModelPredict(PaddleDeploy::Model * model, const unsigned char* img, int nWidth, int nHeight, int nChannel, float* output, int* nBoxesNum, char* LabelList)
{
    // Prepare input: only 3-channel (BGR) images are supported.
    std::vector<cv::Mat> imgs;
    int nType = 0;
    if (nChannel == 3)
    {
        nType = CV_8UC3;
    }
    else
    {
        std::cout << "Only support 3 channel image." << std::endl;
        return;
    }
    cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
    memcpy(input.data, img, nHeight * nWidth * nChannel * sizeof(uchar));
    imgs.push_back(std::move(input));
    // Run inference on the single-image batch.
    std::vector<PaddleDeploy::Result> results;
    model->Predict(imgs, &results, 1);
    // results.size() is the batch size; boxes.size() is the per-image box count.
    const auto& boxes = results[0].det_result->boxes;
    nBoxesNum[0] = boxes.size();
    std::string label = "";
    for (size_t i = 0; i < boxes.size(); i++)
    {
        // Space-separated category names, one entry per box.
        label = label + boxes[i].category + " ";
        output[i * 6 + 0] = boxes[i].category_id;   // class id
        output[i * 6 + 1] = boxes[i].score;         // confidence score
        // Box corners: x1, y1 (top-left), x2, y2 (bottom-right).
        output[i * 6 + 2] = boxes[i].coordinate[0];
        output[i * 6 + 3] = boxes[i].coordinate[1];
        output[i * 6 + 4] = boxes[i].coordinate[2];
        output[i * 6 + 5] = boxes[i].coordinate[3];
    }
    // Bug fix: copy size+1 so the terminating '\0' reaches the caller;
    // the original strlen-sized copy left LabelList unterminated.
    memcpy(LabelList, label.c_str(), label.size() + 1);
}
/*
* 分割推理接口
*
* img: input for predicting.
*
* nWidth: width of img.
*
* nHeight: height of img.
*
* nChannel: channel of img.
*
* output: prediction result, i.e. the label_map
*/
extern "C" __declspec(dllexport) void Seg_ModelPredict(PaddleDeploy::Model * model, const unsigned char* img, int nWidth, int nHeight, int nChannel, unsigned char* output)
{
    // Prepare input: only 3-channel (BGR) images are supported.
    std::vector<cv::Mat> imgs;
    int nType = 0;
    if (nChannel == 3)
    {
        nType = CV_8UC3;
    }
    else
    {
        // Bug fix: the original format string was missing the %d for nChannel.
        LOGC("ERR", "Only support 3 channel images, but got channels %d", nChannel);
        return;
    }
    cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
    memcpy(input.data, img, nHeight * nWidth * nChannel * sizeof(uchar));
    imgs.push_back(std::move(input));
    // Run inference on the single-image batch.
    std::vector<PaddleDeploy::Result> results;
    model->Predict(imgs, &results, 1);
    // Per-pixel class ids for the first (only) image. Const reference avoids
    // copying the whole label map (the original copied the vector).
    const std::vector<uint8_t>& result_map = results[0].seg_result->label_map.data;
    // Copy the label map into the caller's buffer; guard against an empty
    // result (the original &result_map[0] is UB on an empty vector).
    if (!result_map.empty())
    {
        memcpy(output, result_map.data(), result_map.size() * sizeof(uchar));
    }
}
/*
* 分割推理接口batch predict
*
* img: input for predicting.
*
* nWidth: width of img.
*
* nHeight: height of img.
*
* nChannel: channel of img.
*
* output: prediction results, one label_map per input image
*/
extern "C" __declspec(dllexport) void Seg_ModelBatchPredict(PaddleDeploy::Model * model, const std::vector<unsigned char*> imgs, int nWidth, int nHeight, int nChannel, std::vector<unsigned char*> output)
{
    std::vector<PaddleDeploy::Result> results;
    if (imgs.size() != output.size()) {
        LOGC("ERR", "image batch size(%d) not match with results size(%d)", (int)imgs.size(), (int)output.size());
        // Bug fix: the original only logged and fell through, which could
        // write past the end of the shorter of imgs/output below.
        return;
    }
    // Wrap every input buffer as a 3-channel cv::Mat.
    int im_vec_size = imgs.size();
    std::vector<cv::Mat> im_vec;
    int nType = 0;
    if (nChannel == 3)
    {
        nType = CV_8UC3;
    }
    else
    {
        // Bug fix: format string was missing the %d for nChannel.
        LOGC("ERR", "Only support 3 channel images, but got channels %d", nChannel);
        return;
    }
    for (int i = 0; i < im_vec_size; i++) {
        cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
        memcpy(input.data, imgs[i], nHeight * nWidth * nChannel * sizeof(uchar));
        im_vec.emplace_back(std::move(input));
    }
    if (!model->Predict(im_vec, &results, 1)) {
        LOGC("ERR", "predict batch images failed");
    }
    // Copy each image's per-pixel label map to the matching output buffer.
    for (int i = 0; i < im_vec_size; i++) {
        // Const reference avoids copying the whole map per image.
        const std::vector<uint8_t>& result_map = results[i].seg_result->label_map.data;
        if (!result_map.empty()) {
            memcpy(output[i], result_map.data(), result_map.size() * sizeof(uchar));
        }
    }
}
/*
* 识别推理接口
*
* img: input for predicting.
*
* nWidth: width of img.
*
* nHeight: height of img.
*
* nChannel: channel of img.
*
* score: prediction result: confidence score
*
* category: prediction result: category name string
*
* category_id: prediction result: category id
*/
extern "C" __declspec(dllexport) void Cls_ModelPredict(PaddleDeploy::Model * model, const unsigned char* img, int nWidth, int nHeight, int nChannel, float* score, char* category, int* category_id)
{
    // Prepare input: only 3-channel (BGR) images are supported.
    std::vector<cv::Mat> imgs;
    int nType = 0;
    if (nChannel == 3)
    {
        nType = CV_8UC3;
    }
    else
    {
        std::cout << "Only support 3 channel image." << std::endl;
        return;
    }
    cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
    memcpy(input.data, img, nHeight * nWidth * nChannel * sizeof(uchar));
    // Bug fix: removed leftover debug cv::imwrite("D:\\1.png", input), which
    // wrote a file to disk on every single prediction call.
    imgs.push_back(std::move(input));
    // Run inference on the single-image batch.
    std::vector<PaddleDeploy::Result> results;
    model->Predict(imgs, &results, 1);
    *category_id = results[0].clas_result->category_id;
    // Copy the category name (size+1 to include the terminating '\0'; the
    // original strlen-sized copy left the buffer unterminated).
    const std::string& cat = results[0].clas_result->category;
    memcpy(category, cat.c_str(), cat.size() + 1);
    // Return the confidence score.
    *score = results[0].clas_result->score;
}
/*
* MaskRCNN推理接口
*
* img: input for predicting.
*
* nWidth: width of img.
*
* nHeight: height of img.
*
* nChannel: channel of img.
*
* box_output: prediction results: label + score + bbox per box
*
* mask_output: prediction result: merged label_map
*
* nBoxesNum: prediction result: number of boxes
*
* LabelList: prediction result: label list
*/
extern "C" __declspec(dllexport) void Mask_ModelPredict(PaddleDeploy::Model * model, const unsigned char* img, int nWidth, int nHeight, int nChannel, float* box_output, unsigned char* mask_output, int* nBoxesNum, char* LabelList)
{
    // Prepare input: only 3-channel (BGR) images are supported.
    std::vector<cv::Mat> imgs;
    int nType = 0;
    if (nChannel == 3)
    {
        nType = CV_8UC3;
    }
    else
    {
        std::cout << "Only support 3 channel image." << std::endl;
        return;
    }
    cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
    memcpy(input.data, img, nHeight * nWidth * nChannel * sizeof(uchar));
    imgs.push_back(std::move(input));
    // Run inference. NOTE(review): the original comments report errors when a
    // single image was inferred repeatedly -- behavior preserved, not fixed here.
    std::vector<PaddleDeploy::Result> results;
    model->Predict(imgs, &results, 1);
    const auto& boxes = results[0].det_result->boxes;
    nBoxesNum[0] = boxes.size();  // number of predicted boxes for this image
    std::string label = "";
    for (size_t i = 0; i < boxes.size(); i++)
    {
        // Space-separated category names, one per box.
        label = label + boxes[i].category + " ";
        box_output[i * 6 + 0] = boxes[i].category_id;   // class id
        box_output[i * 6 + 1] = boxes[i].score;         // confidence score
        // Box corners: x1, y1 (top-left), x2, y2 (bottom-right).
        box_output[i * 6 + 2] = boxes[i].coordinate[0];
        box_output[i * 6 + 3] = boxes[i].coordinate[1];
        box_output[i * 6 + 4] = boxes[i].coordinate[2];
        box_output[i * 6 + 5] = boxes[i].coordinate[3];
        // Merge per-instance masks into one map: the first instance to claim
        // a pixel (non-zero) wins.
        const auto& mask = boxes[i].mask.data;
        for (size_t j = 0; j < mask.size(); j++)
        {
            if (mask_output[j] == 0)
            {
                mask_output[j] = mask[j];
            }
        }
    }
    // Bug fix: copy size+1 so the terminating '\0' reaches the caller.
    memcpy(LabelList, label.c_str(), label.size() + 1);
}
/*
 * Model destruction / deregistration interface.
 */
extern "C" __declspec(dllexport) void DestructModel(PaddleDeploy::Model * model)
{
    // delete on a null pointer is a safe no-op, so no guard is needed.
    // (The original's "delete model failed" branch was unreachable: the local
    // pointer was always NULL by the time it was tested.)
    delete model;
    LOGC("INFO", "destruct model success");
}
// Wrapper API: initialize the model (optionally with TensorRT) and the
// thread pool used to serialize inference calls.
void ModelWrapper::InitModelEnter(const char* model_type, const char* model_dir, int gpu_id, bool use_trt,
    const std::vector<int>min_input_shape, const std::vector<int>max_input_shape, const std::vector<int>optim_input_shape, int precision, int min_subgraph_size)
{
    // Create the thread pool: num_threads workers, each pinned to a pool slot.
    pool = new ThreadPool(num_threads);
    pool->init();
    // Model files are expected to follow the PaddleX export layout.
    std::string model_filename = std::string(model_dir) + "\\model.pdmodel";
    std::string params_filename = std::string(model_dir) + "\\model.pdiparams";
    std::string cfg_file = std::string(model_dir) + "\\deploy.yaml";
    bool use_gpu = true;
    // Bug fix: pass a real buffer instead of NULL -- InitModel/InitModel_TRT
    // strcpy the concrete sub-type ("det"/"seg"/"clas") into this pointer
    // when model_type == "paddlex", which crashed with the old NULL argument.
    char paddle_model_type[8] = { 0 };
    if (!use_trt) {
        _model = InitModel(model_type,
            model_filename.c_str(),   // *.pdmodel
            params_filename.c_str(),  // *.pdiparams
            cfg_file.c_str(),         // *.yaml
            use_gpu,
            gpu_id,
            paddle_model_type);
    }
    else
    {
        _model = InitModel_TRT(model_type,
            model_filename.c_str(),   // *.pdmodel
            params_filename.c_str(),  // *.pdiparams
            cfg_file.c_str(),         // *.yaml
            use_gpu,
            gpu_id,
            paddle_model_type,
            min_input_shape, max_input_shape, optim_input_shape, precision, min_subgraph_size);
    }
}
// 新增二次封装:单图推理
void ModelWrapper::SegPredictEnter(unsigned char* imageData, int width, int height, int channels, unsigned char* result_map)
{
cv::Mat src;
if (channels == 1) {
src = cv::Mat(height, width, CV_8UC1, imageData);
cv::cvtColor(src, src, cv::COLOR_GRAY2BGR);
}
else
{
src = cv::Mat(height, width, CV_8UC3, imageData);
}
int predChannels = src.channels();
UCHAR* _imageData = src.data;
auto future1 = pool->submit(Seg_ModelPredict, _model, _imageData, width, height, predChannels, result_map);
future1.get();
}
// 检测模型
void ModelWrapper::DetPredictEnter(unsigned char* imageData, int width, int height, int channels, float* output, int* nBoxesNum, char* LabelList)
{
cv::Mat src;
if (channels == 1) {
src = cv::Mat(height, width, CV_8UC1, imageData);
cv::cvtColor(src, src, cv::COLOR_GRAY2BGR);
}
else
{
src = cv::Mat(height, width, CV_8UC3, imageData);
}
int predChannels = src.channels();
UCHAR* _imageData = src.data;
auto future1 = pool->submit(Det_ModelPredict, _model, _imageData, width, height, predChannels, output, nBoxesNum, LabelList);
future1.get();
}
// 分类模型
void ModelWrapper::ClsPredictEnter(unsigned char* imageData, int width, int height, int channels, float* score, char* category, int* category_id)
{
cv::Mat src;
if (channels == 1) {
src = cv::Mat(height, width, CV_8UC1, imageData);
cv::cvtColor(src, src, cv::COLOR_GRAY2BGR);
}
else
{
src = cv::Mat(height, width, CV_8UC3, imageData);
}
int predChannels = src.channels();
UCHAR* _imageData = src.data;
auto future1 = pool->submit(Cls_ModelPredict, _model, _imageData, width, height, predChannels, score, category, category_id);
future1.get();
}
// Mask模型
void ModelWrapper::MaskPredictEnter(unsigned char* imageData, int width, int height, int channels, float* box_output, unsigned char* mask_output, int* nBoxesNum, char* LabelList)
{
cv::Mat src;
if (channels == 1) {
src = cv::Mat(height, width, CV_8UC1, imageData);
cv::cvtColor(src, src, cv::COLOR_GRAY2BGR);
}
else
{
src = cv::Mat(height, width, CV_8UC3, imageData);
}
int predChannels = src.channels();
UCHAR* _imageData = src.data;
auto future1 = pool->submit(Mask_ModelPredict, _model, _imageData, width, height, predChannels, box_output, mask_output, nBoxesNum, LabelList);
future1.get();
}
// Wrapper API: release the thread pool and the underlying model.
void ModelWrapper::DestructModelEnter()
{
    // Bug fix: the original called pool->shutdown() BEFORE the NULL check,
    // dereferencing a null pointer when the pool was never created.
    if (pool != NULL) {
        pool->shutdown();  // stop and join all worker threads
        delete pool;
        pool = NULL;
    }
    // Release model resources; clear the member to avoid a dangling pointer
    // (and a double destruct if this is ever called twice).
    if (_model != NULL) {
        DestructModel(_model);
        _model = NULL;
    }
}
// C ABI wrapper: construct a ModelWrapper and initialize it in one call.
extern "C" __declspec(dllexport) ModelWrapper * ModelObjInit(const char* model_type, const char* model_dir, int gpu_id, bool use_trt,
    const std::vector<int>min_input_shape, const std::vector<int>max_input_shape, const std::vector<int>optim_input_shape, int precision, int min_subgraph_size)
{
    ModelWrapper* wrapper = new ModelWrapper();
    wrapper->InitModelEnter(model_type, model_dir, gpu_id, use_trt, min_input_shape, max_input_shape, optim_input_shape, precision, min_subgraph_size);
    return wrapper;
}
// C ABI wrapper: tear down the wrapper's internal resources (thread pool and
// model) first, then free the wrapper object itself.
extern "C" __declspec(dllexport) void ModelObjDestruct(ModelWrapper * modelObj)
{
    modelObj->DestructModelEnter();
    delete modelObj;
}
// C ABI wrapper: segmentation prediction; forwards to the wrapper instance.
extern "C" __declspec(dllexport) void ModelObjPredict_Seg(ModelWrapper * modelObj, unsigned char* imageData, int width, int height, int channels, unsigned char* resultMap)
{
    modelObj->SegPredictEnter(imageData, width, height, channels, resultMap);
}
// C ABI wrapper: detection prediction; forwards to the wrapper instance.
extern "C" __declspec(dllexport) void ModelObjPredict_Det(ModelWrapper * modelObj, unsigned char* imageData, int width, int height, int channels, float* output, int* nBoxesNum, char* LabelList)
{
    modelObj->DetPredictEnter(imageData, width, height, channels, output, nBoxesNum, LabelList);
}
// C ABI wrapper: classification prediction; forwards to the wrapper instance.
extern "C" __declspec(dllexport) void ModelObjPredict_Cls(ModelWrapper * modelObj, unsigned char* imageData, int width, int height, int channels, float* score, char* category, int* category_id)
{
    modelObj->ClsPredictEnter(imageData, width, height, channels, score, category, category_id);
}
// C ABI wrapper: MaskRCNN prediction; forwards to the wrapper instance.
extern "C" __declspec(dllexport) void ModelObjPredict_Mask(ModelWrapper * modelObj, unsigned char* imageData, int width, int height, int channels, float* box_output, unsigned char* mask_output, int* nBoxesNum, char* LabelList)
{
    modelObj->MaskPredictEnter(imageData, width, height, channels, box_output, mask_output, nBoxesNum, LabelList);
}