diff --git a/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc
index df5665b75b34e..33c151a24f7b2 100644
--- a/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc
@@ -234,16 +234,13 @@ class CrossMultiheadMatMulOpConverter : public OpConverter {
     auto creator = GetPluginRegistry()->getPluginCreator("fMHCA", "1");
     assert(creator != nullptr);
     std::vector<nvinfer1::PluginField> fields{};
-    nvinfer1::PluginFieldCollection* plugin_collection =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_collection) +
-                   fields.size() *
-                       sizeof(nvinfer1::PluginField)));  // remember to free
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+        new nvinfer1::PluginFieldCollection);
     plugin_collection->nbFields = static_cast<int>(fields.size());
     plugin_collection->fields = fields.data();
-    auto plugin = creator->createPlugin("fMHA_V2", plugin_collection);
-    free(plugin_collection);
+    auto plugin = creator->createPlugin("fMHA_V2", plugin_collection.get());
+    plugin_collection.reset();
     std::vector<nvinfer1::ITensor*> plugin_inputs;
     plugin_inputs.emplace_back(reshape_after_fc_q_layer->getOutput(0));
     plugin_inputs.emplace_back(reshape_after_fc_layer->getOutput(0));
diff --git a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
index 340f16330a2e5..71159e3009b6c 100644
--- a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
@@ -119,10 +119,8 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
                            static_cast<int32_t>(emb_sizes[i]));
       }
 
-      nvinfer1::PluginFieldCollection* plugin_ptr =
-          static_cast<nvinfer1::PluginFieldCollection*>(
-              malloc(sizeof(*plugin_ptr) +
-                     fields.size() * sizeof(nvinfer1::PluginField)));
+      std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+          new nvinfer1::PluginFieldCollection);
       plugin_ptr->nbFields = static_cast<int>(fields.size());
       plugin_ptr->fields = fields.data();
 
@@ -132,7 +130,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
       auto creator = GetPluginRegistry()->getPluginCreator(
           "ManyEmbLayerNormVarlenPluginDynamic", "1");
       auto plugin_obj = creator->createPlugin(
-          "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr);
+          "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr.get());
 
       auto plugin_layer = engine_->network()->addPluginV2(
           plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
@@ -140,7 +138,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
       plugin_layer->setName(("ManyEmbLayerNormVarlenPluginDynamicV1(Output: " +
                              op_desc.Output("Out")[0] + ")")
                                 .c_str());
-      free(plugin_ptr);
+      plugin_ptr.reset();
       if (enable_int8) {
         float out_scale =
             PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold"));
@@ -218,10 +216,8 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
                            static_cast<int32_t>(emb_sizes[i]));
       }
 
-      nvinfer1::PluginFieldCollection* plugin_ptr =
-          static_cast<nvinfer1::PluginFieldCollection*>(
-              malloc(sizeof(*plugin_ptr) +
-                     fields.size() * sizeof(nvinfer1::PluginField)));
+      std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+          new nvinfer1::PluginFieldCollection);
       plugin_ptr->nbFields = static_cast<int>(fields.size());
       plugin_ptr->fields = fields.data();
 
@@ -229,8 +225,8 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
 
       auto creator = GetPluginRegistry()->getPluginCreator(
           "ManyEmbLayerNormPluginDynamic", "1");
-      auto plugin_obj =
-          creator->createPlugin("ManyEmbLayerNormPluginDynamic", plugin_ptr);
+      auto plugin_obj = creator->createPlugin("ManyEmbLayerNormPluginDynamic",
+                                              plugin_ptr.get());
 
       auto plugin_layer = engine_->network()->addPluginV2(
           plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
@@ -238,7 +234,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
       plugin_layer->setName(("ManyEmbLayerNormPluginDynamicV1(Output: " +
                              op_desc.Output("Out")[0] + ")")
                                 .c_str());
-      free(plugin_ptr);
+      plugin_ptr.reset();
       if (enable_int8) {
         float out_scale =
             PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold"));
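
[annotation, not part of the patch] Every converter hunk in this patch follows the same shape. nvinfer1::PluginFieldCollection is a fixed-size header ({int32_t nbFields; PluginField const* fields;}) that only points at field storage owned by a std::vector, so the old malloc(sizeof(collection) + fields.size() * sizeof(PluginField)) over-allocated as if the fields were stored inline, and the matching free() leaked on any early exit. A minimal self-contained sketch of the replacement pattern, using stand-in structs rather than the real TensorRT headers:

    #include <memory>
    #include <vector>

    // Stand-ins mirroring only the shape of nvinfer1::PluginField and
    // nvinfer1::PluginFieldCollection; the real types live in NvInferRuntime.h.
    struct PluginField {
      const char* name;
      const void* data;
    };
    struct PluginFieldCollection {
      int nbFields;
      const PluginField* fields;
    };

    void BuildPlugin() {
      std::vector<PluginField> fields{};  // the fields themselves live here
      // The collection only points at the vector's storage, so a plain heap
      // allocation of the header struct is all that is needed.
      std::unique_ptr<PluginFieldCollection> collection(
          new PluginFieldCollection);
      collection->nbFields = static_cast<int>(fields.size());
      collection->fields = fields.data();
      // ... hand collection.get() to the consumer (createPlugin above) ...
      collection.reset();  // replaces free(); also runs if the scope unwinds
    }

The reset() calls in the patch sit exactly where the old free() calls were, so the collection's lifetime is unchanged; the unique_ptr only adds release-on-unwind.
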
diff --git a/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc
index e5904a1cf7543..757f90181cd0d 100644
--- a/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc
@@ -210,16 +210,12 @@ class FlashMultiheadMatMulOpConverter : public OpConverter {
       auto creator = GetPluginRegistry()->getPluginCreator("fMHA_V2", "1");
       assert("fmha_v2 plugin creater must not be null" && creator != nullptr);
       std::vector<nvinfer1::PluginField> fields{};
-      nvinfer1::PluginFieldCollection* plugin_collection =
-          static_cast<nvinfer1::PluginFieldCollection*>(
-              malloc(sizeof(*plugin_collection) +
-                     fields.size() *
-                         sizeof(nvinfer1::PluginField)));  // remember to free
-
+      std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+          new nvinfer1::PluginFieldCollection);
       plugin_collection->nbFields = static_cast<int>(fields.size());
       plugin_collection->fields = fields.data();
-      auto plugin = creator->createPlugin("fMHA_V2", plugin_collection);
-      free(plugin_collection);
+      auto plugin = creator->createPlugin("fMHA_V2", plugin_collection.get());
+      plugin_collection.reset();
       std::vector<nvinfer1::ITensor*> plugin_inputs;
       plugin_inputs.emplace_back(reshape_before_mha_layer->getOutput(0));
       auto plugin_layer = engine_->network()->addPluginV2(
diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc
index 449593533820b..107217477d14f 100644
--- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc
@@ -102,10 +102,8 @@ class MultiClassNMS3OpConverter : public OpConverter {
         {"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
     };
 
-    nvinfer1::PluginFieldCollection* plugin_collections =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_collections) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collections(
+        new nvinfer1::PluginFieldCollection);
     plugin_collections->nbFields = static_cast<int>(fields.size());
     plugin_collections->fields = fields.data();
     std::string nms_plugin_name = "BatchedNMS_TRT";
@@ -114,9 +112,9 @@ class MultiClassNMS3OpConverter : public OpConverter {
     }
     auto creator =
         GetPluginRegistry()->getPluginCreator(nms_plugin_name.c_str(), "1");
-    auto batch_nms_plugin =
-        creator->createPlugin(nms_plugin_name.c_str(), plugin_collections);
-    free(plugin_collections);
+    auto batch_nms_plugin = creator->createPlugin(nms_plugin_name.c_str(),
+                                                  plugin_collections.get());
+    plugin_collections.reset();
     auto batch_nms_layer = engine_->network()->addPluginV2(
         batch_nms_inputs.data(), batch_nms_inputs.size(), *batch_nms_plugin);
diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc
index e14ee099aa0f8..ab0dacedd2d07 100644
--- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc
@@ -100,10 +100,8 @@ class MultiClassNMSOpConverter : public OpConverter {
       {"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
   };
 
-  nvinfer1::PluginFieldCollection* plugin_collections =
-      static_cast<nvinfer1::PluginFieldCollection*>(
-          malloc(sizeof(*plugin_collections) +
-                 fields.size() * sizeof(nvinfer1::PluginField)));
+  std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collections(
+      new nvinfer1::PluginFieldCollection);
   plugin_collections->nbFields = static_cast<int>(fields.size());
   plugin_collections->fields = fields.data();
{"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1}, }; - nvinfer1::PluginFieldCollection* plugin_collections = - static_cast( - malloc(sizeof(*plugin_collections) + - fields.size() * sizeof(nvinfer1::PluginField))); + std::unique_ptr plugin_collections( + new nvinfer1::PluginFieldCollection); plugin_collections->nbFields = static_cast(fields.size()); plugin_collections->fields = fields.data(); @@ -113,9 +111,9 @@ class MultiClassNMSOpConverter : public OpConverter { } auto creator = GetPluginRegistry()->getPluginCreator(nms_plugin_name.c_str(), "1"); - auto batch_nms_plugin = - creator->createPlugin(nms_plugin_name.c_str(), plugin_collections); - free(plugin_collections); + auto batch_nms_plugin = creator->createPlugin(nms_plugin_name.c_str(), + plugin_collections.get()); + plugin_collections.reset(); auto batch_nms_layer = engine_->network()->addPluginV2( batch_nms_inputs.data(), batch_nms_inputs.size(), *batch_nms_plugin); diff --git a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc index 73c43d39357c0..a092b3215502e 100644 --- a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc @@ -234,17 +234,14 @@ class MultiheadMatMulOpConverter : public OpConverter { nvinfer1::PluginFieldType::kFLOAT32, 1}); } - nvinfer1::PluginFieldCollection* plugin_collection = - static_cast(malloc( - sizeof(*plugin_collection) + - fields.size() * - sizeof(nvinfer1::PluginField))); // remember to free + std::unique_ptr plugin_collection( + new nvinfer1::PluginFieldCollection); plugin_collection->nbFields = static_cast(fields.size()); plugin_collection->fields = fields.data(); auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic", - plugin_collection); - free(plugin_collection); + plugin_collection.get()); + plugin_collection.reset(); std::vector plugin_inputs; plugin_inputs.emplace_back(fc_layer->getOutput(0)); @@ -429,17 +426,14 @@ class MultiheadMatMulOpConverter : public OpConverter { nvinfer1::PluginFieldType::kFLOAT32, 1}); } - nvinfer1::PluginFieldCollection* plugin_collection = - static_cast(malloc( - sizeof(*plugin_collection) + - fields.size() * - sizeof(nvinfer1::PluginField))); // remember to free + std::unique_ptr plugin_collection( + new nvinfer1::PluginFieldCollection); plugin_collection->nbFields = static_cast(fields.size()); plugin_collection->fields = fields.data(); auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic", - plugin_collection); - free(plugin_collection); + plugin_collection.get()); + plugin_collection.reset(); std::vector plugin_inputs; plugin_inputs.emplace_back( @@ -661,16 +655,13 @@ class MultiheadMatMulOpConverter : public OpConverter { &var_seqlen, nvinfer1::PluginFieldType::kINT32, 1}}; - nvinfer1::PluginFieldCollection* plugin_collection = - static_cast(malloc( - sizeof(*plugin_collection) + - fields.size() * - sizeof(nvinfer1::PluginField))); // remember to free + std::unique_ptr plugin_collection( + new nvinfer1::PluginFieldCollection); plugin_collection->nbFields = static_cast(fields.size()); plugin_collection->fields = fields.data(); auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic", - plugin_collection); - free(plugin_collection); + plugin_collection.get()); + plugin_collection.reset(); // set inputs std::vector plugin_inputs; // input_0 for plugin diff --git a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc 
diff --git a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc
index 0ec1336f0e2d1..9f9cbe7c6bceb 100644
--- a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc
@@ -173,10 +173,8 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
                          static_cast<int32_t>(emb_sizes[i]));
     }
 
-    nvinfer1::PluginFieldCollection* plugin_ptr =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_ptr) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+        new nvinfer1::PluginFieldCollection);
     plugin_ptr->nbFields = static_cast<int>(fields.size());
     plugin_ptr->fields = fields.data();
 
@@ -188,7 +186,7 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
         "ManyEmbLayerNormVarlenPluginDynamic", "2");
 
     auto plugin_obj = creator->createPlugin(
-        "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr);
+        "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr.get());
 
     auto plugin_layer = engine_->network()->addPluginV2(
         plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
@@ -196,7 +194,7 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
     plugin_layer->setName(("ManyEmbLayerNormPluginDynamic_V3(Output: " +
                            op_desc.Output("Out")[0] + ")")
                              .c_str());
-    free(plugin_ptr);
+    plugin_ptr.reset();
     float out_0_scale =
         PADDLE_GET_CONST(float, op_desc.GetAttr("out_0_threshold"));
     float out_1_scale =
diff --git a/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc
index 9250807662543..e6beaae910d96 100644
--- a/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc
@@ -118,10 +118,8 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
                          static_cast<int32_t>(emb_sizes[i]));
     }
 
-    nvinfer1::PluginFieldCollection* plugin_ptr =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_ptr) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+        new nvinfer1::PluginFieldCollection);
     plugin_ptr->nbFields = static_cast<int>(fields.size());
     plugin_ptr->fields = fields.data();
 
@@ -134,7 +132,7 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
     auto creator = GetPluginRegistry()->getPluginCreator(
         "PromptTuningEmbLayerNormVarlenPluginDynamic", "1");
     auto plugin_obj = creator->createPlugin(
-        "PromptTuningEmbLayerNormVarlenPluginDynamic", plugin_ptr);
+        "PromptTuningEmbLayerNormVarlenPluginDynamic", plugin_ptr.get());
 
     auto plugin_layer = engine_->network()->addPluginV2(
         plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
@@ -143,7 +141,7 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
         ("PromptTuningEmbLayerNormVarlenPluginDynamicV1(Output: " +
          op_desc.Output("Out")[0] + ")")
             .c_str());
-    free(plugin_ptr);
+    plugin_ptr.reset();
     if (enable_int8) {
       float out_scale =
           PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold"));
diff --git a/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc
index e8ed4af9cddf7..aafbec6660c67 100644
--- a/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc
@@ -258,16 +258,12 @@ class QkMultiheadMatMulOpConverter : public OpConverter {
     auto creator = GetPluginRegistry()->getPluginCreator("fMHA_V2", "1");
     assert(creator != nullptr);
     std::vector<nvinfer1::PluginField> fields{};
-    nvinfer1::PluginFieldCollection* plugin_collection =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_collection) +
-                   fields.size() *
-                       sizeof(nvinfer1::PluginField)));  // remember to free
-
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+        new nvinfer1::PluginFieldCollection);
     plugin_collection->nbFields = static_cast<int>(fields.size());
     plugin_collection->fields = fields.data();
-    auto plugin = creator->createPlugin("fMHA_V2", plugin_collection);
-    free(plugin_collection);
+    auto plugin = creator->createPlugin("fMHA_V2", plugin_collection.get());
+    plugin_collection.reset();
     std::vector<nvinfer1::ITensor*> plugin_inputs;
     plugin_inputs.emplace_back(mha_input_tensor);
     auto plugin_layer = engine_->network()->addPluginV2(
diff --git a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
index ab70ebb6ccd81..988d0d064c862 100644
--- a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
@@ -149,17 +149,15 @@ class SkipLayerNormOpConverter : public OpConverter {
                                 scale_weight.values,
                                 GetPluginFieldType(scale_weight.type),
                                 static_cast<int32_t>(scale_weight.count)}};
-      nvinfer1::PluginFieldCollection* pluginPtr =
-          static_cast<nvinfer1::PluginFieldCollection*>(
-              malloc(sizeof(nvinfer1::PluginFieldCollection) +
-                     fields.size() * sizeof(nvinfer1::PluginField)));
+      std::unique_ptr<nvinfer1::PluginFieldCollection> pluginPtr(
+          new nvinfer1::PluginFieldCollection);
       pluginPtr->nbFields = static_cast<int>(fields.size());
       pluginPtr->fields = fields.data();
 
-      auto pluginObj =
-          creator->createPlugin("CustomSkipLayerNormPluginDynamic", pluginPtr);
+      auto pluginObj = creator->createPlugin("CustomSkipLayerNormPluginDynamic",
+                                             pluginPtr.get());
 
-      free(pluginPtr);
+      pluginPtr.reset();
 
       auto plugin_layer = engine_->network()->addPluginV2(
           inputs.data(), inputs.size(), *pluginObj);
@@ -213,18 +211,15 @@ class SkipLayerNormOpConverter : public OpConverter {
                               smooth_scale.data(),
                               nvinfer1::PluginFieldType::kFLOAT32,
                               static_cast<int32_t>(smooth_scale.size())});
-        nvinfer1::PluginFieldCollection* pluginPtr =
-            static_cast<nvinfer1::PluginFieldCollection*>(
-                malloc(sizeof(nvinfer1::PluginFieldCollection) +
-                       fields.size() *
-                           sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> pluginPtr(
+            new nvinfer1::PluginFieldCollection);
         pluginPtr->nbFields = static_cast<int>(fields.size());
         pluginPtr->fields = fields.data();
 
         auto pluginObj = creator->createPlugin(
-            "CustomSkipLayerNormPluginDynamicWithSmooth", pluginPtr);
+            "CustomSkipLayerNormPluginDynamicWithSmooth", pluginPtr.get());
 
-        free(pluginPtr);
+        pluginPtr.reset();
 
         auto plugin_layer = engine_->network()->addPluginV2(
             inputs.data(), inputs.size(), *pluginObj);
@@ -237,18 +232,15 @@ class SkipLayerNormOpConverter : public OpConverter {
                 "layer"));
         layer = plugin_layer;
       } else {
-        nvinfer1::PluginFieldCollection* pluginPtr =
-            static_cast<nvinfer1::PluginFieldCollection*>(
-                malloc(sizeof(nvinfer1::PluginFieldCollection) +
-                       fields.size() *
-                           sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> pluginPtr(
+            new nvinfer1::PluginFieldCollection);
         pluginPtr->nbFields = static_cast<int>(fields.size());
         pluginPtr->fields = fields.data();
 
         auto pluginObj = creator->createPlugin(
-            "CustomSkipLayerNormPluginDynamic", pluginPtr);
+            "CustomSkipLayerNormPluginDynamic", pluginPtr.get());
 
-        free(pluginPtr);
+        pluginPtr.reset();
 
         auto plugin_layer = engine_->network()->addPluginV2(
             inputs.data(), inputs.size(), *pluginObj);
diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index 64619a0e0f591..030a6065b8415 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -14,6 +14,7 @@
 
 #pragma once
 
+#include <memory>
 #include <type_traits>
 #include <utility>
 
@@ -27,7 +28,7 @@ class IR_API InterfaceValue {
   template <typename Interface, typename Model>
   static InterfaceValue Get();
   TypeId type_id() const { return type_id_; }
-  void *model() const { return model_; }
+  void *model() const { return model_.get(); }
 
   InterfaceValue() = default;
   InterfaceValue(TypeId type_id) : type_id_(type_id) {}  // NOLINT
@@ -35,7 +36,7 @@ class IR_API InterfaceValue {
   InterfaceValue(InterfaceValue &&) noexcept;
   InterfaceValue &operator=(const InterfaceValue &) = delete;
   InterfaceValue &operator=(InterfaceValue &&) noexcept;
-  ~InterfaceValue();
+  ~InterfaceValue() = default;
   void swap(InterfaceValue &&val) {
     using std::swap;
     swap(type_id_, val.type_id_);
@@ -51,7 +52,8 @@ class IR_API InterfaceValue {
 
  private:
   TypeId type_id_;
-  void *model_{nullptr};
+  std::unique_ptr<void, decltype(static_cast<void (*)(void *)>(free))> model_{
+      nullptr, static_cast<void (*)(void *)>(free)};
 };
 
 template <typename Interface, typename Model>
@@ -64,13 +66,14 @@ InterfaceValue InterfaceValue::Get() {
       sizeof(typename Interface::Concept) == sizeof(Model),
       "Compared with Concept, Model class shouldn't define new data members");
 
-  val.model_ = malloc(sizeof(Model));
-  if (val.model_ == nullptr) {
+  void *model_raw = malloc(sizeof(Model));
+  if (model_raw == nullptr) {
     throw("Alloc memory for interface failed.");
   }
   static_assert(std::is_trivially_destructible<Model>::value,
                 "interface models must be trivially destructible");
-  new (val.model_) Model();
+  new (model_raw) Model();
+  val.model_.reset(model_raw);
   return val;
 }
diff --git a/paddle/pir/src/core/interface_support.cc b/paddle/pir/src/core/interface_support.cc
index 973b98668a061..0d2a87fc6bd66 100644
--- a/paddle/pir/src/core/interface_support.cc
+++ b/paddle/pir/src/core/interface_support.cc
@@ -15,14 +15,9 @@
 #include "paddle/pir/include/core/interface_support.h"
 
 namespace pir {
-InterfaceValue::~InterfaceValue() {
-  if (model_) free(model_);
-}
-
 InterfaceValue::InterfaceValue(InterfaceValue&& val) noexcept {
   type_id_ = val.type_id_;
-  model_ = val.model_;
-  val.model_ = nullptr;
+  model_ = std::move(val.model_);
 }
 
 InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) noexcept {
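
[annotation, not part of the patch] model_ still has to own raw malloc'd storage (the placement-new'd Model is deliberately never destroyed, per the trivially-destructible static_assert), so the header pairs std::unique_ptr<void, ...> with free as its deleter; the defaulted destructor and std::move then replace the hand-written destructor and move-constructor bodies deleted from interface_support.cc. A minimal sketch of the same idiom; MakeModel is a hypothetical stand-in for InterfaceValue::Get:

    #include <cstdlib>
    #include <memory>
    #include <new>
    #include <type_traits>

    // A void pointer whose deleter is plain free(), as in the model_ member.
    using MallocPtr =
        std::unique_ptr<void, decltype(static_cast<void (*)(void*)>(free))>;

    template <typename Model>
    MallocPtr MakeModel() {
      // free() never runs a destructor, hence the same check as the header.
      static_assert(std::is_trivially_destructible<Model>::value,
                    "interface models must be trivially destructible");
      void* raw = malloc(sizeof(Model));
      if (raw == nullptr) {
        throw("Alloc memory for interface failed.");
      }
      new (raw) Model();  // placement-new constructs Model in the malloc'd block
      return MallocPtr(raw, static_cast<void (*)(void*)>(free));
    }
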
diff --git a/test/deprecated/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc b/test/deprecated/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
index b06c637c86e47..1b66f7c246f7e 100644
--- a/test/deprecated/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
+++ b/test/deprecated/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
@@ -39,7 +39,7 @@ typedef struct RunParameter {
   size_t shape_size;
   float* input_data;
   int32_t out_size;
-  float* out_data;
+  std::vector<float> out_data;
   int32_t thread_index;
 } RunParameter;
 
@@ -62,9 +62,8 @@ void* run(void* thread_param) {
     param->out_size = param->out_size * output_shape->data[index];
   }
   PD_OneDimArrayInt32Destroy(output_shape);
-  param->out_data =
-      reinterpret_cast<float*>(malloc(param->out_size * sizeof(float)));
-  PD_TensorCopyToCpuFloat(output_tensor, param->out_data);
+  param->out_data.resize(param->out_size);
+  PD_TensorCopyToCpuFloat(output_tensor, param->out_data.data());
   PD_TensorDestroy(output_tensor);
   PD_OneDimArrayCstrDestroy(output_names);
   PD_TensorDestroy(tensor);
@@ -80,23 +79,19 @@ void threads_run(int thread_num) {
                     (model_dir + "/__params__").c_str());
   PD_Predictor* predictor = PD_PredictorCreate(config);
-  pthread_t* threads =
-      reinterpret_cast<pthread_t*>(malloc(thread_num * sizeof(pthread_t)));
-  RunParameter* params = reinterpret_cast<RunParameter*>(
-      malloc(thread_num * sizeof(RunParameter)));
+  std::vector<pthread_t> threads(thread_num);
+  std::vector<RunParameter> params(thread_num);
+
   std::array<int32_t, 4> shapes = {1, 3, 300, 300};
-  float* input =
-      reinterpret_cast<float*>(malloc(1 * 3 * 300 * 300 * sizeof(float)));
-  memset(input, 0, 1 * 3 * 300 * 300 * sizeof(float));
+  std::vector<float> input(1 * 3 * 300 * 300, 0);
   for (int i = 0; i < thread_num; ++i) {
     params[i].predictor = PD_PredictorClone(predictor);
     params[i].shapes = shapes.data();
     params[i].shape_size = 4;
-    params[i].input_data = input;
+    params[i].input_data = input.data();
     params[i].out_size = 0;
-    params[i].out_data = nullptr;
     params[i].thread_index = i;
-    pthread_create(&(threads[i]), nullptr, run, (params + i));
+    pthread_create(&(threads[i]), nullptr, run, &(params[i]));
   }
   for (int i = 0; i < thread_num; ++i) {
     pthread_join(threads[i], nullptr);
@@ -111,11 +106,7 @@ void threads_run(int thread_num) {
   }
   for (int i = 0; i < thread_num; ++i) {
     PD_PredictorDestroy(params[i].predictor);
-    free(params[i].out_data);
   }
-  free(input);
-  free(params);
-  free(threads);
   PD_PredictorDestroy(predictor);
 }
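
[annotation, not part of the patch] The test swaps three malloc/free pairs for scope-owned containers: each thread's output buffer becomes a std::vector<float> member, and the thread and parameter arrays become vectors sized once before any thread starts, so their element addresses stay stable for the lifetime of the workers. A reduced sketch of that shape; Work is a hypothetical stand-in for RunParameter:

    #include <pthread.h>
    #include <vector>

    struct Work {
      std::vector<float> out;  // owns the per-thread output buffer
    };

    void* run(void* arg) {
      auto* w = static_cast<Work*>(arg);
      w->out.resize(1024);  // replaces malloc(out_size * sizeof(float))
      // ... fill w->out.data(), as PD_TensorCopyToCpuFloat does in the test ...
      return nullptr;  // no free(): the vector's destructor releases the buffer
    }

    int main() {
      std::vector<pthread_t> threads(4);
      std::vector<Work> work(4);  // sized up front and never resized after
                                  // the threads launch, so &work[i] is stable
      for (size_t i = 0; i < threads.size(); ++i) {
        pthread_create(&threads[i], nullptr, run, &work[i]);
      }
      for (pthread_t& t : threads) {
        pthread_join(t, nullptr);
      }
      return 0;
    }
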