From bffd5d241e8d8d55b9f9228ee6e6f3df424cee9a Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Mon, 22 Apr 2024 20:39:31 +0800
Subject: [PATCH 01/15] no-malloc fix

---
 .../convert/cross_multihead_matmul_op.cc      | 11 +++-----
 .../tensorrt/convert/emb_eltwise_layernorm.cc | 22 +++++++--------
 .../convert/flash_multihead_matmul_op.cc      | 12 +++------
 .../tensorrt/convert/multiclass_nms3_op.cc    | 12 ++++-----
 .../tensorrt/convert/multiclass_nms_op.cc     | 12 ++++-----
 .../tensorrt/convert/multihead_matmul_op.cc   | 27 +++++++------------
 .../convert/preln_emb_eltwise_layernorm.cc    | 10 +++----
 .../prompt_tuning_emb_eltwise_layernorm.cc    | 10 +++----
 .../convert/qk_multihead_matmul_op.cc         | 12 +++------
 .../tensorrt/convert/skip_layernorm.cc        | 26 +++++++-----------
 paddle/phi/api/profiler/device_tracer.cc      |  4 +--
 .../analyzer_capi_exp_pd_threads_tester.cc    | 21 +++++----------
 12 files changed, 66 insertions(+), 113 deletions(-)

diff --git a/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc
index df5665b75b34e..33c151a24f7b2 100644
--- a/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/cross_multihead_matmul_op.cc
@@ -234,16 +234,13 @@ class CrossMultiheadMatMulOpConverter : public OpConverter {
       auto creator = GetPluginRegistry()->getPluginCreator("fMHCA", "1");
       assert(creator != nullptr);
       std::vector<nvinfer1::PluginField> fields{};
-      nvinfer1::PluginFieldCollection* plugin_collection =
-          static_cast<nvinfer1::PluginFieldCollection*>(
-              malloc(sizeof(*plugin_collection) +
-                     fields.size() *
-                         sizeof(nvinfer1::PluginField)));  // remember to free
+      std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+          new nvinfer1::PluginFieldCollection);
       plugin_collection->nbFields = static_cast<int>(fields.size());
       plugin_collection->fields = fields.data();
 
-      auto plugin = creator->createPlugin("fMHA_V2", plugin_collection);
-      free(plugin_collection);
+      auto plugin = creator->createPlugin("fMHA_V2", plugin_collection.get());
+      plugin_collection.reset();
       std::vector<nvinfer1::ITensor*> plugin_inputs;
       plugin_inputs.emplace_back(reshape_after_fc_q_layer->getOutput(0));
       plugin_inputs.emplace_back(reshape_after_fc_layer->getOutput(0));
diff --git a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
index 340f16330a2e5..c8e3039ec482b 100644
--- a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
@@ -119,10 +119,8 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
           static_cast<int32_t>(emb_sizes[i]));
     }
 
-    nvinfer1::PluginFieldCollection* plugin_ptr =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_ptr) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+        new nvinfer1::PluginFieldCollection);
     plugin_ptr->nbFields = static_cast<int>(fields.size());
     plugin_ptr->fields = fields.data();
 
@@ -132,7 +130,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
     auto creator = GetPluginRegistry()->getPluginCreator(
         "ManyEmbLayerNormVarlenPluginDynamic", "1");
     auto plugin_obj = creator->createPlugin(
-        "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr);
+        "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr.get());
 
     auto plugin_layer = engine_->network()->addPluginV2(
         plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
 
@@ -140,7 +138,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
     plugin_layer->setName(("ManyEmbLayerNormVarlenPluginDynamicV1(Output: " +
op_desc.Output("Out")[0] + ")") .c_str()); - free(plugin_ptr); + plugin_ptr.reset; if (enable_int8) { float out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); @@ -218,10 +216,8 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter { static_cast(emb_sizes[i])); } - nvinfer1::PluginFieldCollection* plugin_ptr = - static_cast( - malloc(sizeof(*plugin_ptr) + - fields.size() * sizeof(nvinfer1::PluginField))); + std::unique_ptr plugin_ptr( + new nvinfer1::PluginFieldCollection); plugin_ptr->nbFields = static_cast(fields.size()); plugin_ptr->fields = fields.data(); @@ -229,8 +225,8 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter { auto creator = GetPluginRegistry()->getPluginCreator( "ManyEmbLayerNormPluginDynamic", "1"); - auto plugin_obj = - creator->createPlugin("ManyEmbLayerNormPluginDynamic", plugin_ptr); + auto plugin_obj = creator->createPlugin("ManyEmbLayerNormPluginDynamic", + plugin_ptr.get()); auto plugin_layer = engine_->network()->addPluginV2( plugin_inputs.data(), plugin_inputs.size(), *plugin_obj); @@ -238,7 +234,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter { plugin_layer->setName(("ManyEmbLayerNormPluginDynamicV1(Output: " + op_desc.Output("Out")[0] + ")") .c_str()); - free(plugin_ptr); + plugin_ptr.reset(); if (enable_int8) { float out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); diff --git a/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc index e5904a1cf7543..757f90181cd0d 100644 --- a/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/flash_multihead_matmul_op.cc @@ -210,16 +210,12 @@ class FlashMultiheadMatMulOpConverter : public OpConverter { auto creator = GetPluginRegistry()->getPluginCreator("fMHA_V2", "1"); assert("fmha_v2 plugin creater must not be null" && creator != nullptr); std::vector fields{}; - nvinfer1::PluginFieldCollection* plugin_collection = - static_cast( - malloc(sizeof(*plugin_collection) + - fields.size() * - sizeof(nvinfer1::PluginField))); // remember to free - + std::unique_ptr plugin_collection( + new nvinfer1::PluginFieldCollection); plugin_collection->nbFields = static_cast(fields.size()); plugin_collection->fields = fields.data(); - auto plugin = creator->createPlugin("fMHA_V2", plugin_collection); - free(plugin_collection); + auto plugin = creator->createPlugin("fMHA_V2", plugin_collection.get()); + plugin_collection.reset(); std::vector plugin_inputs; plugin_inputs.emplace_back(reshape_before_mha_layer->getOutput(0)); auto plugin_layer = engine_->network()->addPluginV2( diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc index 449593533820b..107217477d14f 100644 --- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc @@ -102,10 +102,8 @@ class MultiClassNMS3OpConverter : public OpConverter { {"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1}, }; - nvinfer1::PluginFieldCollection* plugin_collections = - static_cast( - malloc(sizeof(*plugin_collections) + - fields.size() * sizeof(nvinfer1::PluginField))); + std::unique_ptr plugin_collections( + new nvinfer1::PluginFieldCollection); plugin_collections->nbFields = static_cast(fields.size()); plugin_collections->fields = fields.data(); std::string nms_plugin_name = "BatchedNMS_TRT"; @@ -114,9 
@@ -114,9 +112,9 @@ class MultiClassNMS3OpConverter : public OpConverter {
     }
     auto creator =
         GetPluginRegistry()->getPluginCreator(nms_plugin_name.c_str(), "1");
-    auto batch_nms_plugin =
-        creator->createPlugin(nms_plugin_name.c_str(), plugin_collections);
-    free(plugin_collections);
+    auto batch_nms_plugin = creator->createPlugin(nms_plugin_name.c_str(),
+                                                  plugin_collections.get());
+    plugin_collections.reset();
     auto batch_nms_layer = engine_->network()->addPluginV2(
         batch_nms_inputs.data(), batch_nms_inputs.size(), *batch_nms_plugin);
diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc
index e14ee099aa0f8..ab0dacedd2d07 100644
--- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc
@@ -100,10 +100,8 @@ class MultiClassNMSOpConverter : public OpConverter {
         {"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
     };
 
-    nvinfer1::PluginFieldCollection* plugin_collections =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_collections) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collections(
+        new nvinfer1::PluginFieldCollection);
     plugin_collections->nbFields = static_cast<int>(fields.size());
     plugin_collections->fields = fields.data();
 
@@ -113,9 +111,9 @@ class MultiClassNMSOpConverter : public OpConverter {
     }
     auto creator =
         GetPluginRegistry()->getPluginCreator(nms_plugin_name.c_str(), "1");
-    auto batch_nms_plugin =
-        creator->createPlugin(nms_plugin_name.c_str(), plugin_collections);
-    free(plugin_collections);
+    auto batch_nms_plugin = creator->createPlugin(nms_plugin_name.c_str(),
+                                                  plugin_collections.get());
+    plugin_collections.reset();
     auto batch_nms_layer = engine_->network()->addPluginV2(
         batch_nms_inputs.data(), batch_nms_inputs.size(), *batch_nms_plugin);
diff --git a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
index 73c43d39357c0..56166a7421a90 100644
--- a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
@@ -234,17 +234,14 @@ class MultiheadMatMulOpConverter : public OpConverter {
                                          nvinfer1::PluginFieldType::kFLOAT32,
                                          1});
         }
-        nvinfer1::PluginFieldCollection* plugin_collection =
-            static_cast<nvinfer1::PluginFieldCollection*>(malloc(
-                sizeof(*plugin_collection) +
-                fields.size() *
-                    sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+            new nvinfer1::PluginFieldCollection);
         plugin_collection->nbFields = static_cast<int>(fields.size());
         plugin_collection->fields = fields.data();
 
         auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic",
                                             plugin_collection);
-        free(plugin_collection);
+        plugin_collection.reset();
 
         std::vector<nvinfer1::ITensor*> plugin_inputs;
         plugin_inputs.emplace_back(fc_layer->getOutput(0));
@@ -429,17 +426,14 @@ class MultiheadMatMulOpConverter : public OpConverter {
                                          nvinfer1::PluginFieldType::kFLOAT32,
                                          1});
         }
-        nvinfer1::PluginFieldCollection* plugin_collection =
-            static_cast<nvinfer1::PluginFieldCollection*>(malloc(
-                sizeof(*plugin_collection) +
-                fields.size() *
-                    sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+            new nvinfer1::PluginFieldCollection);
         plugin_collection->nbFields = static_cast<int>(fields.size());
         plugin_collection->fields = fields.data();
 
         auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic",
                                             plugin_collection);
-        free(plugin_collection);
+        plugin_collection.reset();
         std::vector<nvinfer1::ITensor*> plugin_inputs;
         plugin_inputs.emplace_back(
@@ -661,16 +655,13 @@ class MultiheadMatMulOpConverter : public OpConverter {
              &var_seqlen,
              nvinfer1::PluginFieldType::kINT32,
              1}};
-        nvinfer1::PluginFieldCollection* plugin_collection =
-            static_cast<nvinfer1::PluginFieldCollection*>(malloc(
-                sizeof(*plugin_collection) +
-                fields.size() *
-                    sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+            new nvinfer1::PluginFieldCollection);
         plugin_collection->nbFields = static_cast<int>(fields.size());
         plugin_collection->fields = fields.data();
         auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic",
                                             plugin_collection);
-        free(plugin_collection);
+        plugin_collection.reset();
         // set inputs
         std::vector<nvinfer1::ITensor*> plugin_inputs;
         // input_0 for plugin
diff --git a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc
index 0ec1336f0e2d1..9f9cbe7c6bceb 100644
--- a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc
@@ -173,10 +173,8 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
           static_cast<int32_t>(emb_sizes[i]));
     }
 
-    nvinfer1::PluginFieldCollection* plugin_ptr =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_ptr) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+        new nvinfer1::PluginFieldCollection);
     plugin_ptr->nbFields = static_cast<int>(fields.size());
     plugin_ptr->fields = fields.data();
 
@@ -188,7 +186,7 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
         "ManyEmbLayerNormVarlenPluginDynamic", "2");
 
     auto plugin_obj = creator->createPlugin(
-        "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr);
+        "ManyEmbLayerNormVarlenPluginDynamic", plugin_ptr.get());
 
     auto plugin_layer = engine_->network()->addPluginV2(
         plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
 
@@ -196,7 +194,7 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter {
     plugin_layer->setName(("ManyEmbLayerNormPluginDynamic_V3(Output: " +
                            op_desc.Output("Out")[0] + ")")
                              .c_str());
-    free(plugin_ptr);
+    plugin_ptr.reset();
     float out_0_scale =
         PADDLE_GET_CONST(float, op_desc.GetAttr("out_0_threshold"));
     float out_1_scale =
diff --git a/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc
index 9250807662543..e6beaae910d96 100644
--- a/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/prompt_tuning_emb_eltwise_layernorm.cc
@@ -118,10 +118,8 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
           static_cast<int32_t>(emb_sizes[i]));
     }
 
-    nvinfer1::PluginFieldCollection* plugin_ptr =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_ptr) +
-                   fields.size() * sizeof(nvinfer1::PluginField)));
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_ptr(
+        new nvinfer1::PluginFieldCollection);
     plugin_ptr->nbFields = static_cast<int>(fields.size());
     plugin_ptr->fields = fields.data();
 
@@ -134,7 +132,7 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
     auto creator = GetPluginRegistry()->getPluginCreator(
         "PromptTuningEmbLayerNormVarlenPluginDynamic", "1");
     auto plugin_obj = creator->createPlugin(
-        "PromptTuningEmbLayerNormVarlenPluginDynamic", plugin_ptr);
+        "PromptTuningEmbLayerNormVarlenPluginDynamic", plugin_ptr.get());
 
     auto plugin_layer = engine_->network()->addPluginV2(
         plugin_inputs.data(), plugin_inputs.size(), *plugin_obj);
 
@@ -143,7 +141,7 @@ class PromptTuningEmbEltwiseLayerNormOpConverter : public OpConverter {
         ("PromptTuningEmbLayerNormVarlenPluginDynamicV1(Output: " +
          op_desc.Output("Out")[0] + ")")
             .c_str());
-    free(plugin_ptr);
+    plugin_ptr.reset();
     if (enable_int8) {
       float out_scale =
           PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold"));
diff --git a/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc
index e8ed4af9cddf7..aafbec6660c67 100644
--- a/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/qk_multihead_matmul_op.cc
@@ -258,16 +258,12 @@ class QkMultiheadMatMulOpConverter : public OpConverter {
     auto creator = GetPluginRegistry()->getPluginCreator("fMHA_V2", "1");
     assert(creator != nullptr);
     std::vector<nvinfer1::PluginField> fields{};
-    nvinfer1::PluginFieldCollection* plugin_collection =
-        static_cast<nvinfer1::PluginFieldCollection*>(
-            malloc(sizeof(*plugin_collection) +
-                   fields.size() *
-                       sizeof(nvinfer1::PluginField)));  // remember to free
-
+    std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+        new nvinfer1::PluginFieldCollection);
     plugin_collection->nbFields = static_cast<int>(fields.size());
     plugin_collection->fields = fields.data();
-    auto plugin = creator->createPlugin("fMHA_V2", plugin_collection);
-    free(plugin_collection);
+    auto plugin = creator->createPlugin("fMHA_V2", plugin_collection.get());
+    plugin_collection.reset();
     std::vector<nvinfer1::ITensor*> plugin_inputs;
     plugin_inputs.emplace_back(mha_input_tensor);
     auto plugin_layer = engine_->network()->addPluginV2(
diff --git a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
index ab70ebb6ccd81..11877d1513a7b 100644
--- a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
@@ -149,17 +149,15 @@ class SkipLayerNormOpConverter : public OpConverter {
            scale_weight.values,
            GetPluginFieldType(scale_weight.type),
            static_cast<int32_t>(scale_weight.count)}};
-      nvinfer1::PluginFieldCollection* pluginPtr =
-          static_cast<nvinfer1::PluginFieldCollection*>(
-              malloc(sizeof(nvinfer1::PluginFieldCollection) +
-                     fields.size() * sizeof(nvinfer1::PluginField)));
+      std::unique_ptr<nvinfer1::PluginFieldCollection> pluginPtr(
+          new nvinfer1::PluginFieldCollection);
       pluginPtr->nbFields = static_cast<int>(fields.size());
       pluginPtr->fields = fields.data();
 
       auto pluginObj =
           creator->createPlugin("CustomSkipLayerNormPluginDynamic", pluginPtr);
 
-      free(pluginPtr);
+      pluginPtr.reset();
 
       auto plugin_layer = engine_->network()->addPluginV2(
           inputs.data(), inputs.size(), *pluginObj);
@@ -213,18 +211,15 @@ class SkipLayerNormOpConverter : public OpConverter {
             smooth_scale.data(),
             nvinfer1::PluginFieldType::kFLOAT32,
             static_cast<int32_t>(smooth_scale.size())});
-        nvinfer1::PluginFieldCollection* pluginPtr =
-            static_cast<nvinfer1::PluginFieldCollection*>(
-                malloc(sizeof(nvinfer1::PluginFieldCollection) +
-                       fields.size() *
-                           sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> pluginPtr(
+            new nvinfer1::PluginFieldCollection);
         pluginPtr->nbFields = static_cast<int>(fields.size());
         pluginPtr->fields = fields.data();
 
         auto pluginObj = creator->createPlugin(
             "CustomSkipLayerNormPluginDynamicWithSmooth", pluginPtr);
-        free(pluginPtr);
+        pluginPtr.reset();
 
         auto plugin_layer = engine_->network()->addPluginV2(
             inputs.data(), inputs.size(), *pluginObj);
@@ -237,18 +232,15 @@ class SkipLayerNormOpConverter : public OpConverter {
                            "layer"));
         layer = plugin_layer;
       } else {
-        nvinfer1::PluginFieldCollection* pluginPtr =
-            static_cast<nvinfer1::PluginFieldCollection*>(
-                malloc(sizeof(nvinfer1::PluginFieldCollection) +
-                       fields.size() *
-                           sizeof(nvinfer1::PluginField)));  // remember to free
+        std::unique_ptr<nvinfer1::PluginFieldCollection> plugin_collection(
+            new nvinfer1::PluginFieldCollection);
         pluginPtr->nbFields = static_cast<int>(fields.size());
         pluginPtr->fields = fields.data();
 
         auto pluginObj = creator->createPlugin(
             "CustomSkipLayerNormPluginDynamic", pluginPtr);
-        free(pluginPtr);
+        pluginPtr.reset();
 
         auto plugin_layer = engine_->network()->addPluginV2(
             inputs.data(), inputs.size(), *pluginObj);
diff --git a/paddle/phi/api/profiler/device_tracer.cc b/paddle/phi/api/profiler/device_tracer.cc
index 085d28220a6a9..4e31dc86c1e4f 100644
--- a/paddle/phi/api/profiler/device_tracer.cc
+++ b/paddle/phi/api/profiler/device_tracer.cc
@@ -176,9 +176,9 @@ void DisableActivity() {
 void CUPTIAPI bufferRequested(uint8_t **buffer,
                               size_t *size,
                               size_t *maxNumRecords) {
-  uint8_t *buf = reinterpret_cast<uint8_t *>(malloc(kBufSize + kAlignSize));
+  std::vector<uint8_t> buf(kBufSize + kAlignSize);
   *size = kBufSize;
-  *buffer = ALIGN_BUFFER(buf, kAlignSize);
+  *buffer = ALIGN_BUFFER(buf.data(), kAlignSize);
   *maxNumRecords = 0;
 }
diff --git a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
index b06c637c86e47..b330f09c105fa 100644
--- a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
+++ b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
@@ -62,8 +62,8 @@ void* run(void* thread_param) {
     param->out_size = param->out_size * output_shape->data[index];
   }
   PD_OneDimArrayInt32Destroy(output_shape);
-  param->out_data =
-      reinterpret_cast<float*>(malloc(param->out_size * sizeof(float)));
+  std::vector<float> out_data(param->out_size);
+  param->out_data = out_data.data();
   PD_TensorCopyToCpuFloat(output_tensor, param->out_data);
   PD_TensorDestroy(output_tensor);
   PD_OneDimArrayCstrDestroy(output_names);
@@ -80,19 +80,16 @@ void threads_run(int thread_num) {
                             (model_dir + "/__params__").c_str());
   PD_Predictor* predictor = PD_PredictorCreate(config);
 
-  pthread_t* threads =
-      reinterpret_cast<pthread_t*>(malloc(thread_num * sizeof(pthread_t)));
-  RunParameter* params = reinterpret_cast<RunParameter*>(
-      malloc(thread_num * sizeof(RunParameter)));
+  std::vector<pthread_t> threads(thread_num);
+  std::vector<RunParameter> params(thread_num);
+
   std::array<int32_t, 4> shapes = {1, 3, 300, 300};
-  float* input =
-      reinterpret_cast<float*>(malloc(1 * 3 * 300 * 300 * sizeof(float)));
-  memset(input, 0, 1 * 3 * 300 * 300 * sizeof(float));
+  std::vector<float> input(1 * 3 * 300 * 300, 0);
   for (int i = 0; i < thread_num; ++i) {
     params[i].predictor = PD_PredictorClone(predictor);
     params[i].shapes = shapes.data();
     params[i].shape_size = 4;
-    params[i].input_data = input;
+    params[i].input_data = input.data();
     params[i].out_size = 0;
     params[i].out_data = nullptr;
     params[i].thread_index = i;
@@ -111,11 +108,7 @@ void threads_run(int thread_num) {
   }
   for (int i = 0; i < thread_num; ++i) {
     PD_PredictorDestroy(params[i].predictor);
-    free(params[i].out_data);
   }
-  free(input);
-  free(params);
-  free(threads);
   PD_PredictorDestroy(predictor);
 }

From bd174f106f2c581b84c6137b8c514d70f51de4a5 Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Mon, 22 Apr 2024 21:05:35 +0800
Subject: [PATCH 02/15] minor changes

---
 .../fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
index c8e3039ec482b..71159e3009b6c 100644
--- a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
@@ -138,7 +138,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {
     plugin_layer->setName(("ManyEmbLayerNormVarlenPluginDynamicV1(Output: " +
                            op_desc.Output("Out")[0] + ")")
                               .c_str());
-    plugin_ptr.reset;
+    plugin_ptr.reset();
     if (enable_int8) {
       float out_scale =
           PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold"));

From 814df225005c27b1538e6e40a033b759cc3f14b6 Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Tue, 23 Apr 2024 01:16:57 +0800
Subject: [PATCH 03/15] minor changes

---
 paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
index 56166a7421a90..4ed1116a2c512 100644
--- a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
@@ -660,7 +660,7 @@ class MultiheadMatMulOpConverter : public OpConverter {
         plugin_collection->nbFields = static_cast<int>(fields.size());
         plugin_collection->fields = fields.data();
         auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic",
-                                            plugin_collection);
+                                            plugin_collection.get());
         plugin_collection.reset();
         // set inputs
         std::vector<nvinfer1::ITensor*> plugin_inputs;

From f605d06dc6e3f88093bf29e1c01e8bd12ab8eee7 Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Tue, 23 Apr 2024 01:20:31 +0800
Subject: [PATCH 04/15] minor changes

---
 .../inference/tensorrt/convert/multihead_matmul_op.cc     | 4 ++--
 paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
index 4ed1116a2c512..a092b3215502e 100644
--- a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
@@ -240,7 +240,7 @@ class MultiheadMatMulOpConverter : public OpConverter {
         plugin_collection->fields = fields.data();
 
         auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic",
-                                            plugin_collection);
+                                            plugin_collection.get());
         plugin_collection.reset();
 
         std::vector<nvinfer1::ITensor*> plugin_inputs;
@@ -432,7 +432,7 @@ class MultiheadMatMulOpConverter : public OpConverter {
         plugin_collection->fields = fields.data();
 
         auto plugin = creator->createPlugin("CustomQKVToContextPluginDynamic",
-                                            plugin_collection);
+                                            plugin_collection.get());
         plugin_collection.reset();
 
         std::vector<nvinfer1::ITensor*> plugin_inputs;
diff --git a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
index 11877d1513a7b..bbcdaad7f0f0f 100644
--- a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
+++ b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc
@@ -154,8 +154,8 @@ class SkipLayerNormOpConverter : public OpConverter {
       pluginPtr->nbFields = static_cast<int>(fields.size());
       pluginPtr->fields = fields.data();
 
-      auto pluginObj =
-          creator->createPlugin("CustomSkipLayerNormPluginDynamic", pluginPtr);
+      auto pluginObj = creator->createPlugin("CustomSkipLayerNormPluginDynamic",
+                                             pluginPtr.get());
 
       pluginPtr.reset();
 
@@ -217,7 +217,7 @@ class SkipLayerNormOpConverter : public OpConverter {
         pluginPtr->fields = fields.data();
 
         auto pluginObj = creator->createPlugin(
-            "CustomSkipLayerNormPluginDynamicWithSmooth", pluginPtr);
"CustomSkipLayerNormPluginDynamicWithSmooth", pluginPtr.get()); pluginPtr.reset(); @@ -238,7 +238,7 @@ class SkipLayerNormOpConverter : public OpConverter { pluginPtr->fields = fields.data(); auto pluginObj = creator->createPlugin( - "CustomSkipLayerNormPluginDynamic", pluginPtr); + "CustomSkipLayerNormPluginDynamic", pluginPtr.get()); pluginPtr.reset(); From 5c13ccd6663508e9c6ddb77b4352be4fb026506a Mon Sep 17 00:00:00 2001 From: walk_alone <2390335608@qq.com> Date: Tue, 23 Apr 2024 09:27:54 +0800 Subject: [PATCH 05/15] minor changes --- paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc index bbcdaad7f0f0f..988d0d064c862 100644 --- a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc +++ b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc @@ -232,7 +232,7 @@ class SkipLayerNormOpConverter : public OpConverter { "layer")); layer = plugin_layer; } else { - std::unique_ptr plugin_collection( + std::unique_ptr pluginPtr( new nvinfer1::PluginFieldCollection); pluginPtr->nbFields = static_cast(fields.size()); pluginPtr->fields = fields.data(); From 3bdec0b142290c60e3399a5772c8fa5b31017df8 Mon Sep 17 00:00:00 2001 From: walk_alone <2390335608@qq.com> Date: Tue, 23 Apr 2024 10:39:11 +0800 Subject: [PATCH 06/15] interface value update --- paddle/pir/include/core/interface_value.h | 15 +++++++++------ paddle/pir/src/core/interface_support.cc | 7 +------ .../api/analyzer_capi_exp_pd_threads_tester.cc | 2 +- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h index 64619a0e0f591..3041e6d7d0164 100644 --- a/paddle/pir/include/core/interface_value.h +++ b/paddle/pir/include/core/interface_value.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include "paddle/pir/include/core/type_id.h" @@ -27,7 +28,7 @@ class IR_API InterfaceValue { template static InterfaceValue Get(); TypeId type_id() const { return type_id_; } - void *model() const { return model_; } + void *model() const { return model_.get(); } InterfaceValue() = default; InterfaceValue(TypeId type_id) : type_id_(type_id) {} // NOLINT @@ -35,7 +36,7 @@ class IR_API InterfaceValue { InterfaceValue(InterfaceValue &&) noexcept; InterfaceValue &operator=(const InterfaceValue &) = delete; InterfaceValue &operator=(InterfaceValue &&) noexcept; - ~InterfaceValue(); + ~InterfaceValue() = default; void swap(InterfaceValue &&val) { using std::swap; swap(type_id_, val.type_id_); @@ -51,9 +52,10 @@ class IR_API InterfaceValue { private: TypeId type_id_; - void *model_{nullptr}; + std::unique_ptr model_{nullptr, &free}; }; +template template InterfaceValue InterfaceValue::Get() { InterfaceValue val; @@ -64,13 +66,14 @@ InterfaceValue InterfaceValue::Get() { sizeof(typename Interface::Concept) == sizeof(Model), "Compared with Concept, Model class shouldn't define new data members"); - val.model_ = malloc(sizeof(Model)); - if (val.model_ == nullptr) { + void* model_raw = malloc(sizeof(Model)); + if (model_raw == nullptr) { throw("Alloc memory for interface failed."); } static_assert(std::is_trivially_destructible::value, "interface models must be trivially destructible"); - new (val.model_) Model(); + new (model_raw) Model(); + val.model_.reset(model_raw); return val; } diff --git a/paddle/pir/src/core/interface_support.cc 
diff --git a/paddle/pir/src/core/interface_support.cc b/paddle/pir/src/core/interface_support.cc
index 973b98668a061..0d2a87fc6bd66 100644
--- a/paddle/pir/src/core/interface_support.cc
+++ b/paddle/pir/src/core/interface_support.cc
@@ -15,14 +15,9 @@
 #include "paddle/pir/include/core/interface_support.h"
 
 namespace pir {
-InterfaceValue::~InterfaceValue() {
-  if (model_) free(model_);
-}
-
 InterfaceValue::InterfaceValue(InterfaceValue&& val) noexcept {
   type_id_ = val.type_id_;
-  model_ = val.model_;
-  val.model_ = nullptr;
+  model_ = std::move(val.model_);
 }
 
 InterfaceValue& InterfaceValue::operator=(InterfaceValue&& val) noexcept {
diff --git a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
index b330f09c105fa..209ef6be0ad1e 100644
--- a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
+++ b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
@@ -93,7 +93,7 @@ void threads_run(int thread_num) {
     params[i].out_size = 0;
     params[i].out_data = nullptr;
     params[i].thread_index = i;
-    pthread_create(&(threads[i]), nullptr, run, (params + i));
+    pthread_create(&(threads[i]), nullptr, run, &(params[i]));
   }
   for (int i = 0; i < thread_num; ++i) {
     pthread_join(threads[i], nullptr);

From 05bb9761e3848684ded8a7cb05223f0ce5c238cc Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Tue, 23 Apr 2024 10:41:18 +0800
Subject: [PATCH 07/15] minor changes

---
 paddle/pir/include/core/interface_value.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index 3041e6d7d0164..3e36a1d59a9a6 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -55,7 +55,6 @@ class IR_API InterfaceValue {
   std::unique_ptr<void, decltype(&free)> model_{nullptr, &free};
 };
 
-template <typename Interface, typename Model>
 template <typename Interface, typename Model>
 InterfaceValue InterfaceValue::Get() {
   InterfaceValue val;

From 2cc3f5cf8b8d58acf6f4a218dd494485dd2363d4 Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Tue, 23 Apr 2024 10:50:32 +0800
Subject: [PATCH 08/15] linter

---
 paddle/pir/include/core/interface_value.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index 3e36a1d59a9a6..fad0987534414 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -14,8 +14,8 @@
 
 #pragma once
 
-#include <set>
 #include <memory>
+#include <set>
 
 #include "paddle/pir/include/core/type_id.h"
@@ -65,7 +65,7 @@ InterfaceValue InterfaceValue::Get() {
       sizeof(typename Interface::Concept) == sizeof(Model),
       "Compared with Concept, Model class shouldn't define new data members");
 
-  void* model_raw = malloc(sizeof(Model));
+  void *model_raw = malloc(sizeof(Model));
   if (model_raw == nullptr) {
     throw("Alloc memory for interface failed.");
   }

From 375c415432759c93b642a4dd6740e5a676a2de30 Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Tue, 23 Apr 2024 13:32:37 +0800
Subject: [PATCH 09/15] capi fix

---
 .../inference/api/analyzer_capi_exp_pd_threads_tester.cc | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
index 209ef6be0ad1e..1b66f7c246f7e 100644
--- a/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
+++ b/test/cpp/inference/api/analyzer_capi_exp_pd_threads_tester.cc
@@ -39,7 +39,7 @@ typedef struct RunParameter {
   size_t shape_size;
   float* input_data;
   int32_t out_size;
-  float* out_data;
+  std::vector<float> out_data;
   int32_t thread_index;
 } RunParameter;
 
@@ -62,9 +62,8 @@ void* run(void* thread_param) {
     param->out_size = param->out_size * output_shape->data[index];
   }
   PD_OneDimArrayInt32Destroy(output_shape);
-  std::vector<float> out_data(param->out_size);
-  param->out_data = out_data.data();
-  PD_TensorCopyToCpuFloat(output_tensor, param->out_data);
+  param->out_data.resize(param->out_size);
+  PD_TensorCopyToCpuFloat(output_tensor, param->out_data.data());
   PD_TensorDestroy(output_tensor);
   PD_OneDimArrayCstrDestroy(output_names);
   PD_TensorDestroy(tensor);
@@ -91,7 +90,6 @@ void threads_run(int thread_num) {
     params[i].shape_size = 4;
     params[i].input_data = input.data();
     params[i].out_size = 0;
-    params[i].out_data = nullptr;
     params[i].thread_index = i;
     pthread_create(&(threads[i]), nullptr, run, &(params[i]));
   }

From 9844a3bdad7a0a656b9702be2a4ebee1c3f9579d Mon Sep 17 00:00:00 2001
From: walk_alone <2390335608@qq.com>
Date: Tue, 23 Apr 2024 21:17:03 +0800
Subject: [PATCH 10/15] restore

---
 paddle/phi/api/profiler/device_tracer.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddle/phi/api/profiler/device_tracer.cc b/paddle/phi/api/profiler/device_tracer.cc
index 4e31dc86c1e4f..085d28220a6a9 100644
--- a/paddle/phi/api/profiler/device_tracer.cc
+++ b/paddle/phi/api/profiler/device_tracer.cc
@@ -176,9 +176,9 @@ void DisableActivity() {
 void CUPTIAPI bufferRequested(uint8_t **buffer,
                               size_t *size,
                               size_t *maxNumRecords) {
-  std::vector<uint8_t> buf(kBufSize + kAlignSize);
+  uint8_t *buf = reinterpret_cast<uint8_t *>(malloc(kBufSize + kAlignSize));
   *size = kBufSize;
-  *buffer = ALIGN_BUFFER(buf.data(), kAlignSize);
+  *buffer = ALIGN_BUFFER(buf, kAlignSize);
   *maxNumRecords = 0;
 }

From 54bb40dafe9e039a95e51ddf8bfb9b6fd9750306 Mon Sep 17 00:00:00 2001
From: walkalone20 <73780235+walkalone20@users.noreply.github.com>
Date: Wed, 1 May 2024 10:46:34 +0800
Subject: [PATCH 11/15] minor changes

---
 paddle/pir/include/core/interface_value.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index fad0987534414..02c41cd751168 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -52,7 +52,7 @@ class IR_API InterfaceValue {
 
  private:
   TypeId type_id_;
-  std::unique_ptr<void, decltype(&free)> model_{nullptr, &free};
+  std::unique_ptr<void, decltype(static_cast<void (*)(void *)>(free))> model_{nullptr, static_cast<void (*)(void *)>(free)};
 };
 
 template <typename Interface, typename Model>

From 2908bc42f9a4c12e7ed100487b1f1e41f05349b2 Mon Sep 17 00:00:00 2001
From: walkalone20 <73780235+walkalone20@users.noreply.github.com>
Date: Wed, 1 May 2024 10:48:05 +0800
Subject: [PATCH 12/15] format

---
 paddle/pir/include/core/interface_value.h | 82 ++++++++++++++++++++++-
 1 file changed, 81 insertions(+), 1 deletion(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index 02c41cd751168..ab2e9f85f7d54 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -52,7 +52,87 @@ class IR_API InterfaceValue {
 
  private:
   TypeId type_id_;
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <cstdlib>
+#include <memory>
+#include <set>
+
+#include "paddle/pir/include/core/type_id.h"
+#include "paddle/pir/include/core/utils.h"
+
+namespace pir {
+
+class IR_API InterfaceValue {
+ public:
+  template <typename Interface, typename Model>
+  static InterfaceValue Get();
+  TypeId type_id() const { return type_id_; }
+  void *model() const { return model_.get(); }
+
+  InterfaceValue() = default;
+  InterfaceValue(TypeId type_id) : type_id_(type_id) {}  // NOLINT
+  InterfaceValue(const InterfaceValue &) = delete;
+  InterfaceValue(InterfaceValue &&) noexcept;
+  InterfaceValue &operator=(const InterfaceValue &) = delete;
+  InterfaceValue &operator=(InterfaceValue &&) noexcept;
+  ~InterfaceValue() = default;
+  void swap(InterfaceValue &&val) {
+    using std::swap;
+    swap(type_id_, val.type_id_);
+    swap(model_, val.model_);
+  }
+
+  ///
+  /// \brief Comparison operations.
+  ///
+  inline bool operator<(const InterfaceValue &other) const {
+    return type_id_ < other.type_id_;
+  }
+
+ private:
+  TypeId type_id_;
+  std::unique_ptr<void, decltype(static_cast<void (*)(void *)>(free))> model_{
+      nullptr, static_cast<void (*)(void *)>(free)};
+};
+
+template <typename Interface, typename Model>
+InterfaceValue InterfaceValue::Get() {
+  InterfaceValue val;
+  val.type_id_ = TypeId::get<Interface>();
+  static_assert(std::is_base_of<typename Interface::Concept, Model>::value,
+                "Model must derived from corresponding Interface Concept.");
+  static_assert(
+      sizeof(typename Interface::Concept) == sizeof(Model),
+      "Compared with Concept, Model class shouldn't define new data members");
+
+  void *model_raw = malloc(sizeof(Model));
+  if (model_raw == nullptr) {
+    throw("Alloc memory for interface failed.");
+  }
+  static_assert(std::is_trivially_destructible<Model>::value,
+                "interface models must be trivially destructible");
+  new (model_raw) Model();
+  val.model_.reset(model_raw);
+  return val;
+}
+
+using InterfaceSet = std::set<InterfaceValue>;
+}  // namespace pir
-  std::unique_ptr<void, decltype(static_cast<void (*)(void *)>(free))> model_{nullptr, static_cast<void (*)(void *)>(free)};
 };
 
 template <typename Interface, typename Model>

From 9745f0200d2db615ff6e7dbc76ea757a6e445151 Mon Sep 17 00:00:00 2001
From: walkalone20 <73780235+walkalone20@users.noreply.github.com>
Date: Wed, 1 May 2024 11:00:03 +0800
Subject: [PATCH 13/15] minor changes

---
 paddle/pir/include/core/interface_value.h | 91 ++---------------------
 1 file changed, 5 insertions(+), 86 deletions(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index ab2e9f85f7d54..595b8991391c0 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -14,7 +14,6 @@
 
 #pragma once
 
-#include <memory>
 #include <set>
 
 #include "paddle/pir/include/core/type_id.h"
@@ -28,7 +27,7 @@ class IR_API InterfaceValue {
   template <typename Interface, typename Model>
   static InterfaceValue Get();
   TypeId type_id() const { return type_id_; }
-  void *model() const { return model_.get(); }
+  void *model() const { return model_; }
 
   InterfaceValue() = default;
   InterfaceValue(TypeId type_id) : type_id_(type_id) {}  // NOLINT
@@ -36,7 +35,7 @@ class IR_API InterfaceValue {
   InterfaceValue(InterfaceValue &&) noexcept;
   InterfaceValue &operator=(const InterfaceValue &) = delete;
   InterfaceValue &operator=(InterfaceValue &&) noexcept;
-  ~InterfaceValue() = default;
+  ~InterfaceValue();
   void swap(InterfaceValue &&val) {
     using std::swap;
     swap(type_id_, val.type_id_);
@@ -52,93 +51,5 @@ class IR_API InterfaceValue {
   ///
   /// \brief Comparison operations.
-  ///
-  inline bool operator<(const InterfaceValue &other) const {
-    return type_id_ < other.type_id_;
-  }
-
- private:
-  TypeId type_id_;
-// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <cstdlib>
-#include <memory>
-#include <set>
-
-#include "paddle/pir/include/core/type_id.h"
-#include "paddle/pir/include/core/utils.h"
-
-namespace pir {
-
-class IR_API InterfaceValue {
- public:
-  template <typename Interface, typename Model>
-  static InterfaceValue Get();
-  TypeId type_id() const { return type_id_; }
-  void *model() const { return model_.get(); }
-
-  InterfaceValue() = default;
-  InterfaceValue(TypeId type_id) : type_id_(type_id) {}  // NOLINT
-  InterfaceValue(const InterfaceValue &) = delete;
-  InterfaceValue(InterfaceValue &&) noexcept;
-  InterfaceValue &operator=(const InterfaceValue &) = delete;
-  InterfaceValue &operator=(InterfaceValue &&) noexcept;
-  ~InterfaceValue() = default;
-  void swap(InterfaceValue &&val) {
-    using std::swap;
-    swap(type_id_, val.type_id_);
-    swap(model_, val.model_);
-  }
-
-  ///
-  /// \brief Comparison operations.
-  ///
-  inline bool operator<(const InterfaceValue &other) const {
-    return type_id_ < other.type_id_;
-  }
-
- private:
-  TypeId type_id_;
-  std::unique_ptr<void, decltype(static_cast<void (*)(void *)>(free))> model_{
-      nullptr, static_cast<void (*)(void *)>(free)};
-};
-
-template <typename Interface, typename Model>
-InterfaceValue InterfaceValue::Get() {
-  InterfaceValue val;
-  val.type_id_ = TypeId::get<Interface>();
-  static_assert(std::is_base_of<typename Interface::Concept, Model>::value,
-                "Model must derived from corresponding Interface Concept.");
-  static_assert(
-      sizeof(typename Interface::Concept) == sizeof(Model),
-      "Compared with Concept, Model class shouldn't define new data members");
-
-  void *model_raw = malloc(sizeof(Model));
-  if (model_raw == nullptr) {
-    throw("Alloc memory for interface failed.");
-  }
-  static_assert(std::is_trivially_destructible<Model>::value,
-                "interface models must be trivially destructible");
-  new (model_raw) Model();
-  val.model_.reset(model_raw);
-  return val;
-}
-
-using InterfaceSet = std::set<InterfaceValue>;
-}  // namespace pir
 };
 
 template <typename Interface, typename Model>
@@ -64,14 +65,13 @@ InterfaceValue InterfaceValue::Get() {
       sizeof(typename Interface::Concept) == sizeof(Model),
       "Compared with Concept, Model class shouldn't define new data members");
 
-  void *model_raw = malloc(sizeof(Model));
-  if (model_raw == nullptr) {
+  val.model_ = malloc(sizeof(Model));
+  if (val.model_ == nullptr) {
     throw("Alloc memory for interface failed.");
   }
   static_assert(std::is_trivially_destructible<Model>::value,
                 "interface models must be trivially destructible");
-  new (model_raw) Model();
-  val.model_.reset(model_raw);
+  new (val.model_) Model();
   return val;
 }

From 7ad02eeb2d5913bf46e8add920ecefb2caff0c99 Mon Sep 17 00:00:00 2001
From: walkalone20 <73780235+walkalone20@users.noreply.github.com>
Date: Wed, 1 May 2024 11:12:20 +0800
Subject: [PATCH 14/15] Update interface_value.h

---
 paddle/pir/include/core/interface_value.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index 595b8991391c0..be2e4f3d8b7cc 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -27,7 +27,7 @@ class IR_API InterfaceValue {
   template <typename Interface, typename Model>
   static InterfaceValue Get();
   TypeId type_id() const { return type_id_; }
-  void *model() const { return model_; }
+  void *model() const { return model_.get(); }
 
   InterfaceValue() = default;
   InterfaceValue(TypeId type_id) : type_id_(type_id) {}  // NOLINT

From ae6e00e50c874681b96770b5cb155b4669b39523 Mon Sep 17 00:00:00 2001
From: walkalone20 <73780235+walkalone20@users.noreply.github.com>
Date: Wed, 1 May 2024 11:16:30 +0800
Subject: [PATCH 15/15] Update interface_value.h

---
 paddle/pir/include/core/interface_value.h | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/paddle/pir/include/core/interface_value.h b/paddle/pir/include/core/interface_value.h
index be2e4f3d8b7cc..030a6065b8415 100644
--- a/paddle/pir/include/core/interface_value.h
+++ b/paddle/pir/include/core/interface_value.h
@@ -14,6 +14,7 @@
 
 #pragma once
 
+#include <memory>
 #include <set>
 
 #include "paddle/pir/include/core/type_id.h"
@@ -35,7 +36,7 @@ class IR_API InterfaceValue {
   InterfaceValue(InterfaceValue &&) noexcept;
   InterfaceValue &operator=(const InterfaceValue &) = delete;
   InterfaceValue &operator=(InterfaceValue &&) noexcept;
-  ~InterfaceValue();
+  ~InterfaceValue() = default;
   void swap(InterfaceValue &&val) {
     using std::swap;
     swap(type_id_, val.type_id_);
@@ -65,13 +66,14 @@ InterfaceValue InterfaceValue::Get() {
       sizeof(typename Interface::Concept) == sizeof(Model),
       "Compared with Concept, Model class shouldn't define new data members");
 
-  val.model_ = malloc(sizeof(Model));
-  if (val.model_ == nullptr) {
+  void *model_raw = malloc(sizeof(Model));
+  if (model_raw == nullptr) {
     throw("Alloc memory for interface failed.");
   }
   static_assert(std::is_trivially_destructible<Model>::value,
                 "interface models must be trivially destructible");
-  new (val.model_) Model();
+  new (model_raw) Model();
+  val.model_.reset(model_raw);
   return val;
 }
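Note on the pattern applied throughout this series: a TensorRT PluginFieldCollection that was malloc'd and then manually free'd is replaced by a std::unique_ptr that is released with reset() once createPlugin() has consumed the collection. The sketch below is a minimal, standalone illustration of that before/after shape; the PluginField and PluginFieldCollection structs are stand-ins with the same members as the nvinfer1 types, so the example compiles and runs without TensorRT:

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct PluginField {};          // stand-in for nvinfer1::PluginField
    struct PluginFieldCollection {  // stand-in for nvinfer1::PluginFieldCollection
      int32_t nbFields;
      const PluginField* fields;
    };

    // Consumer that only reads the collection while it runs, like
    // IPluginCreator::createPlugin() in the converters patched above.
    void create_plugin(const PluginFieldCollection* fc) { (void)fc; }

    int main() {
      std::vector<PluginField> fields;

      // Before: raw malloc plus a "remember to free" comment; any early
      // return or exception between malloc and free leaked the block.
      // After: the unique_ptr owns the allocation, and reset() (or plain
      // scope exit) frees it exactly once.
      std::unique_ptr<PluginFieldCollection> plugin_collection(
          new PluginFieldCollection);
      plugin_collection->nbFields = static_cast<int32_t>(fields.size());
      plugin_collection->fields = fields.data();

      create_plugin(plugin_collection.get());
      plugin_collection.reset();  // replaces free(plugin_collection)
      return 0;
    }

A side effect worth noting: the malloc'd originals allocated sizeof(*ptr) + fields.size() * sizeof(PluginField) for what is a single fixed-size struct, since the collection only points at the vector's storage and never copies the fields in. The unique_ptr version therefore drops the extra size and allocates just the struct.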