From 291bc0b7da5ad3d1638224e7f66e85d8d0e59327 Mon Sep 17 00:00:00 2001
From: minghaoBD
Date: Fri, 27 May 2022 03:19:27 +0000
Subject: [PATCH] shared_ptr is nullptr in enqueue

---
 paddle/fluid/inference/tensorrt/plugin/spmm_plugin.cu | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/inference/tensorrt/plugin/spmm_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/spmm_plugin.cu
index 3aecd3795cea2..913d2cf24a9ad 100644
--- a/paddle/fluid/inference/tensorrt/plugin/spmm_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/spmm_plugin.cu
@@ -678,8 +678,8 @@ int SpmmPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
   auto* output = static_cast(outputs[0]);
   auto* weight_compressed_dev_p_ = weight_compressed_dev_global_.get();
   char* test_weight = new char[compressed_size_];
-  cudaMemcpy(weight_compressed_dev_global_.get(), test_weight, compressed_size_,
-             cudaMemcpyHostToDevice);
+  cudaMemcpy(test_weight, weight_compressed_dev_global_.get(), compressed_size_,
+             cudaMemcpyDeviceToHost);
   std::cout << "compressed weight:";
   for(int i=0; i<10; i++) {
     std::cout << " " << static_cast(reinterpret_cast(weight_compressed_)[i]);
@@ -691,8 +691,6 @@ int SpmmPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc* inputDesc,
     std::cout << " " << static_cast(reinterpret_cast(test_weight)[i]);
   }
   std::cout << std::endl;
-
-
   cusparseStatus_t status = paddle::platform::dynload::cusparseLtMatmul(
       &spmm_context_.handle, &spmm_context_.plan, &alpha, input,
       weight_compressed_dev_p_, &beta, output, output, workSpace, &stream, 1);