From 00f1ee9413bdcdefa32198681e9cad7d46ef462f Mon Sep 17 00:00:00 2001
From: HexToString <506181616@qq.com>
Date: Fri, 12 Mar 2021 12:33:53 +0000
Subject: [PATCH] fix memory bug

---
 core/predictor/framework/infer.h | 65 +++++++++++++++++---------------
 1 file changed, 35 insertions(+), 30 deletions(-)

diff --git a/core/predictor/framework/infer.h b/core/predictor/framework/infer.h
index a3f08ea7..f70c2b6c 100755
--- a/core/predictor/framework/infer.h
+++ b/core/predictor/framework/infer.h
@@ -626,44 +626,49 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
     //get out and copy to void* out
     TensorVector* tensorVector_out_pointer = reinterpret_cast<TensorVector*>(out);
     std::vector<std::string> outnames = core->GetOutputNames();
+    std::vector<int> output_shape;
+    int out_num =0;
+    int dataType =0;
+    void* databuf_data = NULL;
+    char* databuf_char = NULL;
+    size_t databuf_size = 0;
     for (int i = 0; i < outnames.size(); ++i){
       auto lod_tensor_out = core->GetOutputHandle(outnames[i]);
-      std::vector<int> output_shape = lod_tensor_out->shape();
-      int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
-      int dataType = lod_tensor_out->type();
-      char* databuf_data = NULL;
-      size_t databuf_size = 0;
+      output_shape = lod_tensor_out->shape();
+      out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());
+      dataType = lod_tensor_out->type();
       if(dataType == paddle::PaddleDType::FLOAT32){
-        float* data_out = new float[out_num];
-        lod_tensor_out->CopyToCpu(data_out);
-        databuf_data = reinterpret_cast<char*>(data_out);
         databuf_size = out_num*sizeof(float);
-      }else if(dataType == paddle::PaddleDType::INT64){
-        int64_t* data_out = new int64_t[out_num];
+        void* databuf_data = MempoolWrapper::instance().malloc(databuf_size);
+        if (!databuf_data) {
+          LOG(ERROR) << "Malloc failed, size: " << databuf_size;
+          return -1;
+        }
+        float* data_out = reinterpret_cast<float*>(databuf_data);
+        //float* data_out = new float[out_num];
         lod_tensor_out->CopyToCpu(data_out);
-
databuf_data = reinterpret_cast<char*>(data_out);
+        databuf_char = reinterpret_cast<char*>(data_out);
+      }else if(dataType == paddle::PaddleDType::INT64){
         databuf_size = out_num*sizeof(int64_t);
-      }else if(dataType == paddle::PaddleDType::INT32){
-        int32_t* data_out = new int32_t[out_num];
+        void* databuf_data = MempoolWrapper::instance().malloc(databuf_size);
+        if (!databuf_data) {
+          LOG(ERROR) << "Malloc failed, size: " << databuf_size;
+          return -1;
+        }
+        int64_t* data_out = reinterpret_cast<int64_t*>(databuf_data);
         lod_tensor_out->CopyToCpu(data_out);
-        databuf_data = reinterpret_cast<char*>(data_out);
+        databuf_char = reinterpret_cast<char*>(data_out);
+      }else if(dataType == paddle::PaddleDType::INT32){
         databuf_size = out_num*sizeof(int32_t);
+        void* databuf_data = MempoolWrapper::instance().malloc(databuf_size);
+        if (!databuf_data) {
+          LOG(ERROR) << "Malloc failed, size: " << databuf_size;
+          return -1;
+        }
+        int32_t* data_out = reinterpret_cast<int32_t*>(databuf_data);
+        lod_tensor_out->CopyToCpu(data_out);
+        databuf_char = reinterpret_cast<char*>(data_out);
       }
-      /*
-      paddle::PaddleTensor* tensor_out = new paddle::PaddleTensor();
-      tensor_out->name = outnames[i];
-      tensor_out->dtype = paddle::PaddleDType(dataType);
-      tensor_out->shape.assign(output_shape.begin(), output_shape.end());
-      std::vector<std::vector<size_t>> out_lod = lod_tensor_out->lod();
-      for (int li = 0; li < out_lod.size(); ++li) {
-        std::vector<size_t> lod_element;
-        lod_element.assign(out_lod[li].begin(), out_lod[li].end());
-        tensor_out->lod.push_back(lod_element);
-      }
-      paddle::PaddleBuf* newData = new paddle::PaddleBuf(databuf_data,databuf_size);
-      tensor_out->data = *newData;
-      tensorVector_out_pointer->push_back(*tensor_out);
-      */
       paddle::PaddleTensor tensor_out;
       tensor_out.name = outnames[i];
       tensor_out.dtype = paddle::PaddleDType(dataType);
@@ -674,7 +679,7 @@ class FluidInferEngine : public CloneDBReloadableInferEngine {
         lod_element.assign(out_lod[li].begin(), out_lod[li].end());
         tensor_out.lod.push_back(lod_element);
       }
-      paddle::PaddleBuf
paddleBuf(databuf_data,databuf_size);
+      paddle::PaddleBuf paddleBuf(databuf_char,databuf_size);
       tensor_out.data = paddleBuf;
       tensorVector_out_pointer->push_back(tensor_out);
     }
--
GitLab