diff --git a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc
index 3d390c8ad3b8897721b634b373b84e0993fc897b..841a95db38ce7cf0cb5961ff04cb569ee2633e6f 100644
--- a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc
@@ -37,11 +37,11 @@ class Conv2dOpConverter : public OpConverter {
     auto* Y_t = Y_v->GetMutable<framework::LoDTensor>();
 
     platform::CPUPlace cpu_place;
-    framework::LoDTensor* weight_tensor = new framework::LoDTensor();
+    std::unique_ptr<framework::LoDTensor> weight_tensor(
+        new framework::LoDTensor());
     weight_tensor->Resize(Y_t->dims());
-    TensorCopySync((*Y_t), cpu_place, weight_tensor);
-    engine_->weight_map[op_desc.Input("Filter").front()] =
-        std::move(std::unique_ptr<framework::Tensor>(weight_tensor));
+    TensorCopySync((*Y_t), cpu_place, weight_tensor.get());
+
     auto* weight_data = weight_tensor->mutable_data<float>(platform::CPUPlace());
@@ -78,6 +78,8 @@ class Conv2dOpConverter : public OpConverter {
     layer->setNbGroups(groups);
 
     auto output_name = op_desc.Output("Output").front();
+    engine_->weight_map[op_desc.Input("Filter").front()] =
+        std::move(weight_tensor);
     engine_->SetITensor(output_name, layer->getOutput(0));
     if (test_mode) {
       engine_->DeclareOutput(output_name);
diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
index 066e5de373d15d50e5583e6ed7c5a07ae8abe791..60a72b4eb5c75b5cd12305f13763a9a1a567213f 100644
--- a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
@@ -41,12 +41,10 @@ class ElementwiseWeightOpConverter : public OpConverter {
     auto* Y_t = Y_v->GetMutable<framework::LoDTensor>();
 
     platform::CPUPlace cpu_place;
-    framework::LoDTensor* weight_tensor = new framework::LoDTensor();
+    std::unique_ptr<framework::LoDTensor> weight_tensor(
+        new framework::LoDTensor());
     weight_tensor->Resize(Y_t->dims());
-    TensorCopySync((*Y_t), cpu_place, weight_tensor);
-    engine_->weight_map[op_desc.Input("Y").front()] =
-        std::move(std::unique_ptr<framework::Tensor>(weight_tensor));
-
+    TensorCopySync((*Y_t), cpu_place, weight_tensor.get());
     auto* weight_data = weight_tensor->mutable_data<float>(platform::CPUPlace());
 
     auto scale_mode = nvinfer1::ScaleMode::kELEMENTWISE;
@@ -90,6 +88,8 @@ class ElementwiseWeightOpConverter : public OpConverter {
         engine_, Scale, *const_cast<nvinfer1::ITensor*>(X), scale_mode,
         shift_weights.get(), scale_weights.get(), power_weights.get());
     auto output_name = op_desc.Output("Out")[0];
+
+    engine_->weight_map[op_desc.Input("Y").front()] = std::move(weight_tensor);
     engine_->SetITensor(output_name, layer->getOutput(0));
     if (test_mode) {  // the test framework can not determine which is the
                       // output, so place the declaration inside.
diff --git a/paddle/fluid/inference/tensorrt/convert/fc_op.cc b/paddle/fluid/inference/tensorrt/convert/fc_op.cc
index 653ddb0ccae178008869e069617d7db2764f3866..ad98d85aae9cf594922aca00c43718ccfbce2278 100644
--- a/paddle/fluid/inference/tensorrt/convert/fc_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/fc_op.cc
@@ -78,10 +78,8 @@ class FcOpConverter : public OpConverter {
     PADDLE_ENFORCE_EQ(weight_tensor.dims().size(), 2UL);  // a matrix
     size_t n_output = weight_tensor.dims()[1];
 
-    framework::LoDTensor* tmp = new framework::LoDTensor();
+    std::unique_ptr<framework::LoDTensor> tmp(new framework::LoDTensor());
     tmp->Resize(weight_tensor.dims());
-    engine_->weight_map[op_desc.Input("Y").front()] =
-        std::move(std::unique_ptr<framework::Tensor>(tmp));
 
     memcpy(tmp->mutable_data<float>(platform::CPUPlace()), weight_data,
            Y_t->dims()[0] * Y_t->dims()[1] * sizeof(float));
@@ -110,6 +108,7 @@ class FcOpConverter : public OpConverter {
     auto output_name = op_desc.Output("Out").front();
 
     engine_->SetITensor(output_name, layer->getOutput(0));
+    engine_->weight_map[op_desc.Input("Y").front()] = std::move(tmp);
     if (test_mode) {
       engine_->DeclareOutput(output_name);
     }