diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index 011d9a25fa4c4e6851a9e36d642dbd9b11846af6..b6c23d0d7b8eb899c31e3e6a6db04316788ba7e4 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -39,7 +39,7 @@ void TensorRTEngine::InitNetwork() {
             nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
     infer_builder_config_.reset(infer_builder_->createBuilderConfig());
     infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
-    optim_profile_.reset(infer_builder_->createOptimizationProfile());
+    optim_profile_ = infer_builder_->createOptimizationProfile();
 #endif
   } else {
     infer_network_.reset(infer_builder_->createNetwork());
@@ -160,7 +160,7 @@ void TensorRTEngine::FreezeNetwork() {
           input.first.c_str(), nvinfer1::OptProfileSelector::kOPT,
           Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
     }
-    infer_builder_config_->addOptimizationProfile(optim_profile_.get());
+    infer_builder_config_->addOptimizationProfile(optim_profile_);
     if (WithFp16()) {
       infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
       if (disable_trt_plugin_fp16()) {
diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h
index 03fd7a283b44c0744fdfb1a6ad50ecf28cc92511..0a4f1c94f90135bc0199b55a6b9ccf6433738244 100644
--- a/paddle/fluid/inference/tensorrt/engine.h
+++ b/paddle/fluid/inference/tensorrt/engine.h
@@ -340,7 +340,7 @@ class TensorRTEngine {
   infer_ptr<nvinfer1::INetworkDefinition> infer_networkv2_;
 #if IS_TRT_VERSION_GE(6000)
   infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
-  std::unique_ptr<nvinfer1::IOptimizationProfile> optim_profile_;
+  nvinfer1::IOptimizationProfile* optim_profile_;
   std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_;
 #endif
   std::mutex mutex_;
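
Note (reviewer context, not part of the patch): the profile returned by `IBuilder::createOptimizationProfile()` has its lifetime managed by the builder, which is presumably why the `std::unique_ptr` wrapper is dropped here in favor of a raw `nvinfer1::IOptimizationProfile*` member. Below is a minimal standalone sketch of that ownership pattern; it is not Paddle code, and the `SampleLogger` class, the tensor name `"input"`, and the example shapes are placeholders chosen for illustration.

```cpp
// Sketch of the builder-owned optimization profile pattern (TensorRT 6/7 era API).
#include <NvInfer.h>
#include <iostream>

class SampleLogger : public nvinfer1::ILogger {
  void log(Severity severity, const char* msg) noexcept override {
    if (severity <= Severity::kWARNING) std::cerr << msg << std::endl;
  }
};

int main() {
  SampleLogger logger;
  nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);
  nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();

  // The profile is owned by the builder: keep a plain pointer and never
  // wrap it in a smart pointer that would try to destroy it again.
  nvinfer1::IOptimizationProfile* profile = builder->createOptimizationProfile();
  profile->setDimensions("input", nvinfer1::OptProfileSelector::kMIN,
                         nvinfer1::Dims4{1, 3, 224, 224});
  profile->setDimensions("input", nvinfer1::OptProfileSelector::kOPT,
                         nvinfer1::Dims4{4, 3, 224, 224});
  profile->setDimensions("input", nvinfer1::OptProfileSelector::kMAX,
                         nvinfer1::Dims4{8, 3, 224, 224});
  config->addOptimizationProfile(profile);

  // ... create the network, mark "input" as a dynamic-shape input,
  //     and build the engine with this config ...

  // Releasing the builder also releases the profile it created.
  config->destroy();
  builder->destroy();
  return 0;
}
```

The `destroy()` calls match the TRT 6/7 API that the `IS_TRT_VERSION_GE(6000)` guard targets; on TensorRT 8+ they are deprecated in favor of plain `delete` on the builder and config, while the profile is still left to the builder.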