diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index 9a72de15f3974aa85a7b82a94cdc3936a06330c5..03f5a751511adba7b508db9944c30d17866bad2d 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -39,7 +39,7 @@ void TensorRTEngine::InitNetwork() {
             nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
     infer_builder_config_.reset(infer_builder_->createBuilderConfig());
     infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
-    optim_profile_.reset(infer_builder_->createOptimizationProfile());
+    optim_profile_ = infer_builder_->createOptimizationProfile();
 #endif
   } else {
     infer_network_.reset(infer_builder_->createNetwork());
@@ -185,7 +185,7 @@ void TensorRTEngine::FreezeNetwork() {
           input.first.c_str(), nvinfer1::OptProfileSelector::kOPT,
           Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
     }
-    infer_builder_config_->addOptimizationProfile(optim_profile_.get());
+    infer_builder_config_->addOptimizationProfile(optim_profile_);
     if (WithFp16()) {
       infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
       if (disable_trt_plugin_fp16()) {
diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h
index 9ca0e979bf6e74d1730c8e2ae1a26cbf7e0ea576..fdd71b0d884004c84e2ee15eea522c64ff943dd9 100644
--- a/paddle/fluid/inference/tensorrt/engine.h
+++ b/paddle/fluid/inference/tensorrt/engine.h
@@ -354,7 +354,7 @@ class TensorRTEngine {
   infer_ptr<nvinfer1::INetworkDefinition> infer_networkv2_;
 #if IS_TRT_VERSION_GE(6000)
   infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
-  std::unique_ptr<nvinfer1::IOptimizationProfile> optim_profile_;
+  nvinfer1::IOptimizationProfile* optim_profile_;
   std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_;
 #endif
   std::mutex mutex_;
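
Note on the ownership change: `IBuilder::createOptimizationProfile()` returns a profile whose lifetime is managed by the builder rather than the caller, so holding it in a `std::unique_ptr` that deletes it on engine teardown is unsafe. The diff therefore stores a plain non-owning pointer in `optim_profile_` and passes it directly to `addOptimizationProfile()` (hence the dropped `.get()`). Below is a minimal standalone sketch of the same TensorRT 6+ pattern, not Paddle code; the function name, input name `"x"`, and the shapes are illustrative assumptions.

```cpp
#include <NvInfer.h>

// Sketch of the dynamic-shape profile setup the diff switches to: the profile
// is kept as a raw pointer, configured, and registered with the builder
// config; the application never deletes it.
void ConfigureDynamicShapeProfile(nvinfer1::IBuilder* builder,
                                  nvinfer1::IBuilderConfig* config) {
  // Created by (and belonging to) the builder; we only keep a non-owning
  // pointer, mirroring the new optim_profile_ member type.
  nvinfer1::IOptimizationProfile* profile =
      builder->createOptimizationProfile();

  // One kMIN/kOPT/kMAX range per dynamic input, analogous to Paddle's
  // min/optim/max input-shape maps in FreezeNetwork(). Name and dims are
  // made up for illustration.
  profile->setDimensions("x", nvinfer1::OptProfileSelector::kMIN,
                         nvinfer1::Dims4(1, 3, 224, 224));
  profile->setDimensions("x", nvinfer1::OptProfileSelector::kOPT,
                         nvinfer1::Dims4(4, 3, 224, 224));
  profile->setDimensions("x", nvinfer1::OptProfileSelector::kMAX,
                         nvinfer1::Dims4(8, 3, 224, 224));

  // The builder config keeps referring to the profile while the engine is
  // built, so deleting it early (as the old unique_ptr member could) would
  // leave a dangling reference.
  config->addOptimizationProfile(profile);
}
```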