未验证 提交 bdb81d16 编写于 作者: P Pei Yang 提交者: GitHub

bugfix for unique_ptr of IOptimizationProfile (#23917) (#26806)

This commit fixes the compiling bug regarding unique_ptr of IOptimizationProfile.

IOptimizationProfile has a protected dtor and its lifetime is managed by TensorRT
internally. Applications shouldn't delete the IOptimizationProfile pointer.
See TensorRT document: https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/c_api/classnvinfer1_1_1_i_builder.html#a9ac47e100454151d8206ac91d543299a
test=develop
Co-authored-by: Jeng Bai-Cheng <jeng1220@users.noreply.github.com>
上级 c80684fa
...@@ -39,7 +39,7 @@ void TensorRTEngine::InitNetwork() { ...@@ -39,7 +39,7 @@ void TensorRTEngine::InitNetwork() {
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH))); nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
infer_builder_config_.reset(infer_builder_->createBuilderConfig()); infer_builder_config_.reset(infer_builder_->createBuilderConfig());
infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_; infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
optim_profile_.reset(infer_builder_->createOptimizationProfile()); optim_profile_ = infer_builder_->createOptimizationProfile();
#endif #endif
} else { } else {
infer_network_.reset(infer_builder_->createNetwork()); infer_network_.reset(infer_builder_->createNetwork());
...@@ -185,7 +185,7 @@ void TensorRTEngine::FreezeNetwork() { ...@@ -185,7 +185,7 @@ void TensorRTEngine::FreezeNetwork() {
input.first.c_str(), nvinfer1::OptProfileSelector::kOPT, input.first.c_str(), nvinfer1::OptProfileSelector::kOPT,
Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true)); Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
} }
infer_builder_config_->addOptimizationProfile(optim_profile_.get()); infer_builder_config_->addOptimizationProfile(optim_profile_);
if (WithFp16()) { if (WithFp16()) {
infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16); infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
if (disable_trt_plugin_fp16()) { if (disable_trt_plugin_fp16()) {
......
...@@ -354,7 +354,7 @@ class TensorRTEngine { ...@@ -354,7 +354,7 @@ class TensorRTEngine {
infer_ptr<nvinfer1::INetworkDefinition> infer_networkv2_; infer_ptr<nvinfer1::INetworkDefinition> infer_networkv2_;
#if IS_TRT_VERSION_GE(6000) #if IS_TRT_VERSION_GE(6000)
infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_; infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
std::unique_ptr<nvinfer1::IOptimizationProfile> optim_profile_; nvinfer1::IOptimizationProfile* optim_profile_;
std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_; std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_;
#endif #endif
std::mutex mutex_; std::mutex mutex_;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册