未验证 提交 bef4afa6 编写于 作者: J Jeng Bai-Cheng 提交者: GitHub

bugfix for unique_ptr of IOptimizationProfile (#23917)

This commit fixes the compilation bug regarding the unique_ptr of IOptimizationProfile.

IOptimizationProfile has a protected destructor and is managed internally by TensorRT.
The application must not delete the IOptimizationProfile pointer itself.
See TensorRT document: https://docs.nvidia.com/deeplearning/sdk/tensorrt-api/c_api/classnvinfer1_1_1_i_builder.html#a9ac47e100454151d8206ac91d543299a
test=develop
上级 49e4ee27
......@@ -39,7 +39,7 @@ void TensorRTEngine::InitNetwork() {
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
infer_builder_config_.reset(infer_builder_->createBuilderConfig());
infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
optim_profile_.reset(infer_builder_->createOptimizationProfile());
optim_profile_ = infer_builder_->createOptimizationProfile();
#endif
} else {
infer_network_.reset(infer_builder_->createNetwork());
......@@ -160,7 +160,7 @@ void TensorRTEngine::FreezeNetwork() {
input.first.c_str(), nvinfer1::OptProfileSelector::kOPT,
Vec2TRT_Dims(optim_input_shape_[input.first], input.first, true));
}
infer_builder_config_->addOptimizationProfile(optim_profile_.get());
infer_builder_config_->addOptimizationProfile(optim_profile_);
if (WithFp16()) {
infer_builder_config_->setFlag(nvinfer1::BuilderFlag::kFP16);
if (disable_trt_plugin_fp16()) {
......
......@@ -340,7 +340,7 @@ class TensorRTEngine {
infer_ptr<nvinfer1::INetworkDefinition> infer_networkv2_;
#if IS_TRT_VERSION_GE(6000)
infer_ptr<nvinfer1::IBuilderConfig> infer_builder_config_;
std::unique_ptr<nvinfer1::IOptimizationProfile> optim_profile_;
nvinfer1::IOptimizationProfile* optim_profile_;
std::vector<std::unique_ptr<plugin::DynamicPluginTensorRT>> owned_pluginv2_;
#endif
std::mutex mutex_;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册