未验证 提交 9cf3aa61 编写于 作者: Z Zhang Jun 提交者: GitHub

TRT workspace memory configuration changed from setMaxWorkspaceSize to setMemoryPoolLimit, since setMaxWorkspaceSize is deprecated in TensorRT 8.3+ (#47795)

上级 6c54e0e8
......@@ -150,8 +150,12 @@ void TensorRTEngine::FreezeNetwork() {
"Call InitNetwork first to initialize network."));
// build engine.
infer_builder_->setMaxBatchSize(max_batch_);
#if IS_TRT_VERSION_GE(8300)
infer_builder_config_->setMemoryPoolLimit(
nvinfer1::MemoryPoolType::kWORKSPACE, max_workspace_);
#else
infer_builder_config_->setMaxWorkspaceSize(max_workspace_);
#endif
bool enable_fp16 = (precision_ == AnalysisConfig::Precision::kHalf);
if (enable_fp16) {
bool support_fp16 = infer_builder_->platformHasFastFp16();
......
......@@ -95,7 +95,11 @@ nvinfer1::IHostMemory* CreateNetwork() {
network->markOutput(*output);
// Build the engine.
builder->setMaxBatchSize(1);
#if IS_TRT_VERSION_GE(8300)
config->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, 1 << 10);
#else
config->setMaxWorkspaceSize(1 << 10);
#endif
auto engine = builder->buildEngineWithConfig(*network, *config);
EXPECT_NE(engine, nullptr);
// Serialize the engine to create a model, then close.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册