From 84c8096cc55e5d8ccc8e0600a9621e143bd9f2b9 Mon Sep 17 00:00:00 2001
From: JingZhuangzhuang <75348594+JZZ-NOTE@users.noreply.github.com>
Date: Tue, 19 Apr 2022 11:30:00 +0800
Subject: [PATCH] fix infer gpu strategy (#41924)

---
 paddle/fluid/inference/api/analysis_predictor.cc | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 7badcb395e..7ec3271c66 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1077,6 +1077,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     process_level_allocator_enabled = true;
   }
 
+  // TODO(Jingzhuangzhuang): Fix trt error when allocator_strategy is
+  // auto_growth
+  if (config.tensorrt_engine_enabled()) {
+    gflags.push_back("--allocator_strategy=naive_best_fit");
+  }
+
   if (framework::InitGflags(gflags)) {
     VLOG(3) << "The following gpu analysis configurations only take effect "
                "for the first predictor: ";
-- 
GitLab
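
For reference, below is a minimal C++ usage sketch (not part of the patch; the model directory is hypothetical) showing the configuration that reaches the new branch: once EnableTensorRtEngine() is called on an AnalysisConfig, config.tensorrt_engine_enabled() returns true, so predictor creation appends --allocator_strategy=naive_best_fit to the gflags list before framework::InitGflags runs. Non-TensorRT predictors are unaffected and keep the default allocator strategy; the TODO in the patch marks this as a workaround until the auto_growth/TensorRT interaction is fixed.

    // Sketch only: assumes a saved inference model exists under "model_dir".
    #include <memory>

    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    int main() {
      paddle::AnalysisConfig config;
      config.SetModel("model_dir");    // hypothetical model directory
      config.EnableUseGpu(100, 0);     // 100 MB initial GPU memory pool, device 0
      config.EnableTensorRtEngine();   // makes config.tensorrt_engine_enabled() true
      // CreatePaddlePredictor is where the patched code runs: with TensorRT
      // enabled, it injects --allocator_strategy=naive_best_fit at creation time.
      auto predictor = paddle::CreatePaddlePredictor(config);
      return predictor != nullptr ? 0 : 1;
    }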