diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index b7e811e4c64d6f28e90815c0f591553cfd7ec4c2..19de09ab1523e1715c08be0b82cf2d969ee5112f 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1069,6 +1069,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
       process_level_allocator_enabled = true;
     }
 
+    // TODO(Jingzhuangzhuang): Fix trt error when allocator_strategy is
+    // auto_growth
+    if (config.tensorrt_engine_enabled()) {
+      gflags.push_back("--allocator_strategy=naive_best_fit");
+    }
+
     if (framework::InitGflags(gflags)) {
       VLOG(3) << "The following gpu analysis configurations only take effect "
                  "for the first predictor: ";
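For context, here is a minimal usage sketch (not part of the patch) of the code path this change affects, assuming the standard Paddle Inference C++ API; the include path and model directory below are assumptions for illustration. When TensorRT is enabled on the config, `CreatePaddlePredictor` now appends `--allocator_strategy=naive_best_fit` to the gflags handed to `framework::InitGflags`, so the first predictor created in the process runs with the naive_best_fit allocator instead of auto_growth.

```cpp
// Sketch only: the header path and model directory are assumptions, not part
// of the patch. EnableTensorRtEngine() makes config.tensorrt_engine_enabled()
// return true, which is the condition the new branch above checks.
#include "paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./mobilenet_v1");  // hypothetical model directory
  config.EnableUseGpu(100, 0);        // 100 MB initial GPU memory pool, device 0
  config.EnableTensorRtEngine();      // turn on the TensorRT subgraph engine
  // With this patch, the call below pushes --allocator_strategy=naive_best_fit
  // before framework::InitGflags runs, overriding auto_growth for the process.
  auto predictor =
      paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);
  return predictor != nullptr ? 0 : 1;
}
```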