From d0943dbed42619383933f0576ba91d3760c453fa Mon Sep 17 00:00:00 2001
From: Zhaolong Xing
Date: Sun, 8 Dec 2019 23:41:12 +0800
Subject: [PATCH] CHERRY_PICK: Fix the bug for inference when using auto growth allocator (#21623)

test=release/1.6
---
 paddle/fluid/inference/api/analysis_predictor.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 6319d99ab81..f3fbb1d4d34 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -505,8 +505,6 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
       std::string flag = "--fraction_of_gpu_memory_to_use=" +
                          std::to_string(fraction_of_gpu_memory);
       flags.push_back(flag);
-      // use auto growth strategy here.
-      flags.push_back("--allocator_strategy=auto_growth");
       flags.push_back("--cudnn_deterministic=True");
       VLOG(3) << "set flag: " << flag;
       framework::InitGflags(flags);
--
GitLab
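
With the hard-coded --allocator_strategy=auto_growth flag removed, CreatePaddlePredictor no longer forces an allocator strategy and the process-wide default applies. Below is a minimal caller-side sketch, assuming the release/1.6 C++ inference API; the include path, model directory, and GPU memory pool size are placeholders rather than values taken from this patch.

// Minimal inference setup sketch (assumptions: header location and model path
// depend on how the Paddle inference library is installed).
#include <memory>
#include "paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("/path/to/model_dir");          // placeholder model directory
  config.EnableUseGpu(100 /* MB init pool */, 0 /* GPU id */);

  // After this fix, the predictor only sets --fraction_of_gpu_memory_to_use and
  // --cudnn_deterministic internally; the allocator strategy is left to the
  // process-level configuration.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return predictor != nullptr ? 0 : 1;
}

If the auto_growth strategy is still desired, it can be selected outside the predictor, for example by exporting FLAGS_allocator_strategy=auto_growth in the environment before launching the process (assuming the usual gflags environment-variable wiring in this release; verify against your Paddle build).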