diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 6319d99ab81100bdb36240bdb13112dad594f705..f3fbb1d4d3442a417f743351f8b2f4dcadbb4d54 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -505,8 +505,6 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     std::string flag = "--fraction_of_gpu_memory_to_use=" +
                        std::to_string(fraction_of_gpu_memory);
     flags.push_back(flag);
-    // use auto growth strategy here.
-    flags.push_back("--allocator_strategy=auto_growth");
     flags.push_back("--cudnn_deterministic=True");
     VLOG(3) << "set flag: " << flag;
     framework::InitGflags(flags);