diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index b23d0841c9a5f96348edaff86db88be6e35ffe81..6bb4e8e925a4fdd657552413e2c945bd8cab8c00 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -506,8 +506,6 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
       std::string flag = "--fraction_of_gpu_memory_to_use=" +
                          std::to_string(fraction_of_gpu_memory);
       flags.push_back(flag);
-      // use auto growth strategy here.
-      flags.push_back("--allocator_strategy=auto_growth");
       flags.push_back("--cudnn_deterministic=True");
       VLOG(3) << "set flag: " << flag;
       framework::InitGflags(flags);
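
Note on the change: with the hard-coded --allocator_strategy=auto_growth flag removed, the predictor no longer forces the auto-growth allocator and instead uses whatever allocator strategy is configured for the process. A minimal sketch of how a caller could still opt in explicitly, assuming the string gflag FLAGS_allocator_strategy that backs the removed "--allocator_strategy" option (the helper function below is hypothetical, not part of this patch):

    #include <gflags/gflags.h>

    // Assumed to be the string gflag defined inside Paddle that backs
    // "--allocator_strategy" (the option the removed line used to set).
    DECLARE_string(allocator_strategy);

    // Hypothetical helper: select the auto-growth allocator explicitly,
    // instead of relying on the flag that this patch no longer injects.
    // It must run before the predictor is created / before the first
    // allocation for the setting to take effect.
    void UseAutoGrowthAllocator() {
      FLAGS_allocator_strategy = "auto_growth";
    }

Depending on the build, the same flag can often be supplied from the environment instead (e.g. FLAGS_allocator_strategy=auto_growth before launching the inference binary), which avoids touching user code at all.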