diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 7dcaf12a7c9ff063abb7b87fdb1e71f305024d74..cccaa095404481a5ad1afc4710c338da365550bb 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -123,7 +123,7 @@ bool AnalysisPredictor::PrepareScope(
     status_is_cloned_ = true;
   } else {
     if (config_.use_gpu_) {
-      paddle::framework::InitDevices(false, {config_.device_id_});
+      paddle::framework::InitDevices(false);
     } else {
       paddle::framework::InitDevices(false, {});
     }
@@ -501,8 +501,6 @@ std::unique_ptr CreatePaddlePredictor<
     std::string flag = "--fraction_of_gpu_memory_to_use=" +
                        std::to_string(fraction_of_gpu_memory);
     flags.push_back(flag);
-    flags.push_back("--selected_gpus=" +
-                    std::to_string(config.gpu_device_id()));
     VLOG(3) << "set flag: " << flag;
     framework::InitGflags(flags);
   }