diff --git a/paddle/fluid/platform/npu_info.cc b/paddle/fluid/platform/npu_info.cc
index 91099d2db2a3005800e42ad10c26a85f610aad0d..6920436399312e18cd9634327e080a4d5605038c 100644
--- a/paddle/fluid/platform/npu_info.cc
+++ b/paddle/fluid/platform/npu_info.cc
@@ -77,7 +77,7 @@ int GetCurrentNPUDeviceId() {
 std::vector<int> GetSelectedNPUDevices() {
   // use user specified NPUs in single-node multi-process mode.
   std::vector<int> devices;
-  if (!FLAGS_selected_gpus.empty()) {
+  if (!FLAGS_selected_npus.empty()) {
     auto devices_str = paddle::string::Split(FLAGS_selected_npus, ',');
     for (auto id : devices_str) {
       devices.push_back(atoi(id.c_str()));
diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc
index f5154f0a0cfc0508a18c638a56d0cf08a09d5d79..a886f7a0298373d170f72e2cee9c973d2f931941 100644
--- a/paddle/testing/paddle_gtest_main.cc
+++ b/paddle/testing/paddle_gtest_main.cc
@@ -64,7 +64,7 @@ int main(int argc, char** argv) {
   undefok.push_back("initial_cpu_memory_in_mb");
 #endif

-#if defined(PADDLE_WITH_CUDA)
+#if defined(PADDLE_WITH_ASCEND_CL)
   envs.push_back("selected_npus");
 #endif
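
Below is a minimal standalone sketch (not Paddle code) of the behaviour the first hunk restores: GetSelectedNPUDevices() splits the comma-separated FLAGS_selected_npus value into device ids, so before the fix the NPU list was only consulted when the unrelated FLAGS_selected_gpus flag happened to be non-empty. The helper name ParseSelectedDevices and the sample value "0,1,3" are assumptions for illustration; the real code uses paddle::string::Split on the flag value.

// Standalone illustration of parsing a --selected_npus style flag value.
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Split "0,1,3" into {0, 1, 3}; an empty string yields an empty list,
// which in Paddle's convention means "use all visible devices".
std::vector<int> ParseSelectedDevices(const std::string& selected) {
  std::vector<int> devices;
  if (selected.empty()) return devices;
  std::stringstream ss(selected);
  std::string id;
  while (std::getline(ss, id, ',')) {
    devices.push_back(std::atoi(id.c_str()));
  }
  return devices;
}

int main() {
  // With the bug, setting only --selected_npus=0,1,3 was silently ignored
  // because the emptiness check looked at the GPU flag instead.
  for (int d : ParseSelectedDevices("0,1,3")) {
    std::cout << d << "\n";
  }
  return 0;
}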