diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index e2649a7fd334d6dab6dde44792024f251da84a4f..35ad27f4c62b5a01715156ad875e0c7e98468215 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -835,6 +835,16 @@ class AllocatorFacadePrivate {
       platform::MLUPlace p(i);
       system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
     }
+#endif
+#ifdef PADDLE_WITH_CUSTOM_DEVICE
+    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
+    for (const auto& dev_type : device_types) {
+      for (size_t dev_id = 0;
+           dev_id < phi::DeviceManager::GetDeviceCount(dev_type); dev_id++) {
+        platform::CustomPlace p(dev_type, dev_id);
+        system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
+      }
+    }
 #endif
   }
 
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 843083fa0ad48e404ae0c3ffb665a4f5ca575f19..3a242fe2582a5051a050375062f57bf019ab13c3 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -2206,6 +2206,7 @@ All parameter, weight, gradient are variables in Paddle.
              std::exit(-1);
 #endif
            })
+      .def("_type", &PlaceIndex<platform::CustomPlace>)
       .def("get_device_id",
            [](const platform::CustomPlace &self) { return self.GetDeviceId(); })
       .def("get_device_type",
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 56b743f4463ae4d8f555fb100bbb03e4d4d17f19..c6ff3a583d6a3873f6a32255daf1ba09b8fcba86 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -1386,7 +1386,8 @@ class Executor(object):
 
         def _can_use_interpreter_core(program, place):
             if core.is_compiled_with_npu() or core.is_compiled_with_xpu(
-            ) or core.is_compiled_with_mlu() or core.is_compiled_with_ipu():
+            ) or core.is_compiled_with_mlu() or core.is_compiled_with_ipu(
+            ) or isinstance(place, core.CustomPlace):
                 return False
 
             compiled = isinstance(program, compiler.CompiledProgram)
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 738ed90b12e658ab02ab9eae86b18f699ff73b88..a2441b28bf96d00cc9acd9b9df5b328f06fb503f 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -341,6 +341,10 @@ class OpTest(unittest.TestCase):
         def is_mlu_op_test():
             return hasattr(cls, "use_mlu") and cls.use_mlu == True
 
+        def is_custom_device_op_test():
+            return hasattr(
+                cls, "use_custom_device") and cls.use_custom_device == True
+
         if not hasattr(cls, "op_type"):
             raise AssertionError(
                 "This test do not have op_type in class attrs, "
@@ -364,7 +368,8 @@ class OpTest(unittest.TestCase):
                     and not is_mkldnn_op_test() \
                     and not is_rocm_op_test() \
                     and not is_npu_op_test() \
-                    and not is_mlu_op_test():
+                    and not is_mlu_op_test() \
+                    and not is_custom_device_op_test():
                 raise AssertionError(
                     "This test of %s op needs check_grad with fp64 precision."
                     % cls.op_type)
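
Taken together, these changes register system allocators for custom devices, expose `_type` on `CustomPlace`, route `CustomPlace` programs away from the new interpreter core, and let op tests opt out of the fp64 `check_grad` requirement via a `use_custom_device` flag. Below is a minimal sketch of how a test might use that flag; the test name, op choice, input shapes, and the `"custom_cpu"` device type are illustrative assumptions that depend on which custom-device plugin is installed, not part of this diff.

```python
import numpy as np

import paddle
from paddle.fluid import core
from op_test import OpTest  # python/paddle/fluid/tests/unittests/op_test.py

paddle.enable_static()


class TestElementwiseAddCustomDevice(OpTest):  # hypothetical test case
    def setUp(self):
        self.op_type = "elementwise_add"
        # The check above uses hasattr(cls, "use_custom_device"), so set the
        # flag on the class rather than the instance; this makes
        # is_custom_device_op_test() return True and skips the fp64
        # check_grad assertion for this test.
        self.__class__.use_custom_device = True
        # "custom_cpu" is an assumed plugin name; pass whatever device type
        # the installed custom-device plugin registers, plus a device id.
        self.place = core.CustomPlace("custom_cpu", 0)
        x = np.random.random((2, 3)).astype("float32")
        y = np.random.random((2, 3)).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x + y}

    def test_check_output(self):
        # Runs the op on the custom place; with the executor.py change above,
        # programs on a CustomPlace bypass the new interpreter core.
        self.check_output_with_place(self.place)
```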