From 4df02fdfce40a1daee7a900938d0d36017fce926 Mon Sep 17 00:00:00 2001
From: Aganlengzi
Date: Wed, 27 Apr 2022 14:32:10 +0800
Subject: [PATCH] [CustomDevice] op_test supports custom device (#42227)

* [DO NOT MERGE] test op_test
* update with more related modifications
* split op_test.py to use test=allcases for testing
* split op_test.py to use test=allcases for testing
---
 paddle/fluid/memory/allocation/allocator_facade.cc | 10 ++++++++++
 paddle/fluid/pybind/pybind.cc                      |  1 +
 python/paddle/fluid/executor.py                    |  3 ++-
 python/paddle/fluid/tests/unittests/op_test.py     |  7 ++++++-
 4 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index e2649a7fd3..35ad27f4c6 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -835,6 +835,16 @@ class AllocatorFacadePrivate {
       platform::MLUPlace p(i);
       system_allocators_[p] = std::make_shared(p);
     }
+#endif
+#ifdef PADDLE_WITH_CUSTOM_DEVICE
+    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
+    for (const auto& dev_type : device_types) {
+      for (size_t dev_id = 0;
+           dev_id < phi::DeviceManager::GetDeviceCount(dev_type); dev_id++) {
+        platform::CustomPlace p(dev_type, dev_id);
+        system_allocators_[p] = std::make_shared(p);
+      }
+    }
 #endif
   }
 
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 843083fa0a..3a242fe258 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -2206,6 +2206,7 @@ All parameter, weight, gradient are variables in Paddle.
              std::exit(-1);
 #endif
            })
+      .def("_type", &PlaceIndex)
       .def("get_device_id",
            [](const platform::CustomPlace &self) { return self.GetDeviceId(); })
       .def("get_device_type",
diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py
index 56b743f446..c6ff3a583d 100644
--- a/python/paddle/fluid/executor.py
+++ b/python/paddle/fluid/executor.py
@@ -1386,7 +1386,8 @@ class Executor(object):
 
         def _can_use_interpreter_core(program, place):
             if core.is_compiled_with_npu() or core.is_compiled_with_xpu(
-            ) or core.is_compiled_with_mlu() or core.is_compiled_with_ipu():
+            ) or core.is_compiled_with_mlu() or core.is_compiled_with_ipu(
+            ) or isinstance(place, core.CustomPlace):
                 return False
 
             compiled = isinstance(program, compiler.CompiledProgram)
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 738ed90b12..a2441b28bf 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -341,6 +341,10 @@ class OpTest(unittest.TestCase):
         def is_mlu_op_test():
             return hasattr(cls, "use_mlu") and cls.use_mlu == True
 
+        def is_custom_device_op_test():
+            return hasattr(
+                cls, "use_custom_device") and cls.use_custom_device == True
+
         if not hasattr(cls, "op_type"):
             raise AssertionError(
                 "This test do not have op_type in class attrs, "
@@ -364,7 +368,8 @@ class OpTest(unittest.TestCase):
                     and not is_mkldnn_op_test() \
                     and not is_rocm_op_test() \
                     and not is_npu_op_test() \
-                    and not is_mlu_op_test():
+                    and not is_mlu_op_test() \
+                    and not is_custom_device_op_test():
                 raise AssertionError(
                     "This test of %s op needs check_grad with fp64 precision."
                     % cls.op_type)
--
GitLab
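
Note (not part of the patch): the sketch below shows how an operator unit test could opt into the custom-device path that the op_test.py change enables. It is a minimal illustration under assumptions, not code from this PR: the device type string "custom_cpu" must match a custom device plugin that is actually registered in your build, elementwise_add is only a convenient example op, and the file is assumed to sit next to op_test.py so that "from op_test import OpTest" resolves.

import unittest

import numpy as np
import paddle
from op_test import OpTest
from paddle.fluid import core

paddle.enable_static()


class TestElementwiseAddOnCustomDevice(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        # Class attribute consumed by is_custom_device_op_test() in op_test.py;
        # it also exempts this test from the fp64 check_grad requirement,
        # mirroring the existing NPU/MLU exemptions.
        self.__class__.use_custom_device = True
        # "custom_cpu" is an assumed plugin name; replace it with the device
        # type your custom device plugin actually registers.
        self.place = core.CustomPlace("custom_cpu", 0)

        x = np.random.random((4, 8)).astype("float32")
        y = np.random.random((4, 8)).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x + y}

    def test_check_output(self):
        # Run the forward kernel on the custom device instead of CPU/GPU.
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        # Gradient check on the custom device; whether it passes depends on
        # the grad kernels the plugin provides.
        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')


if __name__ == '__main__':
    unittest.main()

Treat the class as a template for new custom-device op tests rather than a ready-to-run test: the checks only pass if the plugin registers the corresponding forward and grad kernels.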