diff --git a/paddle/fluid/operators/array_operator.h b/paddle/fluid/operators/array_operator.h
index bcf4c53947b780ba85e7dab2003345f8f319db9a..19b90d360201e928287d7ac2e2257f305f65daef 100644
--- a/paddle/fluid/operators/array_operator.h
+++ b/paddle/fluid/operators/array_operator.h
@@ -52,7 +52,8 @@ class ArrayOp : public framework::OperatorBase {
     size_t offset;
     if (platform::is_gpu_place(i_tensor.place()) ||
         platform::is_xpu_place(i_tensor.place()) ||
-        platform::is_npu_place(i_tensor.place())) {
+        platform::is_npu_place(i_tensor.place()) ||
+        platform::is_custom_place(i_tensor.place())) {
       // FIXME: Avoid copy from GPU to CPU
       framework::Tensor t;
       framework::TensorCopy(i_tensor, platform::CPUPlace(), dev_ctx, &t);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_op.h b/paddle/fluid/operators/reduce_ops/reduce_op.h
index 2c275dcfc0a12efd5beae4cbb17e8ce261e3b6d4..ec3cf1908c5b5d430f510a9068d3ea7a979d680c 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_op.h
+++ b/paddle/fluid/operators/reduce_ops/reduce_op.h
@@ -566,7 +566,8 @@ class ReduceOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE_EQ(
           platform::is_gpu_place(ctx.GetPlace()) ||
               platform::is_npu_place(ctx.GetPlace()) ||
-              platform::is_mlu_place(ctx.GetPlace()),
+              platform::is_mlu_place(ctx.GetPlace()) ||
+              platform::is_custom_place(ctx.GetPlace()),
           true,
           platform::errors::InvalidArgument(
-              "float16 can only be used on GPU or NPU or MLU place"));
+              "float16 can only be used on GPU or NPU or MLU or custom place"));
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index a49e11aba685153f57c2e33ff7ef45fe84b5937b..fef27358498e0922cf22ecb87ff51326518d1013 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -2801,6 +2801,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def("_equals", &IsSamePlace)
       .def("_equals", &IsSamePlace)
       .def("_equals", &IsSamePlace)
+      .def("_equals", &IsSamePlace)
       .def("is_gpu_place",
            [](platform::Place &self) { return platform::is_gpu_place(self); })
       .def("is_cpu_place",
diff --git a/python/paddle/device/__init__.py b/python/paddle/device/__init__.py
index 929a1c2d77fb4c3fae3a16a685bd7e7e1766e79a..4fcf9c5d21b263639fbd72173c0c1b3d4c57819e 100644
--- a/python/paddle/device/__init__.py
+++ b/python/paddle/device/__init__.py
@@ -349,6 +349,10 @@ def get_device():
     elif isinstance(place, core.MLUPlace):
         device_id = place.get_device_id()
         device = 'mlu:' + str(device_id)
+    elif isinstance(place, core.CustomPlace):
+        device_id = place.get_device_id()
+        device_type = place.get_device_type()
+        device = device_type + ':' + str(device_id)
     else:
         raise ValueError("The device specification {} is invalid".format(place))
 
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index ba694f535308391cc14b36ae6faf89b2fdd126bf..860a72193e2e42e007dd3cebec0e264325f94197 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -1685,7 +1685,8 @@ class OpTest(unittest.TestCase):
             # Currently not support ParallelExecutor on XPUPlace.
             if not paddle.is_compiled_with_xpu(
             ) and not paddle.is_compiled_with_npu(
-            ) and not paddle.is_compiled_with_mlu():
+            ) and not paddle.is_compiled_with_mlu() and not isinstance(
+                    place, core.CustomPlace):
                 self.check_inplace_output_with_place(place,
                                                      no_check_set=no_check_set,
                                                      inplace_atol=inplace_atol)