diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index 3a156f1fa3c4cfb39d8dd3524353fd0c6a616184..8bc9775381be572a07501bff2a60a4942048362b 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -240,6 +240,12 @@ class AllocatorFacadePrivate {
       places.emplace_back(platform::XPUPlace(dev_id));
     }
 #endif
+#ifdef PADDLE_WITH_ASCEND_CL
+    int device_count = platform::GetNPUDeviceCount();
+    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
+      places.emplace_back(platform::NPUPlace(dev_id));
+    }
+#endif
 
     for (auto& p : places) {
       zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
diff --git a/paddle/fluid/operators/is_empty_op_npu.cc b/paddle/fluid/operators/is_empty_op_npu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..9155afecd021b73d8168ad221bc64cf556255218
--- /dev/null
+++ b/paddle/fluid/operators/is_empty_op_npu.cc
@@ -0,0 +1,22 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/
+
+#include "paddle/fluid/operators/is_empty_op.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_NPU_KERNEL(
+    is_empty, ops::IsEmptyOpKernel<plat::NPUDeviceContext, float>,
+    ops::IsEmptyOpKernel<plat::NPUDeviceContext, plat::float16>);
diff --git a/paddle/fluid/platform/npu_info.cc b/paddle/fluid/platform/npu_info.cc
index bb36eedb83238149cc38bdfc8e2033834140c7d1..11795f1611f7e6ba7092196d91bb09967cc7b81c 100644
--- a/paddle/fluid/platform/npu_info.cc
+++ b/paddle/fluid/platform/npu_info.cc
@@ -192,6 +192,10 @@ void NPUMemcpySync(void *dst, const void *src, size_t count,
   dst_max_count = dst_max_count ? dst_max_count : count;
   VLOG(4) << dst << " " << dst_max_count << " " << src << " " << count << " "
           << kind;
+  if (dst == nullptr && dst_max_count == 0) {
+    VLOG(4) << "Do not call aclrtMemcpy for zero_size_allocation on NPU";
+    return;
+  }
   PADDLE_ENFORCE_NPU_SUCCESS(aclrtMemcpy(dst, dst_max_count, src, count, kind));
 }
 
diff --git a/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py
new file mode 100644
index 0000000000000000000000000000000000000000..09801b0f5ec3ea12a0ac2fb563969a9168373c69
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_is_empty_op_npu.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import sys
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+
+paddle.enable_static()
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestNotEmpty(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.init_dtype()
+        self.op_type = "is_empty"
+        self.set_data()
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+        self.place = paddle.NPUPlace(0)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def set_data(self):
+        self.inputs = {'X': np.array([1, 2, 3]).astype(self.dtype)}
+        self.outputs = {'Out': np.array([False])}
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestEmpty(TestNotEmpty):
+    def set_data(self):
+        self.inputs = {'X': np.array([])}
+        self.outputs = {'Out': np.array([True])}
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestIsEmptyOpError(unittest.TestCase):
+    def test_errors(self):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            input_data = np.random.random((3, 2)).astype("float32")
+
+            def test_Variable():
+                # the input type must be Variable
+                paddle.is_empty(x=input_data)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be float32, float16 in NPU
+                x3 = paddle.static.data(
+                    name="x3", shape=[4, 32, 32], dtype="bool")
+                res = paddle.is_empty(x=x3)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_name_type():
+                # name type must be string.
+                x4 = paddle.static.data(
+                    name="x4", shape=[3, 2], dtype="float32")
+                res = paddle.is_empty(x=x4, name=1)
+
+            self.assertRaises(TypeError, test_name_type)
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestIsEmptyOpDygraph(unittest.TestCase):
+    def test_dygraph(self):
+        paddle.disable_static(paddle.NPUPlace(0))
+        input = paddle.rand(shape=[4, 32, 32], dtype='float32')
+        res = paddle.is_empty(x=input)
+
+
+if __name__ == "__main__":
+    unittest.main()