From 31ea3231a8a00c3e5ef08c46a87ad32b27e4c286 Mon Sep 17 00:00:00 2001
From: QingshuChen
Date: Mon, 9 Jan 2023 15:56:52 +0800
Subject: [PATCH] add fill/fill_any for kunlun (#49645)

---
 paddle/phi/backends/xpu/xpu2_op_list.cc       |  12 ++
 paddle/phi/kernels/xpu/fill_kernel.cc         |  29 ++++
 .../unittests/xpu/test_fill_any_op_xpu.py     | 125 ++++++++++++++++++
 .../tests/unittests/xpu/test_fill_op_xpu.py   | 111 ++++++++++++++++
 4 files changed, 277 insertions(+)
 create mode 100644 paddle/phi/kernels/xpu/fill_kernel.cc
 create mode 100644 python/paddle/fluid/tests/unittests/xpu/test_fill_any_op_xpu.py
 create mode 100644 python/paddle/fluid/tests/unittests/xpu/test_fill_op_xpu.py

diff --git a/paddle/phi/backends/xpu/xpu2_op_list.cc b/paddle/phi/backends/xpu/xpu2_op_list.cc
index b98ef9dc67..8b0d0851ef 100644
--- a/paddle/phi/backends/xpu/xpu2_op_list.cc
+++ b/paddle/phi/backends/xpu/xpu2_op_list.cc
@@ -214,6 +214,18 @@ XPUOpMap& get_kl2_ops() {
                    phi::DataType::BOOL,
                    phi::DataType::FLOAT16,
                    phi::DataType::FLOAT32})},
+    {"fill",
+     XPUKernelSet({phi::DataType::INT64,
+                   phi::DataType::INT32,
+                   phi::DataType::FLOAT16,
+                   phi::DataType::FLOAT64,
+                   phi::DataType::FLOAT32})},
+    {"fill_any",
+     XPUKernelSet({phi::DataType::INT64,
+                   phi::DataType::INT32,
+                   phi::DataType::FLOAT16,
+                   phi::DataType::FLOAT64,
+                   phi::DataType::FLOAT32})},
     {"fill_any_like",
      XPUKernelSet({phi::DataType::INT64,
                    phi::DataType::INT32,
diff --git a/paddle/phi/kernels/xpu/fill_kernel.cc b/paddle/phi/kernels/xpu/fill_kernel.cc
new file mode 100644
index 0000000000..7e3005a9e2
--- /dev/null
+++ b/paddle/phi/kernels/xpu/fill_kernel.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/impl/fill_kernel_impl.h"
+
+#include "paddle/phi/backends/cpu/cpu_context.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+PD_REGISTER_KERNEL(fill,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::FillKernel,
+                   float,
+                   double,
+                   int64_t,
+                   int,
+                   phi::dtype::float16,
+                   bool) {}
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fill_any_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fill_any_op_xpu.py
new file mode 100644
index 0000000000..95d514d94c
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/xpu/test_fill_any_op_xpu.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+sys.path.append("..")
+import unittest
+
+import numpy as np
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import (
+    XPUOpTestWrapper,
+    create_test_class,
+    get_xpu_op_support_types,
+)
+
+import paddle
+
+paddle.enable_static()
+
+
+class XPUTestFillAnyOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'fill_any'
+        self.use_dynamic_create_class = False
+
+    class TestFillAnyOp(XPUOpTest):
+        def setUp(self):
+            self.op_type = "fill_any"
+            self.dtype = 'float64'
+            self.value = 0.0
+            self.init()
+            self.inputs = {'X': np.random.random((20, 30)).astype(self.dtype)}
+            self.attrs = {
+                'value_float': float(self.value),
+                'value_int': int(self.value),
+            }
+            self.outputs = {
+                'Out': self.value
+                * np.ones_like(self.inputs["X"]).astype(self.dtype)
+            }
+
+        def init(self):
+            pass
+
+        def test_check_output(self):
+            self.check_output_with_place(paddle.XPUPlace(0))
+
+        def test_check_grad(self):
+            self.check_grad_with_place(paddle.XPUPlace(0), ['X'], 'Out')
+
+    class TestFillAnyOpFloat32(TestFillAnyOp):
+        def init(self):
+            self.dtype = np.float32
+            self.value = 0.0
+
+    class TestFillAnyOpFloat16(TestFillAnyOp):
+        def init(self):
+            self.dtype = np.float16
+
+    class TestFillAnyOpvalue1(TestFillAnyOp):
+        def init(self):
+            self.dtype = np.float32
+            self.value = 111111555
+
+    class TestFillAnyOpvalue2(TestFillAnyOp):
+        def init(self):
+            self.dtype = np.float32
+            self.value = 11111.1111
+
+    class TestFillAnyInplace(unittest.TestCase):
+        def test_fill_any_version(self):
+            with paddle.fluid.dygraph.guard():
+                var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32))
+                self.assertEqual(var.inplace_version, 0)
+
+                var.fill_(0)
+                self.assertEqual(var.inplace_version, 1)
+
+                var.fill_(0)
+                self.assertEqual(var.inplace_version, 2)
+
+                var.fill_(0)
+                self.assertEqual(var.inplace_version, 3)
+
+        def test_fill_any_eqaul(self):
+            with paddle.fluid.dygraph.guard():
+                tensor = paddle.to_tensor(
+                    np.random.random((20, 30)).astype(np.float32)
+                )
+                target = tensor.numpy()
+                target[...] = 1
+
+                tensor.fill_(1)
+                self.assertEqual((tensor.numpy() == target).all().item(), True)
+
+        def test_backward(self):
+            with paddle.fluid.dygraph.guard():
+                x = paddle.full([10, 10], -1.0, dtype='float32')
+                x.stop_gradient = False
+                y = 2 * x
+                y.fill_(1)
+                y.backward()
+                np.testing.assert_array_equal(
+                    x.grad.numpy(), np.zeros([10, 10])
+                )
+
+
+support_types = get_xpu_op_support_types('fill_any')
+for stype in support_types:
+    create_test_class(globals(), XPUTestFillAnyOp, stype)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fill_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fill_op_xpu.py
new file mode 100644
index 0000000000..868849c115
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/xpu/test_fill_op_xpu.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+sys.path.append("..")
+import unittest
+
+import numpy as np
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import (
+    XPUOpTestWrapper,
+    create_test_class,
+    get_xpu_op_support_types,
+)
+
+import paddle
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+
+paddle.enable_static()
+
+
+class XPUTestFillOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'fill'
+        self.use_dynamic_create_class = False
+
+    class TestFillOp1(XPUOpTest):
+        def setUp(self):
+            self.op_type = "fill"
+            val = np.random.random(size=[100, 200])
+            self.inputs = {}
+            self.attrs = {
+                'value': val.flatten().tolist(),
+                'shape': [100, 200],
+                'dtype': int(core.VarDesc.VarType.FP64),
+                'force_cpu': False,
+            }
+            self.outputs = {'Out': val.astype('float64')}
+
+        def test_check_output(self):
+            self.check_output_with_place(paddle.XPUPlace(0))
+
+    class TestFillOp2(XPUOpTest):
+        def setUp(self):
+            self.op_type = "fill"
+            val = np.random.random(size=[100, 200])
+            self.inputs = {}
+            self.attrs = {
+                'value': val.flatten().tolist(),
+                'shape': [100, 200],
+                'dtype': int(core.VarDesc.VarType.FP64),
+                'force_cpu': True,
+            }
+            self.outputs = {'Out': val.astype('float64')}
+
+        def test_check_output(self):
+            self.check_output()
+
+    class TestFillOp3(unittest.TestCase):
+        def check_with_place(self, place, f_cpu):
+            scope = core.Scope()
+            # create Out Variable
+            out = scope.var('Out').get_tensor()
+
+            # create and run fill_op operator
+            val = np.random.random(size=[300, 200])
+            fill_op = Operator(
+                "fill",
+                value=val.flatten(),
+                shape=[300, 200],
+                dtype=int(core.VarDesc.VarType.FP32),
+                force_cpu=f_cpu,
+                Out='Out',
+            )
+            fill_op.run(scope, place)
+
+            # get result from Out
+            result_array = np.array(out)
+            full_array = np.array(val, 'float32')
+
+            np.testing.assert_array_equal(result_array, full_array)
+
+        def test_fill_op(self):
+            places = [core.CPUPlace()]
+            if core.is_compiled_with_xpu():
+                places.append(core.XPUPlace(0))
+
+            for place in places:
+                self.check_with_place(place, True)
+                self.check_with_place(place, False)
+
+
+support_types = get_xpu_op_support_types('fill')
+for stype in support_types:
+    create_test_class(globals(), XPUTestFillOp, stype)
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab