diff --git a/paddle/phi/backends/xpu/xpu2_op_list.cc b/paddle/phi/backends/xpu/xpu2_op_list.cc
index 8cc09d1a9be1363374848b00155d898b950b2680..91c42bb7005f981bd23838cf2358a1ba092339d2 100644
--- a/paddle/phi/backends/xpu/xpu2_op_list.cc
+++ b/paddle/phi/backends/xpu/xpu2_op_list.cc
@@ -48,6 +48,9 @@ XPUOpMap& get_kl2_ops() {
                            phi::DataType::INT64,
                            phi::DataType::BOOL})},
     {"assign_value", XPUKernelSet({phi::DataType::FLOAT32})},
+    {"atan", XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
+    {"atan_grad",
+     XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
     {"batch_norm_grad", XPUKernelSet({phi::DataType::FLOAT32})},
     {"batch_norm", XPUKernelSet({phi::DataType::FLOAT32})},
     {"bmm", XPUKernelSet({phi::DataType::FLOAT32})},
@@ -132,6 +135,8 @@ XPUOpMap& get_kl2_ops() {
                            phi::DataType::INT64})},
     {"deformable_conv_grad", XPUKernelSet({phi::DataType::FLOAT32})},
     {"deformable_conv", XPUKernelSet({phi::DataType::FLOAT32})},
+    {"deformable_conv_v1_grad", XPUKernelSet({phi::DataType::FLOAT32})},
+    {"deformable_conv_v1", XPUKernelSet({phi::DataType::FLOAT32})},
     {"depthwise_conv2d_grad", XPUKernelSet({phi::DataType::FLOAT32})},
     {"depthwise_conv2d", XPUKernelSet({phi::DataType::FLOAT32})},
     {"diag_v2",
diff --git a/paddle/phi/kernels/xpu/atan_grad_kernel.cc b/paddle/phi/kernels/xpu/atan_grad_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..7f361d727c419db69d94f354aa87c4d3cee03b51
--- /dev/null
+++ b/paddle/phi/kernels/xpu/atan_grad_kernel.cc
@@ -0,0 +1,46 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/activation_grad_kernel.h"
+
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+// Computes the gradient of atan on XPU: dx = dout / (1 + x^2), delegating the
+// elementwise math to the XDNN primitive xpu::arctan_grad.
+// NOTE(review): template argument lists were stripped from the mangled patch
+// text ("template +void", "XPUTypeTrait::Type", "reinterpret_cast(x_ptr)");
+// they are restored here following the standard phi XPU kernel shape.
+template <typename T, typename Context>
+void AtanGradKernel(const Context& dev_ctx,
+                    const DenseTensor& x,
+                    const DenseTensor& dout,
+                    DenseTensor* dx) {
+  // Map the phi dtype (e.g. phi::dtype::float16) onto the XDNN device type.
+  using XPUType = typename XPUTypeTrait<T>::Type;
+
+  const T* x_ptr = x.data<T>();
+  const T* dout_ptr = dout.data<T>();
+  // Allocate the output gradient buffer on the device.
+  T* dx_ptr = dev_ctx.template Alloc<T>(dx);
+
+  int r = xpu::arctan_grad(dev_ctx.x_context(),
+                           reinterpret_cast<const XPUType*>(x_ptr),
+                           reinterpret_cast<const XPUType*>(dout_ptr),
+                           reinterpret_cast<XPUType*>(dx_ptr),
+                           x.numel());
+  // Surface any non-zero XDNN status as a Paddle error.
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "arctan_grad");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(atan_grad,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::AtanGradKernel,
+                   float,
+                   phi::dtype::float16) {}
diff --git a/paddle/phi/kernels/xpu/atan_kernel.cc b/paddle/phi/kernels/xpu/atan_kernel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..3252a03bc158d5e6019b1d1b231bd1ae8cc9bb95
--- /dev/null
+++ b/paddle/phi/kernels/xpu/atan_kernel.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/activation_kernel.h"
+
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+// Forward atan on XPU: out = arctan(x), elementwise, via xpu::arctan.
+// NOTE(review): the template parameter list and cast/data<T>() arguments were
+// stripped from the mangled patch text; restored per the phi XPU convention.
+template <typename T, typename Context>
+void AtanKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out) {
+  // Map the phi dtype (e.g. phi::dtype::float16) onto the XDNN device type.
+  using XPUType = typename XPUTypeTrait<T>::Type;
+
+  const T* x_ptr = x.data<T>();
+  // Allocate the output buffer on the device.
+  T* out_ptr = dev_ctx.template Alloc<T>(out);
+
+  int r = xpu::arctan(dev_ctx.x_context(),
+                      reinterpret_cast<const XPUType*>(x_ptr),
+                      reinterpret_cast<XPUType*>(out_ptr),
+                      x.numel());
+  // Surface any non-zero XDNN status as a Paddle error.
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "arctan");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    atan, XPU, ALL_LAYOUT, phi::AtanKernel, float, phi::dtype::float16) {}
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_atan_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_atan_op_xpu.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cd35ffbfcf4f7e07402c20a1ccf1e6611c0c7c1
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/xpu/test_atan_op_xpu.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+import numpy as np
+
+import paddle
+
+paddle.enable_static()
+
+sys.path.append("..")
+
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import (
+    XPUOpTestWrapper,
+    create_test_class,
+    get_xpu_op_support_types,
+)
+
+
+class XPUTestAtanOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = "atan"
+        self.use_dynamic_create_class = False
+
+    class TestAtanOp(XPUOpTest):
+        def setUp(self):
+            self.set_xpu()
+            self.op_type = "atan"
+            self.eager_mode = True
+
+            # override
+            self.init_input_shape()
+
+            x = np.random.random(self.x_shape).astype(self.in_type)
+            y = np.arctan(x)
+
+            self.inputs = {'X': x}
+            self.outputs = {'Out': y}
+
+        def init_input_shape(self):
+            self.x_shape = [15, 6]
+
+        def set_xpu(self):
+            self.__class__.no_need_check_grad = False
+            self.place = paddle.XPUPlace(0)
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            self.check_grad_with_place(
+                self.place, ['X'], 'Out', check_eager=self.eager_mode
+            )
+
+    class Test1x1(TestAtanOp):
+        def init_input_shape(self):
+            self.x_shape = [1, 1]
+
+    class Test1(TestAtanOp):
+        def init_input_shape(self):
+            self.x_shape = [1]
+
+
+support_types = get_xpu_op_support_types("atan")
+for stype in support_types:
+    create_test_class(globals(), XPUTestAtanOp, stype)
+
+if __name__ == "__main__":
+    unittest.main()