diff --git a/paddle/fluid/operators/angle_op.cc b/paddle/fluid/operators/angle_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..3cb0148681496572d26ac0902d9bacfda2694245
--- /dev/null
+++ b/paddle/fluid/operators/angle_op.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/angle_op.h"
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+#ifdef PADDLE_WITH_MKLDNN
+#include "paddle/fluid/platform/mkldnn_helper.h"
+#endif
+
+namespace paddle {
+namespace operators {
+
+class AngleOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "angle");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "angle");
+
+    auto in_dims = ctx->GetInputDim("X");
+
+    ctx->SetOutputDim("Out", in_dims);
+    ctx->ShareLoD("X", /*->*/ "Out");
+  }
+};
+
+class AngleOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "(Tensor), The input tensor of angle op.");
+    AddOutput("Out", "(Tensor), The output tensor of angle op.");
+    AddAttr<bool>("use_mkldnn",
+                  "(bool, default false) Only used in mkldnn kernel")
+        .SetDefault(false);
+    AddAttr<bool>("use_cudnn",
+                  "(bool, default false) Only used in cudnn kernel; requires "
+                  "cudnn to be installed")
+        .SetDefault(false);
+    AddComment(R"DOC(
+Angle Operator.
+
+This operator computes the elementwise angle of input $X$.
+
+$$out = angle(x)$$
+
+)DOC");
+  }
+};
+
+class AngleGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@Grad", "angle_grad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "angle_grad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   "X@Grad", "angle_grad");
+
+    auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
+    ctx->SetOutputDim(framework::GradVarName("X"), dout_dims);
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    auto dtype = OperatorWithKernel::IndicateVarDataType(ctx, "X");
+    return framework::OpKernelType(dtype, ctx.GetPlace());
+  }
+};
+
+template <typename T>
+class AngleGradMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> retv) const override {
+    retv->SetType("angle_grad");
+    retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
+    retv->SetInput("X", this->Input("X"));
+    retv->SetAttrMap(this->Attrs());
+    retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OPERATOR(angle, ops::AngleOp, ops::AngleOpMaker,
+                  ops::AngleGradMaker<paddle::framework::OpDesc>,
+                  ops::AngleGradMaker<paddle::imperative::OpBase>);
+
+REGISTER_OP_CPU_KERNEL(
+    angle, ops::AngleKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::AngleKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::AngleKernel<paddle::platform::CPUDeviceContext,
+                     paddle::platform::complex<float>>,
+    ops::AngleKernel<paddle::platform::CPUDeviceContext,
+                     paddle::platform::complex<double>>);
+
+REGISTER_OPERATOR(angle_grad, ops::AngleGradOp);
+
+REGISTER_OP_CPU_KERNEL(
+    angle_grad,
+    ops::AngleGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::AngleGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::AngleGradKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex<float>>,
+    ops::AngleGradKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex<double>>);
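For reference (not part of the diff): the two `AngleFunctor` specializations in angle_op.h, registered above for CPU, follow the same convention as `np.angle` — atan2 of the imaginary over the real part for complex inputs, and pi for negative reals, 0 otherwise. A minimal numpy sketch of those semantics; `angle_reference` is an illustrative name, not a Paddle API:

```python
import numpy as np

def angle_reference(x):
    # Mirrors the two AngleFunctor specializations in angle_op.h.
    if np.iscomplexobj(x):
        # complex branch: arg(x) == atan2(x.imag, x.real)
        return np.arctan2(x.imag, x.real)
    # real branch: pi for negative inputs, 0 otherwise
    return np.where(x < 0, np.pi, 0.0).astype(x.dtype)

z = np.array([1 + 1j, -1 + 0j, -2j])
assert np.allclose(angle_reference(z), np.angle(z))
assert np.allclose(angle_reference(np.array([-3.0, 0.0, 2.0])), [np.pi, 0.0, 0.0])
```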
diff --git a/paddle/fluid/operators/angle_op.cu b/paddle/fluid/operators/angle_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..3264f426a77d1e4ce6256e0187a618cf30a18891
--- /dev/null
+++ b/paddle/fluid/operators/angle_op.cu
@@ -0,0 +1,31 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/angle_op.h"
+#include "paddle/fluid/platform/complex.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_CUDA_KERNEL(
+    angle, ops::AngleKernel<plat::CUDADeviceContext, float>,
+    ops::AngleKernel<plat::CUDADeviceContext, double>,
+    ops::AngleKernel<plat::CUDADeviceContext, plat::complex<float>>,
+    ops::AngleKernel<plat::CUDADeviceContext, plat::complex<double>>);
+
+REGISTER_OP_CUDA_KERNEL(
+    angle_grad, ops::AngleGradKernel<plat::CUDADeviceContext, float>,
+    ops::AngleGradKernel<plat::CUDADeviceContext, double>,
+    ops::AngleGradKernel<plat::CUDADeviceContext, plat::complex<float>>,
+    ops::AngleGradKernel<plat::CUDADeviceContext, plat::complex<double>>);
diff --git a/paddle/fluid/operators/angle_op.h b/paddle/fluid/operators/angle_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..093a04f03df95681d7837d5c44717c678589e679
--- /dev/null
+++ b/paddle/fluid/operators/angle_op.h
@@ -0,0 +1,147 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#ifndef _USE_MATH_DEFINES
+#define _USE_MATH_DEFINES
+#endif
+#include <cmath>
+#include "paddle/fluid/operators/math/complex_functors.h"
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/for_range.h"
+
+namespace paddle {
+namespace operators {
+
+namespace math {
+template <typename T, typename Enable = void>
+struct AngleFunctor;
+
+// angle function for complex
+template <typename T>
+struct AngleFunctor<T, Complex<T, Real<T>>> {
+  AngleFunctor(const T* input, Real<T>* output, int64_t numel)
+      : input_(input), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    output_[idx] = arg(input_[idx]);
+  }
+
+  const T* input_;
+  Real<T>* output_;
+  int64_t numel_;
+};
+
+// angle function for real
+template <typename T>
+struct AngleFunctor<T, NoComplex<T, Real<T>>> {
+  AngleFunctor(const T* input, T* output, int64_t numel)
+      : input_(input), output_(output), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    output_[idx] = input_[idx] < static_cast<T>(0) ? M_PI : 0;
+  }
+
+  const T* input_;
+  T* output_;
+  int64_t numel_;
+};
+
+template <typename T, typename Enable = void>
+struct AngleGradFunctor;
+
+// angle grad for complex
+template <typename T>
+struct AngleGradFunctor<T, Complex<T, Real<T>>> {
+  AngleGradFunctor(const math::Real<T>* dout, const T* x, T* dx, int64_t numel)
+      : dout_(dout), x_(x), dx_(dx), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const {
+    if (x_[idx] == T(0)) {
+      dx_[idx] = T(0);
+    } else {
+      const math::Real<T> r_square =
+          x_[idx].real * x_[idx].real + x_[idx].imag * x_[idx].imag;
+      dx_[idx] = T(-dout_[idx] * x_[idx].imag / r_square,
+                   dout_[idx] * x_[idx].real / r_square);
+    }
+  }
+
+  const math::Real<T>* dout_;
+  const T* x_;
+  T* dx_;
+  int64_t numel_;
+};
+
+// angle grad for real
+template <typename T>
+struct AngleGradFunctor<T, NoComplex<T, Real<T>>> {
+  AngleGradFunctor(const math::Real<T>* dout, const T* x, T* dx, int64_t numel)
+      : dout_(dout), x_(x), dx_(dx), numel_(numel) {}
+
+  HOSTDEVICE void operator()(int64_t idx) const { dx_[idx] = 0; }
+
+  const math::Real<T>* dout_;
+  const T* x_;
+  T* dx_;
+  int64_t numel_;
+};
+}  // namespace math
+
+using Tensor = framework::Tensor;
+template <typename DeviceContext, typename T>
+class AngleKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    const Tensor* x = context.Input<Tensor>("X");
+    Tensor* out = context.Output<Tensor>("Out");
+
+    auto numel = x->numel();
+    auto* x_data = x->data<T>();
+    auto* out_data = out->mutable_data<math::Real<T>>(
+        context.GetPlace(), size_t(x->numel() * sizeof(math::Real<T>)));
+
+    auto& dev_ctx = context.template device_context<DeviceContext>();
+    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
+    math::AngleFunctor<T> functor(x_data, out_data, numel);
+    for_range(functor);
+  }
+};
+
+template <typename DeviceContext, typename T>
+class AngleGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    const framework::Tensor* d_out =
+        ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
+    const framework::Tensor* x = ctx.Input<framework::Tensor>("X");
+    framework::Tensor* d_x =
+        ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+
+    auto numel = d_out->numel();
+    auto* dout_data = d_out->data<math::Real<T>>();
+    auto* x_data = x->data<T>();
+    auto* dx_data = d_x->mutable_data<T>(
+        ctx.GetPlace(), static_cast<size_t>(numel * sizeof(T)));
+
+    auto& dev_ctx = ctx.template device_context<DeviceContext>();
+    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
+    math::AngleGradFunctor<T> functor(dout_data, x_data, dx_data, numel);
+    for_range(functor);
+  }
+};
+}  // namespace operators
+}  // namespace paddle
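For reference (not part of the diff): the complex branch of `AngleGradFunctor` is the chain rule for $\theta = atan2(b, a)$ with $x = a + bi$:

$$\frac{\partial \theta}{\partial a} = \frac{-b}{a^2 + b^2}, \qquad \frac{\partial \theta}{\partial b} = \frac{a}{a^2 + b^2}$$

so with $r^2 = a^2 + b^2$ the functor packs the gradient as $dx = \left(-dout \cdot b + i \cdot dout \cdot a\right) / r^2$, and defines it to be zero at $x = 0$, where the angle is undefined. The real branch returns zero everywhere because the output ($\pi$ or $0$) is piecewise constant in the input.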
diff --git a/paddle/fluid/platform/complex.h b/paddle/fluid/platform/complex.h
index 35de34086c57d07fd4faceb69e70c7774be0ada5..e50b74133847ccf450c9829b1e3e9d3075d5cb45 100644
--- a/paddle/fluid/platform/complex.h
+++ b/paddle/fluid/platform/complex.h
@@ -401,6 +401,16 @@ HOSTDEVICE inline T abs(const complex<T>& a) {
 #endif
 }
 
+template <typename T>
+HOSTDEVICE inline T arg(const complex<T>& a) {
+#if defined(PADDLE_WITH_CUDA_OR_HIP_COMPLEX) && \
+    (defined(__CUDA_ARCH__) || defined(__HIPCC__))
+  return thrust::arg(thrust::complex<T>(a));
+#else
+  return std::arg(std::complex<T>(a));
+#endif
+}
+
 template <typename T>
 HOSTDEVICE inline complex<T> pow(const complex<T>& a, const complex<T>& b) {
 #if defined(PADDLE_WITH_CUDA_OR_HIP_COMPLEX) && \
diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 73b186efed37b315c40efbeb5af84f5543b579cb..c37c331bae4a6e2c0ff0bebde569c1c123ef3121 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -226,6 +226,7 @@ from .tensor.math import lgamma  # noqa: F401
 from .tensor.math import rad2deg  # noqa: F401
 from .tensor.math import deg2rad  # noqa: F401
 from .tensor.math import diff  # noqa: F401
+from .tensor.math import angle  # noqa: F401
 
 from .tensor.random import multinomial  # noqa: F401
 from .tensor.random import standard_normal  # noqa: F401
@@ -537,5 +538,6 @@ __all__ = [  # noqa
     'einsum',
     'set_flags',
     'get_flags',
-    'diff'
+    'diff',
+    'angle',
 ]
diff --git a/python/paddle/fluid/tests/unittests/test_angle_op.py b/python/paddle/fluid/tests/unittests/test_angle_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..05397c2434d8c80722f08fe05466840e8fa9f419
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_angle_op.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+import paddle
+from paddle.fluid import dygraph
+from paddle import static
+paddle.enable_static()
+
+
+def angle_grad(x, dout):
+    if np.iscomplexobj(x):
+
+        def angle_grad_element(xi, douti):
+            if xi == 0:
+                return 0
+            rsquare = np.abs(xi)**2
+            return -douti * xi.imag / rsquare + 1j * douti * xi.real / rsquare
+
+        return np.vectorize(angle_grad_element)(x, dout)
+    else:
+        return np.zeros_like(x).astype(x.dtype)
+
+
+class TestAngleOpFloat(OpTest):
+    def setUp(self):
+        self.op_type = "angle"
+        self.dtype = "float64"
+        self.x = np.linspace(-5, 5, 101).astype(self.dtype)
+        out_ref = np.angle(self.x)
+        self.inputs = {'X': self.x}
+        self.outputs = {'Out': out_ref}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(
+            ['X'],
+            'Out',
+            user_defined_grads=[
+                angle_grad(self.x, np.ones_like(self.x) / self.x.size)
+            ])
+
+
+class TestAngleOpComplex(OpTest):
+    def setUp(self):
+        self.op_type = "angle"
+        self.dtype = "complex128"
+        real = np.expand_dims(np.linspace(-2, 2, 11), -1).astype("float64")
+        imag = np.linspace(-2, 2, 11).astype("float64")
+        self.x = real + 1j * imag
+        out_ref = np.angle(self.x)
+        self.inputs = {'X': self.x}
+        self.outputs = {'Out': out_ref}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(
+            ['X'],
+            'Out',
+            user_defined_grads=[
+                angle_grad(self.x, np.ones_like(self.x) / self.x.size)
+            ])
+
+
+class TestAngleAPI(unittest.TestCase):
+    def setUp(self):
+        self.x = np.random.randn(2, 3) + 1j * np.random.randn(2, 3)
+        self.out = np.angle(self.x)
+
+    def test_dygraph(self):
+        with dygraph.guard():
+            x = paddle.to_tensor(self.x)
+            out_np = paddle.angle(x).numpy()
+            self.assertTrue(np.allclose(self.out, out_np))
+
+    def test_static(self):
+        mp, sp = static.Program(), static.Program()
+        with static.program_guard(mp, sp):
+            x = static.data("x", shape=[2, 3], dtype="complex128")
+            out = paddle.angle(x)
+
+        exe = static.Executor()
+        exe.run(sp)
+        [out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out])
+        self.assertTrue(np.allclose(self.out, out_np))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index d8acb946a6d9766c388ab13596f216b138da94d7..7cc2c7623a9ff6a624cdd40188ad8c2e0bdbb70e 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -192,6 +192,7 @@ from .math import diagonal  # noqa: F401
 from .math import rad2deg  # noqa: F401
 from .math import deg2rad  # noqa: F401
 from .math import diff  # noqa: F401
+from .math import angle  # noqa: F401
 
 from .random import multinomial  # noqa: F401
 from .random import standard_normal  # noqa: F401
@@ -404,7 +405,8 @@ tensor_method_func = [  #noqa
     'multi_dot',
     'solve',
     'triangular_solve',
-    'diff'
+    'diff',
+    'angle',
 ]
 
 #this list used in math_op_patch.py for magic_method bind
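A note on the tests, with a sketch that is not part of the diff: the upstream gradient `np.ones_like(x) / x.size` matches OpTest's convention of reducing the output to a mean before differentiating. The analytic formula used by `angle_grad` above can also be cross-checked against central differences on the real and imaginary parts; `numeric_angle_grad` below is a hypothetical helper for illustration, not part of the test suite:

```python
import numpy as np

def analytic_angle_grad(x, dout):
    # Mirrors AngleGradFunctor (and angle_grad above) for complex inputs.
    r2 = np.abs(x)**2
    return -dout * x.imag / r2 + 1j * dout * x.real / r2

def numeric_angle_grad(x, dout, eps=1e-6):
    # Central differences for L = sum(angle(x) * dout), packed in the same
    # convention as AngleGradFunctor: dL/d(real) + 1j * dL/d(imag).
    # Points on the negative real axis sit on np.angle's branch cut and would
    # need special handling; random complex samples avoid it almost surely.
    grad = np.zeros_like(x)
    for idx in np.ndindex(x.shape):
        for pack, step in ((1.0, eps), (1j, 1j * eps)):
            xp, xm = x.copy(), x.copy()
            xp[idx] += step
            xm[idx] -= step
            delta = np.sum(np.angle(xp) * dout) - np.sum(np.angle(xm) * dout)
            grad[idx] += pack * delta / (2 * eps)
    return grad

x = np.random.randn(2, 3) + 1j * np.random.randn(2, 3)
dout = np.ones(x.shape) / x.size
assert np.allclose(numeric_angle_grad(x, dout), analytic_angle_grad(x, dout),
                   atol=1e-5)
```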
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 66c3eaece76d43edbbcb69b3c1d59fb551d3661f..36d61fa08546bf37f5f7a789f42ffb2f9e974e0e 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -24,6 +24,7 @@ from paddle.common_ops_import import templatedoc
 from paddle.common_ops_import import dygraph_utils
 
 from paddle.tensor import cast
+from paddle.tensor.attribute import _complex_to_real_dtype
 import paddle
 from ..fluid import layers
 from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
@@ -2884,3 +2885,57 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
         out = layers.elementwise_sub(input_back, input_front, axis=axis)
 
         return out
+
+
+def angle(x, name=None):
+    r"""
+    Element-wise angle of complex numbers. For non-negative real numbers, the
+    angle is 0; for negative real numbers, the angle is :math:`\pi`.
+
+    Equation:
+        .. math::
+
+            angle(x)=arctan2(x.imag, x.real)
+
+    Args:
+        x (Tensor): An N-D Tensor, the data type is complex64, complex128, float32 or float64.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor: An N-D Tensor of real data type with the same precision as that of x's data type.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
+            y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
+            z = x + 1j * y
+            print(z.numpy())
+            # [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]
+            #  [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]
+            #  [ 0.-2.j  0.-1.j  0.+0.j  0.+1.j]
+            #  [ 1.-2.j  1.-1.j  1.+0.j  1.+1.j]]
+
+            theta = paddle.angle(z)
+            print(theta.numpy())
+            # [[-2.3561945 -2.6779451  3.1415927  2.6779451]
+            #  [-2.0344439 -2.3561945  3.1415927  2.3561945]
+            #  [-1.5707964 -1.5707964  0.         1.5707964]
+            #  [-1.1071488 -0.7853982  0.         0.7853982]]
+    """
+
+    if in_dygraph_mode():
+        return _C_ops.angle(x)
+
+    check_variable_and_dtype(x, 'x',
+        ['float32', 'float64', 'complex64', 'complex128'], 'angle')
+    op_type = "angle"
+    helper = LayerHelper(op_type, **locals())
+    inputs = {"X": x}
+    out = helper.create_variable_for_type_inference(
+        dtype=_complex_to_real_dtype(x.dtype))
+    outputs = {"Out": out}
+    helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
+    return out
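Finally, a quick dygraph smoke test of the new API, hedged as a sketch: it assumes a Paddle build carrying this patch, with complex autograd flowing through the `angle_grad` kernels registered above.

```python
import numpy as np
import paddle

paddle.disable_static()
z = paddle.to_tensor(
    np.array([1 + 1j, -1 + 0j, -2j]).astype("complex64"), stop_gradient=False)
theta = paddle.angle(z)   # real-valued tensor: [pi/4, pi, -pi/2]
theta.sum().backward()    # flows through the angle_grad kernel
print(theta.numpy())      # matches np.angle(z.numpy())
print(z.grad)             # complex gradient packed by AngleGradFunctor
```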