Unverified commit 28b43111, authored by Feiyu Chan, committed by GitHub

add angle_op (#37689)

* add angle_op
Parent 3ef89a47
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/angle_op.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
namespace paddle {
namespace operators {
class AngleOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "angle");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "angle");
auto in_dims = ctx->GetInputDim("X");
ctx->SetOutputDim("Out", in_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
};
class AngleOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "(Tensor), The input tensor of angle op.");
AddOutput("Out", "(Tensor), The output tensor of angle op.");
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
AddAttr<bool>("use_cudnn",
"(bool, default false) Only used in cudnn kernel, need "
"install cudnn")
.SetDefault(false);
AddComment(R"DOC(
Angle Operator.
This operator computes the element-wise angle (argument) of the input $X$.
$$out = angle(x)$$
)DOC");
}
};
class AngleGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
"Out@Grad", "angle_grad");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "Out@Grad", "angle_grad");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
"X@Grad", "angle_grad");
auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
ctx->SetOutputDim(framework::GradVarName("X"), dout_dims);
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto dtype = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(dtype, ctx.GetPlace());
}
};
template <typename T>
class AngleGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("angle_grad");
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetInput("X", this->Input("X"));
retv->SetAttrMap(this->Attrs());
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(angle, ops::AngleOp, ops::AngleOpMaker,
ops::AngleGradMaker<paddle::framework::OpDesc>,
ops::AngleGradMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(
angle, ops::AngleKernel<paddle::platform::CPUDeviceContext, float>,
ops::AngleKernel<paddle::platform::CPUDeviceContext, double>,
ops::AngleKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex<float>>,
ops::AngleKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex<double>>);
REGISTER_OPERATOR(angle_grad, ops::AngleGradOp);
REGISTER_OP_CPU_KERNEL(
angle_grad, ops::AngleGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::AngleGradKernel<paddle::platform::CPUDeviceContext, double>,
ops::AngleGradKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex<float>>,
ops::AngleGradKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex<double>>);
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/angle_op.h"
#include "paddle/fluid/platform/complex.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
angle, ops::AngleKernel<plat::CUDADeviceContext, float>,
ops::AngleKernel<plat::CUDADeviceContext, double>,
ops::AngleKernel<plat::CUDADeviceContext, plat::complex<float>>,
ops::AngleKernel<plat::CUDADeviceContext, plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
angle_grad, ops::AngleGradKernel<plat::CUDADeviceContext, float>,
ops::AngleGradKernel<plat::CUDADeviceContext, double>,
ops::AngleGradKernel<plat::CUDADeviceContext, plat::complex<float>>,
ops::AngleGradKernel<plat::CUDADeviceContext, plat::complex<double>>);
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
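// _USE_MATH_DEFINES exposes M_PI from <cmath> on MSVC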
#include <cmath>
#include "paddle/fluid/operators/math/complex_functors.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T, typename Enable = void>
struct AngleFunctor;
// angle functor for complex inputs
template <typename T>
struct AngleFunctor<T, Complex<T, Real<T>>> {
AngleFunctor(const T* input, Real<T>* output, int64_t numel)
: input_(input), output_(output), numel_(numel) {}
HOSTDEVICE void operator()(int64_t idx) const {
output_[idx] = arg(input_[idx]);
}
const T* input_;
Real<T>* output_;
int64_t numel_;
};
// angle functor for real inputs
template <typename T>
struct AngleFunctor<T, NoComplex<T, Real<T>>> {
AngleFunctor(const T* input, T* output, int64_t numel)
: input_(input), output_(output), numel_(numel) {}
HOSTDEVICE void operator()(int64_t idx) const {
output_[idx] = input_[idx] < static_cast<T>(0) ? static_cast<T>(M_PI) : static_cast<T>(0);
}
const T* input_;
T* output_;
int64_t numel_;
};
template <typename T, typename Enable = void>
struct AngleGradFunctor;
// angle grad for complex
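// For x = a + bi, angle(x) = atan2(b, a), so d(angle)/da = -b / (a^2 + b^2)
// and d(angle)/db = a / (a^2 + b^2). The functor below packs these two
// partials, each scaled by the incoming gradient dout, into the real and
// imaginary parts of dx.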
template <typename T>
struct AngleGradFunctor<T, Complex<T, Real<T>>> {
AngleGradFunctor(const math::Real<T>* dout, const T* x, T* dx, int64_t numel)
: dout_(dout), x_(x), dx_(dx), numel_(numel) {}
HOSTDEVICE void operator()(int64_t idx) const {
if (x_[idx] == T(0)) {
dx_[idx] = T(0);
} else {
const math::Real<T> r_square =
x_[idx].real * x_[idx].real + x_[idx].imag * x_[idx].imag;
dx_[idx] = T(-dout_[idx] * x_[idx].imag / r_square,
dout_[idx] * x_[idx].real / r_square);
}
}
const math::Real<T>* dout_;
const T* x_;
T* dx_;
int64_t numel_;
};
// angle grad for real
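// (on the reals, angle is piecewise constant -- 0 or pi -- so its gradient
// is zero wherever it is defined)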
template <typename T>
struct AngleGradFunctor<T, NoComplex<T, Real<T>>> {
AngleGradFunctor(const math::Real<T>* dout, const T* x, T* dx, int64_t numel)
: dout_(dout), x_(x), dx_(dx), numel_(numel) {}
HOSTDEVICE void operator()(int64_t idx) const { dx_[idx] = 0; }
const math::Real<T>* dout_;
const T* x_;
T* dx_;
int64_t numel_;
};
} // namespace math
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class AngleKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
auto numel = x->numel();
auto* x_data = x->data<T>();
auto* out_data = out->mutable_data<math::Real<T>>(
context.GetPlace(), size_t(x->numel() * sizeof(math::Real<T>)));
auto& dev_ctx = context.template device_context<DeviceContext>();
platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
math::AngleFunctor<T> functor(x_data, out_data, numel);
for_range(functor);
}
};
template <typename DeviceContext, typename T>
class AngleGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const framework::Tensor* d_out =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
const framework::Tensor* x = ctx.Input<framework::Tensor>("X");
framework::Tensor* d_x =
ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto numel = d_out->numel();
auto* dout_data = d_out->data<math::Real<T>>();
auto* x_data = x->data<T>();
auto* dx_data = d_x->mutable_data<T>(
ctx.GetPlace(), static_cast<size_t>(numel * sizeof(T)));
auto& dev_ctx = ctx.template device_context<DeviceContext>();
platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
math::AngleGradFunctor<T> functor(dout_data, x_data, dx_data, numel);
for_range(functor);
}
};
} // namespace operators
} // namespace paddle
@@ -401,6 +401,16 @@ HOSTDEVICE inline T abs(const complex<T>& a) {
#endif
}
template <typename T>
HOSTDEVICE inline T arg(const complex<T>& a) {
#if defined(PADDLE_WITH_CUDA_OR_HIP_COMPLEX) && \
(defined(__CUDA_ARCH__) || defined(__HIPCC__))
return thrust::arg(thrust::complex<T>(a));
#else
return std::arg(std::complex<T>(a));
#endif
}
template <typename T>
HOSTDEVICE inline complex<T> pow(const complex<T>& a, const complex<T>& b) {
#if defined(PADDLE_WITH_CUDA_OR_HIP_COMPLEX) && \
@@ -226,6 +226,7 @@ from .tensor.math import lgamma # noqa: F401
from .tensor.math import rad2deg # noqa: F401
from .tensor.math import deg2rad # noqa: F401
from .tensor.math import diff # noqa: F401
from .tensor.math import angle # noqa: F401
from .tensor.random import multinomial # noqa: F401
from .tensor.random import standard_normal # noqa: F401
@@ -537,5 +538,6 @@ __all__ = [ # noqa
'einsum',
'set_flags',
'get_flags',
'diff',
'angle',
]
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
from paddle.fluid import dygraph
from paddle import static
paddle.enable_static()
def angle_grad(x, dout):
if np.iscomplexobj(x):
def angle_grad_element(xi, douti):
if xi == 0:
return 0
rsquare = np.abs(xi)**2
return -douti * xi.imag / rsquare + 1j * douti * xi.real / rsquare
return np.vectorize(angle_grad_element)(x, dout)
else:
return np.zeros_like(x).astype(x.dtype)
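# A quick sanity check of the reference gradient above (a hedged sketch, not
# part of this commit: `_numeric_angle_grad` and its step size are
# illustrative). Central differences of np.angle with respect to the real and
# imaginary parts should reproduce angle_grad at points away from the branch
# cut on the negative real axis.
def _numeric_angle_grad(x, dout, eps=1e-6):
    d_real = (np.angle(x + eps) - np.angle(x - eps)) / (2 * eps)
    d_imag = (np.angle(x + 1j * eps) - np.angle(x - 1j * eps)) / (2 * eps)
    return dout * d_real + 1j * dout * d_imag
# Example: for x = np.array([1 + 2j, -3 + 0.5j]) and dout = np.ones(2),
# np.allclose(_numeric_angle_grad(x, dout), angle_grad(x, dout)) holds.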
class TestAngleOpFloat(OpTest):
def setUp(self):
self.op_type = "angle"
self.dtype = "float64"
self.x = np.linspace(-5, 5, 101).astype(self.dtype)
out_ref = np.angle(self.x)
self.inputs = {'X': self.x}
self.outputs = {'Out': out_ref}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'],
'Out',
user_defined_grads=[
angle_grad(self.x, np.ones_like(self.x) / self.x.size)
])
class TestAngleOpComplex(OpTest):
def setUp(self):
self.op_type = "angle"
self.dtype = "complex128"
real = np.expand_dims(np.linspace(-2, 2, 11), -1).astype("float64")
imag = np.linspace(-2, 2, 11).astype("float64")
self.x = real + 1j * imag
out_ref = np.angle(self.x)
self.inputs = {'X': self.x}
self.outputs = {'Out': out_ref}
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'],
'Out',
user_defined_grads=[
angle_grad(self.x, np.ones_like(self.x) / self.x.size)
])
class TestAngleAPI(unittest.TestCase):
def setUp(self):
self.x = np.random.randn(2, 3) + 1j * np.random.randn(2, 3)
self.out = np.angle(self.x)
def test_dygraph(self):
with dygraph.guard():
x = paddle.to_tensor(self.x)
out_np = paddle.angle(x).numpy()
self.assertTrue(np.allclose(self.out, out_np))
def test_static(self):
mp, sp = static.Program(), static.Program()
with static.program_guard(mp, sp):
x = static.data("x", shape=[2, 3], dtype="complex128")
out = paddle.angle(x)
exe = static.Executor()
exe.run(sp)
[out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out])
self.assertTrue(np.allclose(self.out, out_np))
if __name__ == "__main__":
unittest.main()
@@ -192,6 +192,7 @@ from .math import diagonal # noqa: F401
from .math import rad2deg # noqa: F401
from .math import deg2rad # noqa: F401
from .math import diff # noqa: F401
from .math import angle # noqa: F401
from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
@@ -404,7 +405,8 @@ tensor_method_func = [ #noqa
'multi_dot',
'solve',
'triangular_solve',
'diff',
'angle',
]
#this list used in math_op_patch.py for magic_method bind
@@ -24,6 +24,7 @@ from paddle.common_ops_import import templatedoc
from paddle.common_ops_import import dygraph_utils
from paddle.tensor import cast
from paddle.tensor.attribute import _complex_to_real_dtype
import paddle
from ..fluid import layers
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
@@ -2884,3 +2885,57 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
def angle(x, name=None):
r"""
Element-wise angle (in radians) of complex numbers. For non-negative real
numbers the angle is 0; for negative real numbers it is :math:`\pi`.
Equation:
.. math::
angle(x) = arctan2(x.imag, x.real)
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, complex64 or complex128.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor of real data type with the same precision as that of x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
z = x + 1j * y
print(z.numpy())
# [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]
# [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]
# [ 0.-2.j 0.-1.j 0.+0.j 0.+1.j]
# [ 1.-2.j 1.-1.j 1.+0.j 1.+1.j]]
theta = paddle.angle(z)
print(theta.numpy())
# [[-2.3561945 -2.6779451 3.1415927 2.6779451]
# [-2.0344439 -2.3561945 3.1415927 2.3561945]
# [-1.5707964 -1.5707964 0. 1.5707964]
# [-1.1071488 -0.7853982 0. 0.7853982]]
"""
if in_dygraph_mode():
return _C_ops.angle(x)
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'complex64', 'complex128'], 'angle')
op_type = "angle"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(x.dtype))
outputs = {"Out": out}
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
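# For real inputs, the convention documented above maps negative values to pi
# and non-negative values to 0. A minimal dygraph check (a hedged sketch, not
# part of this commit; expected values follow from the real-input functor in
# angle_op.h):
import paddle
paddle.disable_static()
x_real = paddle.to_tensor([-1.0, 0.0, 2.0])
print(paddle.angle(x_real).numpy())  # [3.1415927 0.        0.       ]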