Unverified commit ae40370d, authored by Feiyu Chan, committed via GitHub

add as_complex and as_real op (#37784)

* add as_complex and as_real op
Parent commit: 01b6bdf4
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/complex_view_op.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
// as_complex: view a real tensor whose last dimension has size 2 as a
// complex tensor, dropping that trailing dimension.
class AsComplexOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  // Validates that input(X) has rank >= 1 and a trailing dimension of 2
  // (the real/imaginary pair), then sets the output shape to the input
  // shape with the last dimension removed.
  //
  // Fix: the concatenated error-message literals were missing separating
  // spaces (e.g. "than 1.But received", "input(X)does not equals 2.") and
  // used the ungrammatical "does not equals".
  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "as_complex");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "as_complex");

    auto in_dims = ctx->GetInputDim("X");
    const int input_rank = in_dims.size();
    PADDLE_ENFORCE_GE(
        input_rank, 1,
        platform::errors::InvalidArgument(
            "The rank of input(X) is less than 1. "
            "Expected the rank of input(X) to be equal to or greater than 1. "
            "But received rank of input(X) = %d.",
            input_rank));
    const int last_dim_size = in_dims[input_rank - 1];
    PADDLE_ENFORCE_EQ(
        last_dim_size, 2,
        platform::errors::InvalidArgument(
            "The size of the last dimension of input(X) does not equal 2. "
            "Expected the size of the last dimension of input(X) to be 2. "
            "But received %d.",
            last_dim_size));

    // Output shape = input shape minus the trailing (real, imag) axis.
    const framework::DDim out_dims(in_dims.Get(), input_rank - 1);
    ctx->SetOutputDim("Out", out_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};
// Proto/attribute description for the as_complex operator.
class AsComplexOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    // Fix: descriptions referred to the wrong op name ("view_as_complex")
    // and called the two components 'real' and 'complex'; the last-axis
    // pair holds the real and *imaginary* parts.
    AddInput("X", "(Tensor), The input tensor of as_complex op.");
    AddOutput("Out", "(Tensor), The output tensor of as_complex op.");
    AddComment(R"DOC(
As_complex Operator.

This operator is used to return a complex tensor represented
by an old-fashioned real tensor. The size of the last dimension of
the input tensor should be 2, which corresponds to 'real' and
'imaginary', respectively.

)DOC");
  }
};
// Generates the gradient op for as_complex. Because as_complex and as_real
// are inverse views of the same memory layout, the backward pass of
// as_complex is simply as_real applied to the output gradient.
template <typename T>
class AsComplexGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("as_real");
    // Feed the gradient w.r.t. Out into the as_real grad op as its input.
    retv->SetInput("X", this->OutputGrad("Out"));
    retv->SetAttrMap(this->Attrs());
    // Its output becomes the gradient w.r.t. this op's input X.
    retv->SetOutput("Out", this->InputGrad("X"));
  }
};
// as_real: view a complex tensor as a real tensor with an extra trailing
// dimension of size 2 holding the real and imaginary parts.
class AsRealOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  // Output shape is the input shape with a trailing dimension of 2 appended.
  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "as_real");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "as_real");
    auto out_dims_v = framework::vectorize(ctx->GetInputDim("X"));
    out_dims_v.push_back(2);
    const framework::DDim out_dims = framework::make_ddim(out_dims_v);
    ctx->SetOutputDim("Out", out_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  // The kernels (see AsRealKernel) are registered on the *real* element
  // type (float/double) while the op's input is complex, so the complex
  // input dtype is mapped to its real counterpart for kernel selection.
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return framework::OpKernelType(framework::ToRealType(input_data_type),
                                   ctx.GetPlace());
  }
};
// Proto/attribute description for the as_real operator.
class AsRealOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor), The input tensor of as_real op.");
    AddOutput("Out", "(Tensor), The output tensor of as_real op.");
    // Fix: the doc called the two components 'real' and 'complex'; the
    // trailing axis of size 2 holds the real and *imaginary* parts.
    AddComment(R"DOC(
AsReal Operator.

This operator is used to return an old-fashioned real tensor from a
complex tensor. The size of the last dimension of the output tensor is 2,
which corresponds to 'real' and 'imaginary', respectively.

)DOC");
  }
};
// Generates the gradient op for as_real: the backward of viewing a complex
// tensor as real is viewing the (real) output gradient as complex again,
// so the grad op is "as_complex" applied to the output gradient.
template <typename T>
class AsRealGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("as_complex");
    // Feed the gradient w.r.t. Out into the as_complex grad op as input.
    retv->SetInput("X", this->OutputGrad("Out"));
    retv->SetAttrMap(this->Attrs());
    // Its output becomes the gradient w.r.t. this op's input X.
    retv->SetOutput("Out", this->InputGrad("X"));
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;

// Register the forward ops together with their gradient-op makers, for both
// the static-graph (OpDesc) and imperative (OpBase) modes.
REGISTER_OPERATOR(as_complex, ops::AsComplexOp, ops::AsComplexOpMaker,
                  ops::AsComplexGradMaker<paddle::framework::OpDesc>,
                  ops::AsComplexGradMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(as_real, ops::AsRealOp, ops::AsRealOpMaker,
                  ops::AsRealGradMaker<paddle::framework::OpDesc>,
                  ops::AsRealGradMaker<paddle::imperative::OpBase>);

// CPU kernels. The template parameter T is the *real* element type:
// as_complex takes float/double input; as_real produces float/double output.
REGISTER_OP_CPU_KERNEL(
    as_complex, ops::AsComplexKernel<paddle::platform::CPUDeviceContext, float>,
    ops::AsComplexKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    as_real, ops::AsRealKernel<paddle::platform::CPUDeviceContext, float>,
    ops::AsRealKernel<paddle::platform::CPUDeviceContext, double>);
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/complex_view_op.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/enforce.h"
namespace ops = paddle::operators;

// CUDA kernels mirror the CPU registrations; T is the real element type.
REGISTER_OP_CUDA_KERNEL(
    as_complex,
    ops::AsComplexKernel<paddle::platform::CUDADeviceContext, float>,
    ops::AsComplexKernel<paddle::platform::CUDADeviceContext, double>);

REGISTER_OP_CUDA_KERNEL(
    as_real, ops::AsRealKernel<paddle::platform::CUDADeviceContext, float>,
    ops::AsRealKernel<paddle::platform::CUDADeviceContext, double>);
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/math/complex_functors.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
// Kernel for as_complex: reinterprets a real tensor of shape (*, 2) as a
// complex tensor of shape (*,). A pair of adjacent real values shares the
// memory layout of one complex<T> value, so the op reduces to a raw copy
// followed by restoring the output's shape and dtype metadata.
template <typename DeviceContext, typename T>
class AsComplexKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const auto* x = context.Input<framework::LoDTensor>("X");
    auto* out = context.Output<framework::LoDTensor>("Out");
    // Allocate with the complex dtype; out's dims were set by InferShape.
    out->mutable_data<platform::complex<T>>(context.GetPlace());

    // TensorCopy also changes output's shape & dtype
    const framework::DDim out_dims_original = out->dims();
    framework::TensorCopy(*x, context.GetPlace(), out);
    out->Resize(out_dims_original);  // restored the shape
    out->mutable_data<platform::complex<T>>(
        context.GetPlace());  // restore the dtype
  }
};
// Kernel for as_real: reinterprets a complex tensor of shape (*,) as a real
// tensor of shape (*, 2). The inverse of AsComplexKernel, implemented the
// same way: raw copy, then restore the output's shape and dtype metadata
// that TensorCopy clobbered with the input's.
template <typename DeviceContext, typename T>
class AsRealKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const auto* x = context.Input<framework::LoDTensor>("X");
    auto* out = context.Output<framework::LoDTensor>("Out");
    // Allocate with the real dtype; out's dims were set by InferShape.
    out->mutable_data<T>(context.GetPlace());

    // TensorCopy also changes output's shape & dtype.
    const framework::DDim out_dims_original = out->dims();
    framework::TensorCopy(*x, context.GetPlace(), out);
    out->Resize(out_dims_original);           // restored the shape
    out->mutable_data<T>(context.GetPlace());  // restore the dtype
  }
};
} // namespace operators
} // namespace paddle
@@ -156,6 +156,9 @@ from .tensor.manipulation import roll  # noqa: F401
from .tensor.manipulation import chunk  # noqa: F401
from .tensor.manipulation import tolist  # noqa: F401
from .tensor.manipulation import tensordot  # noqa: F401
from .tensor.manipulation import as_complex  # noqa: F401
from .tensor.manipulation import as_real  # noqa: F401
from .tensor.math import abs  # noqa: F401
from .tensor.math import acos  # noqa: F401
from .tensor.math import asin  # noqa: F401
@@ -559,6 +562,8 @@ __all__ = [  # noqa
    'einsum',
    'set_flags',
    'get_flags',
    'as_complex',
    'as_real',
    'diff',
    'angle',
]
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
from paddle.fluid import dygraph
from paddle import static
paddle.enable_static()
def ref_view_as_complex(x):
    """NumPy reference for as_complex: combine the trailing (real, imag)
    axis of size 2 into a complex array, dropping that axis."""
    re_part = x[..., 0]
    im_part = x[..., 1]
    return re_part + 1j * im_part
def ref_view_as_real(x):
    """NumPy reference for as_real: append a trailing axis of size 2
    holding the real and imaginary parts."""
    return np.stack((np.real(x), np.imag(x)), axis=-1)
class TestViewAsComplexOp(OpTest):
    """OpTest for the as_complex op: checks the forward output and the
    gradient against the NumPy reference implementations."""

    def setUp(self):
        self.op_type = "as_complex"
        # Real input of shape (10, 10, 2); expected output has shape (10, 10).
        x = np.random.randn(10, 10, 2).astype("float64")
        out_ref = ref_view_as_complex(x)
        # Complex output gradient (all ones) used for the user-defined
        # gradient check below.
        self.out_grad = np.ones(
            [10, 10], dtype="float64") + 1j * np.ones(
                [10, 10], dtype="float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': out_ref}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # Analytic input gradient: the output gradient viewed as real, i.e.
        # the inverse view applied to out_grad.
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=[ref_view_as_real(self.out_grad)],
            user_defined_grad_outputs=[self.out_grad])
class TestViewAsRealOp(OpTest):
    """OpTest for the as_real op: checks the forward output and the
    gradient against the NumPy reference implementations."""

    def setUp(self):
        self.op_type = "as_real"
        # Complex input of shape (10, 10); expected output has shape
        # (10, 10, 2).
        real = np.random.randn(10, 10).astype("float64")
        imag = np.random.randn(10, 10).astype("float64")
        x = real + 1j * imag
        out_ref = ref_view_as_real(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out_ref}
        # Real output gradient (all ones) for the user-defined grad check.
        self.out_grad = np.ones([10, 10, 2], dtype="float64")

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # Analytic input gradient: the output gradient viewed as complex,
        # i.e. the inverse view applied to out_grad.
        self.check_grad(
            ['X'],
            'Out',
            user_defined_grads=[ref_view_as_complex(self.out_grad)],
            user_defined_grad_outputs=[self.out_grad])
class TestViewAsComplexAPI(unittest.TestCase):
    """Tests the paddle.as_complex Python API in both dygraph and static
    graph modes against the NumPy reference."""

    def setUp(self):
        self.x = np.random.randn(10, 10, 2)
        self.out = ref_view_as_complex(self.x)

    def test_dygraph(self):
        with dygraph.guard():
            x = paddle.to_tensor(self.x)
            out_np = paddle.as_complex(x).numpy()
        self.assertTrue(np.allclose(self.out, out_np))

    def test_static(self):
        mp, sp = static.Program(), static.Program()
        with static.program_guard(mp, sp):
            x = static.data("x", shape=[10, 10, 2], dtype="float64")
            out = paddle.as_complex(x)

        exe = static.Executor()
        exe.run(sp)  # run the startup program before the main program
        [out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out])
        self.assertTrue(np.allclose(self.out, out_np))
class TestViewAsRealAPI(unittest.TestCase):
    """Tests the paddle.as_real Python API in both dygraph and static
    graph modes against the NumPy reference."""

    def setUp(self):
        self.x = np.random.randn(10, 10) + 1j * np.random.randn(10, 10)
        self.out = ref_view_as_real(self.x)

    def test_dygraph(self):
        with dygraph.guard():
            x = paddle.to_tensor(self.x)
            out_np = paddle.as_real(x).numpy()
        self.assertTrue(np.allclose(self.out, out_np))

    def test_static(self):
        mp, sp = static.Program(), static.Program()
        with static.program_guard(mp, sp):
            x = static.data("x", shape=[10, 10], dtype="complex128")
            out = paddle.as_real(x)

        exe = static.Executor()
        exe.run(sp)  # run the startup program before the main program
        [out_np] = exe.run(mp, feed={"x": self.x}, fetch_list=[out])
        self.assertTrue(np.allclose(self.out, out_np))
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
@@ -111,6 +111,9 @@ from .manipulation import unbind  # noqa: F401
from .manipulation import roll  # noqa: F401
from .manipulation import chunk  # noqa: F401
from .manipulation import tensordot  # noqa: F401
from .manipulation import as_complex  # noqa: F401
from .manipulation import as_real  # noqa: F401
from .math import abs  # noqa: F401
from .math import acos  # noqa: F401
from .math import asin  # noqa: F401
@@ -411,6 +414,8 @@ tensor_method_func = [  # noqa
    'multi_dot',
    'solve',
    'triangular_solve',
    'as_complex',
    'as_real',
    'rad2deg',
    'deg2rad',
    'gcd',
...
@@ -34,6 +34,7 @@ from ..fluid import layers
from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
import paddle
from paddle import _C_ops
from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype

__all__ = []
@@ -2488,3 +2489,94 @@ def tensordot(x, y, axes=2, name=None):
        [contraction_size, not_contraction_size_y])
    out = x.matmul(y).reshape(shape_out)
    return out
def as_complex(x, name=None):
    """Transform a real tensor to a complex tensor.

    The data type of the input tensor is 'float32' or 'float64', and the data
    type of the returned tensor is 'complex64' or 'complex128', respectively.

    The shape of the input tensor is ``(*, 2)`` (``*`` means arbitrary shape),
    i.e. the size of the last axis should be 2, which represents the real and
    imaginary parts of a complex number. The shape of the returned tensor is
    ``(*,)``.

    Args:
        x (Tensor): The input tensor. Data type is 'float32' or 'float64'.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output. Data type is 'complex64' or 'complex128', with the same precision as the input.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
            y = paddle.as_complex(x)
            print(y.numpy())

            # [[ 0. +1.j  2. +3.j  4. +5.j]
            #  [ 6. +7.j  8. +9.j 10.+11.j]]
    """
    # Dygraph fast path: invoke the C++ op directly.
    if in_dygraph_mode():
        return paddle._C_ops.as_complex(x)

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
    op_type = "as_complex"
    helper = LayerHelper(op_type, **locals())
    inputs = {"X": x}
    # The output dtype is the complex counterpart of the real input dtype.
    out = helper.create_variable_for_type_inference(
        dtype=_real_to_complex_dtype(x.dtype))
    outputs = {"Out": out}
    attrs = {}
    helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
    return out
def as_real(x, name=None):
    """Transform a complex tensor to a real tensor.

    The data type of the input tensor is 'complex64' or 'complex128', and the
    data type of the returned tensor is 'float32' or 'float64', respectively.

    When the shape of the input tensor is ``(*, )`` (``*`` means arbitrary
    shape), the shape of the output tensor is ``(*, 2)``, i.e. the shape of
    the output is the shape of the input appended by an extra ``2``, which
    holds the real and imaginary parts.

    Args:
        x (Tensor): The input tensor. Data type is 'complex64' or 'complex128'.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output. Data type is 'float32' or 'float64', with the same precision as the input.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.arange(12, dtype=paddle.float32).reshape([2, 3, 2])
            y = paddle.as_complex(x)
            z = paddle.as_real(y)
            print(z.numpy())

            # [[[ 0.  1.]
            #   [ 2.  3.]
            #   [ 4.  5.]]

            #  [[ 6.  7.]
            #   [ 8.  9.]
            #   [10. 11.]]]
    """
    # Dygraph fast path: invoke the C++ op directly.
    if in_dygraph_mode():
        return paddle._C_ops.as_real(x)

    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
    op_type = "as_real"
    helper = LayerHelper(op_type, **locals())
    inputs = {"X": x}
    # The output dtype is the real counterpart of the complex input dtype.
    out = helper.create_variable_for_type_inference(
        dtype=_complex_to_real_dtype(x.dtype))
    outputs = {"Out": out}
    helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
    return out
@@ -3082,7 +3082,6 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
    return out

def angle(x, name=None):
    r"""
    Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
@@ -3098,7 +3097,7 @@ def angle(x, name=None):
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: An N-D Tensor of real data type with the same precision as that of x's data type.

    Examples:
        .. code-block:: python
...
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment