From 879e913b6d6cc4a13a108066f45ab2b7b51b221b Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Fri, 4 Dec 2020 23:38:30 +0800
Subject: [PATCH] Make transpose, trace, kron, reshape, sum op support complex type (#29321)

* add complex64 and complex128 type; add +-*/@ and slice operator for complex types
* add test cases for complex elementwise, matmul and getitem unittest
* add test cases for complex types
* add test cases for complex matmul unittest
* kron, reshape, transpose support complex types
* sum and trace op support complex types
* add test case of sum and trace op
* fix the bug of imag part of complex not initialized
* format file
* format code style
* kron support type promotion; modify test cases
---
 paddle/fluid/operators/kron_op.cc | 32 ++++++-
 paddle/fluid/operators/kron_op.cu | 14 +++-
 .../operators/reduce_ops/reduce_sum_op.cc | 10 ++-
 .../operators/reduce_ops/reduce_sum_op.cu | 4 +-
 paddle/fluid/operators/reshape_op.cc | 80 +++++++++---------
 paddle/fluid/operators/trace_op.cc | 12 ++-
 paddle/fluid/operators/trace_op.cu | 12 ++-
 paddle/fluid/operators/transpose_op.cc | 24 +++++-
 paddle/fluid/operators/transpose_op.cu | 22 ++++-
 paddle/fluid/platform/complex64.h | 1 +
 .../tests/unittests/test_complex_kron.py | 74 ++++++++++------
 .../tests/unittests/test_complex_reshape.py | 84 ++++++++++++++-----
 .../tests/unittests/test_complex_sum_layer.py | 41 ++++++---
 .../unittests/test_complex_trace_layer.py | 41 ++++++---
 .../tests/unittests/test_complex_transpose.py | 46 +++++++---
 15 files changed, 360 insertions(+), 137 deletions(-)
 mode change 100755 => 100644 paddle/fluid/operators/reshape_op.cc

diff --git a/paddle/fluid/operators/kron_op.cc b/paddle/fluid/operators/kron_op.cc
index 6f7aeb63b1c..db25d05c6b2 100644
--- a/paddle/fluid/operators/kron_op.cc
+++ b/paddle/fluid/operators/kron_op.cc
@@ -18,6 +18,8 @@ limitations under the License.
 */
 #include
 #include "paddle/fluid/operators/kron_op.h"
+#include "paddle/fluid/platform/complex128.h"
+#include "paddle/fluid/platform/complex64.h"
 #include "paddle/fluid/platform/float16.h"
 namespace paddle {
@@ -51,8 +53,22 @@ class KronOp : public framework::OperatorWithKernel {
 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
+    auto data_type =
+        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
+    return framework::OpKernelType(data_type, ctx.GetPlace());
+  }
+
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string& var_name, const framework::Tensor& tensor,
+      const framework::OpKernelType& expected_kernel_type) const {
+    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
+      // only promote inputs' types when it contains a complex input
+      return framework::OpKernelType(tensor.type(), tensor.place(),
+                                     tensor.layout());
+    } else {
+      return framework::OpKernelType(expected_kernel_type.data_type_,
+                                     tensor.place(), tensor.layout());
+    }
   }
 };

@@ -154,7 +170,11 @@ REGISTER_OP_CPU_KERNEL(
     ops::KronKernel,
     ops::KronKernel,
-    ops::KronKernel);
+    ops::KronKernel,
+    ops::KronKernel,
+    ops::KronKernel);

 REGISTER_OPERATOR(kron_grad, ops::KronGradOp);
 REGISTER_OP_CPU_KERNEL(
@@ -163,4 +183,8 @@ REGISTER_OP_CPU_KERNEL(
     ops::KronGradKernel,
     ops::KronGradKernel,
-    ops::KronGradKernel);
+    ops::KronGradKernel,
+    ops::KronGradKernel,
+    ops::KronGradKernel);
diff --git a/paddle/fluid/operators/kron_op.cu b/paddle/fluid/operators/kron_op.cu
index 02eeefeabbe..a348cb2e175 100644
--- a/paddle/fluid/operators/kron_op.cu
+++ b/paddle/fluid/operators/kron_op.cu
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License.
*/ #include "paddle/fluid/operators/kron_op.h" +#include "paddle/fluid/platform/complex128.h" +#include "paddle/fluid/platform/complex64.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; @@ -22,7 +24,11 @@ REGISTER_OP_CUDA_KERNEL( ops::KronKernel, ops::KronKernel, - ops::KronKernel); + ops::KronKernel, + ops::KronKernel, + ops::KronKernel); REGISTER_OP_CUDA_KERNEL( kron_grad, ops::KronGradKernel, @@ -30,4 +36,8 @@ REGISTER_OP_CUDA_KERNEL( ops::KronGradKernel, ops::KronGradKernel, - ops::KronGradKernel); + ops::KronGradKernel, + ops::KronGradKernel, + ops::KronGradKernel); diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc index a3850c5e264..5a8e8894e1c 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc +++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc @@ -115,6 +115,12 @@ REGISTER_OP_CPU_KERNEL( ops::SumFunctor>, ops::ReduceKernel, ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); template @@ -125,4 +131,6 @@ using CPUReduceSumGradKernel = REGISTER_OP_CPU_KERNEL(reduce_sum_grad, CPUReduceSumGradKernel, CPUReduceSumGradKernel, CPUReduceSumGradKernel, - CPUReduceSumGradKernel); + CPUReduceSumGradKernel, + CPUReduceSumGradKernel, + CPUReduceSumGradKernel); diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu index e64845a4f74..219cc231a1e 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu +++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu @@ -72,4 +72,6 @@ class ReduceSumKernel : public framework::OpKernel { REGISTER_OP_CUDA_KERNEL(reduce_sum, ops::ReduceSumKernel, ops::ReduceSumKernel, ops::ReduceSumKernel, - ops::ReduceSumKernel); + ops::ReduceSumKernel, + ops::ReduceSumKernel, + ops::ReduceSumKernel); diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc old mode 100755 new mode 100644 index 59037ca6965..1a0a8581184 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -618,26 +618,26 @@ REGISTER_OPERATOR(reshape2_grad_grad, ops::Reshape2DoubleGradOp, ops::ReshapeDoubleGradInplaceInferer, ops::ReshapeDoubleGradOpNoNeedBufferVarInferer); -REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double, - ops::ReshapeKernel, int8_t, ops::ReshapeKernel, - uint8_t, ops::ReshapeKernel, int, - ops::ReshapeKernel, int64_t, ops::ReshapeKernel, - bool, ops::ReshapeKernel, - paddle::platform::bfloat16, ops::ReshapeKernel); - -REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, - double, ops::ReshapeGradKernel, int, - ops::ReshapeGradKernel, uint8_t, - ops::ReshapeGradKernel, int64_t, - ops::ReshapeGradKernel, bool, - ops::ReshapeGradKernel); -REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad_grad, float, - ops::ReshapeDoubleGradKernel, double, - ops::ReshapeDoubleGradKernel, int, - ops::ReshapeDoubleGradKernel, uint8_t, - ops::ReshapeDoubleGradKernel, int64_t, - ops::ReshapeDoubleGradKernel, bool, - ops::ReshapeDoubleGradKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR( + reshape2, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int8_t, + ops::ReshapeKernel, uint8_t, ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel, bool, ops::ReshapeKernel, + paddle::platform::bfloat16, ops::ReshapeKernel, paddle::platform::complex64, + ops::ReshapeKernel, paddle::platform::complex128, ops::ReshapeKernel); + +REGISTER_OP_CPU_KERNEL_FUNCTOR( + reshape2_grad, float, 
ops::ReshapeGradKernel, double, + ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, uint8_t, + ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, bool, + ops::ReshapeGradKernel, paddle::platform::complex64, ops::ReshapeGradKernel, + paddle::platform::complex128, ops::ReshapeGradKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR( + reshape2_grad_grad, float, ops::ReshapeDoubleGradKernel, double, + ops::ReshapeDoubleGradKernel, int, ops::ReshapeDoubleGradKernel, uint8_t, + ops::ReshapeDoubleGradKernel, int64_t, ops::ReshapeDoubleGradKernel, bool, + ops::ReshapeDoubleGradKernel, paddle::platform::complex64, + ops::ReshapeDoubleGradKernel, paddle::platform::complex128, + ops::ReshapeDoubleGradKernel); #ifdef PADDLE_WITH_CUDA REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, @@ -656,34 +656,38 @@ REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int, ops::ReshapeKernel, uint8_t, ops::ReshapeKernel, int64_t, ops::ReshapeKernel, plat::float16, - ops::ReshapeKernel, bool, ops::ReshapeKernel); -REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, - double, ops::ReshapeGradKernel, int, - ops::ReshapeGradKernel, uint8_t, - ops::ReshapeGradKernel, int64_t, - ops::ReshapeGradKernel, plat::float16, - ops::ReshapeGradKernel, bool, - ops::ReshapeGradKernel); - -REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad_grad, float, - ops::ReshapeDoubleGradKernel, double, - ops::ReshapeDoubleGradKernel, int, - ops::ReshapeDoubleGradKernel, uint8_t, - ops::ReshapeDoubleGradKernel, int64_t, - ops::ReshapeDoubleGradKernel, plat::float16, - ops::ReshapeDoubleGradKernel, bool, - ops::ReshapeDoubleGradKernel); + ops::ReshapeKernel, bool, ops::ReshapeKernel, + plat::complex64, ops::ReshapeKernel, + plat::complex128, ops::ReshapeKernel); +REGISTER_OP_CUDA_KERNEL_FUNCTOR( + reshape2_grad, float, ops::ReshapeGradKernel, double, + ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, uint8_t, + ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, plat::float16, + ops::ReshapeGradKernel, bool, ops::ReshapeGradKernel, plat::complex64, + ops::ReshapeGradKernel, plat::complex128, ops::ReshapeGradKernel); + +REGISTER_OP_CUDA_KERNEL_FUNCTOR( + reshape2_grad_grad, float, ops::ReshapeDoubleGradKernel, double, + ops::ReshapeDoubleGradKernel, int, ops::ReshapeDoubleGradKernel, uint8_t, + ops::ReshapeDoubleGradKernel, int64_t, ops::ReshapeDoubleGradKernel, + plat::float16, ops::ReshapeDoubleGradKernel, bool, + ops::ReshapeDoubleGradKernel, plat::complex64, ops::ReshapeDoubleGradKernel, + plat::complex128, ops::ReshapeDoubleGradKernel); #endif #ifdef PADDLE_WITH_XPU REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int, ops::ReshapeKernel, int64_t, ops::ReshapeKernel, plat::float16, - ops::ReshapeKernel, bool, ops::ReshapeKernel); + ops::ReshapeKernel, bool, ops::ReshapeKernel, + plat::complex64, ops::ReshapeKernel, + plat::complex128, ops::ReshapeKernel); REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, double, ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, plat::float16, ops::ReshapeGradKernel, bool, + ops::ReshapeGradKernel, plat::complex64, + ops::ReshapeGradKernel, plat::complex128, ops::ReshapeGradKernel); #endif diff --git a/paddle/fluid/operators/trace_op.cc b/paddle/fluid/operators/trace_op.cc index 66766b4e1cd..e90cf2054f7 100644 --- a/paddle/fluid/operators/trace_op.cc +++ b/paddle/fluid/operators/trace_op.cc @@ -163,9 
+163,17 @@ REGISTER_OP_CPU_KERNEL( trace, ops::TraceKernel, ops::TraceKernel, ops::TraceKernel, - ops::TraceKernel); + ops::TraceKernel, + ops::TraceKernel, + ops::TraceKernel); REGISTER_OP_CPU_KERNEL( trace_grad, ops::TraceGradKernel, ops::TraceGradKernel, ops::TraceGradKernel, - ops::TraceGradKernel); + ops::TraceGradKernel, + ops::TraceGradKernel, + ops::TraceGradKernel); diff --git a/paddle/fluid/operators/trace_op.cu b/paddle/fluid/operators/trace_op.cu index 452f2dd9d62..ea328361ded 100644 --- a/paddle/fluid/operators/trace_op.cu +++ b/paddle/fluid/operators/trace_op.cu @@ -60,11 +60,19 @@ REGISTER_OP_CUDA_KERNEL( ops::TraceCUDAKernel, ops::TraceCUDAKernel, - ops::TraceCUDAKernel); + ops::TraceCUDAKernel, + ops::TraceCUDAKernel, + ops::TraceCUDAKernel); REGISTER_OP_CUDA_KERNEL( trace_grad, ops::TraceGradKernel, ops::TraceGradKernel, ops::TraceGradKernel, ops::TraceGradKernel, - ops::TraceGradKernel); + ops::TraceGradKernel, + ops::TraceGradKernel, + ops::TraceGradKernel); diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index a098327ab29..42f4a819baa 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ b/paddle/fluid/operators/transpose_op.cc @@ -321,11 +321,19 @@ REGISTER_OPERATOR(transpose_grad, ops::TransposeOpGrad); REGISTER_OP_CPU_KERNEL( transpose, ops::TransposeKernel, - ops::TransposeKernel); + ops::TransposeKernel, + ops::TransposeKernel, + ops::TransposeKernel); REGISTER_OP_CPU_KERNEL( transpose_grad, ops::TransposeGradKernel, - ops::TransposeGradKernel); + ops::TransposeGradKernel, + ops::TransposeGradKernel, + ops::TransposeGradKernel); REGISTER_OPERATOR(transpose2, ops::Transpose2Op, ops::Transpose2OpMaker, ops::Transpose2GradMaker, @@ -336,10 +344,18 @@ REGISTER_OP_CPU_KERNEL( transpose2, ops::TransposeKernel, ops::TransposeKernel, ops::TransposeKernel, - ops::TransposeKernel); + ops::TransposeKernel, + ops::TransposeKernel, + ops::TransposeKernel); REGISTER_OP_CPU_KERNEL( transpose2_grad, ops::TransposeGradKernel, ops::TransposeGradKernel, ops::TransposeGradKernel, - ops::TransposeGradKernel); + ops::TransposeGradKernel, + ops::TransposeGradKernel, + ops::TransposeGradKernel); diff --git a/paddle/fluid/operators/transpose_op.cu b/paddle/fluid/operators/transpose_op.cu index 0679668cf1b..afeb22bd6fa 100644 --- a/paddle/fluid/operators/transpose_op.cu +++ b/paddle/fluid/operators/transpose_op.cu @@ -730,14 +730,21 @@ REGISTER_OP_CUDA_KERNEL( transpose, ops::TransposeGPUKernel, ops::TransposeGPUKernel, + ops::TransposeGPUKernel, ops::TransposeGPUKernel); + paddle::platform::complex64>, + ops::TransposeGPUKernel); REGISTER_OP_CUDA_KERNEL( transpose_grad, ops::TransposeGradGPUKernel, ops::TransposeGradGPUKernel, ops::TransposeGradGPUKernel); + plat::float16>, + ops::TransposeGradGPUKernel, + ops::TransposeGradGPUKernel); REGISTER_OP_CUDA_KERNEL( transpose2, @@ -745,8 +752,11 @@ REGISTER_OP_CUDA_KERNEL( ops::TransposeGPUKernel, ops::TransposeGPUKernel, ops::TransposeGPUKernel, + ops::TransposeGPUKernel, ops::TransposeGPUKernel); + paddle::platform::complex64>, + ops::TransposeGPUKernel); REGISTER_OP_CUDA_KERNEL( transpose2_grad, ops::TransposeGradGPUKernel, @@ -754,4 +764,8 @@ REGISTER_OP_CUDA_KERNEL( ops::TransposeGradGPUKernel, ops::TransposeGradGPUKernel, ops::TransposeGradGPUKernel); + plat::float16>, + ops::TransposeGradGPUKernel, + ops::TransposeGradGPUKernel); diff --git a/paddle/fluid/platform/complex64.h b/paddle/fluid/platform/complex64.h index d4ab7f3fda4..7da11cfe5ed 100644 --- a/paddle/fluid/platform/complex64.h +++ 
b/paddle/fluid/platform/complex64.h @@ -124,6 +124,7 @@ struct PADDLE_ALIGN(8) complex64 { HOSTDEVICE inline complex64& operator=(int32_t val) { real = static_cast(val); + imag = 0; return *this; } diff --git a/python/paddle/fluid/tests/unittests/test_complex_kron.py b/python/paddle/fluid/tests/unittests/test_complex_kron.py index 863d61e6027..0edcb2be19a 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_kron.py +++ b/python/paddle/fluid/tests/unittests/test_complex_kron.py @@ -27,42 +27,68 @@ class ComplexKronTestCase(unittest.TestCase): def setUp(self): self.ref_result = np.kron(self.x, self.y) + self._places = [paddle.CPUPlace()] + if fluid.is_compiled_with_cuda(): + self._places.append(paddle.CUDAPlace(0)) def runTest(self): - place = fluid.CPUPlace() - self.test_identity(place) - - if fluid.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - self.test_identity(place) + for place in self._places: + self.test_complex_api(place) + self.test_basic_api(place) - def test_identity(self, place): + def test_complex_api(self, place): with dg.guard(place): x_var = dg.to_variable(self.x) y_var = dg.to_variable(self.y) out_var = paddle.complex.kron(x_var, y_var) - np.testing.assert_allclose(out_var.numpy(), self.ref_result) + self.assertTrue(np.allclose(out_var.numpy(), self.ref_result)) + + def test_basic_api(self, place): + with dg.guard(place): + x_var = paddle.Tensor( + value=self.x, + place=place, + persistable=False, + zero_copy=None, + stop_gradient=True) + + y_var = paddle.Tensor( + value=self.y, + place=place, + persistable=False, + zero_copy=None, + stop_gradient=True) + + out_var = tensor.math.kron(x_var, y_var) + self.assertTrue(np.allclose(out_var.numpy(), self.ref_result)) def load_tests(loader, standard_tests, pattern): suite = unittest.TestSuite() - suite.addTest( - ComplexKronTestCase( - x=np.random.randn(2, 2) + 1j * np.random.randn(2, 2), - y=np.random.randn(3, 3) + 1j * np.random.randn(3, 3))) - suite.addTest( - ComplexKronTestCase( - x=np.random.randn(2, 2), - y=np.random.randn(3, 3) + 1j * np.random.randn(3, 3))) - suite.addTest( - ComplexKronTestCase( - x=np.random.randn(2, 2) + 1j * np.random.randn(2, 2), - y=np.random.randn(3, 3))) + for dtype in ["float32", "float64"]: + suite.addTest( + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype) + 1j * np.random.randn( + 2, 2).astype(dtype), + y=np.random.randn(3, 3).astype(dtype) + 1j * np.random.randn( + 3, 3).astype(dtype))) + suite.addTest( + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype), + y=np.random.randn(3, 3).astype(dtype) + 1j * np.random.randn( + 3, 3).astype(dtype))) + suite.addTest( + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype) + 1j * np.random.randn( + 2, 2).astype(dtype), + y=np.random.randn(3, 3).astype(dtype))) + + suite.addTest( + ComplexKronTestCase( + x=np.random.randn(2, 2).astype(dtype) + 1j * np.random.randn( + 2, 2).astype(dtype), + y=np.random.randn(2, 2, 3).astype(dtype))) - suite.addTest( - ComplexKronTestCase( - x=np.random.randn(2, 2) + 1j * np.random.randn(2, 2), - y=np.random.randn(2, 2, 3))) return suite diff --git a/python/paddle/fluid/tests/unittests/test_complex_reshape.py b/python/paddle/fluid/tests/unittests/test_complex_reshape.py index 6d124d8da2b..2d041354797 100644 --- a/python/paddle/fluid/tests/unittests/test_complex_reshape.py +++ b/python/paddle/fluid/tests/unittests/test_complex_reshape.py @@ -13,6 +13,7 @@ # limitations under the License. 
 import paddle.fluid as fluid
+import paddle
 from paddle import complex as cpx
 import paddle.fluid.dygraph as dg
 import numpy as np
@@ -20,31 +21,72 @@ import unittest

 class TestComplexReshape(unittest.TestCase):
+    def setUp(self):
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
+        if fluid.core.is_compiled_with_cuda():
+            self._places.append(paddle.CUDAPlace(0))
+
     def test_case1(self):
-        x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
-        shape = (2, -1)
+        for dtype in self._dtypes:
+            x_np = np.random.randn(
+                2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
+                                                              4).astype(dtype)
+            shape = (2, -1)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = dg.to_variable(x_np)
+                    y_var = cpx.reshape(x_var, shape)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape), y_np)
+
+    def test_case2(self):
+        for dtype in self._dtypes:
+            x_np = np.random.randn(
+                2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
+                                                              4).astype(dtype)
+            shape = (0, -1)
+            shape_ = (2, 12)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = dg.to_variable(x_np)
+                    y_var = cpx.reshape(x_var, shape, inplace=True)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)

-        place = fluid.CPUPlace()
-        with dg.guard(place):
-            x_var = dg.to_variable(x_np)
-            y_var = cpx.reshape(x_var, shape)
-            y_np = y_var.numpy()
+    def test_case3(self):
+        for dtype in self._dtypes:
+            x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
+            shape = (2, -1)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = paddle.Tensor(
+                        value=x_np,
+                        place=fluid.framework._current_expected_place(),
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    y_var = fluid.layers.reshape(x_var, shape)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape), y_np)

-        np.testing.assert_allclose(np.reshape(x_np, shape), y_np)
+    def test_case4(self):
+        for dtype in self._dtypes:
+            x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
+            shape = (0, -1)
+            shape_ = (2, 12)

-    def test_case2(self):
-        x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
-        shape = (0, -1)
-        shape_ = (2, 12)
-
-        place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        with dg.guard(place):
-            x_var = dg.to_variable(x_np)
-            y_var = cpx.reshape(x_var, shape, inplace=True)
-            y_np = y_var.numpy()
-
-        np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = paddle.Tensor(
+                        value=x_np,
+                        place=fluid.framework._current_expected_place(),
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    y_var = fluid.layers.reshape(x_var, shape)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)

 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py
index f8637b44888..f2a9049c02a 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py
@@ -14,28 +14,47 @@ import unittest
 import numpy as np
+import paddle
 from numpy.random import random as rand
 from paddle import complex as cpx
+from paddle import tensor
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg

 class TestComplexSumLayer(unittest.TestCase):
     def setUp(self):
-        self._dtype = "float64"
-        self._places = [fluid.CPUPlace()]
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
-            self._places.append(fluid.CUDAPlace(0))
+            self._places.append(paddle.CUDAPlace(0))

     def test_complex_x(self):
-        input = rand([2, 10, 10]).astype(self._dtype) + 1j * rand(
-            [2, 10, 10]).astype(self._dtype)
-        for place in self._places:
-            with dg.guard(place):
-                var_x = dg.to_variable(input)
-                result = cpx.sum(var_x, dim=[1, 2]).numpy()
-                target = np.sum(input, axis=(1, 2))
-                self.assertTrue(np.allclose(result, target))
+        for dtype in self._dtypes:
+            input = rand([2, 10, 10]).astype(dtype) + 1j * rand(
+                [2, 10, 10]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = dg.to_variable(input)
+                    result = cpx.sum(var_x, dim=[1, 2]).numpy()
+                    target = np.sum(input, axis=(1, 2))
+                    self.assertTrue(np.allclose(result, target))
+
+    def test_complex_basic_api(self):
+        for dtype in self._dtypes:
+            input = rand([2, 10, 10]).astype(dtype) + 1j * rand(
+                [2, 10, 10]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = paddle.Tensor(
+                        value=input,
+                        place=place,
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    result = tensor.sum(var_x, axis=[1, 2]).numpy()
+                    target = np.sum(input, axis=(1, 2))
+                    self.assertTrue(np.allclose(result, target))

 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py
index acc1e41b246..9912b782513 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py
@@ -14,28 +14,49 @@ import unittest
 import numpy as np
+import paddle
 from numpy.random import random as rand
 from paddle import complex as cpx
+from paddle import tensor
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg

 class TestComplexTraceLayer(unittest.TestCase):
     def setUp(self):
-        self._dtype = "float64"
+        self._dtypes = ["float32", "float64"]
         self._places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
             self._places.append(fluid.CUDAPlace(0))

-    def test_complex_x(self):
-        input = rand([2, 20, 2, 3]).astype(self._dtype) + 1j * rand(
-            [2, 20, 2, 3]).astype(self._dtype)
-        for place in self._places:
-            with dg.guard(place):
-                var_x = dg.to_variable(input)
-                result = cpx.trace(var_x, offset=1, axis1=0, axis2=2).numpy()
-                target = np.trace(input, offset=1, axis1=0, axis2=2)
-                self.assertTrue(np.allclose(result, target))
+    def test_complex_api(self):
+        for dtype in self._dtypes:
+            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
+                [2, 20, 2, 3]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = dg.to_variable(input)
+                    result = cpx.trace(
+                        var_x, offset=1, axis1=0, axis2=2).numpy()
+                    target = np.trace(input, offset=1, axis1=0, axis2=2)
+                    self.assertTrue(np.allclose(result, target))
+
+    def test_basic_api(self):
+        for dtype in self._dtypes:
+            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
+                [2, 20, 2, 3]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = paddle.Tensor(
+                        value=input,
+                        place=place,
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    result = tensor.trace(
+                        var_x, offset=1, axis1=0, axis2=2).numpy()
+                    target = np.trace(input, offset=1, axis1=0, axis2=2)
+                    self.assertTrue(np.allclose(result, target))

 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_complex_transpose.py b/python/paddle/fluid/tests/unittests/test_complex_transpose.py
index e31cb9e7051..a8fa2524d44 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_transpose.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_transpose.py
@@ -21,21 +21,41 @@ import paddle.fluid.dygraph as dg

 class TestComplexTransposeLayer(unittest.TestCase):
     def setUp(self):
-        self._places = [fluid.CPUPlace()]
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
-            self._places.append(fluid.CUDAPlace(0))
+            self._places.append(paddle.CUDAPlace(0))

-    def test_identity(self):
-        data = np.random.random(
-            (2, 3, 4, 5)).astype("float32") + 1J * np.random.random(
-                (2, 3, 4, 5)).astype("float32")
-        perm = [3, 2, 0, 1]
-        np_trans = np.transpose(data, perm)
-        for place in self._places:
-            with dg.guard(place):
-                var = dg.to_variable(data)
-                trans = paddle.complex.transpose(var, perm=perm)
-                self.assertTrue(np.allclose(trans.numpy(), np_trans))
+    def test_transpose_by_complex_api(self):
+        for dtype in self._dtypes:
+            data = np.random.random(
+                (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random(
+                    (2, 3, 4, 5)).astype(dtype)
+            perm = [3, 2, 0, 1]
+            np_trans = np.transpose(data, perm)
+            for place in self._places:
+                with dg.guard(place):
+                    var = dg.to_variable(data)
+                    trans = paddle.complex.transpose(var, perm=perm)
+                    self.assertTrue(np.allclose(trans.numpy(), np_trans))
+
+    def test_transpose_by_basic_api(self):
+        for dtype in self._dtypes:
+            data = np.random.random(
+                (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random(
+                    (2, 3, 4, 5)).astype(dtype)
+            perm = [3, 2, 0, 1]
+            np_trans = np.transpose(data, perm)
+            for place in self._places:
+                with dg.guard(place):
+                    var = paddle.Tensor(
+                        value=data,
+                        place=place,
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    trans = paddle.transpose(var, perm=perm)
+                    self.assertTrue(np.allclose(trans.numpy(), np_trans))

 if __name__ == '__main__':
-- 
GitLab
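
For reference, the user-visible behavior enabled by the kernel registrations above is easiest to see from Python. The following is a minimal usage sketch, not part of the patch itself; it is assembled from the unit tests above and assumes the same circa-2.0-rc dygraph API those tests use (paddle.Tensor construction, paddle.transpose, fluid.layers.reshape, and the paddle.tensor kron/sum/trace functions).

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from paddle import tensor

# Complex inputs (complex64 when built from float32 parts). kron also accepts
# one real operand and promotes the result to the complex type, which is what
# KronOp::GetKernelTypeForVar above implements.
x = np.random.randn(2, 2).astype("float32") + 1j * np.random.randn(2, 2).astype("float32")
y = np.random.randn(3, 3).astype("float32") + 1j * np.random.randn(3, 3).astype("float32")

place = paddle.CPUPlace()
with dg.guard(place):
    x_var = paddle.Tensor(value=x, place=place, persistable=False,
                          zero_copy=None, stop_gradient=True)
    y_var = paddle.Tensor(value=y, place=place, persistable=False,
                          zero_copy=None, stop_gradient=True)

    # Each call dispatches to one of the complex64/complex128 kernels
    # registered in this patch and is checked against the NumPy reference.
    np.testing.assert_allclose(
        tensor.math.kron(x_var, y_var).numpy(), np.kron(x, y), rtol=1e-5)
    np.testing.assert_allclose(
        paddle.transpose(x_var, perm=[1, 0]).numpy(), x.transpose([1, 0]))
    np.testing.assert_allclose(
        fluid.layers.reshape(x_var, (1, -1)).numpy(), np.reshape(x, (1, -1)))
    np.testing.assert_allclose(
        tensor.sum(x_var, axis=[0, 1]).numpy(), np.sum(x, axis=(0, 1)), rtol=1e-5)
    np.testing.assert_allclose(
        tensor.trace(x_var, offset=0, axis1=0, axis2=1).numpy(), np.trace(x), rtol=1e-5)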