diff --git a/paddle/fluid/operators/kron_op.cc b/paddle/fluid/operators/kron_op.cc
index 6f7aeb63b1ced096954d64c2882dabfca808acd6..db25d05c6b24346db3ac5c14a0fd0eacaf913c28 100644
--- a/paddle/fluid/operators/kron_op.cc
+++ b/paddle/fluid/operators/kron_op.cc
@@ -18,6 +18,8 @@ limitations under the License. */
 #include <vector>

 #include "paddle/fluid/operators/kron_op.h"
+#include "paddle/fluid/platform/complex128.h"
+#include "paddle/fluid/platform/complex64.h"
 #include "paddle/fluid/platform/float16.h"

 namespace paddle {
@@ -51,8 +53,22 @@ class KronOp : public framework::OperatorWithKernel {
 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
-    return framework::OpKernelType(
-        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
+    auto data_type =
+        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");
+    return framework::OpKernelType(data_type, ctx.GetPlace());
+  }
+
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string& var_name, const framework::Tensor& tensor,
+      const framework::OpKernelType& expected_kernel_type) const {
+    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
+      // only promote the input types when one of the inputs is complex
+      return framework::OpKernelType(tensor.type(), tensor.place(),
+                                     tensor.layout());
+    } else {
+      return framework::OpKernelType(expected_kernel_type.data_type_,
+                                     tensor.place(), tensor.layout());
+    }
   }
 };

@@ -154,7 +170,11 @@ REGISTER_OP_CPU_KERNEL(
     ops::KronKernel<paddle::platform::CPUDeviceContext,
                     paddle::platform::float16>,
     ops::KronKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::KronKernel<paddle::platform::CPUDeviceContext, int64_t>);
+    ops::KronKernel<paddle::platform::CPUDeviceContext, int64_t>,
+    ops::KronKernel<paddle::platform::CPUDeviceContext,
+                    paddle::platform::complex64>,
+    ops::KronKernel<paddle::platform::CPUDeviceContext,
+                    paddle::platform::complex128>);

 REGISTER_OPERATOR(kron_grad, ops::KronGradOp);
 REGISTER_OP_CPU_KERNEL(
@@ -163,4 +183,8 @@ REGISTER_OP_CPU_KERNEL(
     ops::KronGradKernel<paddle::platform::CPUDeviceContext,
                         paddle::platform::float16>,
     ops::KronGradKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::KronGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
+    ops::KronGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
+    ops::KronGradKernel<paddle::platform::CPUDeviceContext,
+                        paddle::platform::complex64>,
+    ops::KronGradKernel<paddle::platform::CPUDeviceContext,
+                        paddle::platform::complex128>);
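With the kernel-type hooks above, kron picks its kernel from the promoted data type of X and Y, so pairing a real operand with a complex operand dispatches to a complex kernel instead of failing. A minimal dygraph sketch of that behaviour, written with the same constructs the unit tests further down use (an illustration only, not part of the patch):

import numpy as np
import paddle
import paddle.fluid.dygraph as dg
from paddle import tensor

place = paddle.CPUPlace()
with dg.guard(place):
    # one real (float64) operand and one complex128 operand
    x = np.random.randn(2, 2)
    y = np.random.randn(3, 3) + 1j * np.random.randn(3, 3)
    x_var = paddle.Tensor(value=x, place=place, persistable=False,
                          zero_copy=None, stop_gradient=True)
    y_var = paddle.Tensor(value=y, place=place, persistable=False,
                          zero_copy=None, stop_gradient=True)
    # X is promoted to complex128 before the kernel is chosen
    out = tensor.math.kron(x_var, y_var)
    np.testing.assert_allclose(out.numpy(), np.kron(x, y))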
diff --git a/paddle/fluid/operators/kron_op.cu b/paddle/fluid/operators/kron_op.cu
index 02eeefeabbeb9b1822d95a5e14793a72d3d997f4..a348cb2e1759e8ad8c2f70c7c25478c94e35e786 100644
--- a/paddle/fluid/operators/kron_op.cu
+++ b/paddle/fluid/operators/kron_op.cu
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/kron_op.h"
+#include "paddle/fluid/platform/complex128.h"
+#include "paddle/fluid/platform/complex64.h"
 #include "paddle/fluid/platform/float16.h"

 namespace ops = paddle::operators;
@@ -22,7 +24,11 @@ REGISTER_OP_CUDA_KERNEL(
     ops::KronKernel<paddle::platform::CUDADeviceContext,
                     paddle::platform::float16>,
     ops::KronKernel<paddle::platform::CUDADeviceContext, int>,
-    ops::KronKernel<paddle::platform::CUDADeviceContext, int64_t>);
+    ops::KronKernel<paddle::platform::CUDADeviceContext, int64_t>,
+    ops::KronKernel<paddle::platform::CUDADeviceContext,
+                    paddle::platform::complex64>,
+    ops::KronKernel<paddle::platform::CUDADeviceContext,
+                    paddle::platform::complex128>);

 REGISTER_OP_CUDA_KERNEL(
     kron_grad, ops::KronGradKernel<paddle::platform::CUDADeviceContext, float>,
@@ -30,4 +36,8 @@ REGISTER_OP_CUDA_KERNEL(
     ops::KronGradKernel<paddle::platform::CUDADeviceContext,
                         paddle::platform::float16>,
     ops::KronGradKernel<paddle::platform::CUDADeviceContext, int>,
-    ops::KronGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
+    ops::KronGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
+    ops::KronGradKernel<paddle::platform::CUDADeviceContext,
+                        paddle::platform::complex64>,
+    ops::KronGradKernel<paddle::platform::CUDADeviceContext,
+                        paddle::platform::complex128>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
index a3850c5e264548c8b886c9c1c1e12500c1436ffe..5a8e8894e1c5da8e0d34f15f2e402b7ecbbea364 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
+++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
@@ -115,6 +115,12 @@ REGISTER_OP_CPU_KERNEL(
                                   ops::SumFunctor>,
     ops::ReduceKernel<paddle::platform::CPUDeviceContext, double,
                       ops::SumFunctor>,
     ops::ReduceKernel<paddle::platform::CPUDeviceContext, int, ops::SumFunctor>,
     ops::ReduceKernel<paddle::platform::CPUDeviceContext, int64_t,
-                      ops::SumFunctor>);
+                      ops::SumFunctor>,
+    ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                      paddle::platform::complex64, ops::SumFunctor>,
+    ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                      paddle::platform::complex128, ops::SumFunctor>);

 template <typename T>
@@ -125,4 +131,6 @@ using CPUReduceSumGradKernel =
 REGISTER_OP_CPU_KERNEL(reduce_sum_grad, CPUReduceSumGradKernel<float>,
                        CPUReduceSumGradKernel<double>,
                        CPUReduceSumGradKernel<int>,
-                       CPUReduceSumGradKernel<int64_t>);
+                       CPUReduceSumGradKernel<int64_t>,
+                       CPUReduceSumGradKernel<paddle::platform::complex64>,
+                       CPUReduceSumGradKernel<paddle::platform::complex128>);
diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu
index e64845a4f74e34e9e3835ed111798b9a89ea2bc7..219cc231a1ea7a0786026d6dcc6d63ce78e24025 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu
+++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu
@@ -72,4 +72,6 @@ class ReduceSumKernel : public framework::OpKernel<T> {
 REGISTER_OP_CUDA_KERNEL(reduce_sum, ops::ReduceSumKernel<float>,
                         ops::ReduceSumKernel<double>,
                         ops::ReduceSumKernel<int>,
-                        ops::ReduceSumKernel<int64_t>);
+                        ops::ReduceSumKernel<int64_t>,
+                        ops::ReduceSumKernel<paddle::platform::complex64>,
+                        ops::ReduceSumKernel<paddle::platform::complex128>);
diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc
old mode 100755
new mode 100644
index 59037ca6965a0e5e5826ecdaff76dac6c42f7021..1a0a8581184909e976c66cf6fc021f79576982c7
--- a/paddle/fluid/operators/reshape_op.cc
+++ b/paddle/fluid/operators/reshape_op.cc
@@ -618,26 +618,26 @@ REGISTER_OPERATOR(reshape2_grad_grad, ops::Reshape2DoubleGradOp,
                   ops::ReshapeDoubleGradInplaceInferer,
                   ops::ReshapeDoubleGradOpNoNeedBufferVarInferer);

-REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
-                               ops::ReshapeKernel, int8_t, ops::ReshapeKernel,
-                               uint8_t, ops::ReshapeKernel, int,
-                               ops::ReshapeKernel, int64_t, ops::ReshapeKernel,
-                               bool, ops::ReshapeKernel,
-                               paddle::platform::bfloat16, ops::ReshapeKernel);
-
-REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
-                               double, ops::ReshapeGradKernel, int,
-                               ops::ReshapeGradKernel, uint8_t,
-                               ops::ReshapeGradKernel, int64_t,
-                               ops::ReshapeGradKernel, bool,
-                               ops::ReshapeGradKernel);
-REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad_grad, float,
-                               ops::ReshapeDoubleGradKernel, double,
-                               ops::ReshapeDoubleGradKernel, int,
-                               ops::ReshapeDoubleGradKernel, uint8_t,
-                               ops::ReshapeDoubleGradKernel, int64_t,
-                               ops::ReshapeDoubleGradKernel, bool,
-                               ops::ReshapeDoubleGradKernel);
+REGISTER_OP_CPU_KERNEL_FUNCTOR(
+    reshape2, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int8_t,
+    ops::ReshapeKernel, uint8_t, ops::ReshapeKernel, int, ops::ReshapeKernel,
+    int64_t, ops::ReshapeKernel, bool, ops::ReshapeKernel,
+    paddle::platform::bfloat16, ops::ReshapeKernel, paddle::platform::complex64,
+    ops::ReshapeKernel, paddle::platform::complex128, ops::ReshapeKernel);
+
+REGISTER_OP_CPU_KERNEL_FUNCTOR(
+    reshape2_grad, float, ops::ReshapeGradKernel, double,
+    ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, uint8_t,
+    ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, bool,
+    ops::ReshapeGradKernel, paddle::platform::complex64, ops::ReshapeGradKernel,
+    paddle::platform::complex128, ops::ReshapeGradKernel);
+REGISTER_OP_CPU_KERNEL_FUNCTOR(
+    reshape2_grad_grad, float, ops::ReshapeDoubleGradKernel, double,
+    ops::ReshapeDoubleGradKernel, int, ops::ReshapeDoubleGradKernel, uint8_t,
+    ops::ReshapeDoubleGradKernel, int64_t, ops::ReshapeDoubleGradKernel, bool,
+    ops::ReshapeDoubleGradKernel, paddle::platform::complex64,
+    ops::ReshapeDoubleGradKernel, paddle::platform::complex128,
+    ops::ReshapeDoubleGradKernel);

 #ifdef PADDLE_WITH_CUDA
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double,
@@ -656,34 +656,38 @@ REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                 ops::ReshapeKernel, int, ops::ReshapeKernel,
                                 uint8_t, ops::ReshapeKernel, int64_t,
                                 ops::ReshapeKernel, plat::float16,
-                                ops::ReshapeKernel, bool, ops::ReshapeKernel);
-REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
-                                double, ops::ReshapeGradKernel, int,
-                                ops::ReshapeGradKernel, uint8_t,
-                                ops::ReshapeGradKernel, int64_t,
-                                ops::ReshapeGradKernel, plat::float16,
-                                ops::ReshapeGradKernel, bool,
-                                ops::ReshapeGradKernel);
-
-REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad_grad, float,
-                                ops::ReshapeDoubleGradKernel, double,
-                                ops::ReshapeDoubleGradKernel, int,
-                                ops::ReshapeDoubleGradKernel, uint8_t,
-                                ops::ReshapeDoubleGradKernel, int64_t,
-                                ops::ReshapeDoubleGradKernel, plat::float16,
-                                ops::ReshapeDoubleGradKernel, bool,
-                                ops::ReshapeDoubleGradKernel);
+                                ops::ReshapeKernel, bool, ops::ReshapeKernel,
+                                plat::complex64, ops::ReshapeKernel,
+                                plat::complex128, ops::ReshapeKernel);
+REGISTER_OP_CUDA_KERNEL_FUNCTOR(
+    reshape2_grad, float, ops::ReshapeGradKernel, double,
+    ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, uint8_t,
+    ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel, plat::float16,
+    ops::ReshapeGradKernel, bool, ops::ReshapeGradKernel, plat::complex64,
+    ops::ReshapeGradKernel, plat::complex128, ops::ReshapeGradKernel);
+
+REGISTER_OP_CUDA_KERNEL_FUNCTOR(
+    reshape2_grad_grad, float, ops::ReshapeDoubleGradKernel, double,
+    ops::ReshapeDoubleGradKernel, int, ops::ReshapeDoubleGradKernel, uint8_t,
+    ops::ReshapeDoubleGradKernel, int64_t, ops::ReshapeDoubleGradKernel,
+    plat::float16, ops::ReshapeDoubleGradKernel, bool,
+    ops::ReshapeDoubleGradKernel, plat::complex64, ops::ReshapeDoubleGradKernel,
+    plat::complex128, ops::ReshapeDoubleGradKernel);
 #endif

 #ifdef PADDLE_WITH_XPU
 REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int, ops::ReshapeKernel,
                                int64_t, ops::ReshapeKernel, plat::float16,
-                               ops::ReshapeKernel, bool, ops::ReshapeKernel);
+                               ops::ReshapeKernel, bool, ops::ReshapeKernel,
+                               plat::complex64, ops::ReshapeKernel,
+                               plat::complex128, ops::ReshapeKernel);
 REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
                                ops::ReshapeGradKernel, plat::float16,
                                ops::ReshapeGradKernel, bool,
+                               ops::ReshapeGradKernel, plat::complex64,
+                               ops::ReshapeGradKernel, plat::complex128,
                                ops::ReshapeGradKernel);
 #endif
diff --git a/paddle/fluid/operators/trace_op.cc b/paddle/fluid/operators/trace_op.cc
index 66766b4e1cd830f8dda40befa228294b976a4ff7..e90cf2054f72d8bb59c8fa13a3c3f6502ae14ba2 100644
--- a/paddle/fluid/operators/trace_op.cc
+++ b/paddle/fluid/operators/trace_op.cc
@@ -163,9 +163,17 @@ REGISTER_OP_CPU_KERNEL(
     trace, ops::TraceKernel<paddle::platform::CPUDeviceContext, int>,
     ops::TraceKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::TraceKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::TraceKernel<paddle::platform::CPUDeviceContext, double>);
+    ops::TraceKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::TraceKernel<paddle::platform::CPUDeviceContext,
+                     paddle::platform::complex64>,
+    ops::TraceKernel<paddle::platform::CPUDeviceContext,
+                     paddle::platform::complex128>);
 REGISTER_OP_CPU_KERNEL(
     trace_grad, ops::TraceGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::TraceGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::TraceGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::TraceGradKernel<paddle::platform::CPUDeviceContext, double>);
+    ops::TraceGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::TraceGradKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex64>,
+    ops::TraceGradKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex128>);
diff --git a/paddle/fluid/operators/trace_op.cu b/paddle/fluid/operators/trace_op.cu
index 452f2dd9d62bedb449979a11698e4eb0bb116ce9..ea328361ded75ade9228fffe4dee0b4c6f0fc3e6 100644
--- a/paddle/fluid/operators/trace_op.cu
+++ b/paddle/fluid/operators/trace_op.cu
@@ -60,11 +60,19 @@ REGISTER_OP_CUDA_KERNEL(
     ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
                          paddle::platform::float16>,
     ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, double>);
+    ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
+                         paddle::platform::complex64>,
+    ops::TraceCUDAKernel<paddle::platform::CUDADeviceContext,
+                         paddle::platform::complex128>);
 REGISTER_OP_CUDA_KERNEL(
     trace_grad, ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int>,
     ops::TraceGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
                          paddle::platform::float16>,
     ops::TraceGradKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::TraceGradKernel<paddle::platform::CUDADeviceContext, double>);
+    ops::TraceGradKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
+                         paddle::platform::complex64>,
+    ops::TraceGradKernel<paddle::platform::CUDADeviceContext,
+                         paddle::platform::complex128>);
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index a098327ab29af57d619dabe1c814eca6b97ee2cc..42f4a819baa225eaa36ea93fc2cb263a1b1a9889 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -321,11 +321,19 @@ REGISTER_OPERATOR(transpose_grad, ops::TransposeOpGrad);
 REGISTER_OP_CPU_KERNEL(
     transpose, ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>);
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex64>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex128>);
 REGISTER_OP_CPU_KERNEL(
     transpose_grad,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>);
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext,
+                             paddle::platform::complex64>,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext,
+                             paddle::platform::complex128>);

 REGISTER_OPERATOR(transpose2, ops::Transpose2Op, ops::Transpose2OpMaker,
                   ops::Transpose2GradMaker<paddle::framework::OpDesc>,
@@ -336,10 +344,18 @@ REGISTER_OP_CPU_KERNEL(
     transpose2, ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::TransposeKernel<paddle::platform::CPUDeviceContext, int64_t>);
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext, int64_t>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex64>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext,
+                         paddle::platform::complex128>);
 REGISTER_OP_CPU_KERNEL(
     transpose2_grad,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext,
+                             paddle::platform::complex64>,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext,
+                             paddle::platform::complex128>);
diff --git a/paddle/fluid/operators/transpose_op.cu b/paddle/fluid/operators/transpose_op.cu
index 0679668cf1b5aad6e0a439e33159669f9b50c49b..afeb22bd6fa2d4e1c4d222b01d65bff8bf05a74b 100644
--- a/paddle/fluid/operators/transpose_op.cu
+++ b/paddle/fluid/operators/transpose_op.cu
@@ -730,14 +730,21 @@ REGISTER_OP_CUDA_KERNEL(
     transpose,
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, float>,
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
+                            plat::float16>,
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
-                            plat::float16>);
+                            paddle::platform::complex64>,
+    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
+                            paddle::platform::complex128>);
 REGISTER_OP_CUDA_KERNEL(
     transpose_grad,
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>,
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, double>,
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
-                                plat::float16>);
+                                plat::float16>,
+    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
+                                paddle::platform::complex64>,
+    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
+                                paddle::platform::complex128>);
 REGISTER_OP_CUDA_KERNEL(
     transpose2,
@@ -745,8 +752,11 @@
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, double>,
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int>,
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext, int64_t>,
+    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
+                            plat::float16>,
     ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
-                            plat::float16>);
+                            paddle::platform::complex64>,
+    ops::TransposeGPUKernel<paddle::platform::CUDADeviceContext,
+                            paddle::platform::complex128>);
 REGISTER_OP_CUDA_KERNEL(
     transpose2_grad,
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, float>,
@@ -754,4 +764,8 @@
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int>,
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
-                                plat::float16>);
+                                plat::float16>,
+    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
+                                paddle::platform::complex64>,
+    ops::TransposeGradGPUKernel<paddle::platform::CUDADeviceContext,
+                                paddle::platform::complex128>);
diff --git a/paddle/fluid/platform/complex64.h b/paddle/fluid/platform/complex64.h
index d4ab7f3fda4c4c309621b0b6955a556e677c870a..7da11cfe5ed761b257ea70bc5f1f99063b016666 100644
--- a/paddle/fluid/platform/complex64.h
+++ b/paddle/fluid/platform/complex64.h
@@ -124,6 +124,7 @@ struct PADDLE_ALIGN(8) complex64 {

   HOSTDEVICE inline complex64& operator=(int32_t val) {
     real = static_cast<float>(val);
+    imag = 0;
     return *this;
   }

diff --git a/python/paddle/fluid/tests/unittests/test_complex_kron.py b/python/paddle/fluid/tests/unittests/test_complex_kron.py
index 863d61e6027ea2cfec27a7dff86d8da735becada..0edcb2be19a4faf05eb995f7c59c99e5d1b6f0a3 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_kron.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_kron.py
@@ -27,42 +27,68 @@ class ComplexKronTestCase(unittest.TestCase):
     def setUp(self):
         self.ref_result = np.kron(self.x, self.y)
+        self._places = [paddle.CPUPlace()]
+        if fluid.is_compiled_with_cuda():
+            self._places.append(paddle.CUDAPlace(0))

     def runTest(self):
-        place = fluid.CPUPlace()
-        self.test_identity(place)
-
-        if fluid.is_compiled_with_cuda():
-            place = fluid.CUDAPlace(0)
-            self.test_identity(place)
+        for place in self._places:
+            self.test_complex_api(place)
+            self.test_basic_api(place)

-    def test_identity(self, place):
+    def test_complex_api(self, place):
         with dg.guard(place):
             x_var = dg.to_variable(self.x)
             y_var = dg.to_variable(self.y)
             out_var = paddle.complex.kron(x_var, y_var)
-            np.testing.assert_allclose(out_var.numpy(), self.ref_result)
+            self.assertTrue(np.allclose(out_var.numpy(), self.ref_result))
+
+    def test_basic_api(self, place):
+        with dg.guard(place):
+            x_var = paddle.Tensor(
+                value=self.x,
+                place=place,
+                persistable=False,
+                zero_copy=None,
+                stop_gradient=True)
+
+            y_var = paddle.Tensor(
+                value=self.y,
+                place=place,
+                persistable=False,
+                zero_copy=None,
+                stop_gradient=True)
+
+            out_var = tensor.math.kron(x_var, y_var)
+            self.assertTrue(np.allclose(out_var.numpy(), self.ref_result))


 def load_tests(loader, standard_tests, pattern):
     suite = unittest.TestSuite()
-    suite.addTest(
-        ComplexKronTestCase(
-            x=np.random.randn(2, 2) + 1j * np.random.randn(2, 2),
-            y=np.random.randn(3, 3) + 1j * np.random.randn(3, 3)))
-    suite.addTest(
-        ComplexKronTestCase(
-            x=np.random.randn(2, 2),
-            y=np.random.randn(3, 3) + 1j * np.random.randn(3, 3)))
-    suite.addTest(
-        ComplexKronTestCase(
-            x=np.random.randn(2, 2) + 1j * np.random.randn(2, 2),
-            y=np.random.randn(3, 3)))
+    for dtype in ["float32", "float64"]:
+        suite.addTest(
+            ComplexKronTestCase(
+                x=np.random.randn(2, 2).astype(dtype) + 1j * np.random.randn(
+                    2, 2).astype(dtype),
+                y=np.random.randn(3, 3).astype(dtype) + 1j * np.random.randn(
+                    3, 3).astype(dtype)))
+        suite.addTest(
+            ComplexKronTestCase(
+                x=np.random.randn(2, 2).astype(dtype),
+                y=np.random.randn(3, 3).astype(dtype) + 1j * np.random.randn(
+                    3, 3).astype(dtype)))
+        suite.addTest(
+            ComplexKronTestCase(
+                x=np.random.randn(2, 2).astype(dtype) + 1j * np.random.randn(
+                    2, 2).astype(dtype),
+                y=np.random.randn(3, 3).astype(dtype)))
+
+        suite.addTest(
+            ComplexKronTestCase(
+                x=np.random.randn(2, 2).astype(dtype) + 1j * np.random.randn(
+                    2, 2).astype(dtype),
+                y=np.random.randn(2, 2, 3).astype(dtype)))
-    suite.addTest(
-        ComplexKronTestCase(
-            x=np.random.randn(2, 2) + 1j * np.random.randn(2, 2),
-            y=np.random.randn(2, 2, 3)))
     return suite
diff --git a/python/paddle/fluid/tests/unittests/test_complex_reshape.py b/python/paddle/fluid/tests/unittests/test_complex_reshape.py
index 6d124d8da2b2bed129f918a4be1c51a5d8f28bb6..2d0413547974c62dae24b1e9f5cb9204993cea63 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_reshape.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_reshape.py
@@ -13,6 +13,7 @@
 # limitations under the License.

 import paddle.fluid as fluid
+import paddle
 from paddle import complex as cpx
 import paddle.fluid.dygraph as dg
 import numpy as np
@@ -20,31 +21,72 @@ import unittest


 class TestComplexReshape(unittest.TestCase):
+    def setUp(self):
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
+        if fluid.core.is_compiled_with_cuda():
+            self._places.append(paddle.CUDAPlace(0))
+
     def test_case1(self):
-        x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
-        shape = (2, -1)
+        for dtype in self._dtypes:
+            x_np = np.random.randn(
+                2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
+                                                              4).astype(dtype)
+            shape = (2, -1)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = dg.to_variable(x_np)
+                    y_var = cpx.reshape(x_var, shape)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape), y_np)
+
+    def test_case2(self):
+        for dtype in self._dtypes:
+            x_np = np.random.randn(
+                2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
+                                                              4).astype(dtype)
+            shape = (0, -1)
+            shape_ = (2, 12)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = dg.to_variable(x_np)
+                    y_var = cpx.reshape(x_var, shape, inplace=True)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)

-        place = fluid.CPUPlace()
-        with dg.guard(place):
-            x_var = dg.to_variable(x_np)
-            y_var = cpx.reshape(x_var, shape)
-            y_np = y_var.numpy()
+    def test_case3(self):
+        for dtype in self._dtypes:
+            x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
+            shape = (2, -1)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = paddle.Tensor(
+                        value=x_np,
+                        place=fluid.framework._current_expected_place(),
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    y_var = fluid.layers.reshape(x_var, shape)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape), y_np)

-        np.testing.assert_allclose(np.reshape(x_np, shape), y_np)
+    def test_case4(self):
+        for dtype in self._dtypes:
+            x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
+            shape = (0, -1)
+            shape_ = (2, 12)

-    def test_case2(self):
-        x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
-        shape = (0, -1)
-        shape_ = (2, 12)
-
-        place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        with dg.guard(place):
-            x_var = dg.to_variable(x_np)
-            y_var = cpx.reshape(x_var, shape, inplace=True)
-            y_np = y_var.numpy()
-
-        np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)
+            for place in self._places:
+                with dg.guard(place):
+                    x_var = paddle.Tensor(
+                        value=x_np,
+                        place=fluid.framework._current_expected_place(),
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    y_var = fluid.layers.reshape(x_var, shape)
+                    y_np = y_var.numpy()
+                    np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py
index f8637b448880a451da4a6323f70feae4eab9ef7f..f2a9049c02a752d12ab9d4d7c6e9d68d0d9e6764 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_sum_layer.py
@@ -14,28 +14,47 @@

 import unittest
 import numpy as np
+import paddle
 from numpy.random import random as rand
 from paddle import complex as cpx
+from paddle import tensor
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg


 class TestComplexSumLayer(unittest.TestCase):
     def setUp(self):
-        self._dtype = "float64"
-        self._places = [fluid.CPUPlace()]
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
-            self._places.append(fluid.CUDAPlace(0))
+            self._places.append(paddle.CUDAPlace(0))

     def test_complex_x(self):
-        input = rand([2, 10, 10]).astype(self._dtype) + 1j * rand(
-            [2, 10, 10]).astype(self._dtype)
-        for place in self._places:
-            with dg.guard(place):
-                var_x = dg.to_variable(input)
-                result = cpx.sum(var_x, dim=[1, 2]).numpy()
-                target = np.sum(input, axis=(1, 2))
-                self.assertTrue(np.allclose(result, target))
+        for dtype in self._dtypes:
+            input = rand([2, 10, 10]).astype(dtype) + 1j * rand(
+                [2, 10, 10]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = dg.to_variable(input)
+                    result = cpx.sum(var_x, dim=[1, 2]).numpy()
+                    target = np.sum(input, axis=(1, 2))
+                    self.assertTrue(np.allclose(result, target))
+
+    def test_complex_basic_api(self):
+        for dtype in self._dtypes:
+            input = rand([2, 10, 10]).astype(dtype) + 1j * rand(
+                [2, 10, 10]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = paddle.Tensor(
+                        value=input,
+                        place=place,
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    result = tensor.sum(var_x, axis=[1, 2]).numpy()
+                    target = np.sum(input, axis=(1, 2))
+                    self.assertTrue(np.allclose(result, target))


 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py
index acc1e41b246309b312426b0a5b0bb7670c2bdfb7..9912b78251399e3c48e85744c817218dff5e3299 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_trace_layer.py
@@ -14,28 +14,49 @@

 import unittest
 import numpy as np
+import paddle
 from numpy.random import random as rand
 from paddle import complex as cpx
+from paddle import tensor
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg


 class TestComplexTraceLayer(unittest.TestCase):
     def setUp(self):
-        self._dtype = "float64"
+        self._dtypes = ["float32", "float64"]
         self._places = [fluid.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
             self._places.append(fluid.CUDAPlace(0))

-    def test_complex_x(self):
-        input = rand([2, 20, 2, 3]).astype(self._dtype) + 1j * rand(
-            [2, 20, 2, 3]).astype(self._dtype)
-        for place in self._places:
-            with dg.guard(place):
-                var_x = dg.to_variable(input)
-                result = cpx.trace(var_x, offset=1, axis1=0, axis2=2).numpy()
-                target = np.trace(input, offset=1, axis1=0, axis2=2)
-                self.assertTrue(np.allclose(result, target))
+    def test_complex_api(self):
+        for dtype in self._dtypes:
+            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
+                [2, 20, 2, 3]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = dg.to_variable(input)
+                    result = cpx.trace(
+                        var_x, offset=1, axis1=0, axis2=2).numpy()
+                    target = np.trace(input, offset=1, axis1=0, axis2=2)
+                    self.assertTrue(np.allclose(result, target))
+
+    def test_basic_api(self):
+        for dtype in self._dtypes:
+            input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
+                [2, 20, 2, 3]).astype(dtype)
+            for place in self._places:
+                with dg.guard(place):
+                    var_x = paddle.Tensor(
+                        value=input,
+                        place=place,
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    result = tensor.trace(
+                        var_x, offset=1, axis1=0, axis2=2).numpy()
+                    target = np.trace(input, offset=1, axis1=0, axis2=2)
+                    self.assertTrue(np.allclose(result, target))


 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_complex_transpose.py b/python/paddle/fluid/tests/unittests/test_complex_transpose.py
index e31cb9e7051b47cd5d418cb83b9e074adae88cdb..a8fa2524d44305ee0c2b9507f316df3a9be7e60d 100644
--- a/python/paddle/fluid/tests/unittests/test_complex_transpose.py
+++ b/python/paddle/fluid/tests/unittests/test_complex_transpose.py
@@ -21,21 +21,41 @@ import paddle.fluid.dygraph as dg

 class TestComplexTransposeLayer(unittest.TestCase):
     def setUp(self):
-        self._places = [fluid.CPUPlace()]
+        self._dtypes = ["float32", "float64"]
+        self._places = [paddle.CPUPlace()]
         if fluid.core.is_compiled_with_cuda():
-            self._places.append(fluid.CUDAPlace(0))
+            self._places.append(paddle.CUDAPlace(0))

-    def test_identity(self):
-        data = np.random.random(
-            (2, 3, 4, 5)).astype("float32") + 1J * np.random.random(
-                (2, 3, 4, 5)).astype("float32")
-        perm = [3, 2, 0, 1]
-        np_trans = np.transpose(data, perm)
-        for place in self._places:
-            with dg.guard(place):
-                var = dg.to_variable(data)
-                trans = paddle.complex.transpose(var, perm=perm)
-                self.assertTrue(np.allclose(trans.numpy(), np_trans))
+    def test_transpose_by_complex_api(self):
+        for dtype in self._dtypes:
+            data = np.random.random(
+                (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random(
+                    (2, 3, 4, 5)).astype(dtype)
+            perm = [3, 2, 0, 1]
+            np_trans = np.transpose(data, perm)
+            for place in self._places:
+                with dg.guard(place):
+                    var = dg.to_variable(data)
+                    trans = paddle.complex.transpose(var, perm=perm)
+                    self.assertTrue(np.allclose(trans.numpy(), np_trans))
+
+    def test_transpose_by_basic_api(self):
+        for dtype in self._dtypes:
+            data = np.random.random(
+                (2, 3, 4, 5)).astype(dtype) + 1J * np.random.random(
+                    (2, 3, 4, 5)).astype(dtype)
+            perm = [3, 2, 0, 1]
+            np_trans = np.transpose(data, perm)
+            for place in self._places:
+                with dg.guard(place):
+                    var = paddle.Tensor(
+                        value=data,
+                        place=place,
+                        persistable=False,
+                        zero_copy=None,
+                        stop_gradient=True)
+                    trans = paddle.transpose(var, perm=perm)
+                    self.assertTrue(np.allclose(trans.numpy(), np_trans))


 if __name__ == '__main__':
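Taken together, these registrations let the plain (non-complex-namespace) APIs run directly on complex64/complex128 tensors. A short end-to-end sketch in the same dygraph style as the tests above, using only calls that appear in those tests (illustrative only, not part of the patch):

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from paddle import tensor

place = paddle.CPUPlace()
with dg.guard(place):
    # complex64 input exercises the newly registered kernels
    data = np.random.randn(2, 3, 4).astype("float32") + \
        1j * np.random.randn(2, 3, 4).astype("float32")
    var = paddle.Tensor(value=data, place=place, persistable=False,
                        zero_copy=None, stop_gradient=True)
    reshaped = fluid.layers.reshape(var, (2, -1))        # reshape2 complex kernel
    transposed = paddle.transpose(var, perm=[2, 1, 0])   # transpose2 complex kernel
    summed = tensor.sum(var, axis=[1, 2])                # reduce_sum complex kernel
    traced = tensor.trace(var, offset=0, axis1=1, axis2=2)  # trace complex kernel
    np.testing.assert_allclose(summed.numpy(), data.sum(axis=(1, 2)), rtol=1e-5)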