Unverified Commit 9fec4bce authored by chentianyu03, committed by GitHub

remove complexvariable (#29390) (#29417)

* rm complexvariable

* modify test_var_base unittest

* remove duplicated codes
Parent f223c786
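Editor's note: this commit deletes the experimental `ComplexVariable`/`ComplexTensor` wrapper and the `paddle.incubate.complex` namespace (aliased as `paddle.complex`); complex data now lives directly in an ordinary `Tensor` with a `complex64`/`complex128` dtype. A minimal before/after sketch of the migration this implies (the pre-commit calls are shown as comments; behavior as implied by the hunks below):

```python
import numpy as np
import paddle

paddle.disable_static()  # ensure imperative (dygraph) mode

x_np = np.array([1.0 + 2.0j, 0.2], dtype=np.complex64)

# Before this commit:
#   x = paddle.to_tensor(x_np)                 # returned a paddle.ComplexTensor
#   y = paddle.complex.elementwise_add(x, x)   # ops lived in a separate namespace

# After this commit, the same data becomes one Tensor with a complex dtype,
# and the ordinary operators apply:
x = paddle.to_tensor(x_np)  # dtype: complex64
y = x + x
print(y.numpy())            # ~ [2.+4.j  0.4+0.j]
```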
@@ -32,7 +32,6 @@ monkey_patch_variable()
monkey_patch_math_varbase()
import paddle.framework
from .framework import VarBase as Tensor
from .framework import ComplexVariable as ComplexTensor
import paddle.compat
import paddle.distributed
import paddle.sysconfig
@@ -43,7 +42,6 @@ import paddle.distributed.fleet
import paddle.optimizer
import paddle.metric
import paddle.device
import paddle.incubate.complex as complex
import paddle.regularizer
# TODO: define alias in tensor and framework directory
......
@@ -593,12 +593,12 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
r"""
:api_attr: imperative
The API will create a ``Variable`` or ``ComplexVariable`` object from
tuple, list, numpy\.ndarray, Variable or ComplexVariable object.
The API will create a ``Variable`` object from
tuple, list, numpy\.ndarray or Variable object.
Parameters:
value(tuple|list|ndarray|Variable|Tensor|ComplexVariable): Initial data.
Can be a list, tuple, NumPy ndarray, Variable, Tensor, ComplexVariable.
value(tuple|list|ndarray|Variable|Tensor): Initial data.
Can be a list, tuple, NumPy ndarray, Variable, Tensor.
The shape can be multi-dimensional. The data type is one of
numpy\.{float16, float32, float64, int16, int32, int64,
uint8, uint16, complex64, complex128}.
@@ -613,10 +613,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
'int32' , 'int64' , 'uint8' . Default: None.
Returns:
Variable or ComplexVariable: If ``value`` is a tuple/list/numpy\.ndarray object,
Variable : If ``value`` is a tuple/list/numpy\.ndarray object,
return ``Tensor`` created from the corresponding numpy\.ndarray object, which has
same data type and shape with ``value``. If ``value`` is a Variable or ComplexVariable
object, just return ``value``.
same data type and shape with ``value``.
Examples:
@@ -647,13 +646,12 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
"""
support_type = (list, tuple, np.ndarray, core.VarBase, framework.Variable,
framework.ComplexVariable, core.Tensor, core.LoDTensor)
core.Tensor, core.LoDTensor)
if not isinstance(value, support_type):
raise TypeError(
"The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
% (support_type, type(value)))
if isinstance(value, (core.VarBase, framework.Variable,
framework.ComplexVariable)):
if isinstance(value, (core.VarBase, framework.Variable)):
return value
elif isinstance(value, (core.Tensor, core.LoDTensor)):
return core.VarBase(value)
@@ -682,23 +680,6 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
if value.dtype != dtype:
value = value.astype(dtype)
if np.iscomplexobj(value):
if not name:
name = framework.unique_name.generate('_generated_var')
real_var = core.VarBase(
value=value.real,
place=framework._current_expected_place(),
persistable=False,
zero_copy=zero_copy,
name=name + ".real")
imag_var = core.VarBase(
value=value.imag,
place=framework._current_expected_place(),
persistable=False,
zero_copy=zero_copy,
name=name + ".imag")
return framework.ComplexVariable(real_var, imag_var)
else:
py_var = core.VarBase(
value=value,
place=framework._current_expected_place(),
......
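With the `np.iscomplexobj` branch removed above, `to_variable` now feeds complex ndarrays straight into `core.VarBase` instead of splitting them into paired `.real`/`.imag` variables. A small sketch of the resulting behavior, mirroring the updated tests later in this diff:

```python
import numpy as np
import paddle.fluid.dygraph as dg

x_np = np.random.randn(3).astype("float32") + 1j * np.random.randn(3).astype("float32")

with dg.guard():
    x = dg.to_variable(x_np)  # a single complex64 VarBase, not a ComplexVariable
    np.testing.assert_allclose(x.numpy(), x_np)
```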
@@ -15,7 +15,7 @@
from __future__ import print_function
from .. import core
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator, ComplexVariable
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad
@@ -170,13 +170,6 @@ def monkey_patch_math_varbase():
reverse=False,
scalar_method=None):
def __impl__(self, other_var):
# 0. check tensor and ComplexVariable operator
if isinstance(other_var, ComplexVariable):
# need import paddle in closure
import paddle
math_op = getattr(paddle.incubate.complex.tensor, op_type)
return math_op(self, other_var)
# 1. scalar exists cases
# we need combine the tensor.dtype and scalar.dtype, cast correct object
if isinstance(other_var, float):
......
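Dropping the `ComplexVariable` dispatch in `__impl__` means binary operators on complex tensors resolve through the same monkey-patched path as real ones. The sketch below assumes the elementwise kernels accept complex dtypes, which the test changes further down exercise:

```python
import numpy as np
import paddle.fluid.dygraph as dg

a = np.array([1 + 2j, 3 - 1j], dtype=np.complex64)
b = np.array([2 + 0j, 1 + 1j], dtype=np.complex64)

with dg.guard():
    x, y = dg.to_variable(a), dg.to_variable(b)
    # +, -, *, / now all go through the standard VarBase operator path
    np.testing.assert_allclose((x * y).numpy(), a * b, rtol=1e-6)
```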
@@ -51,7 +51,6 @@ __all__ = [
'is_compiled_with_cuda',
'is_compiled_with_xpu',
'Variable',
'ComplexVariable',
'load_op_library',
'require_version',
'device_guard',
@@ -1783,97 +1782,6 @@ def get_all_op_protos():
return ret_values
class ComplexVariable(object):
"""
The ComplexTensor defined on the complex number domain. It contains two common
real number Tensor as its members, :attr:`real` and :attr:`imag`
holding the real part and imaginary part of complex numbers respectively.
**Notes**:
**The constructor of ComplexTensor should not be invoked directly.**
Args:
real (Tensor): The Tensor holding real-part data.
imag (Tensor): The Tensor holding imaginary-part data.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1.0+2.0j, 0.2])
print(x.name, x.dtype, x.shape)
# ({'real': 'generated_tensor_0.real', 'imag': 'generated_tensor_0.imag'}, complex64, [2])
print(x)
# ComplexTensor[real](shape=[2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [ 1., 0.20000000])
# ComplexTensor[imag](shape=[2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [2., 0.])
print(type(x))
# <class 'paddle.ComplexTensor'>
"""
def __new__(cls, *arg, **kwargs):
cls.__module__ = "paddle"
cls.__name__ = "ComplexTensor"
return super(ComplexVariable, cls).__new__(cls)
def __init__(self, real, imag):
assert real.shape == imag.shape, "The real part and imaginary part " \
"of a ComplexVariable should have the same shape!"
assert real.dtype == imag.dtype, "The real part and imaginary part " \
"of a ComplexVariable should have the same data type!"
self.real = real
self.imag = imag
if self.real.dtype in [
core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32
]:
self._dtype = "complex64"
else:
self._dtype = "complex128"
self._shape = self.real.shape
def __getitem__(self, idx):
return ComplexVariable(self.real[idx], self.imag[idx])
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
@property
def name(self):
return {"real": self.real.name, "imag": self.imag.name}
@name.setter
def name(self, name):
# rename
if isinstance(name, str):
self.real.name = name + ".real"
self.imag.name = name + ".imag"
elif (isinstance(name, tuple) or isinstance(name,
list)) and len(name) == 2:
self.real.name, self.imag.name = name[0], name[1]
else:
raise ValueError(
"An invalid name assigned to the ComplexVariable, "
"which must be a string, or a tuple or a list with length 2!")
def numpy(self):
return self.real.numpy() + 1j * self.imag.numpy()
def __str__(self):
from paddle.tensor.to_string import to_string
return "ComplexTensor containing:\n{real}\n{imag}".format(
real=to_string(self.real, "[real part]Tensor"),
imag=to_string(self.imag, "[imag part]Tensor"))
__repr__ = __str__
class OpProtoHolder(object):
"""
A global variable to hold all OpProtos from C++ as a map
......
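Everything the deleted class provided (`.real`/`.imag` members, paired names, `numpy()` recombination, `__getitem__`) is subsumed by the plain `VarBase`. A hedged sketch of the replacement surface:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.array([[1 + 1j, 2], [3 + 2j, 4]], dtype=np.complex64))
print(x.dtype)       # VarType.COMPLEX64 -- one dtype, no real/imag name pair
print(x[0].numpy())  # slicing yields another complex Tensor: [1.+1.j 2.+0.j]
print(x.numpy())     # round-trips to a complex ndarray directly
```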
@@ -19,14 +19,6 @@ from numpy.random import random as rand
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
from paddle import complex as cpx
layers = {
"add": cpx.elementwise_add,
"sub": cpx.elementwise_sub,
"mul": cpx.elementwise_mul,
"div": cpx.elementwise_div,
}
paddle_apis = {
"add": paddle.add,
@@ -43,26 +35,10 @@ class TestComplexElementwiseLayers(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def calc(self, x, y, op, place):
with dg.guard(place):
var_x = dg.to_variable(x)
var_y = dg.to_variable(y)
return layers[op](var_x, var_y).numpy()
def paddle_calc(self, x, y, op, place):
with dg.guard(place):
x_t = paddle.Tensor(
value=x,
place=place,
persistable=False,
zero_copy=False,
stop_gradient=True)
y_t = paddle.Tensor(
value=y,
place=place,
persistable=False,
zero_copy=False,
stop_gradient=True)
x_t = dg.to_variable(x)
y_t = dg.to_variable(y)
return paddle_apis[op](x_t, y_t).numpy()
def assert_check(self, pd_result, np_result, place):
@@ -72,13 +48,6 @@ class TestComplexElementwiseLayers(unittest.TestCase):
format(place, pd_result[~np.isclose(pd_result, np_result)],
np_result[~np.isclose(pd_result, np_result)]))
def compare_by_complex_api(self, x, y):
for place in self._places:
self.assert_check(self.calc(x, y, "add", place), x + y, place)
self.assert_check(self.calc(x, y, "sub", place), x - y, place)
self.assert_check(self.calc(x, y, "mul", place), x * y, place)
self.assert_check(self.calc(x, y, "div", place), x / y, place)
def compare_by_basic_api(self, x, y):
for place in self._places:
self.assert_check(
@@ -90,7 +59,7 @@ class TestComplexElementwiseLayers(unittest.TestCase):
self.assert_check(
self.paddle_calc(x, y, "div", place), x / y, place)
def compare_op_by_complex_api(self, x, y):
def compare_op_by_basic_api(self, x, y):
for place in self._places:
with dg.guard(place):
var_x = dg.to_variable(x)
@@ -100,26 +69,6 @@ class TestComplexElementwiseLayers(unittest.TestCase):
self.assert_check((var_x * var_y).numpy(), x * y, place)
self.assert_check((var_x / var_y).numpy(), x / y, place)
def compare_op_by_basic_api(self, x, y):
for place in self._places:
with dg.guard(place):
x_t = paddle.Tensor(
value=x,
place=place,
persistable=False,
zero_copy=False,
stop_gradient=True)
y_t = paddle.Tensor(
value=y,
place=place,
persistable=False,
zero_copy=False,
stop_gradient=True)
self.assert_check((x_t + y_t).numpy(), x + y, place)
self.assert_check((x_t - y_t).numpy(), x - y, place)
self.assert_check((x_t * y_t).numpy(), x * y, place)
self.assert_check((x_t / y_t).numpy(), x / y, place)
def test_complex_xy(self):
for dtype in self._dtypes:
x = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand(
@@ -127,10 +76,7 @@
y = rand([2, 3, 4, 5]).astype(dtype) + 1j * rand(
[2, 3, 4, 5]).astype(dtype)
self.compare_by_complex_api(x, y)
self.compare_op_by_complex_api(x, y)
self.compare_op_by_complex_api(x, y)
self.compare_by_basic_api(x, y)
self.compare_op_by_basic_api(x, y)
def test_complex_x_real_y(self):
@@ -139,9 +85,6 @@ class TestComplexElementwiseLayers(unittest.TestCase):
[2, 3, 4, 5]).astype(dtype)
y = rand([4, 5]).astype(dtype)
self.compare_by_complex_api(x, y)
self.compare_op_by_complex_api(x, y)
# promote types cases
self.compare_by_basic_api(x, y)
self.compare_op_by_basic_api(x, y)
@@ -151,9 +94,6 @@ class TestComplexElementwiseLayers(unittest.TestCase):
x = rand([2, 3, 4, 5]).astype(dtype)
y = rand([5]).astype(dtype) + 1j * rand([5]).astype(dtype)
self.compare_by_complex_api(x, y)
self.compare_op_by_complex_api(x, y)
# promote types cases
self.compare_by_basic_api(x, y)
self.compare_op_by_basic_api(x, y)
......
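The surviving `compare_*_by_basic_api` paths also keep the mixed real/complex cases, relying on dtype promotion in the elementwise kernels (the "promote types cases" comments above). A hedged sketch of that promotion:

```python
import numpy as np
import paddle.fluid.dygraph as dg

x = np.random.rand(4, 5).astype("float32") + 1j * np.random.rand(4, 5).astype("float32")
y = np.random.rand(5).astype("float32")  # real operand

with dg.guard():
    out = (dg.to_variable(x) / dg.to_variable(y)).numpy()
    np.testing.assert_allclose(out, x / y, rtol=1e-5)  # float promoted to complex
```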
@@ -36,18 +36,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
name='')
x_var_slice = x_var[0]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
def test_case2(self):
x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
x_np_slice = x_np[0][1]
@@ -59,18 +47,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
name='')
x_var_slice = x_var[0][1]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
def test_case3(self):
x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
x_np_slice = x_np[0][1][2]
@@ -82,18 +58,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
name='')
x_var_slice = x_var[0][1][2]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
def test_case4(self):
x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
x_np_slice = x_np[0][1][0:3]
@@ -105,18 +69,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
name='')
x_var_slice = x_var[0][1][0:3]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
def test_case5(self):
x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
x_np_slice = x_np[0][1][0:4:2]
@@ -128,16 +80,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
name='')
x_var_slice = x_var[0][1][0:4:2]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
def test_case6(self):
@@ -150,17 +92,6 @@ class TestComplexGetitemLayer(unittest.TestCase):
x_var_slice = x_var[0][1:3][0:4:2]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
name='')
x_var_slice = x_var[0][1:3][0:4:2]
np.testing.assert_allclose(x_var_slice.numpy(), x_np_slice)
if __name__ == '__main__':
......
@@ -33,33 +33,13 @@ class ComplexKronTestCase(unittest.TestCase):
def runTest(self):
for place in self._places:
self.test_complex_api(place)
self.test_basic_api(place)
self.test_kron_api(place)
def test_complex_api(self, place):
def test_kron_api(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.x)
y_var = dg.to_variable(self.y)
out_var = paddle.complex.kron(x_var, y_var)
self.assertTrue(np.allclose(out_var.numpy(), self.ref_result))
def test_basic_api(self, place):
with dg.guard(place):
x_var = paddle.Tensor(
value=self.x,
place=place,
persistable=False,
zero_copy=None,
stop_gradient=True)
y_var = paddle.Tensor(
value=self.y,
place=place,
persistable=False,
zero_copy=None,
stop_gradient=True)
out_var = tensor.math.kron(x_var, y_var)
out_var = paddle.kron(x_var, y_var)
self.assertTrue(np.allclose(out_var.numpy(), self.ref_result))
......
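The `tensor.math.kron` call on hand-built `paddle.Tensor` objects is replaced by the public `paddle.kron`, which handles complex inputs directly; `self.ref_result` is presumably `np.kron(self.x, self.y)`. A standalone hedged version:

```python
import numpy as np
import paddle
import paddle.fluid.dygraph as dg

x = np.random.randn(2, 2) + 1j * np.random.randn(2, 2)
y = np.random.randn(3, 3) + 1j * np.random.randn(3, 3)

with dg.guard():
    out = paddle.kron(dg.to_variable(x), dg.to_variable(y)).numpy()
    assert np.allclose(out, np.kron(x, y))
```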
@@ -26,34 +26,11 @@ class TestComplexMatMulLayer(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
self._places.append(fluid.CUDAPlace(0))
def compare_by_complex_api(self, x, y, np_result):
def compare_by_basic_api(self, x, y, np_result):
for place in self._places:
with dg.guard(place):
x_var = dg.to_variable(x)
y_var = dg.to_variable(y)
result = paddle.complex.matmul(x_var, y_var)
pd_result = result.numpy()
self.assertTrue(
np.allclose(pd_result, np_result),
"\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n".
format(place, pd_result[~np.isclose(pd_result, np_result)],
np_result[~np.isclose(pd_result, np_result)]))
def compare_by_basic_api(self, x, y, np_result):
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x,
place=place,
persistable=False,
zero_copy=None,
name='')
y_var = fluid.core.VarBase(
value=y,
place=place,
persistable=False,
zero_copy=None,
name='')
result = paddle.matmul(x_var, y_var)
pd_result = result.numpy()
self.assertTrue(
@@ -62,7 +39,7 @@ class TestComplexMatMulLayer(unittest.TestCase):
format(place, pd_result[~np.isclose(pd_result, np_result)],
np_result[~np.isclose(pd_result, np_result)]))
def compare_op_by_complex_api(self, x, y, np_result):
def compare_op_by_basic_api(self, x, y, np_result):
for place in self._places:
with dg.guard(place):
x_var = dg.to_variable(x)
@@ -75,29 +52,6 @@ class TestComplexMatMulLayer(unittest.TestCase):
format(place, pd_result[~np.isclose(pd_result, np_result)],
np_result[~np.isclose(pd_result, np_result)]))
def compare_op_by_basic_api(self, x, y, np_result):
for place in self._places:
with dg.guard(place):
x_var = fluid.core.VarBase(
value=x,
place=place,
persistable=False,
zero_copy=None,
name='')
y_var = fluid.core.VarBase(
value=y,
place=place,
persistable=False,
zero_copy=None,
name='')
result = x_var.matmul(y_var)
pd_result = result.numpy()
self.assertTrue(
np.allclose(pd_result, np_result),
"\nplace: {}\npaddle diff result:\n {}\nnumpy diff result:\n {}\n".
format(place, pd_result[~np.isclose(pd_result, np_result)],
np_result[~np.isclose(pd_result, np_result)]))
def test_complex_xy(self):
for dtype in self._dtypes:
x = np.random.random(
@@ -109,9 +63,6 @@ class TestComplexMatMulLayer(unittest.TestCase):
np_result = np.matmul(x, y)
self.compare_by_complex_api(x, y, np_result)
self.compare_op_by_complex_api(x, y, np_result)
self.compare_by_basic_api(x, y, np_result)
self.compare_op_by_basic_api(x, y, np_result)
@@ -124,9 +75,6 @@ class TestComplexMatMulLayer(unittest.TestCase):
np_result = np.matmul(x, y)
self.compare_by_complex_api(x, y, np_result)
self.compare_op_by_complex_api(x, y, np_result)
# float -> complex type promotion
self.compare_by_basic_api(x, y, np_result)
self.compare_op_by_basic_api(x, y, np_result)
@@ -140,8 +88,6 @@ class TestComplexMatMulLayer(unittest.TestCase):
np_result = np.matmul(x, y)
self.compare_by_complex_api(x, y, np_result)
# float -> complex type promotion
self.compare_by_basic_api(x, y, np_result)
self.compare_op_by_basic_api(x, y, np_result)
......
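Both spellings survive the cleanup: the functional `paddle.matmul(x, y)` and the method-style `x.matmul(y)` kept by the tests are equivalent on complex operands. Hedged sketch:

```python
import numpy as np
import paddle
import paddle.fluid.dygraph as dg

x = np.random.rand(2, 3).astype("float32") + 1j * np.random.rand(2, 3).astype("float32")
y = np.random.rand(3, 4).astype("float32") + 1j * np.random.rand(3, 4).astype("float32")

with dg.guard():
    xv, yv = dg.to_variable(x), dg.to_variable(y)
    ref = np.matmul(x, y)
    np.testing.assert_allclose(paddle.matmul(xv, yv).numpy(), ref, rtol=1e-5)
    np.testing.assert_allclose(xv.matmul(yv).numpy(), ref, rtol=1e-5)
```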
@@ -14,7 +14,6 @@
import paddle.fluid as fluid
import paddle
from paddle import complex as cpx
import paddle.fluid.dygraph as dg
import numpy as np
import unittest
@@ -27,7 +26,7 @@ class TestComplexReshape(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def test_case1(self):
def test_shape_norm_dims(self):
for dtype in self._dtypes:
x_np = np.random.randn(
2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
@@ -36,11 +35,11 @@ class TestComplexReshape(unittest.TestCase):
for place in self._places:
with dg.guard(place):
x_var = dg.to_variable(x_np)
y_var = cpx.reshape(x_var, shape)
y_var = paddle.reshape(x_var, shape)
y_np = y_var.numpy()
np.testing.assert_allclose(np.reshape(x_np, shape), y_np)
self.assertTrue(np.allclose(np.reshape(x_np, shape), y_np))
def test_case2(self):
def test_shape_omit_dims(self):
for dtype in self._dtypes:
x_np = np.random.randn(
2, 3, 4).astype(dtype) + 1j * np.random.randn(2, 3,
@@ -50,43 +49,9 @@ class TestComplexReshape(unittest.TestCase):
for place in self._places:
with dg.guard(place):
x_var = dg.to_variable(x_np)
y_var = cpx.reshape(x_var, shape, inplace=True)
y_var = paddle.reshape(x_var, shape)
y_np = y_var.numpy()
np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)
def test_case3(self):
for dtype in self._dtypes:
x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
shape = (2, -1)
for place in self._places:
with dg.guard(place):
x_var = paddle.Tensor(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
stop_gradient=True)
y_var = fluid.layers.reshape(x_var, shape)
y_np = y_var.numpy()
np.testing.assert_allclose(np.reshape(x_np, shape), y_np)
def test_case4(self):
for dtype in self._dtypes:
x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)
shape = (0, -1)
shape_ = (2, 12)
for place in self._places:
with dg.guard(place):
x_var = paddle.Tensor(
value=x_np,
place=fluid.framework._current_expected_place(),
persistable=False,
zero_copy=None,
stop_gradient=True)
y_var = fluid.layers.reshape(x_var, shape)
y_np = y_var.numpy()
np.testing.assert_allclose(np.reshape(x_np, shape_), y_np)
self.assertTrue(np.allclose(np.reshape(x_np, shape_), y_np))
if __name__ == "__main__":
......
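The renamed tests pin down `paddle.reshape`'s shape conventions, which the removed `cpx.reshape` had mirrored: `-1` infers one dimension from the element count, and `0` copies the corresponding input dimension, so `(0, -1)` on a `(2, 3, 4)` input gives `(2, 12)`. Hedged sketch:

```python
import numpy as np
import paddle
import paddle.fluid.dygraph as dg

x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4)

with dg.guard():
    x = dg.to_variable(x_np)
    y = paddle.reshape(x, (0, -1))  # 0 -> keep dim 0 (=2); -1 -> infer (=12)
    assert y.numpy().shape == (2, 12)
    assert np.allclose(y.numpy(), np.reshape(x_np, (2, 12)))
```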
@@ -16,7 +16,6 @@ import unittest
import numpy as np
import paddle
from numpy.random import random as rand
from paddle import complex as cpx
from paddle import tensor
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
@@ -29,29 +28,13 @@ class TestComplexSumLayer(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def test_complex_x(self):
for dtype in self._dtypes:
input = rand([2, 10, 10]).astype(dtype) + 1j * rand(
[2, 10, 10]).astype(dtype)
for place in self._places:
with dg.guard(place):
var_x = dg.to_variable(input)
result = cpx.sum(var_x, dim=[1, 2]).numpy()
target = np.sum(input, axis=(1, 2))
self.assertTrue(np.allclose(result, target))
def test_complex_basic_api(self):
for dtype in self._dtypes:
input = rand([2, 10, 10]).astype(dtype) + 1j * rand(
[2, 10, 10]).astype(dtype)
for place in self._places:
with dg.guard(place):
var_x = paddle.Tensor(
value=input,
place=place,
persistable=False,
zero_copy=None,
stop_gradient=True)
var_x = dg.to_variable(input)
result = tensor.sum(var_x, axis=[1, 2]).numpy()
target = np.sum(input, axis=(1, 2))
self.assertTrue(np.allclose(result, target))
......
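One keyword difference is worth noting when porting code off the removed API: `cpx.sum` took `dim`, while `paddle.tensor.sum` takes `axis`, as the diff above shows. Hedged sketch of the retained path:

```python
import numpy as np
from numpy.random import random as rand
from paddle import tensor
import paddle.fluid.dygraph as dg

x_np = rand([2, 10, 10]).astype("float32") + 1j * rand([2, 10, 10]).astype("float32")

with dg.guard():
    x = dg.to_variable(x_np)
    out = tensor.sum(x, axis=[1, 2]).numpy()  # `axis`, not the old `dim`
    assert np.allclose(out, np.sum(x_np, axis=(1, 2)))
```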
@@ -16,7 +16,6 @@ import unittest
import numpy as np
import paddle
from numpy.random import random as rand
from paddle import complex as cpx
from paddle import tensor
import paddle.fluid as fluid
import paddle.fluid.dygraph as dg
@@ -29,30 +28,13 @@ class TestComplexTraceLayer(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
self._places.append(fluid.CUDAPlace(0))
def test_complex_api(self):
for dtype in self._dtypes:
input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
[2, 20, 2, 3]).astype(dtype)
for place in self._places:
with dg.guard(place):
var_x = dg.to_variable(input)
result = cpx.trace(
var_x, offset=1, axis1=0, axis2=2).numpy()
target = np.trace(input, offset=1, axis1=0, axis2=2)
self.assertTrue(np.allclose(result, target))
def test_basic_api(self):
for dtype in self._dtypes:
input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand(
[2, 20, 2, 3]).astype(dtype)
for place in self._places:
with dg.guard(place):
var_x = paddle.Tensor(
value=input,
place=place,
persistable=False,
zero_copy=None,
stop_gradient=True)
var_x = dg.to_variable(input)
result = tensor.trace(
var_x, offset=1, axis1=0, axis2=2).numpy()
target = np.trace(input, offset=1, axis1=0, axis2=2)
......
@@ -36,24 +36,6 @@ class TestComplexTransposeLayer(unittest.TestCase):
for place in self._places:
with dg.guard(place):
var = dg.to_variable(data)
trans = paddle.complex.transpose(var, perm=perm)
self.assertTrue(np.allclose(trans.numpy(), np_trans))
def test_transpose_by_basic_api(self):
for dtype in self._dtypes:
data = np.random.random(
(2, 3, 4, 5)).astype(dtype) + 1J * np.random.random(
(2, 3, 4, 5)).astype(dtype)
perm = [3, 2, 0, 1]
np_trans = np.transpose(data, perm)
for place in self._places:
with dg.guard(place):
var = paddle.Tensor(
value=data,
place=place,
persistable=False,
zero_copy=None,
stop_gradient=True)
trans = paddle.transpose(var, perm=perm)
self.assertTrue(np.allclose(trans.numpy(), np_trans))
......
@@ -30,14 +30,11 @@ class TestComplexVariable(unittest.TestCase):
with dg.guard():
x = dg.to_variable(a, "x")
y = dg.to_variable(b)
out = paddle.complex.elementwise_add(x, y)
out = paddle.fluid.layers.elementwise_add(x, y)
self.assertIsNotNone("{}".format(out))
self.assertTrue(np.allclose(out.numpy(), a + b))
self.assertEqual(x.name, {'real': 'x.real', 'imag': 'x.imag'})
x.name = "new_x"
self.assertEqual(x.name, {'real': 'new_x.real', 'imag': 'new_x.imag'})
self.assertEqual(out.dtype, self._dtype)
self.assertEqual(out.dtype, convert_np_dtype_to_dtype_(self._dtype))
self.assertEqual(out.shape, x.shape)
def test_attrs(self):
......
@@ -28,15 +28,6 @@ class TestIsTensorApi(unittest.TestCase):
x = paddle.rand([3, 2, 4], dtype=dtype)
self.assertTrue(paddle.is_tensor(x))
def test_is_tensor_complex(self, dtype="float32"):
"""Test is_tensor api with a complex tensor
"""
paddle.disable_static()
r = paddle.to_tensor(1)
i = paddle.to_tensor(2)
x = paddle.ComplexTensor(r, i)
self.assertTrue(paddle.is_tensor(x))
def test_is_tensor_list(self, dtype="float32"):
"""Test is_tensor api with a list
"""
......
@@ -78,7 +78,7 @@ class TestVarBase(unittest.TestCase):
# set_default_dtype takes effect on complex
x = paddle.to_tensor(1 + 2j, place=place, stop_gradient=False)
self.assertTrue(np.array_equal(x.numpy(), [1 + 2j]))
self.assertEqual(x.dtype, 'complex64')
self.assertEqual(x.dtype, core.VarDesc.VarType.COMPLEX64)
paddle.set_default_dtype('float64')
x = paddle.to_tensor(1.2, place=place, stop_gradient=False)
@@ -87,7 +87,7 @@
x = paddle.to_tensor(1 + 2j, place=place, stop_gradient=False)
self.assertTrue(np.array_equal(x.numpy(), [1 + 2j]))
self.assertEqual(x.dtype, 'complex128')
self.assertEqual(x.dtype, core.VarDesc.VarType.COMPLEX128)
x = paddle.to_tensor(
1, dtype='float32', place=place, stop_gradient=False)
@@ -133,10 +133,8 @@
[1 + 2j, 1 - 2j], dtype='complex64', place=place)
y = paddle.to_tensor(x)
self.assertTrue(np.array_equal(x.numpy(), [1 + 2j, 1 - 2j]))
self.assertEqual(y.dtype, 'complex64')
self.assertEqual(y.dtype, core.VarDesc.VarType.COMPLEX64)
self.assertEqual(y.shape, [2])
self.assertEqual(y.real.stop_gradient, True)
self.assertEqual(y.real.type, core.VarDesc.VarType.LOD_TENSOR)
with self.assertRaises(TypeError):
paddle.to_tensor('test')
......
@@ -25,7 +25,6 @@ from .random import seed
from .framework import get_default_dtype
from .framework import set_default_dtype
from ..fluid.framework import ComplexVariable #DEFINE_ALIAS
from ..fluid.param_attr import ParamAttr #DEFINE_ALIAS
# from ..fluid.layers.tensor import create_global_var #DEFINE_ALIAS
from ..fluid.layers.tensor import create_parameter #DEFINE_ALIAS
......
@@ -55,32 +55,29 @@ __all__ = [
@dygraph_only
def to_tensor(data, dtype=None, place=None, stop_gradient=True):
r"""
Constructs a ``paddle.Tensor`` or ``paddle.ComplexTensor`` from ``data`` ,
which can be scalar, tuple, list, numpy\.ndarray, paddle\.Tensor, paddle\.ComplexTensor.
Constructs a ``paddle.Tensor`` from ``data`` ,
which can be scalar, tuple, list, numpy\.ndarray, paddle\.Tensor.
If the ``data`` is already a tensor, and ``dtype`` or ``place`` doesn't change, no copy
will be performed and return origin tensor, otherwise a new tensor will be constructed
and returned.
The ``ComplexTensor`` is a unique type of paddle. If x is ``ComplexTensor``, then
``x.real`` is the real part, and ``x.imag`` is the imaginary part.
Args:
data(scalar|tuple|list|ndarray|Tensor|ComplexTensor): Initial data for the tensor.
Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor, paddle\.ComplexTensor.
data(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor.
Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool' , 'float16' ,
'float32' , 'float64' , 'int8' , 'int16' , 'int32' , 'int64' , 'uint8'. And
'complex64' , 'complex128' only for ComplexTensor. Default: None, infers dtype from ``data``
'float32' , 'float64' , 'int8' , 'int16' , 'int32' , 'int64' , 'uint8',
'complex64' , 'complex128'. Default: None, infers dtype from ``data``
except for python float number which gets dtype from ``get_default_type`` .
place(CPUPlace|CUDAPinnedPlace|CUDAPlace, optional): The place to allocate Tensor. Can be
CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place.
stop_gradient(bool, optional): Whether to block the gradient propagation of Autograd. Default: True.
Returns:
Tensor: A Tensor or ComplexTensor constructed from ``data`` .
Tensor: A Tensor constructed from ``data`` .
Raises:
TypeError: If the data type of ``data`` is not scalar, list, tuple, numpy.ndarray, paddle.Tensor, paddle.ComplexTensor
TypeError: If the data type of ``data`` is not scalar, list, tuple, numpy.ndarray, paddle.Tensor
ValueError: If ``data`` is tuple|list, it can't contain nested tuple|list with different lengths , such as: [[1, 2], [3, 4, 5]]
TypeError: If ``dtype`` is not bool, float16, float32, float64, int8, int16, int32, int64, uint8, complex64, complex128
ValueError: If ``place`` is not paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace
@@ -112,16 +109,13 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
# [[0.10000000, 0.20000000],
# [0.30000001, 0.40000001]])
type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]]), dtype='complex64')
# <class 'paddle.ComplexTensor'>
type(paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64'))
# <class 'paddle.VarBase'>
paddle.to_tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')
# ComplexTensor[real](shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 2.],
# [3., 4.]])
# ComplexTensor[imag](shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 0.],
# [2., 0.]])
# Tensor(shape=[2, 2], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+1j), (2+0j)],
# [(3+2j), (4+0j)]])
"""
if place is None:
@@ -156,11 +150,9 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
if convert_dtype(dtype) != convert_dtype(data.dtype):
return data.astype(convert_dtype(dtype))
return data
elif isinstance(data, paddle.ComplexTensor):
return data
else:
raise TypeError(
"Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|numpy.ndarray|paddle.Tensor|paddle.ComplexTensor".
"Can't constructs a 'paddle.Tensor' with data type {}, data type must be scalar|list|tuple|numpy.ndarray|paddle.Tensor".
format(type(data)))
if not dtype and data.dtype in [
'float16', 'float32', 'float64', 'complex64', 'complex128'
@@ -175,30 +167,12 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
if dtype and convert_dtype(dtype) != data.dtype:
data = data.astype(dtype)
if not np.iscomplexobj(data):
if dtype and convert_dtype(dtype) != data.dtype:
data = data.astype(dtype)
return paddle.Tensor(
value=data,
place=place,
persistable=False,
zero_copy=False,
stop_gradient=stop_gradient)
else:
name = unique_name.generate('generated_tensor')
real_tensor = paddle.Tensor(
value=data.real,
place=place,
zero_copy=False,
name=name + ".real",
stop_gradient=stop_gradient)
imag_tensor = paddle.Tensor(
value=data.imag,
place=place,
zero_copy=False,
name=name + ".imag",
stop_gradient=stop_gradient)
return paddle.ComplexTensor(real_tensor, imag_tensor)
def full_like(x, fill_value, dtype=None, name=None):
......
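With the real/imag split gone, `to_tensor` builds complex tensors through the same `paddle.Tensor` call as real ones, and `paddle.set_default_dtype` decides the complex width, exactly as the `test_var_base` hunk above asserts. Hedged sketch:

```python
import paddle

paddle.set_default_dtype('float32')
x = paddle.to_tensor(1 + 2j)
print(x.numpy(), x.dtype)  # [1.+2.j] VarType.COMPLEX64

paddle.set_default_dtype('float64')
y = paddle.to_tensor(1 + 2j)
print(y.dtype)             # VarType.COMPLEX128
```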
@@ -20,7 +20,6 @@ from .. import fluid
from ..fluid.framework import in_dygraph_mode
from paddle.common_ops_import import *
from ..framework import VarBase as Tensor
from ..framework import ComplexVariable as ComplexTensor
# TODO: define logic functions of a tensor
from ..fluid.layers import is_empty #DEFINE_ALIAS
@@ -445,13 +444,13 @@ def not_equal(x, y, name=None):
def is_tensor(x):
"""
This function tests whether input object is a paddle.Tensor or a paddle.ComplexTensor.
This function tests whether input object is a paddle.Tensor.
Args:
x (object): Object to test.
Returns:
A boolean value. True if 'x' is a paddle.Tensor or a paddle.ComplexTensor, otherwise False.
A boolean value. True if 'x' is a paddle.Tensor, otherwise False.
Examples:
.. code-block:: python
@@ -462,13 +461,9 @@ def is_tensor(x):
check = paddle.is_tensor(input1)
print(check) #True
input2 = paddle.ComplexTensor(input1, input1)
check = paddle.is_tensor(input2)
print(check) #True
input3 = [1, 4]
check = paddle.is_tensor(input3)
print(check) #False
"""
return isinstance(x, Tensor) or isinstance(x, ComplexTensor)
return isinstance(x, Tensor)
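Since a complex-valued tensor is now just a `Tensor`, the single `isinstance` check still covers it; no `ComplexTensor` special case is needed. Hedged usage:

```python
import paddle

x = paddle.to_tensor([1 + 2j, 3 + 4j])  # ordinary Tensor with a complex dtype
print(paddle.is_tensor(x))              # True
print(paddle.is_tensor([1, 4]))         # False
```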
@@ -143,8 +143,6 @@ packages=['paddle',
'paddle.reader',
'paddle.distributed',
'paddle.incubate',
'paddle.incubate.complex',
'paddle.incubate.complex.tensor',
'paddle.distributed.fleet',
'paddle.distributed.fleet.base',
'paddle.distributed.fleet.meta_optimizers',
......