Unverified · Commit 1e1ae5c5 · authored by Zhou Wei, committed by GitHub

Make the Bind Method of Tensor more automatic (#27270)

* Makes the Bind Method more intelligent

* Makes the Bind Method more intelligent

* fix unittest

* fix unittest

* fix conflict
Parent 5508c787
@@ -649,61 +649,47 @@ void BindImperative(py::module *m_ptr) {
          return self.NewVarBase(tensor.place(), false);
        },
        py::return_value_policy::copy, R"DOC(
-        **Notes**:
-            **This API is ONLY available in Dygraph mode**
-        Returns a new Variable, detached from the current graph.
-        Returns:
-            ( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.
+        Returns a new Tensor, detached from the current graph.
+        Returns: The detached Tensor.
        Examples:
            .. code-block:: python
-                import paddle.fluid as fluid
-                from paddle.fluid.dygraph.base import to_variable
-                from paddle.fluid.dygraph import Linear
-                import numpy as np
-                data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
-                with fluid.dygraph.guard():
-                    linear = Linear(32, 64)
-                    data = to_variable(data)
-                    x = linear(data)
-                    y = x.detach()
+                import paddle
+                paddle.disable_static()
+                linear = paddle.nn.Linear(32, 64)
+                data = paddle.uniform(shape=[30, 10, 32], min=-1.0, max=1.0)
+                x = linear(data)
+                y = x.detach()
      )DOC")
     .def("clear_gradient", &imperative::VarBase::ClearGradient, R"DOC(
-        **Notes**:
-            **1. This API is ONLY available in Dygraph mode**
-            **2. Use it only Variable has gradient, normally we use this for Parameters since other temporal Variable will be deleted by Python's GC**
-        Clear (set to ``0`` ) the Gradient of Current Variable
+        Only for Tensors that have a gradient; normally used for Parameters, since other temporary Tensors don't have gradients.
+        The gradient of the current Tensor will be set to ``0``.
        Returns: None
        Examples:
            .. code-block:: python
-                import paddle.fluid as fluid
-                import numpy as np
-                x = np.ones([2, 2], np.float32)
-                with fluid.dygraph.guard():
-                    inputs2 = []
-                    for _ in range(10):
-                        tmp = fluid.dygraph.base.to_variable(x)
-                        tmp.stop_gradient=False
-                        inputs2.append(tmp)
-                    ret2 = fluid.layers.sums(inputs2)
-                    loss2 = fluid.layers.reduce_sum(ret2)
-                    loss2.backward()
-                    print(loss2.gradient())
-                    loss2.clear_gradient()
-                    print("After clear {}".format(loss2.gradient()))
+                import paddle
+                paddle.disable_static()
+                inputs = []
+                for _ in range(10):
+                    tmp = paddle.ones([2, 2])
+                    tmp.stop_gradient = False
+                    inputs.append(tmp)
+                ret = paddle.sums(inputs)
+                loss = paddle.reduce_sum(ret)
+                loss.backward()
+                print("Before clear_gradient {}".format(loss.grad))
+                loss.clear_gradient()
+                print("After clear_gradient {}".format(loss.grad))
      )DOC")
     .def("_run_backward",
          [](imperative::VarBase &self, const imperative::Tracer &tracer,
......
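As a quick sanity check of the two docstrings above, here is a minimal, self-contained sketch of `detach()` and `clear_gradient()`. It assumes the Paddle 2.x dygraph API; `paddle.nn.Linear`, `paddle.add_n` and `paddle.sum` are substitutions chosen here, not names introduced by this commit.

```python
# Hedged sketch of detach() and clear_gradient(), assuming the Paddle 2.x
# dygraph API; paddle.add_n/paddle.sum are my substitutions for the snippet's
# paddle.sums/paddle.reduce_sum.
import paddle

paddle.disable_static()

# detach(): the result shares data with x but is cut out of the autograd graph.
linear = paddle.nn.Linear(32, 64)
data = paddle.uniform(shape=[30, 10, 32], min=-1.0, max=1.0)
x = linear(data)
y = x.detach()                      # y.stop_gradient is True

# clear_gradient(): reset an accumulated gradient to zeros.
inputs = []
for _ in range(10):
    tmp = paddle.ones([2, 2])
    tmp.stop_gradient = False       # make tmp a leaf that receives gradients
    inputs.append(tmp)
loss = paddle.sum(paddle.add_n(inputs))
loss.backward()
print("before:", inputs[0].grad)    # gradient of ones
inputs[0].clear_gradient()
print("after:", inputs[0].grad)     # zeros after clearing
```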
@@ -17,8 +17,7 @@ from __future__ import print_function
 from .. import core
 from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
 from ..layers.layer_function_generator import OpProtoHolder
-from ..layers import common_methods
-from . import to_variable, no_grad
+from . import no_grad
 import numpy as np
 import six
@@ -53,47 +52,25 @@ def monkey_patch_math_varbase():
     def astype(self, dtype):
         """
-        **Notes**:
-            **The variable must be a** :ref:`api_fluid_Tensor`
-        Cast a variable to a specified data type.
+        Cast a Tensor to a specified data type.
         Args:
-            self(Variable): The source variable
-            dtype: The target data type
+            dtype: The target data type.
         Returns:
-            Variable: Variable with new dtype
+            Tensor: a new Tensor with target dtype
         Examples:
-            In Static Graph Mode:
-            .. code-block:: python
-                import paddle.fluid as fluid
-                startup_prog = fluid.Program()
-                main_prog = fluid.Program()
-                with fluid.program_guard(startup_prog, main_prog):
-                    original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32')
-                    new_variable = original_variable.astype('int64')
-                    print("new var's dtype is: {}".format(new_variable.dtype))
-            In Dygraph Mode:
             .. code-block:: python
-                import paddle.fluid as fluid
-                import numpy as np
-                x = np.ones([2, 2], np.float32)
-                with fluid.dygraph.guard():
-                    original_variable = fluid.dygraph.to_variable(x)
-                    print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
-                    new_variable = original_variable.astype('int64')
-                    print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))
+                import paddle
+                import numpy as np
+                original_tensor = paddle.ones([2, 2])
+                print("original tensor's dtype is: {}".format(original_tensor.dtype))
+                new_tensor = original_tensor.astype('float32')
+                print("new tensor's dtype is: {}".format(new_tensor.dtype))
         """
if not isinstance(dtype, core.VarDesc.VarType): if not isinstance(dtype, core.VarDesc.VarType):
@@ -147,6 +124,10 @@ def monkey_patch_math_varbase():
     def _ndim_(var):
         return len(var.shape)
 
+    @property
+    def _size_(var):
+        return np.prod(var.shape)
+
     def _scalar_add_(var, value):
         return _scalar_elementwise_op_(var, 1.0, value)
@@ -208,7 +189,6 @@ def monkey_patch_math_varbase():
         __impl__.__doc__ = """
         {0}
         Args:
-            self(Tensor): left hand Tensor
             other_var(Tensor|float|int): right hand Tensor
         Returns:
@@ -217,23 +197,7 @@ def monkey_patch_math_varbase():
         __impl__.__name__ = method_name
         return __impl__
-    # Todo(zhouwei): implement dygraph template to adapt to any function, receive('op_type', 'arg_template')
-    # Such as _method_creator_('addmm', 'x, y, alpha=1.0, beta=1.0, name=None'). It can reduce call time.
-    def _method_creator_(op_type, arg_template=None):
-        def __impl__(self):
-            op = getattr(core.ops, op_type)
-            return op(self)
-
-        __impl__.__doc__ = """
-        See paddle.{}""".format(op_type)
-        __impl__.__name__ = op_type
-        return __impl__
-
     varbase_methods = [
-        # Type1: From custom fun or lambda
-        ## b=-a
         ('__neg__', _neg_),
         ('__float__', _float_),
         ('__long__', _long_),
@@ -244,8 +208,7 @@ def monkey_patch_math_varbase():
         ('dim', lambda x: len(x.shape)),
         ('ndimension', lambda x: len(x.shape)),
         ('ndim', _ndim_),
-        ('size', lambda x: x.shape),
-        # Type2: From Template that create core.ops automatically. It's recommended.
+        ('size', _size_),
         ('__add__',
          _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
         ## a+b == b+a. Do not need to reverse explicitly
@@ -283,31 +246,7 @@ def monkey_patch_math_varbase():
         ('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
         ('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
         ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
-        ('__array_ufunc__', None),
-        ('sigmoid', _method_creator_('sigmoid', 'name=None')),
-        ('log_sigmoid', _method_creator_('logsigmoid', 'name=None')),
-        ('exp', _method_creator_('exp', 'name=None')),
-        ('tanh', _method_creator_('tanh', 'name=None')),
-        ('atan', _method_creator_('atan', 'name=None')),
-        ('tanh_shrink', _method_creator_('tanh_shrink', 'name=None')),
-        ('sqrt', _method_creator_('sqrt', 'name=None')),
-        ('rsqrt', _method_creator_('rsqrt', 'name=None')),
-        ('abs', _method_creator_('abs', 'name=None')),
-        ('ceil', _method_creator_('ceil', 'name=None')),
-        ('floor', _method_creator_('floor', 'name=None')),
-        ('cos', _method_creator_('cos', 'name=None')),
-        ('acos', _method_creator_('acos', 'name=None')),
-        ('asin', _method_creator_('asin', 'name=None')),
-        ('sin', _method_creator_('sin', 'name=None')),
-        ('sinh', _method_creator_('sinh', 'name=None')),
-        ('cosh', _method_creator_('cosh', 'name=None')),
-        ('round', _method_creator_('round', 'name=None')),
-        ('reciprocal', _method_creator_('reciprocal', 'name=None')),
-        ('square', _method_creator_('square', 'name=None')),
-        ('softplus', _method_creator_('softplus', 'name=None')),
-        ('softsign', _method_creator_('softsign', 'name=None')),
-        # Type3: Form module 'paddle.tensor' defaultly.
-        # It's not a goodway, because it will increase call time.
+        ('__array_ufunc__', None)
     ]
     global _already_patch_varbase
@@ -318,7 +257,15 @@ def monkey_patch_math_varbase():
             setattr(core.VarBase, method_name, method_impl)
     else:
         import paddle.tensor
-        for method_name in common_methods:
+        # Tensor method from module paddle.tensor
+        tensor_methods = paddle.tensor.linalg.__all__ + \
+                         paddle.tensor.math.__all__ + \
+                         paddle.tensor.logic.__all__ + \
+                         paddle.tensor.manipulation.__all__ + \
+                         paddle.tensor.search.__all__ + \
+                         paddle.tensor.stat.__all__ + \
+                         paddle.tensor.attribute.__all__
+        for method_name in tensor_methods:
             if hasattr(core.VarBase, method_name): continue
             method_impl = getattr(paddle.tensor, method_name, None)
             if method_impl: setattr(core.VarBase, method_name, method_impl)
......
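The hunk above is the heart of the change: instead of the hand-maintained `common_methods` list, every name exported by the `__all__` lists of the `paddle.tensor` submodules is attached to the dygraph `core.VarBase` once, at patch time. A minimal sketch of that pattern, using a stand-in `MyTensor` class and a fake module rather than Paddle's real types, looks like this:

```python
# Minimal sketch of __all__-driven monkey patching, as done above for core.VarBase.
# MyTensor and fake_math_module are stand-ins, not Paddle classes.
import types


class MyTensor:
    """Stand-in for core.VarBase / paddle.Tensor."""
    def __init__(self, value):
        self.value = value


# Pretend this is paddle.tensor.math, whose __all__ lists its public functions.
def scale(x, factor):
    return MyTensor(x.value * factor)

fake_math_module = types.SimpleNamespace(scale=scale, __all__=['scale'])

# The patching loop: walk every exported name and bind it as a method,
# skipping names the class already defines (mirrors the hasattr check above).
for name in fake_math_module.__all__:
    if hasattr(MyTensor, name):
        continue
    impl = getattr(fake_math_module, name, None)
    if impl is not None:
        setattr(MyTensor, name, impl)

t = MyTensor(3.0)
print(t.scale(2.0).value)  # 6.0 -- scale(t, 2.0) is now also t.scale(2.0)
```

Because a plain function assigned to a class attribute becomes a bound method through the descriptor protocol, `t.scale(2.0)` and `scale(t, 2.0)` are the same call; that is why `paddle.tensor` functions whose first argument is the input Tensor can be reused as methods without wrappers, and why the `hasattr` check is enough to avoid clobbering hand-written methods such as `__add__`.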
@@ -54,29 +54,6 @@ EXPRESSION_MAP = {
     "__ge__": "A >= B"
 }
 
-# method for Tensor from paddle.tensor
-# edit it when paddle.tensor has new method about Tensor operation
-common_methods = [
-    'exp', 'tanh', 'atan', 'sqrt', 'rsqrt', 'abs', 'ceil', 'floor', 'cos',
-    'acos', 'asin', 'sin', 'sinh', 'cosh', 'round', 'reciprocal', 'square',
-    'rank', 'matmul', 'dot', 'norm', 'transpose', 'dist', 't', 'cross',
-    'cholesky', 'bmm', 'histogram', 'equal', 'greater_equal', 'greater_than',
-    'is_empty', 'isfinite', 'less_equal', 'less_than', 'logical_and',
-    'logical_not', 'logical_or', 'logical_xor', 'not_equal', 'reduce_all',
-    'reduce_any', 'allclose', 'equal_all', 'cast', 'expand', 'expand_as',
-    'tile', 'flatten', 'gather', 'gather_nd', 'reshape', 'reverse', 'scatter',
-    'scatter_nd_add', 'scatter_nd', 'shard_index', 'slice', 'split', 'squeeze',
-    'strided_slice', 'unique', 'unique_with_counts', 'unsqueeze', 'flip',
-    'unbind', 'roll', 'cumsum', 'increment', 'log', 'pow', 'reciprocal',
-    'round', 'rsqrt', 'scale', 'sign', 'stanh', 'sum', 'reduce_prod', 'max',
-    'min', 'mm', 'div', 'multiply', 'add', 'logsumexp', 'log1p', 'erf',
-    'addcmul', 'addmm', 'clamp', 'trace', 'kron', 'argmax', 'argmin', 'argsort',
-    'has_inf', 'has_nan', 'topk', 'index_select', 'nonzero', 'sort',
-    'index_sample', 'mean', 'std', 'var', 'elementwise_add', 'elementwise_div',
-    'elementwise_floordiv', 'elementwise_mod', 'elementwise_pow',
-    'elementwise_sub'
-]
-
 _already_patch_variable = False
@@ -372,7 +349,14 @@ def monkey_patch_variable():
             setattr(Variable, method_name, method_impl)
     else:
         import paddle.tensor
-        for method_name in common_methods:
+        variabel_methods = paddle.tensor.linalg.__all__ + \
+                           paddle.tensor.math.__all__ + \
+                           paddle.tensor.logic.__all__ + \
+                           paddle.tensor.manipulation.__all__ + \
+                           paddle.tensor.search.__all__ + \
+                           paddle.tensor.stat.__all__ + \
+                           paddle.tensor.attribute.__all__
+        for method_name in variabel_methods:
             if hasattr(Variable, method_name): continue
             method_impl = getattr(paddle.tensor, method_name, None)
             if method_impl: setattr(Variable, method_name, method_impl)
......
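The static-graph `Variable` gets the same treatment in this hunk, so the method names available on a dygraph Tensor are also callable while building a program. A hedged usage sketch follows; `exp` is just one example of a name exported by `paddle.tensor.math`, and the patching itself runs automatically when Paddle is imported:

```python
# Hedged sketch: after monkey_patch_variable() has run (Paddle invokes it on
# import), paddle.tensor functions can be called as methods of a static Variable.
import paddle
import paddle.fluid as fluid

paddle.enable_static()
main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.data(name='x', shape=[2, 2], dtype='float32')
    y = x.exp()          # equivalent to paddle.exp(x) on a static Variable
    print(type(y))       # still a static-graph Variable
```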
@@ -47,7 +47,7 @@ class TestSimpleRNNCell(unittest.TestCase):
         prev_h = np.random.randn(4, 32)
         y1, h1 = rnn1(x, prev_h)
-        y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
+        y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
     def test_with_zero_state(self):
@@ -57,7 +57,7 @@ class TestSimpleRNNCell(unittest.TestCase):
         x = np.random.randn(4, 16)
         y1, h1 = rnn1(x)
-        y2, h2 = rnn2(paddle.to_variable(x))
+        y2, h2 = rnn2(paddle.to_tensor(x))
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
     def runTest(self):
@@ -90,7 +90,7 @@ class TestGRUCell(unittest.TestCase):
         prev_h = np.random.randn(4, 32)
         y1, h1 = rnn1(x, prev_h)
-        y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
+        y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
     def test_with_zero_state(self):
@@ -100,7 +100,7 @@ class TestGRUCell(unittest.TestCase):
         x = np.random.randn(4, 16)
         y1, h1 = rnn1(x)
-        y2, h2 = rnn2(paddle.to_variable(x))
+        y2, h2 = rnn2(paddle.to_tensor(x))
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
     def runTest(self):
@@ -134,8 +134,8 @@ class TestLSTMCell(unittest.TestCase):
         y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
         y2, (h2, c2) = rnn2(
-            paddle.to_variable(x),
-            (paddle.to_variable(prev_h), paddle.to_variable(prev_c)))
+            paddle.to_tensor(x),
+            (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)))
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
@@ -146,7 +146,7 @@ class TestLSTMCell(unittest.TestCase):
         x = np.random.randn(4, 16)
         y1, (h1, c1) = rnn1(x)
-        y2, (h2, c2) = rnn2(paddle.to_variable(x))
+        y2, (h2, c2) = rnn2(paddle.to_tensor(x))
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
......
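The unit-test hunks above and below are mechanical renames from the deprecated `paddle.to_variable` to `paddle.to_tensor`. For reference, a minimal hedged sketch of the replacement call:

```python
# Hedged sketch: paddle.to_tensor is the 2.x replacement for paddle.to_variable.
import numpy as np
import paddle

paddle.disable_static()
arr = np.random.randn(4, 16).astype("float32")
t = paddle.to_tensor(arr)      # copies the numpy array into a dygraph Tensor
print(t.shape)                 # [4, 16]
print(t.numpy().dtype)         # float32
```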
@@ -53,7 +53,7 @@ class TestSimpleRNN(unittest.TestCase):
         prev_h = np.random.randn(2 * self.num_directions, 4, 32)
         y1, h1 = rnn1(x, prev_h)
-        y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
+        y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
@@ -66,7 +66,7 @@ class TestSimpleRNN(unittest.TestCase):
         x = np.transpose(x, [1, 0, 2])
         y1, h1 = rnn1(x)
-        y2, h2 = rnn2(paddle.to_variable(x))
+        y2, h2 = rnn2(paddle.to_tensor(x))
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
@@ -81,11 +81,11 @@ class TestSimpleRNN(unittest.TestCase):
         y1, h1 = rnn1(x, sequence_length=sequence_length)
-        seq_len = paddle.to_variable(sequence_length)
+        seq_len = paddle.to_tensor(sequence_length)
         mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
-        y2, h2 = rnn2(paddle.to_variable(x), sequence_length=seq_len)
+        y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
         y2 = paddle.multiply(y2, mask, axis=0)
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
@@ -133,7 +133,7 @@ class TestGRU(unittest.TestCase):
         prev_h = np.random.randn(2 * self.num_directions, 4, 32)
         y1, h1 = rnn1(x, prev_h)
-        y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
+        y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
@@ -146,7 +146,7 @@ class TestGRU(unittest.TestCase):
         x = np.transpose(x, [1, 0, 2])
         y1, h1 = rnn1(x)
-        y2, h2 = rnn2(paddle.to_variable(x))
+        y2, h2 = rnn2(paddle.to_tensor(x))
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
@@ -161,11 +161,11 @@ class TestGRU(unittest.TestCase):
         y1, h1 = rnn1(x, sequence_length=sequence_length)
-        seq_len = paddle.to_variable(sequence_length)
+        seq_len = paddle.to_tensor(sequence_length)
         mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
-        y2, h2 = rnn2(paddle.to_variable(x), sequence_length=seq_len)
+        y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
         y2 = paddle.multiply(y2, mask, axis=0)
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
@@ -209,8 +209,8 @@ class TestLSTM(unittest.TestCase):
         y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
         y2, (h2, c2) = rnn2(
-            paddle.to_variable(x),
-            (paddle.to_variable(prev_h), paddle.to_variable(prev_c)))
+            paddle.to_tensor(x),
+            (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)))
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
@@ -224,7 +224,7 @@ class TestLSTM(unittest.TestCase):
         x = np.transpose(x, [1, 0, 2])
         y1, (h1, c1) = rnn1(x)
-        y2, (h2, c2) = rnn2(paddle.to_variable(x))
+        y2, (h2, c2) = rnn2(paddle.to_tensor(x))
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
         np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
@@ -240,11 +240,11 @@ class TestLSTM(unittest.TestCase):
         y1, (h1, c1) = rnn1(x, sequence_length=sequence_length)
-        seq_len = paddle.to_variable(sequence_length)
+        seq_len = paddle.to_tensor(sequence_length)
         mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
         if self.time_major:
             mask = paddle.transpose(mask, [1, 0])
-        y2, (h2, c2) = rnn2(paddle.to_variable(x), sequence_length=seq_len)
+        y2, (h2, c2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
         y2 = paddle.multiply(y2, mask, axis=0)
         np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
......
@@ -19,6 +19,7 @@ import paddle
 import paddle.fluid as fluid
 import numpy as np
 import six
+import inspect
 class TestMathOpPatchesVarBase(unittest.TestCase):
@@ -302,21 +303,13 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
         self.assertEqual(x.dim(), 2)
         self.assertEqual(x.ndimension(), 2)
         self.assertEqual(x.ndim, 2)
-        self.assertEqual(x.size(), [2, 3])
-        self.assertTrue(
-            np.array_equal(x.sigmoid().numpy(), fluid.layers.sigmoid(x).numpy(
-            )))
-        self.assertTrue(
-            np.array_equal(x.log_sigmoid().numpy(),
-                           fluid.layers.logsigmoid(x).numpy()))
+        self.assertEqual(x.size, 6)
+        self.assertEqual(x.numel(), 6)
         self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
         self.assertTrue(
             np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()))
         self.assertTrue(
             np.array_equal(x.atan().numpy(), paddle.atan(x).numpy()))
-        self.assertTrue(
-            np.array_equal(x.tanh_shrink().numpy(),
-                           fluid.layers.tanh_shrink(x).numpy()))
         self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
         m = x.abs()
         self.assertTrue(
@@ -344,12 +337,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             )))
         self.assertTrue(
             np.array_equal(x.square().numpy(), paddle.square(x).numpy()))
-        self.assertTrue(
-            np.array_equal(x.softplus().numpy(),
-                           fluid.layers.softplus(x).numpy()))
-        self.assertTrue(
-            np.array_equal(x.softsign().numpy(),
-                           fluid.layers.softsign(x).numpy()))
         self.assertTrue(
             np.array_equal(x.rank().numpy(), paddle.rank(x).numpy()))
         self.assertTrue(
@@ -422,6 +409,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
         self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))
         # 2. Binary operation
+        self.assertTrue(
+            np.array_equal(x.divide(y).numpy(), paddle.divide(x, y).numpy()))
         self.assertTrue(
             np.array_equal(
                 x.matmul(y, True, False).numpy(),
@@ -501,6 +490,73 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
         self.assertTrue(
             np.array_equal(
                 x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
a = paddle.to_tensor([[1, 2], [3, 4]])
b = paddle.to_tensor([[4, 3], [2, 1]])
self.assertTrue(
np.array_equal(
x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))
self.assertTrue(inspect.ismethod(a.dot))
self.assertTrue(inspect.ismethod(a.elementwise_add))
self.assertTrue(inspect.ismethod(a.elementwise_div))
self.assertTrue(inspect.ismethod(a.elementwise_floordiv))
self.assertTrue(inspect.ismethod(a.elementwise_mod))
self.assertTrue(inspect.ismethod(a.elementwise_sub))
self.assertTrue(inspect.ismethod(a.logsumexp))
self.assertTrue(inspect.ismethod(a.multiplex))
self.assertTrue(inspect.ismethod(a.prod))
self.assertTrue(inspect.ismethod(a.reduce_max))
self.assertTrue(inspect.ismethod(a.reduce_min))
self.assertTrue(inspect.ismethod(a.reduce_prod))
self.assertTrue(inspect.ismethod(a.reduce_sum))
self.assertTrue(inspect.ismethod(a.scale))
self.assertTrue(inspect.ismethod(a.stanh))
self.assertTrue(inspect.ismethod(a.sums))
self.assertTrue(inspect.ismethod(a.elementwise_sum))
self.assertTrue(inspect.ismethod(a.max))
self.assertTrue(inspect.ismethod(a.maximum))
self.assertTrue(inspect.ismethod(a.min))
self.assertTrue(inspect.ismethod(a.minimum))
self.assertTrue(inspect.ismethod(a.floor_divide))
self.assertTrue(inspect.ismethod(a.remainder))
self.assertTrue(inspect.ismethod(a.floor_mod))
self.assertTrue(inspect.ismethod(a.multiply))
self.assertTrue(inspect.ismethod(a.logsumexp))
self.assertTrue(inspect.ismethod(a.inverse))
self.assertTrue(inspect.ismethod(a.log1p))
self.assertTrue(inspect.ismethod(a.erf))
self.assertTrue(inspect.ismethod(a.addcmul))
self.assertTrue(inspect.ismethod(a.addmm))
self.assertTrue(inspect.ismethod(a.clip))
self.assertTrue(inspect.ismethod(a.trace))
self.assertTrue(inspect.ismethod(a.kron))
self.assertTrue(inspect.ismethod(a.isinf))
self.assertTrue(inspect.ismethod(a.isnan))
self.assertTrue(inspect.ismethod(a.concat))
self.assertTrue(inspect.ismethod(a.broadcast_to))
self.assertTrue(inspect.ismethod(a.scatter_nd_add))
self.assertTrue(inspect.ismethod(a.scatter_nd))
self.assertTrue(inspect.ismethod(a.shard_index))
self.assertTrue(inspect.ismethod(a.chunk))
self.assertTrue(inspect.ismethod(a.stack))
self.assertTrue(inspect.ismethod(a.strided_slice))
self.assertTrue(inspect.ismethod(a.unsqueeze))
self.assertTrue(inspect.ismethod(a.unstack))
self.assertTrue(inspect.ismethod(a.argmax))
self.assertTrue(inspect.ismethod(a.argmin))
self.assertTrue(inspect.ismethod(a.argsort))
self.assertTrue(inspect.ismethod(a.has_inf))
self.assertTrue(inspect.ismethod(a.has_nan))
self.assertTrue(inspect.ismethod(a.masked_select))
self.assertTrue(inspect.ismethod(a.topk))
self.assertTrue(inspect.ismethod(a.index_select))
self.assertTrue(inspect.ismethod(a.nonzero))
self.assertTrue(inspect.ismethod(a.sort))
self.assertTrue(inspect.ismethod(a.index_sample))
self.assertTrue(inspect.ismethod(a.mean))
self.assertTrue(inspect.ismethod(a.reduce_mean))
self.assertTrue(inspect.ismethod(a.std))
self.assertTrue(inspect.ismethod(a.numel))
if __name__ == '__main__':
......
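The assertions added in this test pin down the new `Tensor.size` semantics: `size` is now a property equal to the number of elements (previously `size()` was a method returning the shape), alongside the `numel()` method. A quick hedged illustration:

```python
# Hedged illustration of the size/numel behaviour asserted by the test above.
import paddle

paddle.disable_static()
x = paddle.ones([2, 3])
print(x.shape)      # [2, 3]
print(x.size)       # 6 -- property, the product of the shape entries
print(x.numel())    # also evaluates to 6; the test asserts x.numel() == 6
```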
@@ -61,8 +61,8 @@ class ApiMinimumTest(unittest.TestCase):
     def test_dynamic_api(self):
         paddle.disable_static()
         np_x = np.array([10, 10]).astype('float64')
-        x = paddle.to_variable(self.input_x)
-        y = paddle.to_variable(self.input_y)
+        x = paddle.to_tensor(self.input_x)
+        y = paddle.to_tensor(self.input_y)
         z = paddle.minimum(x, y)
         np_z = z.numpy()
         z_expected = np.array(np.minimum(self.input_x, self.input_y))
@@ -73,8 +73,8 @@ class ApiMinimumTest(unittest.TestCase):
         np_x = np.random.rand(5, 4, 3, 2).astype("float64")
         np_y = np.random.rand(4, 3).astype("float64")
-        x = paddle.to_variable(self.input_x)
-        y = paddle.to_variable(self.input_y)
+        x = paddle.to_tensor(self.input_x)
+        y = paddle.to_tensor(self.input_y)
         result_1 = paddle.minimum(x, y, axis=1)
         result_2 = paddle.minimum(x, y, axis=-2)
         self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
@@ -205,8 +205,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
         paddle.disable_static()
         dy_ret = paddle.nn.functional.mse_loss(
-            paddle.to_variable(input_np),
-            paddle.to_variable(target_np), 'mean')
+            paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'mean')
         dy_result = dy_ret.numpy()
         sub = input_np - target_np
@@ -240,8 +239,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
         paddle.disable_static()
         dy_ret = paddle.nn.functional.mse_loss(
-            paddle.to_variable(input_np),
-            paddle.to_variable(target_np), 'sum')
+            paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'sum')
         dy_result = dy_ret.numpy()
         sub = input_np - target_np
@@ -275,8 +273,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
         paddle.disable_static()
         dy_ret = paddle.nn.functional.mse_loss(
-            paddle.to_variable(input_np),
-            paddle.to_variable(target_np), 'none')
+            paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'none')
         dy_result = dy_ret.numpy()
         sub = input_np - target_np
......
@@ -909,8 +909,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
         with fluid.dygraph.guard():
             x_np = np.random.random(size=(5, )).astype(np.float64)
             label_np = np.random.randint(0, 10, size=(5, )).astype(np.int64)
-            x = paddle.to_variable(x_np)
-            label = paddle.to_variable(label_np)
+            x = paddle.to_tensor(x_np)
+            label = paddle.to_tensor(label_np)
             nll_loss = paddle.nn.loss.NLLLoss()
             res = nll_loss(x, label)
@@ -933,8 +933,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
         with fluid.dygraph.guard():
             x_np = np.random.random(size=(5, 3)).astype(np.float64)
             label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
-            x = paddle.to_variable(x_np)
-            label = paddle.to_variable(label_np)
+            x = paddle.to_tensor(x_np)
+            label = paddle.to_tensor(label_np)
             nll_loss = paddle.nn.loss.NLLLoss(reduction='')
             res = nll_loss(x, label)
@@ -957,8 +957,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
         with fluid.dygraph.guard():
             x_np = np.random.random(size=(5, 3)).astype(np.float64)
             label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
-            x = paddle.to_variable(x_np)
-            label = paddle.to_variable(label_np)
+            x = paddle.to_tensor(x_np)
+            label = paddle.to_tensor(label_np)
             res = paddle.nn.functional.nll_loss(x, label, reduction='')
             self.assertRaises(
......
@@ -101,9 +101,9 @@ def create_test_case(margin, reduction):
     def run_dynamic_functional_api(self, place):
         paddle.disable_static(place)
-        x = paddle.to_variable(self.x_data)
-        y = paddle.to_variable(self.y_data)
-        label = paddle.to_variable(self.label_data)
+        x = paddle.to_tensor(self.x_data)
+        y = paddle.to_tensor(self.y_data)
+        label = paddle.to_tensor(self.label_data)
         result = paddle.nn.functional.margin_ranking_loss(x, y, label,
                                                           margin, reduction)
@@ -117,9 +117,9 @@ def create_test_case(margin, reduction):
     def run_dynamic_api(self, place):
         paddle.disable_static(place)
-        x = paddle.to_variable(self.x_data)
-        y = paddle.to_variable(self.y_data)
-        label = paddle.to_variable(self.label_data)
+        x = paddle.to_tensor(self.x_data)
+        y = paddle.to_tensor(self.y_data)
+        label = paddle.to_tensor(self.label_data)
         margin_rank_loss = paddle.nn.loss.MarginRankingLoss(
             margin=margin, reduction=reduction)
         result = margin_rank_loss(x, y, label)
@@ -134,9 +134,9 @@ def create_test_case(margin, reduction):
     def run_dynamic_broadcast_api(self, place):
         paddle.disable_static(place)
         label_data = np.random.choice([-1, 1], size=[10]).astype("float64")
-        x = paddle.to_variable(self.x_data)
-        y = paddle.to_variable(self.y_data)
-        label = paddle.to_variable(label_data)
+        x = paddle.to_tensor(self.x_data)
+        y = paddle.to_tensor(self.y_data)
+        label = paddle.to_tensor(label_data)
         margin_rank_loss = paddle.nn.loss.MarginRankingLoss(
             margin=margin, reduction=reduction)
         result = margin_rank_loss(x, y, label)
......
@@ -56,7 +56,7 @@ class TestNNSigmoidAPI(unittest.TestCase):
     def check_dynamic_api(self, place):
         paddle.disable_static(place)
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         mysigmoid = nn.Sigmoid()
         y = mysigmoid(x)
         self.assertTrue(np.allclose(y.numpy(), self.y))
@@ -94,7 +94,7 @@ class TestNNFunctionalSigmoidAPI(unittest.TestCase):
     def check_dynamic_api(self):
         paddle.disable_static()
-        x = paddle.to_variable(self.x)
+        x = paddle.to_tensor(self.x)
         y = functional.sigmoid(x)
         self.assertTrue(np.allclose(y.numpy(), self.y))
......
@@ -76,8 +76,8 @@ class TestNumelOoAPI(unittest.TestCase):
         paddle.disable_static(paddle.CPUPlace())
         input_1 = np.random.random([2, 1, 4, 5]).astype("int32")
         input_2 = np.random.random([1, 4, 5]).astype("int32")
-        x_1 = paddle.to_variable(input_1)
-        x_2 = paddle.to_variable(input_2)
+        x_1 = paddle.to_tensor(input_1)
+        x_2 = paddle.to_tensor(input_2)
         out_1 = paddle.numel(x_1)
         out_2 = paddle.numel(x_2)
         assert (np.array_equal(out_1.numpy().item(0), np.size(input_1)))
......
@@ -63,7 +63,7 @@ class TestOnesLikeImpeartive(unittest.TestCase):
         place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
         paddle.disable_static(place)
-        x = paddle.to_variable(np.ones(shape))
+        x = paddle.to_tensor(np.ones(shape))
         for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
             out = ones_like(x, dtype)
             self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True)
......
@@ -48,8 +48,8 @@ def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
 def test_dygraph(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
     paddle.disable_static()
-    x = paddle.to_variable(x_np)
-    y = paddle.to_variable(y_np)
+    x = paddle.to_tensor(x_np)
+    y = paddle.to_tensor(y_np)
     dist = paddle.nn.layer.distance.PairwiseDistance(
         p=p, epsilon=epsilon, keepdim=keepdim)
     distance = dist(x, y)
......
@@ -72,14 +72,14 @@ class TestSortDygraph(unittest.TestCase):
     def test_api_0(self):
         paddle.disable_static(self.place)
-        var_x = paddle.to_variable(self.input_data)
+        var_x = paddle.to_tensor(self.input_data)
         out = paddle.sort(var_x)
         self.assertEqual((np.sort(self.input_data) == out.numpy()).all(), True)
         paddle.enable_static()
     def test_api_1(self):
         paddle.disable_static(self.place)
-        var_x = paddle.to_variable(self.input_data)
+        var_x = paddle.to_tensor(self.input_data)
         out = paddle.sort(var_x, axis=-1)
         self.assertEqual(
             (np.sort(
......
@@ -230,13 +230,13 @@ class TestTileAPI(unittest.TestCase):
     def test_api(self):
         with fluid.dygraph.guard():
             np_x = np.random.random([12, 14]).astype("float32")
-            x = paddle.to_variable(np_x)
+            x = paddle.to_tensor(np_x)
             positive_2 = np.array([2]).astype("int32")
-            positive_2 = paddle.to_variable(positive_2)
+            positive_2 = paddle.to_tensor(positive_2)
             repeat_times = np.array([2, 3]).astype("int32")
-            repeat_times = paddle.to_variable(repeat_times)
+            repeat_times = paddle.to_tensor(repeat_times)
             out_1 = paddle.tile(x, repeat_times=[2, 3])
             out_2 = paddle.tile(x, repeat_times=[positive_2, 3])
......
@@ -234,23 +234,23 @@ class TestTransformer(unittest.TestCase):
             if cache_dict:
                 if 'k' and 'v' in cache_dict:
                     cache_obj = multi_head_attn.Cache(
-                        paddle.to_variable(cache_dict['k']),
-                        paddle.to_variable(cache_dict['v']))
+                        paddle.to_tensor(cache_dict['k']),
+                        paddle.to_tensor(cache_dict['v']))
                 elif 'static_k' and 'static_v' in cache_dict:
                     cache_obj = multi_head_attn.StaticCache(
-                        paddle.to_variable(cache_dict['static_k']),
-                        paddle.to_variable(cache_dict['static_v']))
+                        paddle.to_tensor(cache_dict['static_k']),
+                        paddle.to_tensor(cache_dict['static_v']))
             if attn_mask is not None:
                 attn_output = multi_head_attn(
-                    paddle.to_variable(query),
-                    paddle.to_variable(key),
-                    paddle.to_variable(value),
-                    paddle.to_variable(attn_mask), cache_obj)
+                    paddle.to_tensor(query),
+                    paddle.to_tensor(key),
+                    paddle.to_tensor(value),
+                    paddle.to_tensor(attn_mask), cache_obj)
             else:
                 attn_output = multi_head_attn(
-                    paddle.to_variable(query),
-                    paddle.to_variable(key),
-                    paddle.to_variable(value), attn_mask, cache_obj)
+                    paddle.to_tensor(query),
+                    paddle.to_tensor(key),
+                    paddle.to_tensor(value), attn_mask, cache_obj)
             attn_output = attn_output[0] if cache_dict else attn_output
             # implementation by numpy
@@ -296,16 +296,16 @@ class TestTransformer(unittest.TestCase):
                 attn_dropout, act_dropout)
             encoder_output = encoder_layer(
-                paddle.to_variable(src),
-                paddle.to_variable(src_mask))  # paddle.to_variable(src_mask))
+                paddle.to_tensor(src),
+                paddle.to_tensor(src_mask))  # paddle.to_tensor(src_mask))
             # 4.numpy:
             # paddle self attention
             self_attn = MultiHeadAttention(
                 d_model, n_head, dropout=attn_dropout)
             attn_output = self_attn(
-                paddle.to_variable(src),
-                paddle.to_variable(src),
-                paddle.to_variable(src), paddle.to_variable(src_mask)).numpy()
+                paddle.to_tensor(src),
+                paddle.to_tensor(src),
+                paddle.to_tensor(src), paddle.to_tensor(src_mask)).numpy()
             src = attn_output + residual
             src_norm = layer_norm(src, d_model, encoder_layer.norm1)
@@ -348,13 +348,13 @@ class TestTransformer(unittest.TestCase):
             cache_objs = None
             if cache:
                 cache_objs = decoder_layer.gen_cache(
-                    paddle.to_variable(memory))
+                    paddle.to_tensor(memory))
             decoder_output = decoder_layer(
-                paddle.to_variable(tgt),
-                paddle.to_variable(memory),
-                paddle.to_variable(tgt_mask),
-                paddle.to_variable(memory_mask), cache_objs)
+                paddle.to_tensor(tgt),
+                paddle.to_tensor(memory),
+                paddle.to_tensor(tgt_mask),
+                paddle.to_tensor(memory_mask), cache_objs)
             decoder_output = decoder_output[0].numpy(
             ) if cache else decoder_output.numpy()
@@ -365,10 +365,10 @@ class TestTransformer(unittest.TestCase):
             self_attn_cache = cache_objs[
                 0] if cache_objs is not None else None
             tgt = self_attn(
-                paddle.to_variable(tgt),
-                paddle.to_variable(tgt),
-                paddle.to_variable(tgt),
-                paddle.to_variable(tgt_mask), self_attn_cache)
+                paddle.to_tensor(tgt),
+                paddle.to_tensor(tgt),
+                paddle.to_tensor(tgt),
+                paddle.to_tensor(tgt_mask), self_attn_cache)
             tgt = tgt[0].numpy() if cache else tgt.numpy()
@@ -380,10 +380,10 @@ class TestTransformer(unittest.TestCase):
             cross_attn_cache = cache_objs[
                 1] if cache_objs is not None else None
             tgt = cross_attn(
-                paddle.to_variable(tgt_norm),
-                paddle.to_variable(memory),
-                paddle.to_variable(memory),
-                paddle.to_variable(memory_mask), cross_attn_cache)
+                paddle.to_tensor(tgt_norm),
+                paddle.to_tensor(memory),
+                paddle.to_tensor(memory),
+                paddle.to_tensor(memory_mask), cross_attn_cache)
             tgt = tgt[0].numpy() if cache else tgt.numpy()
             # postprocess
@@ -416,7 +416,7 @@ class TestTransformer(unittest.TestCase):
         encoder = TransformerEncoder(encoder_layer, num_layers)
         # src, src_mask
         enc_output = encoder(
-            paddle.to_variable(src), paddle.to_variable(src_mask))
+            paddle.to_tensor(src), paddle.to_tensor(src_mask))
 
     def test_decoder(self):
         batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params(
@@ -438,9 +438,9 @@ class TestTransformer(unittest.TestCase):
         decoder = TransformerDecoder(decoder_layer, num_layers)
         output = decoder(
-            paddle.to_variable(tgt),
-            paddle.to_variable(memory),
-            paddle.to_variable(tgt_mask), paddle.to_variable(memory_mask))
+            paddle.to_tensor(tgt),
+            paddle.to_tensor(memory),
+            paddle.to_tensor(tgt_mask), paddle.to_tensor(memory_mask))
 
     def test_transformer(self):
         batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params(
@@ -453,24 +453,24 @@ class TestTransformer(unittest.TestCase):
             n_head,
             dim_feedforward=dim_feedforward,
             dropout=dropout)
-        src = paddle.to_variable(
+        src = paddle.to_tensor(
             np.random.rand(batch_size, source_length, d_model).astype(
                 "float32"))
-        tgt = paddle.to_variable(
+        tgt = paddle.to_tensor(
             np.random.rand(batch_size, target_length, d_model).astype(
                 "float32"))
         src_mask = np.zeros((batch_size, n_head, source_length,
                              source_length)).astype("float32")
         src_mask[0][0][0][0] = -np.inf
-        src_mask = paddle.to_variable(src_mask)
+        src_mask = paddle.to_tensor(src_mask)
         tgt_mask = np.zeros((batch_size, n_head, target_length,
                              target_length)).astype("float32")
         tgt_mask[0][0][0][0] = -1e9
         memory_mask = np.zeros((batch_size, n_head, target_length,
                                 source_length)).astype("float32")
         memory_mask[0][0][0][0] = -1e9
-        tgt_mask, memory_mask = paddle.to_variable(
-            tgt_mask), paddle.to_variable(memory_mask)
+        tgt_mask, memory_mask = paddle.to_tensor(
+            tgt_mask), paddle.to_tensor(memory_mask)
         trans_output = transformer(src, tgt, src_mask, tgt_mask,
                                    memory_mask)
......
@@ -424,10 +424,10 @@ class TestCTCLossAPICase(unittest.TestCase):
         loss_np = ctc.forward()
         paddle.disable_static()
-        softmax = paddle.to_variable(logits)
-        labels = paddle.to_variable(labels)
-        logits_length = paddle.to_variable(self.logits_length)
-        labels_length = paddle.to_variable(self.labels_length)
+        softmax = paddle.to_tensor(logits)
+        labels = paddle.to_tensor(labels)
+        logits_length = paddle.to_tensor(self.logits_length)
+        labels_length = paddle.to_tensor(self.labels_length)
         loss_pd_mean = F.ctc_loss(
             softmax,
             labels,
@@ -477,10 +477,10 @@ class TestCTCLossAPICase(unittest.TestCase):
         loss_np = ctc.forward()
         paddle.disable_static()
-        softmax = paddle.to_variable(logits)
-        labels = paddle.to_variable(labels)
-        logits_length = paddle.to_variable(self.logits_length)
-        labels_length = paddle.to_variable(self.labels_length)
+        softmax = paddle.to_tensor(logits)
+        labels = paddle.to_tensor(labels)
+        logits_length = paddle.to_tensor(self.logits_length)
+        labels_length = paddle.to_tensor(self.labels_length)
         loss_pd = paddle.nn.CTCLoss(self.blank, 'none')(
             softmax, labels, logits_length, labels_length)
......
@@ -53,7 +53,7 @@ __all__ = [
     'shard_index',
     'slice',
     'split',
-    'chunk'
+    'chunk',
     'squeeze',
     'stack',
     'strided_slice',
......