Unverified commit 1e1ae5c5, authored by Zhou Wei, committed by GitHub

Make the Bind Method of Tensor more automatic (#27270)

* Makes the Bind Method more intelligent

* Makes the Bind Method more intelligent

* fix unittest

* fix unittest

* fix conflict
Parent 5508c787
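As context for the diff below, here is a minimal sketch of what "more automatic" means in this change: instead of hand-maintaining the common_methods list, the patch binds every public function exported by the paddle.tensor submodules onto the dygraph Tensor class (core.VarBase). The snippet simply restates the binding loop that appears later in this diff, assuming the Paddle 2.0-beta module layout shown there.

    import paddle
    import paddle.tensor
    from paddle.fluid import core

    # Collect every public Tensor function from the paddle.tensor submodules.
    tensor_methods = (paddle.tensor.linalg.__all__ +
                      paddle.tensor.math.__all__ +
                      paddle.tensor.logic.__all__ +
                      paddle.tensor.manipulation.__all__ +
                      paddle.tensor.search.__all__ +
                      paddle.tensor.stat.__all__ +
                      paddle.tensor.attribute.__all__)

    for method_name in tensor_methods:
        # Skip names that already exist on VarBase (operators, custom bindings).
        if hasattr(core.VarBase, method_name):
            continue
        method_impl = getattr(paddle.tensor, method_name, None)
        if method_impl:
            setattr(core.VarBase, method_name, method_impl)

With this in place, any new public function in paddle.tensor shows up as a Tensor method (for example t.abs() or t.matmul(y)) without editing a hand-written binding list.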
......@@ -649,61 +649,47 @@ void BindImperative(py::module *m_ptr) {
return self.NewVarBase(tensor.place(), false);
},
py::return_value_policy::copy, R"DOC(
**Notes**:
**This API is ONLY available in Dygraph mode**
Returns a new Variable, detached from the current graph.
Returns:
( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.
Returns a new Tensor, detached from the current graph.
Returns: The detached Tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph import Linear
import numpy as np
import paddle
paddle.disable_static()
data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')
with fluid.dygraph.guard():
linear = Linear(32, 64)
data = to_variable(data)
data = paddle.uniform(shape=[30, 10, 32], min=-1.0, max=1.0)
x = linear(data)
y = x.detach()
)DOC")
.def("clear_gradient", &imperative::VarBase::ClearGradient, R"DOC(
**Notes**:
**1. This API is ONLY available in Dygraph mode**
**2. Use it only when the Variable has a gradient; normally we use this for Parameters, since other temporary Variables will be deleted by Python's GC**
Only works for a Tensor that has a gradient; normally we use this for Parameters, since other temporary Tensors don't have gradients.
Clear (set to ``0``) the gradient of the current Variable.
The gradient of the current Tensor will be set to ``0`` .
Returns: None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
import paddle
paddle.disable_static()
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
inputs2 = []
inputs = []
for _ in range(10):
tmp = fluid.dygraph.base.to_variable(x)
tmp = paddle.ones([2, 2])
tmp.stop_gradient=False
inputs2.append(tmp)
ret2 = fluid.layers.sums(inputs2)
loss2 = fluid.layers.reduce_sum(ret2)
loss2.backward()
print(loss2.gradient())
loss2.clear_gradient()
print("After clear {}".format(loss2.gradient()))
inputs.append(tmp)
ret = paddle.sums(inputs)
loss = paddle.reduce_sum(ret)
loss.backward()
print("Before clear_gradient {}".format(loss.grad))
loss.clear_gradient()
print("After clear_gradient {}".format(loss.grad))
)DOC")
.def("_run_backward",
[](imperative::VarBase &self, const imperative::Tracer &tracer,
......
......@@ -17,8 +17,7 @@ from __future__ import print_function
from .. import core
from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from ..layers import common_methods
from . import to_variable, no_grad
from . import no_grad
import numpy as np
import six
......@@ -53,47 +52,25 @@ def monkey_patch_math_varbase():
def astype(self, dtype):
"""
**Notes**:
**The variable must be a** :ref:`api_fluid_Tensor`
Cast a variable to a specified data type.
Cast a Tensor to a specified data type.
Args:
self(Variable): The source variable
dtype: The target data type
dtype: The target data type.
Returns:
Variable: Variable with new dtype
Tensor: a new Tensor with the target dtype
Examples:
In Static Graph Mode:
.. code-block:: python
import paddle.fluid as fluid
startup_prog = fluid.Program()
main_prog = fluid.Program()
with fluid.program_guard(startup_prog, main_prog):
original_variable = fluid.data(name = "new_variable", shape=[2,2], dtype='float32')
new_variable = original_variable.astype('int64')
print("new var's dtype is: {}".format(new_variable.dtype))
In Dygraph Mode:
.. code-block:: python
import paddle.fluid as fluid
import paddle
import numpy as np
x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard():
original_variable = fluid.dygraph.to_variable(x)
print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype))
new_variable = original_variable.astype('int64')
print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype))
original_tensor = paddle.ones([2, 2])
print("original tensor's dtype is: {}".format(original_tensor.dtype))
new_tensor = original_tensor.astype('float32')
print("new tensor's dtype is: {}".format(new_tensor.dtype))
"""
if not isinstance(dtype, core.VarDesc.VarType):
......@@ -147,6 +124,10 @@ def monkey_patch_math_varbase():
def _ndim_(var):
return len(var.shape)
@property
def _size_(var):
return np.prod(var.shape)
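# Behavior note: 'size' is now registered as a property that returns the
# total number of elements (np.prod of the shape); e.g. a [2, 3] Tensor
# reports x.size == 6, whereas the old binding returned the shape list.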
def _scalar_add_(var, value):
return _scalar_elementwise_op_(var, 1.0, value)
......@@ -208,7 +189,6 @@ def monkey_patch_math_varbase():
__impl__.__doc__ = """
{0}
Args:
self(Tensor): left hand Tensor
other_var(Tensor|float|int): right hand Tensor
Returns:
......@@ -217,23 +197,7 @@ def monkey_patch_math_varbase():
__impl__.__name__ = method_name
return __impl__
# Todo(zhouwei): implement dygraph template to adapt to any function, receive('op_type', 'arg_template')
# Such as _method_creator_('addmm', 'x, y, alpha=1.0, beta=1.0, name=None'). It can reduce call time.
def _method_creator_(op_type, arg_template=None):
def __impl__(self):
op = getattr(core.ops, op_type)
return op(self)
__impl__.__doc__ = """
See paddle.{}""".format(op_type)
__impl__.__name__ = op_type
return __impl__
varbase_methods = [
# Type1: From a custom function or lambda
## b=-a
('__neg__', _neg_),
('__float__', _float_),
('__long__', _long_),
......@@ -244,8 +208,7 @@ def monkey_patch_math_varbase():
('dim', lambda x: len(x.shape)),
('ndimension', lambda x: len(x.shape)),
('ndim', _ndim_),
('size', lambda x: x.shape),
# Type2: From a template that creates core.ops calls automatically. It's recommended.
('size', _size_),
('__add__',
_binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
## a+b == b+a. Do not need to reverse explicitly
......@@ -283,31 +246,7 @@ def monkey_patch_math_varbase():
('__le__', _binary_creator_('__le__', 'less_equal', False, None)),
('__gt__', _binary_creator_('__gt__', 'greater_than', False, None)),
('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
('__array_ufunc__', None),
('sigmoid', _method_creator_('sigmoid', 'name=None')),
('log_sigmoid', _method_creator_('logsigmoid', 'name=None')),
('exp', _method_creator_('exp', 'name=None')),
('tanh', _method_creator_('tanh', 'name=None')),
('atan', _method_creator_('atan', 'name=None')),
('tanh_shrink', _method_creator_('tanh_shrink', 'name=None')),
('sqrt', _method_creator_('sqrt', 'name=None')),
('rsqrt', _method_creator_('rsqrt', 'name=None')),
('abs', _method_creator_('abs', 'name=None')),
('ceil', _method_creator_('ceil', 'name=None')),
('floor', _method_creator_('floor', 'name=None')),
('cos', _method_creator_('cos', 'name=None')),
('acos', _method_creator_('acos', 'name=None')),
('asin', _method_creator_('asin', 'name=None')),
('sin', _method_creator_('sin', 'name=None')),
('sinh', _method_creator_('sinh', 'name=None')),
('cosh', _method_creator_('cosh', 'name=None')),
('round', _method_creator_('round', 'name=None')),
('reciprocal', _method_creator_('reciprocal', 'name=None')),
('square', _method_creator_('square', 'name=None')),
('softplus', _method_creator_('softplus', 'name=None')),
('softsign', _method_creator_('softsign', 'name=None')),
# Type3: From module 'paddle.tensor' by default.
# It's not a good way, because it will increase call time.
('__array_ufunc__', None)
]
global _already_patch_varbase
......@@ -318,7 +257,15 @@ def monkey_patch_math_varbase():
setattr(core.VarBase, method_name, method_impl)
else:
import paddle.tensor
for method_name in common_methods:
# Tensor methods from module paddle.tensor
tensor_methods = paddle.tensor.linalg.__all__ + \
paddle.tensor.math.__all__ + \
paddle.tensor.logic.__all__ + \
paddle.tensor.manipulation.__all__ + \
paddle.tensor.search.__all__ + \
paddle.tensor.stat.__all__ + \
paddle.tensor.attribute.__all__
for method_name in tensor_methods:
if hasattr(core.VarBase, method_name): continue
method_impl = getattr(paddle.tensor, method_name, None)
if method_impl: setattr(core.VarBase, method_name, method_impl)
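# Illustrative effect of the loop above: a dygraph Tensor t now offers
# methods such as t.abs(), t.matmul(y) or t.mean() taken directly from
# paddle.tensor; only names exported in the __all__ lists are bound.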
......
......@@ -54,29 +54,6 @@ EXPRESSION_MAP = {
"__ge__": "A >= B"
}
# Methods for Tensor from paddle.tensor
# Edit it when paddle.tensor gains a new Tensor operation method
common_methods = [
'exp', 'tanh', 'atan', 'sqrt', 'rsqrt', 'abs', 'ceil', 'floor', 'cos',
'acos', 'asin', 'sin', 'sinh', 'cosh', 'round', 'reciprocal', 'square',
'rank', 'matmul', 'dot', 'norm', 'transpose', 'dist', 't', 'cross',
'cholesky', 'bmm', 'histogram', 'equal', 'greater_equal', 'greater_than',
'is_empty', 'isfinite', 'less_equal', 'less_than', 'logical_and',
'logical_not', 'logical_or', 'logical_xor', 'not_equal', 'reduce_all',
'reduce_any', 'allclose', 'equal_all', 'cast', 'expand', 'expand_as',
'tile', 'flatten', 'gather', 'gather_nd', 'reshape', 'reverse', 'scatter',
'scatter_nd_add', 'scatter_nd', 'shard_index', 'slice', 'split', 'squeeze',
'strided_slice', 'unique', 'unique_with_counts', 'unsqueeze', 'flip',
'unbind', 'roll', 'cumsum', 'increment', 'log', 'pow', 'reciprocal',
'round', 'rsqrt', 'scale', 'sign', 'stanh', 'sum', 'reduce_prod', 'max',
'min', 'mm', 'div', 'multiply', 'add', 'logsumexp', 'log1p', 'erf',
'addcmul', 'addmm', 'clamp', 'trace', 'kron', 'argmax', 'argmin', 'argsort',
'has_inf', 'has_nan', 'topk', 'index_select', 'nonzero', 'sort',
'index_sample', 'mean', 'std', 'var', 'elementwise_add', 'elementwise_div',
'elementwise_floordiv', 'elementwise_mod', 'elementwise_pow',
'elementwise_sub'
]
_already_patch_variable = False
......@@ -372,7 +349,14 @@ def monkey_patch_variable():
setattr(Variable, method_name, method_impl)
else:
import paddle.tensor
for method_name in common_methods:
variable_methods = paddle.tensor.linalg.__all__ + \
paddle.tensor.math.__all__ + \
paddle.tensor.logic.__all__ + \
paddle.tensor.manipulation.__all__ + \
paddle.tensor.search.__all__ + \
paddle.tensor.stat.__all__ + \
paddle.tensor.attribute.__all__
for method_name in variable_methods:
if hasattr(Variable, method_name): continue
method_impl = getattr(paddle.tensor, method_name, None)
if method_impl: setattr(Variable, method_name, method_impl)
......
......@@ -47,7 +47,7 @@ class TestSimpleRNNCell(unittest.TestCase):
prev_h = np.random.randn(4, 32)
y1, h1 = rnn1(x, prev_h)
y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
......@@ -57,7 +57,7 @@ class TestSimpleRNNCell(unittest.TestCase):
x = np.random.randn(4, 16)
y1, h1 = rnn1(x)
y2, h2 = rnn2(paddle.to_variable(x))
y2, h2 = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def runTest(self):
......@@ -90,7 +90,7 @@ class TestGRUCell(unittest.TestCase):
prev_h = np.random.randn(4, 32)
y1, h1 = rnn1(x, prev_h)
y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def test_with_zero_state(self):
......@@ -100,7 +100,7 @@ class TestGRUCell(unittest.TestCase):
x = np.random.randn(4, 16)
y1, h1 = rnn1(x)
y2, h2 = rnn2(paddle.to_variable(x))
y2, h2 = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def runTest(self):
......@@ -134,8 +134,8 @@ class TestLSTMCell(unittest.TestCase):
y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
y2, (h2, c2) = rnn2(
paddle.to_variable(x),
(paddle.to_variable(prev_h), paddle.to_variable(prev_c)))
paddle.to_tensor(x),
(paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)))
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -146,7 +146,7 @@ class TestLSTMCell(unittest.TestCase):
x = np.random.randn(4, 16)
y1, (h1, c1) = rnn1(x)
y2, (h2, c2) = rnn2(paddle.to_variable(x))
y2, (h2, c2) = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
......
......@@ -53,7 +53,7 @@ class TestSimpleRNN(unittest.TestCase):
prev_h = np.random.randn(2 * self.num_directions, 4, 32)
y1, h1 = rnn1(x, prev_h)
y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -66,7 +66,7 @@ class TestSimpleRNN(unittest.TestCase):
x = np.transpose(x, [1, 0, 2])
y1, h1 = rnn1(x)
y2, h2 = rnn2(paddle.to_variable(x))
y2, h2 = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -81,11 +81,11 @@ class TestSimpleRNN(unittest.TestCase):
y1, h1 = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_variable(sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, h2 = rnn2(paddle.to_variable(x), sequence_length=seq_len)
y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
y2 = paddle.multiply(y2, mask, axis=0)
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -133,7 +133,7 @@ class TestGRU(unittest.TestCase):
prev_h = np.random.randn(2 * self.num_directions, 4, 32)
y1, h1 = rnn1(x, prev_h)
y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -146,7 +146,7 @@ class TestGRU(unittest.TestCase):
x = np.transpose(x, [1, 0, 2])
y1, h1 = rnn1(x)
y2, h2 = rnn2(paddle.to_variable(x))
y2, h2 = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -161,11 +161,11 @@ class TestGRU(unittest.TestCase):
y1, h1 = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_variable(sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, h2 = rnn2(paddle.to_variable(x), sequence_length=seq_len)
y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
y2 = paddle.multiply(y2, mask, axis=0)
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -209,8 +209,8 @@ class TestLSTM(unittest.TestCase):
y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
y2, (h2, c2) = rnn2(
paddle.to_variable(x),
(paddle.to_variable(prev_h), paddle.to_variable(prev_c)))
paddle.to_tensor(x),
(paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -224,7 +224,7 @@ class TestLSTM(unittest.TestCase):
x = np.transpose(x, [1, 0, 2])
y1, (h1, c1) = rnn1(x)
y2, (h2, c2) = rnn2(paddle.to_variable(x))
y2, (h2, c2) = rnn2(paddle.to_tensor(x))
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)
......@@ -240,11 +240,11 @@ class TestLSTM(unittest.TestCase):
y1, (h1, c1) = rnn1(x, sequence_length=sequence_length)
seq_len = paddle.to_variable(sequence_length)
seq_len = paddle.to_tensor(sequence_length)
mask = sequence_mask(seq_len, dtype=paddle.get_default_dtype())
if self.time_major:
mask = paddle.transpose(mask, [1, 0])
y2, (h2, c2) = rnn2(paddle.to_variable(x), sequence_length=seq_len)
y2, (h2, c2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
y2 = paddle.multiply(y2, mask, axis=0)
np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
......
......@@ -19,6 +19,7 @@ import paddle
import paddle.fluid as fluid
import numpy as np
import six
import inspect
class TestMathOpPatchesVarBase(unittest.TestCase):
......@@ -302,21 +303,13 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertEqual(x.dim(), 2)
self.assertEqual(x.ndimension(), 2)
self.assertEqual(x.ndim, 2)
self.assertEqual(x.size(), [2, 3])
self.assertTrue(
np.array_equal(x.sigmoid().numpy(), fluid.layers.sigmoid(x).numpy(
)))
self.assertTrue(
np.array_equal(x.log_sigmoid().numpy(),
fluid.layers.logsigmoid(x).numpy()))
self.assertEqual(x.size, 6)
self.assertEqual(x.numel(), 6)
self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
self.assertTrue(
np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()))
self.assertTrue(
np.array_equal(x.atan().numpy(), paddle.atan(x).numpy()))
self.assertTrue(
np.array_equal(x.tanh_shrink().numpy(),
fluid.layers.tanh_shrink(x).numpy()))
self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
m = x.abs()
self.assertTrue(
......@@ -344,12 +337,6 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
)))
self.assertTrue(
np.array_equal(x.square().numpy(), paddle.square(x).numpy()))
self.assertTrue(
np.array_equal(x.softplus().numpy(),
fluid.layers.softplus(x).numpy()))
self.assertTrue(
np.array_equal(x.softsign().numpy(),
fluid.layers.softsign(x).numpy()))
self.assertTrue(
np.array_equal(x.rank().numpy(), paddle.rank(x).numpy()))
self.assertTrue(
......@@ -422,6 +409,8 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))
# 2. Binary operation
self.assertTrue(
np.array_equal(x.divide(y).numpy(), paddle.divide(x, y).numpy()))
self.assertTrue(
np.array_equal(
x.matmul(y, True, False).numpy(),
......@@ -501,6 +490,73 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
self.assertTrue(
np.array_equal(
x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
a = paddle.to_tensor([[1, 2], [3, 4]])
b = paddle.to_tensor([[4, 3], [2, 1]])
self.assertTrue(
np.array_equal(
x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))
self.assertTrue(inspect.ismethod(a.dot))
self.assertTrue(inspect.ismethod(a.elementwise_add))
self.assertTrue(inspect.ismethod(a.elementwise_div))
self.assertTrue(inspect.ismethod(a.elementwise_floordiv))
self.assertTrue(inspect.ismethod(a.elementwise_mod))
self.assertTrue(inspect.ismethod(a.elementwise_sub))
self.assertTrue(inspect.ismethod(a.logsumexp))
self.assertTrue(inspect.ismethod(a.multiplex))
self.assertTrue(inspect.ismethod(a.prod))
self.assertTrue(inspect.ismethod(a.reduce_max))
self.assertTrue(inspect.ismethod(a.reduce_min))
self.assertTrue(inspect.ismethod(a.reduce_prod))
self.assertTrue(inspect.ismethod(a.reduce_sum))
self.assertTrue(inspect.ismethod(a.scale))
self.assertTrue(inspect.ismethod(a.stanh))
self.assertTrue(inspect.ismethod(a.sums))
self.assertTrue(inspect.ismethod(a.elementwise_sum))
self.assertTrue(inspect.ismethod(a.max))
self.assertTrue(inspect.ismethod(a.maximum))
self.assertTrue(inspect.ismethod(a.min))
self.assertTrue(inspect.ismethod(a.minimum))
self.assertTrue(inspect.ismethod(a.floor_divide))
self.assertTrue(inspect.ismethod(a.remainder))
self.assertTrue(inspect.ismethod(a.floor_mod))
self.assertTrue(inspect.ismethod(a.multiply))
self.assertTrue(inspect.ismethod(a.logsumexp))
self.assertTrue(inspect.ismethod(a.inverse))
self.assertTrue(inspect.ismethod(a.log1p))
self.assertTrue(inspect.ismethod(a.erf))
self.assertTrue(inspect.ismethod(a.addcmul))
self.assertTrue(inspect.ismethod(a.addmm))
self.assertTrue(inspect.ismethod(a.clip))
self.assertTrue(inspect.ismethod(a.trace))
self.assertTrue(inspect.ismethod(a.kron))
self.assertTrue(inspect.ismethod(a.isinf))
self.assertTrue(inspect.ismethod(a.isnan))
self.assertTrue(inspect.ismethod(a.concat))
self.assertTrue(inspect.ismethod(a.broadcast_to))
self.assertTrue(inspect.ismethod(a.scatter_nd_add))
self.assertTrue(inspect.ismethod(a.scatter_nd))
self.assertTrue(inspect.ismethod(a.shard_index))
self.assertTrue(inspect.ismethod(a.chunk))
self.assertTrue(inspect.ismethod(a.stack))
self.assertTrue(inspect.ismethod(a.strided_slice))
self.assertTrue(inspect.ismethod(a.unsqueeze))
self.assertTrue(inspect.ismethod(a.unstack))
self.assertTrue(inspect.ismethod(a.argmax))
self.assertTrue(inspect.ismethod(a.argmin))
self.assertTrue(inspect.ismethod(a.argsort))
self.assertTrue(inspect.ismethod(a.has_inf))
self.assertTrue(inspect.ismethod(a.has_nan))
self.assertTrue(inspect.ismethod(a.masked_select))
self.assertTrue(inspect.ismethod(a.topk))
self.assertTrue(inspect.ismethod(a.index_select))
self.assertTrue(inspect.ismethod(a.nonzero))
self.assertTrue(inspect.ismethod(a.sort))
self.assertTrue(inspect.ismethod(a.index_sample))
self.assertTrue(inspect.ismethod(a.mean))
self.assertTrue(inspect.ismethod(a.reduce_mean))
self.assertTrue(inspect.ismethod(a.std))
self.assertTrue(inspect.ismethod(a.numel))
if __name__ == '__main__':
......
......@@ -61,8 +61,8 @@ class ApiMinimumTest(unittest.TestCase):
def test_dynamic_api(self):
paddle.disable_static()
np_x = np.array([10, 10]).astype('float64')
x = paddle.to_variable(self.input_x)
y = paddle.to_variable(self.input_y)
x = paddle.to_tensor(self.input_x)
y = paddle.to_tensor(self.input_y)
z = paddle.minimum(x, y)
np_z = z.numpy()
z_expected = np.array(np.minimum(self.input_x, self.input_y))
......@@ -73,8 +73,8 @@ class ApiMinimumTest(unittest.TestCase):
np_x = np.random.rand(5, 4, 3, 2).astype("float64")
np_y = np.random.rand(4, 3).astype("float64")
x = paddle.to_variable(self.input_x)
y = paddle.to_variable(self.input_y)
x = paddle.to_tensor(self.input_x)
y = paddle.to_tensor(self.input_y)
result_1 = paddle.minimum(x, y, axis=1)
result_2 = paddle.minimum(x, y, axis=-2)
self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
......@@ -205,8 +205,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
paddle.disable_static()
dy_ret = paddle.nn.functional.mse_loss(
paddle.to_variable(input_np),
paddle.to_variable(target_np), 'mean')
paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'mean')
dy_result = dy_ret.numpy()
sub = input_np - target_np
......@@ -240,8 +239,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
paddle.disable_static()
dy_ret = paddle.nn.functional.mse_loss(
paddle.to_variable(input_np),
paddle.to_variable(target_np), 'sum')
paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'sum')
dy_result = dy_ret.numpy()
sub = input_np - target_np
......@@ -275,8 +273,7 @@ class TestNNFunctionalMseLoss(unittest.TestCase):
paddle.disable_static()
dy_ret = paddle.nn.functional.mse_loss(
paddle.to_variable(input_np),
paddle.to_variable(target_np), 'none')
paddle.to_tensor(input_np), paddle.to_tensor(target_np), 'none')
dy_result = dy_ret.numpy()
sub = input_np - target_np
......
......@@ -909,8 +909,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
with fluid.dygraph.guard():
x_np = np.random.random(size=(5, )).astype(np.float64)
label_np = np.random.randint(0, 10, size=(5, )).astype(np.int64)
x = paddle.to_variable(x_np)
label = paddle.to_variable(label_np)
x = paddle.to_tensor(x_np)
label = paddle.to_tensor(label_np)
nll_loss = paddle.nn.loss.NLLLoss()
res = nll_loss(x, label)
......@@ -933,8 +933,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
with fluid.dygraph.guard():
x_np = np.random.random(size=(5, 3)).astype(np.float64)
label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
x = paddle.to_variable(x_np)
label = paddle.to_variable(label_np)
x = paddle.to_tensor(x_np)
label = paddle.to_tensor(label_np)
nll_loss = paddle.nn.loss.NLLLoss(reduction='')
res = nll_loss(x, label)
......@@ -957,8 +957,8 @@ class TestNLLLossInvalidArgs(unittest.TestCase):
with fluid.dygraph.guard():
x_np = np.random.random(size=(5, 3)).astype(np.float64)
label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
x = paddle.to_variable(x_np)
label = paddle.to_variable(label_np)
x = paddle.to_tensor(x_np)
label = paddle.to_tensor(label_np)
res = paddle.nn.functional.nll_loss(x, label, reduction='')
self.assertRaises(
......
......@@ -101,9 +101,9 @@ def create_test_case(margin, reduction):
def run_dynamic_functional_api(self, place):
paddle.disable_static(place)
x = paddle.to_variable(self.x_data)
y = paddle.to_variable(self.y_data)
label = paddle.to_variable(self.label_data)
x = paddle.to_tensor(self.x_data)
y = paddle.to_tensor(self.y_data)
label = paddle.to_tensor(self.label_data)
result = paddle.nn.functional.margin_ranking_loss(x, y, label,
margin, reduction)
......@@ -117,9 +117,9 @@ def create_test_case(margin, reduction):
def run_dynamic_api(self, place):
paddle.disable_static(place)
x = paddle.to_variable(self.x_data)
y = paddle.to_variable(self.y_data)
label = paddle.to_variable(self.label_data)
x = paddle.to_tensor(self.x_data)
y = paddle.to_tensor(self.y_data)
label = paddle.to_tensor(self.label_data)
margin_rank_loss = paddle.nn.loss.MarginRankingLoss(
margin=margin, reduction=reduction)
result = margin_rank_loss(x, y, label)
......@@ -134,9 +134,9 @@ def create_test_case(margin, reduction):
def run_dynamic_broadcast_api(self, place):
paddle.disable_static(place)
label_data = np.random.choice([-1, 1], size=[10]).astype("float64")
x = paddle.to_variable(self.x_data)
y = paddle.to_variable(self.y_data)
label = paddle.to_variable(label_data)
x = paddle.to_tensor(self.x_data)
y = paddle.to_tensor(self.y_data)
label = paddle.to_tensor(label_data)
margin_rank_loss = paddle.nn.loss.MarginRankingLoss(
margin=margin, reduction=reduction)
result = margin_rank_loss(x, y, label)
......
......@@ -56,7 +56,7 @@ class TestNNSigmoidAPI(unittest.TestCase):
def check_dynamic_api(self, place):
paddle.disable_static(place)
x = paddle.to_variable(self.x)
x = paddle.to_tensor(self.x)
mysigmoid = nn.Sigmoid()
y = mysigmoid(x)
self.assertTrue(np.allclose(y.numpy(), self.y))
......@@ -94,7 +94,7 @@ class TestNNFunctionalSigmoidAPI(unittest.TestCase):
def check_dynamic_api(self):
paddle.disable_static()
x = paddle.to_variable(self.x)
x = paddle.to_tensor(self.x)
y = functional.sigmoid(x)
self.assertTrue(np.allclose(y.numpy(), self.y))
......
......@@ -76,8 +76,8 @@ class TestNumelOoAPI(unittest.TestCase):
paddle.disable_static(paddle.CPUPlace())
input_1 = np.random.random([2, 1, 4, 5]).astype("int32")
input_2 = np.random.random([1, 4, 5]).astype("int32")
x_1 = paddle.to_variable(input_1)
x_2 = paddle.to_variable(input_2)
x_1 = paddle.to_tensor(input_1)
x_2 = paddle.to_tensor(input_2)
out_1 = paddle.numel(x_1)
out_2 = paddle.numel(x_2)
assert (np.array_equal(out_1.numpy().item(0), np.size(input_1)))
......
......@@ -63,7 +63,7 @@ class TestOnesLikeImpeartive(unittest.TestCase):
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
paddle.disable_static(place)
x = paddle.to_variable(np.ones(shape))
x = paddle.to_tensor(np.ones(shape))
for dtype in [np.bool, np.float32, np.float64, np.int32, np.int64]:
out = ones_like(x, dtype)
self.assertEqual((out.numpy() == np.ones(shape, dtype)).all(), True)
......
......@@ -48,8 +48,8 @@ def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
def test_dygraph(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
paddle.disable_static()
x = paddle.to_variable(x_np)
y = paddle.to_variable(y_np)
x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)
dist = paddle.nn.layer.distance.PairwiseDistance(
p=p, epsilon=epsilon, keepdim=keepdim)
distance = dist(x, y)
......
......@@ -72,14 +72,14 @@ class TestSortDygraph(unittest.TestCase):
def test_api_0(self):
paddle.disable_static(self.place)
var_x = paddle.to_variable(self.input_data)
var_x = paddle.to_tensor(self.input_data)
out = paddle.sort(var_x)
self.assertEqual((np.sort(self.input_data) == out.numpy()).all(), True)
paddle.enable_static()
def test_api_1(self):
paddle.disable_static(self.place)
var_x = paddle.to_variable(self.input_data)
var_x = paddle.to_tensor(self.input_data)
out = paddle.sort(var_x, axis=-1)
self.assertEqual(
(np.sort(
......
......@@ -230,13 +230,13 @@ class TestTileAPI(unittest.TestCase):
def test_api(self):
with fluid.dygraph.guard():
np_x = np.random.random([12, 14]).astype("float32")
x = paddle.to_variable(np_x)
x = paddle.to_tensor(np_x)
positive_2 = np.array([2]).astype("int32")
positive_2 = paddle.to_variable(positive_2)
positive_2 = paddle.to_tensor(positive_2)
repeat_times = np.array([2, 3]).astype("int32")
repeat_times = paddle.to_variable(repeat_times)
repeat_times = paddle.to_tensor(repeat_times)
out_1 = paddle.tile(x, repeat_times=[2, 3])
out_2 = paddle.tile(x, repeat_times=[positive_2, 3])
......
......@@ -234,23 +234,23 @@ class TestTransformer(unittest.TestCase):
if cache_dict:
if 'k' in cache_dict and 'v' in cache_dict:
cache_obj = multi_head_attn.Cache(
paddle.to_variable(cache_dict['k']),
paddle.to_variable(cache_dict['v']))
paddle.to_tensor(cache_dict['k']),
paddle.to_tensor(cache_dict['v']))
elif 'static_k' in cache_dict and 'static_v' in cache_dict:
cache_obj = multi_head_attn.StaticCache(
paddle.to_variable(cache_dict['static_k']),
paddle.to_variable(cache_dict['static_v']))
paddle.to_tensor(cache_dict['static_k']),
paddle.to_tensor(cache_dict['static_v']))
if attn_mask is not None:
attn_output = multi_head_attn(
paddle.to_variable(query),
paddle.to_variable(key),
paddle.to_variable(value),
paddle.to_variable(attn_mask), cache_obj)
paddle.to_tensor(query),
paddle.to_tensor(key),
paddle.to_tensor(value),
paddle.to_tensor(attn_mask), cache_obj)
else:
attn_output = multi_head_attn(
paddle.to_variable(query),
paddle.to_variable(key),
paddle.to_variable(value), attn_mask, cache_obj)
paddle.to_tensor(query),
paddle.to_tensor(key),
paddle.to_tensor(value), attn_mask, cache_obj)
attn_output = attn_output[0] if cache_dict else attn_output
# implementation by numpy
......@@ -296,16 +296,16 @@ class TestTransformer(unittest.TestCase):
attn_dropout, act_dropout)
encoder_output = encoder_layer(
paddle.to_variable(src),
paddle.to_variable(src_mask)) # paddle.to_variable(src_mask))
paddle.to_tensor(src),
paddle.to_tensor(src_mask)) # paddle.to_tensor(src_mask))
# 4.numpy:
# paddle self attention
self_attn = MultiHeadAttention(
d_model, n_head, dropout=attn_dropout)
attn_output = self_attn(
paddle.to_variable(src),
paddle.to_variable(src),
paddle.to_variable(src), paddle.to_variable(src_mask)).numpy()
paddle.to_tensor(src),
paddle.to_tensor(src),
paddle.to_tensor(src), paddle.to_tensor(src_mask)).numpy()
src = attn_output + residual
src_norm = layer_norm(src, d_model, encoder_layer.norm1)
......@@ -348,13 +348,13 @@ class TestTransformer(unittest.TestCase):
cache_objs = None
if cache:
cache_objs = decoder_layer.gen_cache(
paddle.to_variable(memory))
paddle.to_tensor(memory))
decoder_output = decoder_layer(
paddle.to_variable(tgt),
paddle.to_variable(memory),
paddle.to_variable(tgt_mask),
paddle.to_variable(memory_mask), cache_objs)
paddle.to_tensor(tgt),
paddle.to_tensor(memory),
paddle.to_tensor(tgt_mask),
paddle.to_tensor(memory_mask), cache_objs)
decoder_output = decoder_output[0].numpy(
) if cache else decoder_output.numpy()
......@@ -365,10 +365,10 @@ class TestTransformer(unittest.TestCase):
self_attn_cache = cache_objs[
0] if cache_objs is not None else None
tgt = self_attn(
paddle.to_variable(tgt),
paddle.to_variable(tgt),
paddle.to_variable(tgt),
paddle.to_variable(tgt_mask), self_attn_cache)
paddle.to_tensor(tgt),
paddle.to_tensor(tgt),
paddle.to_tensor(tgt),
paddle.to_tensor(tgt_mask), self_attn_cache)
tgt = tgt[0].numpy() if cache else tgt.numpy()
......@@ -380,10 +380,10 @@ class TestTransformer(unittest.TestCase):
cross_attn_cache = cache_objs[
1] if cache_objs is not None else None
tgt = cross_attn(
paddle.to_variable(tgt_norm),
paddle.to_variable(memory),
paddle.to_variable(memory),
paddle.to_variable(memory_mask), cross_attn_cache)
paddle.to_tensor(tgt_norm),
paddle.to_tensor(memory),
paddle.to_tensor(memory),
paddle.to_tensor(memory_mask), cross_attn_cache)
tgt = tgt[0].numpy() if cache else tgt.numpy()
# postprocess
......@@ -416,7 +416,7 @@ class TestTransformer(unittest.TestCase):
encoder = TransformerEncoder(encoder_layer, num_layers)
# src, src_mask
enc_output = encoder(
paddle.to_variable(src), paddle.to_variable(src_mask))
paddle.to_tensor(src), paddle.to_tensor(src_mask))
def test_decoder(self):
batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params(
......@@ -438,9 +438,9 @@ class TestTransformer(unittest.TestCase):
decoder = TransformerDecoder(decoder_layer, num_layers)
output = decoder(
paddle.to_variable(tgt),
paddle.to_variable(memory),
paddle.to_variable(tgt_mask), paddle.to_variable(memory_mask))
paddle.to_tensor(tgt),
paddle.to_tensor(memory),
paddle.to_tensor(tgt_mask), paddle.to_tensor(memory_mask))
def test_transformer(self):
batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params(
......@@ -453,24 +453,24 @@ class TestTransformer(unittest.TestCase):
n_head,
dim_feedforward=dim_feedforward,
dropout=dropout)
src = paddle.to_variable(
src = paddle.to_tensor(
np.random.rand(batch_size, source_length, d_model).astype(
"float32"))
tgt = paddle.to_variable(
tgt = paddle.to_tensor(
np.random.rand(batch_size, target_length, d_model).astype(
"float32"))
src_mask = np.zeros((batch_size, n_head, source_length,
source_length)).astype("float32")
src_mask[0][0][0][0] = -np.inf
src_mask = paddle.to_variable(src_mask)
src_mask = paddle.to_tensor(src_mask)
tgt_mask = np.zeros((batch_size, n_head, target_length,
target_length)).astype("float32")
tgt_mask[0][0][0][0] = -1e9
memory_mask = np.zeros((batch_size, n_head, target_length,
source_length)).astype("float32")
memory_mask[0][0][0][0] = -1e9
tgt_mask, memory_mask = paddle.to_variable(
tgt_mask), paddle.to_variable(memory_mask)
tgt_mask, memory_mask = paddle.to_tensor(
tgt_mask), paddle.to_tensor(memory_mask)
trans_output = transformer(src, tgt, src_mask, tgt_mask,
memory_mask)
......
......@@ -424,10 +424,10 @@ class TestCTCLossAPICase(unittest.TestCase):
loss_np = ctc.forward()
paddle.disable_static()
softmax = paddle.to_variable(logits)
labels = paddle.to_variable(labels)
logits_length = paddle.to_variable(self.logits_length)
labels_length = paddle.to_variable(self.labels_length)
softmax = paddle.to_tensor(logits)
labels = paddle.to_tensor(labels)
logits_length = paddle.to_tensor(self.logits_length)
labels_length = paddle.to_tensor(self.labels_length)
loss_pd_mean = F.ctc_loss(
softmax,
labels,
......@@ -477,10 +477,10 @@ class TestCTCLossAPICase(unittest.TestCase):
loss_np = ctc.forward()
paddle.disable_static()
softmax = paddle.to_variable(logits)
labels = paddle.to_variable(labels)
logits_length = paddle.to_variable(self.logits_length)
labels_length = paddle.to_variable(self.labels_length)
softmax = paddle.to_tensor(logits)
labels = paddle.to_tensor(labels)
logits_length = paddle.to_tensor(self.logits_length)
labels_length = paddle.to_tensor(self.labels_length)
loss_pd = paddle.nn.CTCLoss(self.blank, 'none')(
softmax, labels, logits_length, labels_length)
......
......@@ -53,7 +53,7 @@ __all__ = [
'shard_index',
'slice',
'split',
'chunk'
'chunk',
'squeeze',
'stack',
'strided_slice',
......