Unverified commit 07f509a8 authored by yaoxuefeng, committed by GitHub

fix 4 apis test=develop (#25529)

Parent beb0ca5f
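For orientation, a minimal sketch, not part of the commit, of how the four touched APIs are called after this change. It uses the 1.8-era imperative helpers (paddle.enable_imperative, paddle.imperative.to_variable) that the updated docstrings below rely on; all shapes, values, and variable names are illustrative assumptions.

import numpy as np
import paddle

paddle.enable_imperative()  # imperative (dygraph) mode, as in the new docstrings

data = np.arange(1, 13, dtype="int64").reshape(3, 4)
x = paddle.imperative.to_variable(data)
lower = paddle.tensor.tril(x=x, diagonal=0)  # keyword renamed from `input` to `x`
upper = paddle.tensor.triu(x=x, diagonal=0)

a = paddle.imperative.to_variable(np.ones((2, 2, 3), dtype="float32"))
b = paddle.imperative.to_variable(np.ones((2, 3, 2), dtype="float32"))
prod = paddle.bmm(a, b)  # both operands must now be 3-D

m = paddle.imperative.to_variable(np.ones((2, 2), dtype="float32"))
out = paddle.addmm(input=m, x=m, y=m, beta=0.5, alpha=5.0)  # beta now precedes alpha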
@@ -63,18 +63,104 @@ class TestAddMMOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of addmm_op must be Variable.
input = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace())
x1 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace())
x2 = fluid.create_lod_tensor(
np.array([[-1]]), [[1]], fluid.CPUPlace())
np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace())
self.assertRaises(TypeError, paddle.addmm, input, x1, x2)
# The input dtype of addmm_op must be float32 or float64.
input = fluid.layers.data(name='input', shape=[4], dtype="int32")
x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32")
x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32")
input = fluid.layers.data(
name='input',
shape=[4, 4],
dtype="int32",
append_batch_size=False)
x3 = fluid.layers.data(
name='x3', shape=[4, 4], dtype="int32", append_batch_size=False)
x4 = fluid.layers.data(
name='x4', shape=[4, 4], dtype="int32", append_batch_size=False)
self.assertRaises(TypeError, paddle.addmm, input, x3, x4)
# x and y dimension mismatch
x5 = fluid.layers.data(
name='x5',
shape=[4, 5],
dtype="float32",
append_batch_size=False)
x6 = fluid.layers.data(
name='x6',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
self.assertRaises(ValueError, paddle.addmm, input, x5, x6)
# input and x are not broadcastable
x7 = fluid.layers.data(
name='x7',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
x8 = fluid.layers.data(
name='x8',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
input1 = fluid.layers.data(
name='input1',
shape=[2, 4],
dtype="float32",
append_batch_size=False)
self.assertRaises(ValueError, paddle.addmm, input1, x7, x8)
# input and x are not broadcastable
x9 = fluid.layers.data(
name='x9',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
x10 = fluid.layers.data(
name='x10',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
input2 = fluid.layers.data(
name='input2',
shape=[1, 2],
dtype="float32",
append_batch_size=False)
self.assertRaises(ValueError, paddle.addmm, input2, x9, x10)
x11 = fluid.layers.data(
name='x11',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
x12 = fluid.layers.data(
name='x12',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
input3 = fluid.layers.data(
name='input3',
shape=[4, 2],
dtype="float32",
append_batch_size=False)
self.assertRaises(ValueError, paddle.addmm, input3, x11, x12)
x13 = fluid.layers.data(
name='x13',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
x14 = fluid.layers.data(
name='x14',
shape=[4, 4],
dtype="float32",
append_batch_size=False)
input4 = fluid.layers.data(
name='input4',
shape=[3, 1],
dtype="float32",
append_batch_size=False)
self.assertRaises(ValueError, paddle.addmm, input4, x13, x14)
class TestAddMMOp2(TestAddMMOp):
@@ -147,5 +233,23 @@ class TestAddMMOp4(unittest.TestCase):
assert np.allclose(np_input + np.dot(np_x, np_y), out.numpy())
'''
class TestAddMMAPI(unittest.TestCase):
def test_api_error(self):
data_x = np.ones((2, 2)).astype(np.float32)
data_y = np.ones((2, 2)).astype(np.float32)
data_input = np.ones((2, 2)).astype(np.float32)
paddle.enable_imperative()
def test_error1():
data_x_wrong = np.ones((2, 3)).astype(np.float32)
x = paddle.imperative.to_variable(data_x_wrong)
y = paddle.imperative.to_variable(data_y)
input = paddle.imperative.to_variable(data_input)
out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
self.assertRaises(ValueError, test_error1)
'''
if __name__ == "__main__":
unittest.main()
@@ -73,5 +73,15 @@ class API_TestDygraphBmm(unittest.TestCase):
self.assertTrue(np.allclose(expected_result, out_np))
class TestBmmAPIError(unittest.TestCase):
def test_api_error(self):
x_data = np.arange(24, dtype='float32').reshape((2, 3, 4))
y_data = np.arange(16, dtype='float32').reshape((2, 4, 2))
y_data_wrong1 = np.arange(16, dtype='float32').reshape((2, 2, 4))
y_data_wrong2 = np.arange(16, dtype='float32').reshape((2, 2, 2, 2))
self.assertRaises(ValueError, paddle.bmm, x_data, y_data_wrong1)
self.assertRaises(ValueError, paddle.bmm, x_data, y_data_wrong2)
if __name__ == "__main__":
unittest.main()
@@ -63,7 +63,7 @@ def case_generator(op_type, Xshape, diagonal, expected):
"diagonal: TypeError":
"diagonal in {} must be a python Int".format(op_type),
"input: ValueError":
"input shape in {} must be at least 2-D".format(op_type),
"x shape in {} must be at least 2-D".format(op_type),
}
class FailureCase(unittest.TestCase):
@@ -71,7 +71,7 @@ def case_generator(op_type, Xshape, diagonal, expected):
data = fluid.data(shape=Xshape, dtype='float64', name=cls_name)
with self.assertRaisesRegexp(
eval(expected.split(':')[-1]), errmsg[expected]):
getattr(tensor, op_type)(input=data, diagonal=diagonal)
getattr(tensor, op_type)(x=data, diagonal=diagonal)
class SuccessCase(TrilTriuOpDefaultTest):
def initTestCase(self):
@@ -490,14 +490,13 @@ def _tril_triu_op(helper):
"""Base op of tril_op and triu_op
"""
op_type = helper.layer_type
x = helper.kwargs.get('input', None)
x = helper.kwargs.get('x', None)
assert x is not None, 'x cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
op_type)
if len(x.shape) < 2:
raise ValueError("input shape in {} must be at least 2-D".format(
op_type))
raise ValueError("x shape in {} must be at least 2-D".format(op_type))
diagonal = helper.kwargs.get('diagonal', 0)
if not isinstance(diagonal, (int, )):
raise TypeError("diagonal in {} must be a python Int".format(op_type))
@@ -521,18 +520,18 @@ def _tril_triu_op(helper):
return out
def tril(input, diagonal=0, name=None):
def tril(x, diagonal=0, name=None):
"""
:alias_main: paddle.tril
:alias: paddle.tril,paddle.tensor.tril,paddle.tensor.creation.tril
This op returns the lower triangular part of a matrix (2-D tensor) or batch
of matrices :attr:`input`, the other elements of the result tensor are set
of matrices :attr:`x`, the other elements of the result tensor are set
to 0. The lower triangular part of the matrix is defined as the elements
on and below the diagonal.
Args:
input (Variable): The input variable which is a Tensor.
x (Variable): The input variable x which is a Tensor.
Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
diagonal (int, optional): The diagonal to consider, default value is 0.
If :attr:`diagonal` = 0, all elements on and below the main diagonal are
@@ -545,47 +544,41 @@ def tril(input, diagonal=0, name=None):
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, results of lower triangular operation by the specified diagonal of input tensor,
it's data type is the same as input's Tensor.
Variable: Tensor, result of the lower triangular operation with the specified diagonal on input tensor x;
its data type is the same as x's.
Raises:
TypeError: diagonal is not an int type.
ValueError: dimension of :attr:`input` is less than 2.
ValueError: dimension of :attr:`x` is less than 2.
Examples:
.. code-block:: python
import numpy as np
import paddle.tensor as tensor
import paddle.fluid as fluid
import paddle
data = np.arange(1, 13, dtype="int64").reshape(3,-1)
# array([[ 1, 2, 3, 4],
# [ 5, 6, 7, 8],
# [ 9, 10, 11, 12]])
x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
exe = fluid.Executor(fluid.CPUPlace())
# example 1, default diagonal
tril = tensor.tril(x)
tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[tril], return_numpy=True)
paddle.enable_imperative()
x = paddle.imperative.to_variable(data)
tril1 = paddle.tensor.tril(x)
# array([[ 1, 0, 0, 0],
# [ 5, 6, 0, 0],
# [ 9, 10, 11, 0]])
# example 2, positive diagonal value
tril = tensor.tril(x, diagonal=2)
tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[tril], return_numpy=True)
tril2 = paddle.tensor.tril(x, diagonal=2)
# array([[ 1, 2, 3, 0],
# [ 5, 6, 7, 8],
# [ 9, 10, 11, 12]])
# example 3, negative diagonal value
tril = tensor.tril(x, diagonal=-1)
tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[tril], return_numpy=True)
tril3 = paddle.tensor.tril(x, diagonal=-1)
# array([[ 0, 0, 0, 0],
# [ 5, 0, 0, 0],
# [ 9, 10, 0, 0]])
@@ -593,23 +586,23 @@ def tril(input, diagonal=0, name=None):
"""
if in_dygraph_mode():
op = getattr(core.ops, 'tril_triu')
return op(input, 'diagonal', diagonal, "lower", True)
return op(x, 'diagonal', diagonal, "lower", True)
return _tril_triu_op(LayerHelper('tril', **locals()))
def triu(input, diagonal=0, name=None):
def triu(x, diagonal=0, name=None):
"""
:alias_main: paddle.triu
:alias: paddle.triu,paddle.tensor.triu,paddle.tensor.creation.triu
This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor are set to 0.
:attr:`x`, the other elements of the result tensor are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
Args:
input (Variable): The input variable which is a Tensor.
x (Variable): The input variable x which is a Tensor.
Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
diagonal (int, optional): The diagonal to consider, default value is 0.
If :attr:`diagonal` = 0, all elements on and above the main diagonal are
@@ -622,47 +615,41 @@ def triu(input, diagonal=0, name=None):
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: Tensor, results of upper triangular operation by the specified diagonal of input tensor,
it's data type is the same as input's Tensor.
Variable: Tensor, result of the upper triangular operation with the specified diagonal on input tensor x;
its data type is the same as x's.
Raises:
TypeError: diagonal is not an int type.
ValueError: dimension of :attr:`input` is less than 2.
ValueError: dimension of :attr:`x` is less than 2.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
import paddle.tensor as tensor
import paddle
data = np.arange(1, 13, dtype="int64").reshape(3,-1)
# array([[ 1, 2, 3, 4],
# [ 5, 6, 7, 8],
# [ 9, 10, 11, 12]])
x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
exe = fluid.Executor(fluid.CPUPlace())
paddle.enable_imperative()
# example 1, default diagonal
triu = tensor.triu(x)
triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[triu], return_numpy=True)
x = paddle.imperative.to_variable(data)
triu1 = paddle.tensor.triu(x)
# array([[ 1, 2, 3, 4],
# [ 0, 6, 7, 8],
# [ 0, 0, 11, 12]])
# example 2, positive diagonal value
triu = tensor.triu(x, diagonal=2)
triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[triu], return_numpy=True)
triu2 = paddle.tensor.triu(x, diagonal=2)
# array([[0, 0, 3, 4],
# [0, 0, 0, 8],
# [0, 0, 0, 0]])
# example 3, negative diagonal value
triu = tensor.triu(x, diagonal=-1)
triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
fetch_list=[triu], return_numpy=True)
triu3 = paddle.tensor.triu(x, diagonal=-1)
# array([[ 1, 2, 3, 4],
# [ 5, 6, 7, 8],
# [ 0, 10, 11, 12]])
@@ -670,7 +657,7 @@ def triu(input, diagonal=0, name=None):
"""
if in_dygraph_mode():
op = getattr(core.ops, 'tril_triu')
return op(input, 'diagonal', diagonal, "lower", False)
return op(x, 'diagonal', diagonal, "lower", False)
return _tril_triu_op(LayerHelper('triu', **locals()))
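As a quick cross-check, illustrative and not part of the commit: the diagonal semantics in both docstrings match NumPy's np.tril/np.triu with k=diagonal, so the docstring arrays can be reproduced directly.

import numpy as np

data = np.arange(1, 13, dtype="int64").reshape(3, 4)
print(np.tril(data))        # tril example 1 above
print(np.tril(data, k=2))   # tril example 2
print(np.triu(data, k=-1))  # triu example 3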
@@ -729,26 +729,32 @@ def bmm(x, y, name=None):
Examples:
import paddle
import paddle.fluid as fluid
import numpy as np
x = fluid.layers.data(name='x', shape=[10, 3, 4], dtype='float32')
y = fluid.layers.data(name='y', shape=[10, 4, 5], dtype='float32')
out = paddle.bmm(x, y)
# In dygraph mode:
# In imperative mode:
# size input1: (2, 2, 3) and input2: (2, 3, 2)
input1 = np.array([[[1.0, 1.0, 1.0],[2.0, 2.0, 2.0]],[[3.0, 3.0, 3.0],[4.0, 4.0, 4.0]]])
input2 = np.array([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
with fluid.dygraph.guard():
x = fluid.dygraph.to_variable(input1)
y = fluid.dygraph.to_variable(input2)
paddle.enable_imperative()
x = paddle.imperative.to_variable(input1)
y = paddle.imperative.to_variable(input2)
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
#[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
out_np = out.numpy()
"""
x_shape = x.shape
y_shape = y.shape
if not len(x_shape) == len(y_shape) == 3:
    raise ValueError(
        "x and y should be 3-dimensional. But received x's dimension: {}, y's dimension: {}".
        format(x_shape, y_shape))
if x_shape[2] != y_shape[1]:
    raise ValueError(
        "x's width must be equal to y's height. But received x's shape: {}, y's shape: {}".
        format(x_shape, y_shape))
helper = LayerHelper('bmm', **locals())
if in_dygraph_mode():
return core.ops.bmm(x, y)
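For reference, a pure-NumPy sketch of the contract the two new checks enforce, assuming (as the kernel requires) that the batch dimensions of x and y match; einsum stands in for the batched-matmul kernel and bmm_ref is a hypothetical helper name.

import numpy as np

def bmm_ref(x, y):
    # x: (B, M, K), y: (B, K, N) -> (B, M, N)
    if not (x.ndim == y.ndim == 3):  # mirrors the first new check
        raise ValueError("x and y should be 3-dimensional")
    if x.shape[2] != y.shape[1]:     # mirrors the second new check
        raise ValueError("x's width must be equal to y's height")
    return np.einsum("bmk,bkn->bmn", x, y)

print(bmm_ref(np.ones((2, 2, 3)), np.ones((2, 3, 2))).shape)  # (2, 2, 2)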
@@ -915,7 +915,7 @@ def mm(input, mat2, name=None):
return out
def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
"""
:alias_main: paddle.addmm
:alias: paddle.addmm,paddle.tensor.addmm,paddle.tensor.math.addmm
@@ -935,8 +935,8 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
input (Variable): The input Tensor/LoDTensor to be added to the final result.
x (Variable): The first input Tensor/LoDTensor for matrix multiplication.
y (Variable): The second input Tensor/LoDTensor for matrix multiplication.
alpha (float): Coefficient of $x*y$.
beta (float): Coefficient of $input$.
alpha (float): Coefficient of $x*y$.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
@@ -947,25 +947,43 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
import numpy as np
import paddle
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[2, 2], dtype='float32')
x = fluid.data(name='x', shape=[2, 2], dtype='float32')
y = fluid.data(name='y', shape=[2, 2], dtype='float32')
out = paddle.addmm( input=input, x=x, y=y, alpha=5.0, beta=0.5 )
data_x = np.ones((2, 2)).astype(np.float32)
data_y = np.ones((2, 2)).astype(np.float32)
data_input = np.ones((2, 2)).astype(np.float32)
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace()
exe = fluid.Executor(place)
results = exe.run(fluid.default_main_program(),
fetch_list=[out], feed={"input": data_input, 'x': data_x, "y": data_y})
print( np.array(results[0]) )
paddle.enable_imperative()
x = paddle.imperative.to_variable(data_x)
y = paddle.imperative.to_variable(data_y)
input = paddle.imperative.to_variable(data_input)
out = paddle.tensor.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
print( out.numpy() )
# [[10.5 10.5]
# [10.5 10.5]]
"""
input_shape = input.shape
x_shape = x.shape
y_shape = y.shape
if not len(input_shape) == len(x_shape) == len(y_shape) == 2:
raise ValueError("The dimention of input, x, y should be 2 but receive input's shape: {}, x's shape: {}, y's shape: {}".format(input_shape, x_shape, y_shape))
if input_shape[0] != x_shape[0]:
    if input_shape[0] != 1:
        raise ValueError("When x's dimension[0] is not equal to input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
    if input_shape[1] != y_shape[1] and input_shape[1] != 1:
        raise ValueError("When y's dimension[1] is not equal to input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
if input_shape[1] != y_shape[1]:
    if input_shape[1] != 1:
        raise ValueError("When y's dimension[1] is not equal to input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
    if input_shape[0] != x_shape[0] and input_shape[0] != 1:
        raise ValueError("When x's dimension[0] is not equal to input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
if x_shape[1] != y_shape[0]:
    raise ValueError("The input Variable x's width must be equal to Variable y's height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape))
if in_dygraph_mode():
out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
@@ -974,7 +992,7 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(x, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
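Finally, a NumPy sketch, illustrative rather than Paddle code (addmm_ref is a hypothetical helper), of the computation and the broadcast rule the new checks above enforce: out = beta * input + alpha * (x @ y), where each dimension of the 2-D input must either match the product's shape or be exactly 1.

import numpy as np

def addmm_ref(input, x, y, beta=1.0, alpha=1.0):
    assert input.ndim == x.ndim == y.ndim == 2
    assert x.shape[1] == y.shape[0]            # inner dimensions must agree
    assert input.shape[0] in (1, x.shape[0])   # rows: match or broadcast from 1
    assert input.shape[1] in (1, y.shape[1])   # cols: match or broadcast from 1
    return beta * input + alpha * (x @ y)

print(addmm_ref(np.ones((2, 2), np.float32), np.ones((2, 2), np.float32),
                np.ones((2, 2), np.float32), beta=0.5, alpha=5.0))
# [[10.5 10.5]
#  [10.5 10.5]]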