Unverified commit 4a44ffdd, authored by W wangchaochaohu, committed by GitHub

refine the eye Op for API 2.0 test=develop (#25295)

Parent 273ee7d3
......@@ -1551,7 +1551,11 @@ def diag(diagonal):
return out
def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
def eye(num_rows,
num_columns=None,
batch_shape=None,
dtype='float32',
name=None):
"""
:alias_main: paddle.eye
:alias: paddle.eye,paddle.tensor.eye,paddle.tensor.creation.eye
......@@ -1559,19 +1563,25 @@ def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
**eye**
This function constructs an identity tensor, or a batch of identity tensors.
This function constructs a 2-D tensor, or a batch of 2-D tensors, with ones on the diagonal and zeros elsewhere.
Args:
num_rows(int): the number of rows in each batch tensor.
num_columns(int): the number of columns in each batch tensor.
If None, default: num_rows.
batch_shape(list(int)): If provided, the returned tensor will have a leading
batch size of this shape.
dtype(string): The data type of the returned tensor.
It should be int32, int64, float16, float32, float64.
num_columns(int, optional): the number of columns in each batch tensor.
If None, default: num_rows.
batch_shape(list(int), optional): If provided, the returned tensor will have a leading
batch size of this shape, default is None.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of the returned tensor.
It should be int32, int64, float16, float32, float64, default is 'float32'.
name(str, optional): The default value is None. Normally there is no
need for the user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Variable: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].
Raises:
TypeError: The `dtype` must be one of float16, float32, float64, int32 and int64.
TypeError: The `num_columns` must be a non-negative int.
Examples:
.. code-block:: python
......@@ -1592,38 +1602,55 @@ def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
"""
helper = LayerHelper("eye", **locals())
if not isinstance(num_rows, int) or num_rows < 0:
raise TypeError("num_rows should be a non-negative int")
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
if num_columns is not None:
if not isinstance(num_columns, int) or num_columns < 0:
raise TypeError("num_columns should be a non-negative int")
else:
num_columns = num_rows
out = helper.create_variable_for_type_inference(dtype=dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='eye',
inputs={},
outputs={'Out': [out]},
attrs={
'num_rows': num_rows,
'num_columns': num_columns,
'dtype': c_dtype
},
stop_gradient=True)
out.stop_gradient = True
if in_dygraph_mode():
out = core.ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
num_columns)
else:
helper = LayerHelper("eye", **locals())
check_dtype(dtype, 'dtype',
['float16', 'float32', 'float64', 'int32', 'int64'], 'eye')
if not isinstance(num_rows, int) or num_rows < 0:
raise TypeError("num_rows should be a non-negative int")
out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type='eye',
inputs={},
outputs={'Out': [out]},
attrs={
'num_rows': num_rows,
'num_columns': num_columns,
'dtype': dtype
},
stop_gradient=True)
if batch_shape is not None:
re_shape = [1] * len(batch_shape)
re_shape = re_shape + [num_rows, num_columns]
expand_times = batch_shape + [1, 1]
if in_dygraph_mode():
out = core.ops.reshape(out, 'shape', re_shape)
return core.ops.expand(out, 'expand_times', expand_times)
if not isinstance(batch_shape, list):
raise TypeError("batch_shape should be a list")
from .nn import stack
for batch_val in reversed(batch_shape):
for batch_val in batch_shape:
if batch_val <= 0:
raise TypeError("batch_shape should be a positive int list")
else:
stack_vars = [out for _ in numpy.arange(batch_val)]
out = stack(stack_vars, axis=0)
from .nn import reshape, expand
out = reshape(x=out, shape=re_shape)
out = expand(x=out, expand_times=expand_times)
out.stop_gradient = True
return out
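When `batch_shape` is given, the code above reshapes the identity to `[1] * len(batch_shape) + [num_rows, num_columns]` and then expands it along the leading dimensions, so every batch entry is the same identity matrix. A minimal NumPy sketch of the intended result (an editor's illustration, not part of the patch; `eye_reference` is a hypothetical helper name):

import numpy as np

# Editor's sketch: reference semantics of eye(..., batch_shape=...),
# expressed with plain NumPy broadcasting instead of reshape + expand.
def eye_reference(num_rows, num_columns=None, batch_shape=None, dtype="float32"):
    num_columns = num_rows if num_columns is None else num_columns
    out = np.eye(num_rows, num_columns, dtype=dtype)
    if batch_shape is not None:
        # Broadcast [num_rows, num_columns] up to batch_shape + [num_rows, num_columns],
        # mirroring the reshape-to-[1, ..., 1, rows, cols] + expand in the op.
        out = np.broadcast_to(out, list(batch_shape) + [num_rows, num_columns]).copy()
    return out

# eye_reference(3, batch_shape=[2]).shape -> (2, 3, 3), each slice an identity matrix.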
......
......@@ -74,32 +74,73 @@ class TestEyeOp2(OpTest):
class API_TestTensorEye(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
with paddle.program_guard(paddle.Program()):
data = paddle.eye(10)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[data])
expected_result = np.eye(10, dtype="float32")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
with paddle.program_guard(paddle.Program()):
data = paddle.eye(10, num_columns=7, dtype="float64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[data])
expected_result = np.eye(10, 7, dtype="float64")
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
with paddle.program_guard(paddle.Program()):
data = paddle.eye(10, dtype="int64")
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.Executor(place)
result, = exe.run(fetch_list=[data])
expected_result = np.eye(10, dtype="int64")
self.assertEqual((result == expected_result).all(), True)
with paddle.imperative.guard():
out = paddle.eye(10, dtype="int64")
expected_result = np.eye(10, dtype="int64")
self.assertEqual((out.numpy() == expected_result).all(), True)
with paddle.imperative.guard():
batch_shape = [2]
out = fluid.layers.eye(10,
10,
dtype="int64",
batch_shape=batch_shape)
result = np.eye(10, dtype="int64")
expected_result = []
for index in reversed(batch_shape):
tmp_result = []
for i in range(index):
tmp_result.append(result)
result = tmp_result
expected_result = np.stack(result, axis=0)
self.assertEqual(out.numpy().shape == np.array(expected_result).shape,
True)
self.assertEqual((out.numpy() == expected_result).all(), True)
with paddle.imperative.guard():
batch_shape = [3, 2]
out = fluid.layers.eye(10,
10,
dtype="int64",
batch_shape=batch_shape)
result = np.eye(10, dtype="int64")
expected_result = []
for index in reversed(batch_shape):
tmp_result = []
for i in range(index):
tmp_result.append(result)
result = tmp_result
expected_result = np.stack(result, axis=0)
self.assertEqual(out.numpy().shape == np.array(expected_result).shape,
True)
self.assertEqual((out.numpy() == expected_result).all(), True)
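The nested stacking loops above build the expected batched identity by hand; an equivalent editor's sketch using NumPy broadcasting (illustration only, not part of the test):

import numpy as np

# For batch_shape = [3, 2], broadcasting a single identity across the
# leading batch dimensions gives the same (3, 2, 10, 10) expected result.
batch_shape = [3, 2]
expected_result = np.broadcast_to(
    np.eye(10, dtype="int64"), batch_shape + [10, 10])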
def test_errors(self):
with fluid.program_guard(fluid.Program()):
with paddle.program_guard(paddle.Program()):
def test_num_rows_type_check():
paddle.eye(-1, dtype="int64")
......@@ -111,6 +152,11 @@ class API_TestTensorEye(unittest.TestCase):
self.assertRaises(TypeError, test_num_columns_type_check)
def test_num_columns_type_check():
paddle.eye(10, num_columns=10, dtype="int8")
self.assertRaises(TypeError, test_num_columns_type_check)
if __name__ == "__main__":
unittest.main()
......@@ -26,10 +26,10 @@ import paddle
# TODO: define functions to create a tensor
from ..fluid.layers import crop_tensor #DEFINE_ALIAS
from ..fluid.layers import diag #DEFINE_ALIAS
from ..fluid.layers import eye #DEFINE_ALIAS
from ..fluid.layers import fill_constant #DEFINE_ALIAS
from ..fluid.layers import create_tensor #DEFINE_ALIAS
from ..fluid.layers import linspace #DEFINE_ALIAS
import paddle
__all__ = [
'create_tensor',
......@@ -295,67 +295,50 @@ def zeros_like(x, dtype=None, name=None):
return full_like(x=x, fill_value=0, dtype=dtype, name=name)
def eye(num_rows,
num_columns=None,
out=None,
dtype='float32',
stop_gradient=True,
name=None):
def eye(num_rows, num_columns=None, dtype=None, name=None):
"""
**eye**
This function constructs an identity tensor.
This function constructs a 2-D Tensor with ones on the diagonal and zeros elsewhere.
Args:
num_rows(int): the number of rows in each batch tensor.
num_columns(int, optional): the number of columns in each batch tensor.
If None, default: num_rows.
out(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of operation.
if out is None, a new Variable will be created to store the result.
dtype(string, optional): The data type of the returned tensor.
It should be int32, int64, float16, float32, float64.
stop_gradient(bool, optional): Whether stop calculating gradients. Default:True.
If None, default: num_rows.
dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of the returned tensor.
It should be int32, int64, float16, float32, float64. Default: if None, the data type
is float32.
name(str, optional): The default value is None. Normally there is no need for
the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: An identity Tensor or LoDTensor of shape [num_rows, num_columns].
Raises:
TypeError: The `dtype` must be one of float16, float32, float64, int32, int64 and None.
TypeError: The `num_columns` must be a non-negative int.
Examples:
.. code-block:: python
import paddle
paddle.enable_imperative() # Now we are in imperative mode
data = paddle.eye(3, dtype='int32')
# [[1, 0, 0]
# [0, 1, 0]
# [0, 0, 1]]
# [[1 0 0]
# [0 1 0]
# [0 0 1]]
data = paddle.eye(2, 3, dtype='int32')
# [[1, 0, 0]
# [0, 1, 0]]
# [[1 0 0]
# [0 1 0]]
"""
helper = LayerHelper("eye", **locals())
if not isinstance(num_rows, int) or num_rows < 0:
raise TypeError("num_rows should be a non-negative int")
if num_columns is not None:
if not isinstance(num_columns, int) or num_columns < 0:
raise TypeError("num_columns should be a non-negative int")
else:
if dtype is None:
dtype = 'float32'
if num_columns is None:
num_columns = num_rows
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op(
type='eye',
inputs={},
outputs={'Out': [out]},
attrs={
'num_rows': num_rows,
'num_columns': num_columns,
'dtype': c_dtype
},
stop_gradient=True)
out.stop_gradient = stop_gradient
return out
return paddle.fluid.layers.eye(num_rows=num_rows,
num_columns=num_columns,
batch_shape=None,
dtype=dtype,
name=name)
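A quick usage sketch of the 2.0 wrapper above (editor's illustration, mirroring the docstring example; assumes an imperative-mode session):

import numpy as np
import paddle

paddle.enable_imperative()  # imperative (dygraph) mode, as in the docstring example
out = paddle.eye(2, 3, dtype='int32')
# paddle.eye forwards to fluid.layers.eye with batch_shape=None,
# so the result matches NumPy's eye of the same shape and dtype.
assert (out.numpy() == np.eye(2, 3, dtype='int32')).all()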
def full(shape, fill_value, dtype=None, name=None):
......