diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index ee7b764ad7b863eb925c1a180804099808098e69..b7e0e60145df3e229bdd029ffb12bdd75dd2f9f9 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -68,7 +68,6 @@ __all__ = [
     'zeros_like',
     'ones_like',
     'diag',
-    'eye',
     'triu',
 ]
 
@@ -1787,113 +1786,6 @@ def diag(diagonal):
     return out
 
 
-def eye(
-    num_rows, num_columns=None, batch_shape=None, dtype='float32', name=None
-):
-    """
-    This function constructs a or a batch of 2-D tensor with ones on the diagonal and zeros elsewhere.
-
-    Args:
-        num_rows(int): the number of rows in each batch tensor.
-        num_columns(int, optional): the number of columns in each batch tensor.
-            If None, default: num_rows.
-        batch_shape(list, optional): If provided, the returned tensor will have a leading
-            batch size of this shape, the data type of ``batch_shape`` is int. Default is None.
-        dtype(np.dtype|str, optional): The data type of the returned tensor.
-            It should be int32, int64, float16, float32, float64, default is 'float32'.
-        name(str, optional): The default value is None. Normally there is no
-            need for user to set this property. For more information, please
-            refer to :ref:`api_guide_Name`.
-
-    Returns:
-        Tensor: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].
-
-    Examples:
-        .. code-block:: python
-
-          import paddle.fluid as fluid
-          data = fluid.layers.eye(3, dtype='int32')
-          # [[1, 0, 0]
-          #  [0, 1, 0]
-          #  [0, 0, 1]]
-
-          data = fluid.layers.eye(2, 3, dtype='int32')
-          # [[1, 0, 0]
-          #  [0, 1, 0]]
-
-          data = fluid.layers.eye(2, batch_shape=[3])
-          # Construct a batch of 3 identity tensors, each 2 x 2.
-          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.
-
-    """
-
-    def _check_attr(attr, message):
-        if isinstance(attr, ((Variable, core.VarBase, core.eager.Tensor))):
-            assert len(attr.shape) == 1 and attr.shape[0] in [1, -1]
-        elif not isinstance(attr, int) or attr < 0:
-            raise TypeError("{} should be a non-negative int.".format(message))
-
-    _check_attr(num_rows, "num_rows")
-    if not isinstance(dtype, core.VarDesc.VarType):
-        dtype = convert_np_dtype_to_dtype_(dtype)
-    if num_columns is not None:
-        _check_attr(num_columns, "num_columns")
-    else:
-        num_columns = num_rows
-
-    if in_dygraph_mode():
-        out = _C_ops.eye(
-            num_rows, num_columns, dtype, _current_expected_place()
-        )
-    elif _in_legacy_dygraph():
-        out = _legacy_C_ops.eye(
-            'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns
-        )
-    else:
-        helper = LayerHelper("eye", **locals())
-        check_dtype(
-            dtype,
-            'dtype',
-            ['float16', 'float32', 'float64', 'int32', 'int64'],
-            'eye',
-        )
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-        helper.append_op(
-            type='eye',
-            inputs={},
-            outputs={'Out': [out]},
-            attrs={
-                'num_rows': num_rows,
-                'num_columns': num_columns,
-                'dtype': dtype,
-            },
-            stop_gradient=True,
-        )
-
-    if batch_shape is not None:
-        re_shape = [1] * len(batch_shape)
-        re_shape = re_shape + [num_rows, num_columns]
-        expand_times = batch_shape + [1, 1]
-        if _non_static_mode():
-            out, _ = _legacy_C_ops.reshape2(out, None, 'shape', re_shape)
-            return _legacy_C_ops.expand(out, None, 'expand_times', expand_times)
-
-        if not isinstance(batch_shape, list):
-            raise TypeError("batch_shape should be a list")
-        for batch_val in batch_shape:
-            if batch_val <= 0:
-                raise TypeError("batch_shape should be a positive int list")
-
-        from .nn import expand
-        from paddle import reshape
-
-        out = reshape(x=out, shape=re_shape)
-        out = expand(x=out, expand_times=expand_times)
-
-    out.stop_gradient = True
-    return out
-
-
 def ones_like(x, out=None):
     """
     **ones_like**
diff --git a/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py
index e80be96f2cdcbfab5b31f95a2e1bd9f4daf107c0..dd4ad921172c8521b340cbe583882d5351786191 100755
--- a/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_eye_op_npu.py
@@ -139,39 +139,6 @@ class API_TestTensorEye(unittest.TestCase):
         paddle.enable_static()
         self.assertEqual((out.numpy() == expected_result).all(), True)
 
-        paddle.disable_static(paddle.NPUPlace(0))
-        batch_shape = [2]
-        out = fluid.layers.eye(10, 10, dtype="int32", batch_shape=batch_shape)
-        result = np.eye(10, dtype="int32")
-        expected_result = []
-        for index in reversed(batch_shape):
-            tmp_result = []
-            for i in range(index):
-                tmp_result.append(result)
-            result = tmp_result
-        expected_result = np.stack(result, axis=0)
-        paddle.enable_static()
-        self.assertEqual(
-            out.numpy().shape == np.array(expected_result).shape, True
-        )
-        self.assertEqual((out.numpy() == expected_result).all(), True)
-
-        paddle.disable_static(paddle.NPUPlace(0))
-        batch_shape = [3, 2]
-        out = fluid.layers.eye(10, 10, dtype="int32", batch_shape=batch_shape)
-        result = np.eye(10, dtype="int32")
-        expected_result = []
-        for index in reversed(batch_shape):
-            tmp_result = []
-            for i in range(index):
-                tmp_result.append(result)
-            result = tmp_result
-        expected_result = np.stack(result, axis=0)
-        paddle.enable_static()
-        self.assertEqual(
-            out.numpy().shape == np.array(expected_result).shape, True
-        )
-        self.assertEqual((out.numpy() == expected_result).all(), True)
 
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
diff --git a/python/paddle/fluid/tests/unittests/test_eye_op.py b/python/paddle/fluid/tests/unittests/test_eye_op.py
index e61037ec1afbdcade0f5b52f9f4ec6e88186b879..fb93aee9b30cd9d59a297b5a73b002c65a1b4e31 100644
--- a/python/paddle/fluid/tests/unittests/test_eye_op.py
+++ b/python/paddle/fluid/tests/unittests/test_eye_op.py
@@ -109,40 +109,6 @@ class API_TestTensorEye(unittest.TestCase):
         paddle.enable_static()
         self.assertEqual((out.numpy() == expected_result).all(), True)
 
-        paddle.disable_static()
-        batch_shape = [2]
-        out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape)
-        result = np.eye(10, dtype="int64")
-        expected_result = []
-        for index in reversed(batch_shape):
-            tmp_result = []
-            for i in range(index):
-                tmp_result.append(result)
-            result = tmp_result
-        expected_result = np.stack(result, axis=0)
-        paddle.enable_static()
-        self.assertEqual(
-            out.numpy().shape == np.array(expected_result).shape, True
-        )
-        self.assertEqual((out.numpy() == expected_result).all(), True)
-
-        paddle.disable_static()
-        batch_shape = [3, 2]
-        out = fluid.layers.eye(10, 10, dtype="int64", batch_shape=batch_shape)
-        result = np.eye(10, dtype="int64")
-        expected_result = []
-        for index in reversed(batch_shape):
-            tmp_result = []
-            for i in range(index):
-                tmp_result.append(result)
-            result = tmp_result
-        expected_result = np.stack(result, axis=0)
-        paddle.enable_static()
-        self.assertEqual(
-            out.numpy().shape == np.array(expected_result).shape, True
-        )
-        self.assertEqual((out.numpy() == expected_result).all(), True)
-
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
@@ -212,18 +178,6 @@ class TestEyeRowsCol(UnittestBase):
             paddle.eye(-1)
 
 
-class TestEyeRowsCol2(TestEyeRowsCol):
-    def call_func(self, x):
-        rows = paddle.assign(3)
-        cols = paddle.assign(10)
-        out = paddle.fluid.layers.eye(rows, cols)
-        return out
-
-    def test_error(self):
-        with self.assertRaises(TypeError):
-            paddle.fluid.layers.eye(-1)
-
-
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 38b0d96571b603436675c3ddf84a53e189d51db9..1d892e76c92c02c4b928c2a7c33b06233a4e61f6 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -2407,70 +2407,6 @@ class TestLayer(LayerTest):
                 conv3d1.bias.numpy(), conv3d2.bias.numpy()
             )
 
-    def test_eye_op(self):
-        np_eye = np.eye(3, 2)
-        array_rlt1 = [np_eye for _ in range(3)]
-        stack_rlt1 = np.stack(array_rlt1, axis=0)
-        array_rlt2 = [stack_rlt1 for _ in range(4)]
-        stack_rlt2 = np.stack(array_rlt2, axis=0)
-
-        with self.dynamic_graph():
-            with _test_eager_guard():
-                eager_eye_tensor = layers.eye(num_rows=3, num_columns=2)
-                eager_eye_tensor_rlt1 = layers.eye(
-                    num_rows=3, num_columns=2, batch_shape=[3]
-                )
-                eager_eye_tensor_rlt2 = layers.eye(
-                    num_rows=3, num_columns=2, batch_shape=[4, 3]
-                )
-                eager_diag_tensor = layers.eye(20)
-                eager_eye_tensor_value = eager_eye_tensor.numpy()
-                eager_eye_tensor_rlt1_value = eager_eye_tensor_rlt1.numpy()
-                eager_eye_tensor_rlt2_value = eager_eye_tensor_rlt2.numpy()
-                eager_diag_tensor_value = eager_diag_tensor.numpy()
-
-            eye_tensor = layers.eye(num_rows=3, num_columns=2)
-            eye_tensor_rlt1 = layers.eye(
-                num_rows=3, num_columns=2, batch_shape=[3]
-            )
-            eye_tensor_rlt2 = layers.eye(
-                num_rows=3, num_columns=2, batch_shape=[4, 3]
-            )
-            diag_tensor = layers.eye(20)
-            eye_tensor_value = eye_tensor.numpy()
-            eye_tensor_rlt1_value = eye_tensor_rlt1.numpy()
-            eye_tensor_rlt2_value = eye_tensor_rlt2.numpy()
-            diag_tensor_value = diag_tensor.numpy()
-
-        np.testing.assert_allclose(eager_eye_tensor_value, np_eye, rtol=1e-05)
-        np.testing.assert_allclose(
-            eager_eye_tensor_rlt1_value, stack_rlt1, rtol=1e-05
-        )
-        np.testing.assert_allclose(
-            eager_eye_tensor_rlt2_value, stack_rlt2, rtol=1e-05
-        )
-        np.testing.assert_allclose(
-            eager_diag_tensor_value, np.eye(20), rtol=1e-05
-        )
-
-        np.testing.assert_allclose(eye_tensor_value, np_eye, rtol=1e-05)
-        np.testing.assert_allclose(
-            eye_tensor_rlt1_value, stack_rlt1, rtol=1e-05
-        )
-        np.testing.assert_allclose(
-            eye_tensor_rlt2_value, stack_rlt2, rtol=1e-05
-        )
-        np.testing.assert_allclose(diag_tensor_value, np.eye(20), rtol=1e-05)
-
-        with self.assertRaises(TypeError):
-            layers.eye(num_rows=3.1)
-        with self.assertRaises(TypeError):
-            layers.eye(num_rows=3, num_columns=2.2)
-        with self.assertRaises(TypeError):
-            layers.eye(num_rows=3, batch_shape=2)
-        with self.assertRaises(TypeError):
-            layers.eye(num_rows=3, batch_shape=[-1])
-
     def func_while_loop(self):
         with self.static_graph():
             i = layers.fill_constant(shape=[1], dtype='int64', value=0)
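
Migration note: with fluid.layers.eye removed, the plain 2-D cases are covered by paddle.eye, which the surviving tests above still exercise. The removed batch_shape argument has no single replacement call, but the same result can be reproduced with paddle.reshape followed by paddle.expand, mirroring the reshape-then-expand scheme the deleted implementation used internally. The sketch below is a minimal, hypothetical helper (batched_eye is not a Paddle API) illustrating that pattern:

import paddle


def batched_eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
    # Hypothetical helper (not a Paddle API) approximating the behaviour of the
    # removed fluid.layers.eye, including its optional batch_shape argument.
    if num_columns is None:
        num_columns = num_rows
    out = paddle.eye(num_rows, num_columns, dtype=dtype)
    if batch_shape is not None:
        # Add leading singleton dims, then broadcast them out to batch_shape,
        # the same reshape-then-expand approach the removed code took.
        out = paddle.reshape(out, [1] * len(batch_shape) + [num_rows, num_columns])
        out = paddle.expand(out, batch_shape + [num_rows, num_columns])
    return out


# data[i, :, :] is a 2 x 2 identity matrix for i = 0, 1, 2 -- the same output
# that fluid.layers.eye(2, batch_shape=[3]) used to produce.
data = batched_eye(2, batch_shape=[3])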