diff --git a/python/paddle/fluid/tests/unittests/test_lod_append_op.py b/python/paddle/fluid/tests/unittests/test_lod_append_op.py
index a67ce8a897ce9c326b1da77770d50c379ae44f5d..82cf4318098b6b834e7e41337a89542cd7d7e088 100644
--- a/python/paddle/fluid/tests/unittests/test_lod_append_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lod_append_op.py
@@ -29,9 +29,12 @@ class TestLoDAppendAPI(unittest.TestCase):
         main_program = Program()
         with fluid.program_guard(main_program):
             x = fluid.layers.data(name='x', shape=[6], dtype='float32')
-            result = fluid.layers.lod_append(x, [0, 2, 6])
+            level = fluid.layers.data(
+                name='level', shape=[3], dtype='int32', lod_level=0)
+            result = fluid.layers.lod_append(x, level)
 
         x_i = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]).astype("float32")
+        level_i = np.array([0, 2, 6]).astype("int32")
 
         for use_cuda in [False, True]:
             if use_cuda and not fluid.core.is_compiled_with_cuda():
@@ -39,49 +42,38 @@
             place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
             exe = fluid.Executor(place)
             [out] = exe.run(fluid.default_main_program(),
-                            feed={'x': x_i},
+                            feed={'x': x_i,
+                                  'level': level_i},
                             fetch_list=[result],
                             return_numpy=False)
             self.assertEqual(out.recursive_sequence_lengths(), [[2, 4]])
 
 
 class TestLodAppendOpError(unittest.TestCase):
-    def test_errors(self):
-        with program_guard(Program()):
+    def test_error(self):
+        # The input(x) must be Variable.
+        x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
+        level1 = [0, 2, 4]
+        self.assertRaises(TypeError, fluid.layers.lod_append, x1, level1)
 
-            def test_x_Variable():
-                # The input(x) must be Variable.
-                x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
-                level1 = [0, 2, 4]
-                fluid.layers.lod_append(x1, level1)
-            self.assertRaises(TypeError, fluid.layers.lod_append, x1,
-                              level1)
+        #The input(level) must be Variable or list.
+        x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
+        self.assertRaises(ValueError, fluid.layers.lod_append, x2, 2)
 
-            def test_level_Variable():
-                # The input(level) must be Variable or list.
-                x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
-                level2 = 2
-                fluid.layers.lod_append(x2, level2)
-            self.assertRaises(TypeError, fluid.layers.lod_append, x2,
-                              level2)
+        # Input(x) dtype must be float32 or float64 or int32 or int64
+        for dtype in ["bool", "float16"]:
+            x3 = fluid.layers.data(name='x3_' + dtype, shape=[4], dtype=dtype)
+            level3 = fluid.layers.data(
+                name='level3' + dtype, shape=[4], dtype='int32', lod_level=2)
+            self.assertRaises(TypeError, fluid.layers.lod_append, x3, level3)
 
-            def test_x_dtype():
-                for dtype in ["bool", "float16"]:
-                    x3 = fluid.layers.data(
-                        name='x3_' + dtype, shape=[4], dtype=dtype)
-                    level3 = fluid.layers.data(
-                        name='level3', shape=[4], dtype='int32', lod_level=2)
-                    self.assertRaises(TypeError, fluid.layers.lod_append, x3,
-                                      level3)
-
-            def test_level_dtype():
-                for dtype in ["bool", "float16", "float32", "float64", "int64"]:
-                    x4 = fluid.layers.data(
-                        name='x4_' + dtype, shape=[4], dtype='float32')
-                    level4 = fluid.layers.data(
-                        name='level4', shape=[4], dtype=dtype, lod_level=0)
-                    self.assertRaises(TypeError, fluid.layers.lod_append, x4,
-                                      level4)
+        # Input(level) dtype must be int32 when lod_level=0
+        for dtype in ["bool", "float16", "float32", "float64", "int64"]:
+            x4 = fluid.layers.data(
+                name='x4' + dtype, shape=[4], dtype='float32')
+            level4 = fluid.layers.data(
+                name='level4_' + dtype, shape=[4], dtype=dtype, lod_level=0)
+            self.assertRaises(TypeError, fluid.layers.lod_append, x4, level4)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py
index 236c9944145ed46852ec18467bd2190edf073526..ac2cd1be27f46351bc965d82f0603f463f775564 100644
--- a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from op_test import OpTest
 from paddle.fluid import Program, program_guard
 
@@ -136,28 +137,26 @@ class TestLodAppendOpByAttr(OpTest):
 class TestLodResetOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
-
-            def test_Variable():
-                # The input must be Variable.
-                x1 = fluid.create_lod_tensor(
-                    np.ones([6]), [3, 3], fluid.CPUPlace())
-                y1 = fluid.create_lod_tensor(
-                    np.ones([6]), [2, 2, 2], fluid.CPUPlace())
-                self.assertRaises(TypeError, fluid.layers.lod_reset, [x1, y1])
-
-            def test_type():
-                # dtype must be float32 or float64 or int32 or int64
-                x2 = fluid.layers.data(shape=[4], dtype='uint8', name='x2')
+            # The input must be Variable.
+            x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
+            target_lod = [2, 2]
+            self.assertRaises(TypeError, fluid.layers.lod_reset, x1, target_lod)
+
+            # Input(x) dtype must be float32 or float64 or int32 or int64
+            for dtype in ["bool", "float16"]:
+                x2 = fluid.layers.data(
+                    name='x2' + dtype, shape=[4], dtype=dtype)
                 y2 = fluid.layers.data(
-                    shape=[4], dtype='uint8', name='x2', lod_level=2)
-                self.assertRaises(TypeError, fluid.layers.lod_reset, [x2, y2])
+                    name='y2' + dtype, shape=[4], dtype='int32', lod_level=2)
+                self.assertRaises(TypeError, fluid.layers.lod_reset, x2, y2)
 
-            def test_type2():
-                # dtype must be int32 or int64
-                x3 = fluid.layers.data(shape=[4], dtype='float32', name='x3')
+            # Input(y) dtype must be int32 when lod_level=0
+            for dtype in ["bool", "float16", "float32", "float64", "int64"]:
+                x3 = fluid.layers.data(
+                    name='x3' + dtype, shape=[4], dtype='float32')
                 y3 = fluid.layers.data(
-                    shape=[4], dtype='float32', name='x3', lod_level=0)
-                self.assertRaises(TypeError, fluid.layers.lod_reset, [x3, y3])
+                    name='y3' + dtype, shape=[4], dtype=dtype, lod_level=0)
+                self.assertRaises(TypeError, fluid.layers.lod_reset, x3, y3)
 
 
 if __name__ == '__main__':
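For reference, the behaviour exercised by the updated TestLoDAppendAPI case can be reproduced with a short standalone script along the following lines. This is a minimal sketch against the Fluid 1.x layers API used by these tests, not part of the patch; it runs the built program directly on CPU instead of going through the unittest harness.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program

# Build a program in which the appended LoD level is fed at run time
# as an int32 Variable instead of being hard-coded as a Python list.
main_program = Program()
with fluid.program_guard(main_program):
    x = fluid.layers.data(name='x', shape=[6], dtype='float32')
    level = fluid.layers.data(
        name='level', shape=[3], dtype='int32', lod_level=0)
    result = fluid.layers.lod_append(x, level)

exe = fluid.Executor(fluid.CPUPlace())
[out] = exe.run(main_program,
                feed={'x': np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]).astype('float32'),
                      'level': np.array([0, 2, 6]).astype('int32')},
                fetch_list=[result],
                return_numpy=False)
print(out.recursive_sequence_lengths())  # expected: [[2, 4]]

The fed level offsets [0, 2, 6] describe two sequences of lengths 2 and 4, which is why the test asserts that recursive_sequence_lengths() equals [[2, 4]].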