From d050360a748090bf5d5e3886dedef22b80506b39 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Thu, 7 Jun 2018 10:24:37 +0800
Subject: [PATCH] "reduce test input data size to accelerate ci" (#10831)

---
 .../fluid/tests/unittests/test_mul_op.py | 41 +++++++++++--------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py
index 862b7f8cb93..bbc782c1bce 100644
--- a/python/paddle/fluid/tests/unittests/test_mul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_mul_op.py
@@ -22,8 +22,8 @@ class TestMulOp(OpTest):
     def setUp(self):
         self.op_type = "mul"
         self.inputs = {
-            'X': np.random.random((32, 84)).astype("float32"),
-            'Y': np.random.random((84, 100)).astype("float32")
+            'X': np.random.random((2, 5)).astype("float32"),
+            'Y': np.random.random((5, 3)).astype("float32")
         }
         self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])}
 
@@ -46,13 +46,16 @@ class TestMulOp2(OpTest):
     def setUp(self):
         self.op_type = "mul"
         self.inputs = {
-            'X': np.random.random((15, 4, 12, 10)).astype("float32"),
-            'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32")
+            'X': np.random.random((3, 4, 4, 3)).astype("float32"),
+            'Y': np.random.random((2, 6, 1, 2, 3)).astype("float32")
         }
-        self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2}
-        result = np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10),
-                        self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9))
-        result = result.reshape(15, 4, 8, 2, 9)
+        self.attrs = {
+            'x_num_col_dims': 2,
+            'y_num_col_dims': 2,
+        }
+        result = np.dot(self.inputs['X'].reshape(3 * 4, 4 * 3),
+                        self.inputs['Y'].reshape(2 * 6, 1 * 2 * 3))
+        result = result.reshape(3, 4, 1, 2, 3)
         self.outputs = {'Out': result}
 
     def test_check_output(self):
@@ -73,9 +76,9 @@ class TestMulOp2(OpTest):
 class TestFP16MulOp1(OpTest):
     def setUp(self):
         self.op_type = "mul"
-        x = np.random.random((32, 84)).astype("float16")
-        y = np.random.random((84, 100)).astype("float16")
-        self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)}
+        x = np.random.random((3, 5)).astype("float16")
+        y = np.random.random((5, 4)).astype("float16")
+        self.inputs = {'X': x.view(np.float16), 'Y': y.view(np.float16)}
         self.outputs = {'Out': np.dot(x, y)}
 
     def test_check_output(self):
@@ -88,13 +91,15 @@
 class TestFP16MulOp2(OpTest):
     def setUp(self):
         self.op_type = "mul"
-        x = np.random.random((15, 4, 12, 10)).astype("float16")
-        y = np.random.random((4, 30, 8, 2, 9)).astype("float16")
-        self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)}
-        self.attrs = {'x_num_col_dims': 2, 'y_num_col_dims': 2}
-        result = np.dot(
-            x.reshape(15 * 4, 12 * 10), y.reshape(4 * 30, 8 * 2 * 9))
-        result = result.reshape(15, 4, 8, 2, 9)
+        x = np.random.random((3, 4, 4, 3)).astype("float16")
+        y = np.random.random((2, 6, 1, 2, 3)).astype("float16")
+        self.inputs = {'X': x.view(np.float16), 'Y': y.view(np.float16)}
+        self.attrs = {
+            'x_num_col_dims': 2,
+            'y_num_col_dims': 2,
+        }
+        result = np.dot(x.reshape(3 * 4, 4 * 3), y.reshape(2 * 6, 1 * 2 * 3))
+        result = result.reshape(3, 4, 1, 2, 3)
         self.outputs = {'Out': result}
 
     def test_check_output(self):
--
GitLab