From 2b8640962aca6809b4647bf190c10c30001f2150 Mon Sep 17 00:00:00 2001 From: wawltor Date: Tue, 3 Mar 2020 11:31:40 +0800 Subject: [PATCH] Fix the big shape and precision op test, remove those from the white list (#22788) Fix the big shape op test, elementwise_mul, elementwise_div, mul op Fix the precision op test, mul op --- .../unittests/test_elementwise_div_op.py | 58 ++++++++++--------- .../unittests/test_elementwise_mul_op.py | 50 ++++++++-------- .../fluid/tests/unittests/test_mul_op.py | 12 ++-- .../white_list/check_shape_white_list.py | 3 - .../white_list/op_accuracy_white_list.py | 1 - 5 files changed, 62 insertions(+), 62 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py index 5b9c072f0a4..cb21a0b3bd4 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py @@ -16,7 +16,7 @@ from __future__ import print_function import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci class ElementwiseDivOp(OpTest): @@ -53,11 +53,13 @@ class ElementwiseDivOp(OpTest): pass +@skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") class TestElementwiseDivOp_scalar(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float64), + 'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float64), 'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64) } self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']} @@ -67,8 +69,8 @@ class TestElementwiseDivOp_Vector(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [32]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [32]).astype("float64") + 'X': np.random.uniform(0.1, 1, [100]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} @@ -77,14 +79,14 @@ class TestElementwiseDivOp_broadcast_0(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2]).astype("float64") + 'X': np.random.uniform(0.1, 1, [100, 3, 4]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") } self.attrs = {'axis': 0} self.outputs = { 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(2, 1, 1)) + np.divide(self.inputs['X'], self.inputs['Y'].reshape(100, 1, 1)) } @@ -92,14 +94,14 @@ class TestElementwiseDivOp_broadcast_1(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [3]).astype("float64") + 'X': np.random.uniform(0.1, 1, [2, 100, 4]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") } self.attrs = {'axis': 1} self.outputs = { 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 1)) + np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 100, 1)) } @@ -107,13 +109,13 @@ class TestElementwiseDivOp_broadcast_2(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, 
[4]).astype("float64") + 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [100]).astype("float64") } self.outputs = { 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 4)) + np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100)) } @@ -121,14 +123,14 @@ class TestElementwiseDivOp_broadcast_3(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [3, 4]).astype("float64") + 'X': np.random.uniform(0.1, 1, [2, 10, 12, 5]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [10, 12]).astype("float64") } self.attrs = {'axis': 1} self.outputs = { 'Out': - np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 3, 4, 1)) + np.divide(self.inputs['X'], self.inputs['Y'].reshape(1, 10, 12, 1)) } @@ -136,8 +138,8 @@ class TestElementwiseDivOp_broadcast_4(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2, 1, 4]).astype("float64") + 'X': np.random.uniform(0.1, 1, [2, 3, 50]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [2, 1, 50]).astype("float64") } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} @@ -146,8 +148,8 @@ class TestElementwiseDivOp_broadcast_5(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 5]).astype("float64") + 'X': np.random.uniform(0.1, 1, [2, 3, 4, 20]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [2, 3, 1, 20]).astype("float64") } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} @@ -156,8 +158,8 @@ class TestElementwiseDivOp_commonuse_1(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 4]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [1, 1, 4]).astype("float64"), + 'X': np.random.uniform(0.1, 1, [2, 3, 100]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [1, 1, 100]).astype("float64"), } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} @@ -166,8 +168,8 @@ class TestElementwiseDivOp_commonuse_2(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [2, 3, 1, 5]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2, 1, 4, 1]).astype("float64"), + 'X': np.random.uniform(0.1, 1, [30, 3, 1, 5]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [30, 1, 4, 1]).astype("float64"), } self.outputs = {'Out': np.divide(self.inputs['X'], self.inputs['Y'])} @@ -176,8 +178,8 @@ class TestElementwiseDivOp_xsize_lessthan_ysize(ElementwiseDivOp): def setUp(self): self.op_type = "elementwise_div" self.inputs = { - 'X': np.random.uniform(0.1, 1, [4, 5]).astype("float64"), - 'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float64"), + 'X': np.random.uniform(0.1, 1, [10, 12]).astype("float64"), + 'Y': np.random.uniform(0.1, 1, [2, 3, 10, 12]).astype("float64"), } self.attrs = {'axis': 2} @@ -192,9 +194,9 @@ class TestElementwiseDivOp_INT(OpTest): self.init_dtype() self.inputs = { 'X': np.random.randint( - 1, 5, size=[2, 3]).astype(self.dtype), + 1, 5, size=[13, 17]).astype(self.dtype), 'Y': np.random.randint( - 1, 5, size=[2, 3]).astype(self.dtype) + 1, 5, size=[13, 17]).astype(self.dtype) } 
self.outputs = {'Out': self.inputs['X'] // self.inputs['Y']} diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index a07bb86c5ce..fd2fe73ad51 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -15,7 +15,7 @@ from __future__ import print_function import unittest import numpy as np -from op_test import OpTest +from op_test import OpTest, skip_check_grad_ci import paddle.fluid.core as core from paddle.fluid.op import Operator import paddle.fluid as fluid @@ -79,6 +79,8 @@ class ElementwiseMulOp(OpTest): pass +@skip_check_grad_ci( + reason="[skip shape check] Use y_shape(1) to test broadcast.") class TestElementwiseMulOp_scalar(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" @@ -103,9 +105,9 @@ class TestElementwiseMulOp_Vector(ElementwiseMulOp): class TestElementwiseMulOp_broadcast_0(ElementwiseMulOp): def init_input_output(self): - self.x = np.random.rand(2, 13, 4).astype(self.dtype) - self.y = np.random.rand(2).astype(self.dtype) - self.out = self.x * self.y.reshape(2, 1, 1) + self.x = np.random.rand(100, 2, 3).astype(self.dtype) + self.y = np.random.rand(100).astype(self.dtype) + self.out = self.x * self.y.reshape(100, 1, 1) def init_axis(self): self.axis = 0 @@ -115,13 +117,13 @@ class TestElementwiseMulOp_broadcast_1(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(10, 3, 4).astype(np.float64), - 'Y': np.random.rand(3).astype(np.float64) + 'X': np.random.rand(2, 100, 3).astype(np.float64), + 'Y': np.random.rand(100).astype(np.float64) } self.attrs = {'axis': 1} self.outputs = { - 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 1) + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 100, 1) } self.init_kernel_type() @@ -130,12 +132,12 @@ class TestElementwiseMulOp_broadcast_2(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(10, 3, 4).astype(np.float64), - 'Y': np.random.rand(4).astype(np.float64) + 'X': np.random.rand(2, 3, 100).astype(np.float64), + 'Y': np.random.rand(100).astype(np.float64) } self.outputs = { - 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 4) + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 1, 100) } self.init_kernel_type() @@ -144,13 +146,13 @@ class TestElementwiseMulOp_broadcast_3(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4, 5).astype(np.float64), - 'Y': np.random.rand(3, 4).astype(np.float64) + 'X': np.random.rand(2, 10, 12, 3).astype(np.float64), + 'Y': np.random.rand(10, 12).astype(np.float64) } self.attrs = {'axis': 1} self.outputs = { - 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 3, 4, 1) + 'Out': self.inputs['X'] * self.inputs['Y'].reshape(1, 10, 12, 1) } self.init_kernel_type() @@ -159,8 +161,8 @@ class TestElementwiseMulOp_broadcast_4(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float64), - 'Y': np.random.rand(2, 1, 4).astype(np.float64) + 'X': np.random.rand(10, 2, 11).astype(np.float64), + 'Y': np.random.rand(10, 1, 11).astype(np.float64) } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() @@ -170,8 +172,8 @@ class TestElementwiseMulOp_broadcast_5(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" 
self.inputs = { - 'X': np.random.rand(2, 3, 4, 5).astype(np.float64), - 'Y': np.random.rand(2, 3, 1, 5).astype(np.float64) + 'X': np.random.rand(10, 4, 2, 3).astype(np.float64), + 'Y': np.random.rand(10, 4, 1, 3).astype(np.float64) } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() @@ -188,8 +190,8 @@ class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 4).astype(np.float64), - 'Y': np.random.rand(1, 1, 4).astype(np.float64) + 'X': np.random.rand(2, 3, 100).astype(np.float64), + 'Y': np.random.rand(1, 1, 100).astype(np.float64) } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() @@ -199,8 +201,8 @@ class TestElementwiseMulOp_commonuse_2(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(2, 3, 1, 5).astype(np.float64), - 'Y': np.random.rand(2, 1, 4, 1).astype(np.float64) + 'X': np.random.rand(30, 3, 1, 5).astype(np.float64), + 'Y': np.random.rand(30, 1, 4, 1).astype(np.float64) } self.outputs = {'Out': self.inputs['X'] * self.inputs['Y']} self.init_kernel_type() @@ -210,14 +212,14 @@ class TestElementwiseMulOp_xsize_lessthan_ysize(ElementwiseMulOp): def setUp(self): self.op_type = "elementwise_mul" self.inputs = { - 'X': np.random.rand(4, 5).astype(np.float64), - 'Y': np.random.rand(2, 3, 4, 5).astype(np.float64) + 'X': np.random.rand(10, 10).astype(np.float64), + 'Y': np.random.rand(2, 2, 10, 10).astype(np.float64) } self.attrs = {'axis': 2} self.outputs = { - 'Out': self.inputs['X'].reshape(1, 1, 4, 5) * self.inputs['Y'] + 'Out': self.inputs['X'].reshape(1, 1, 10, 10) * self.inputs['Y'] } self.init_kernel_type() diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index 3dd7634eb8c..b267634197f 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -28,8 +28,8 @@ class TestMulOp(OpTest): self.dtype = np.float64 self.init_dtype_type() self.inputs = { - 'X': np.random.random((2, 5)).astype(self.dtype), - 'Y': np.random.random((5, 3)).astype(self.dtype) + 'X': np.random.random((20, 5)).astype(self.dtype), + 'Y': np.random.random((5, 21)).astype(self.dtype) } self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} @@ -72,15 +72,15 @@ class TestMulOp2(OpTest): self.dtype = np.float64 self.init_dtype_type() self.inputs = { - 'X': np.random.random((3, 4, 4, 3)).astype(self.dtype), - 'Y': np.random.random((2, 6, 1, 2, 3)).astype(self.dtype) + 'X': np.random.random((3, 4, 2, 9)).astype(self.dtype), + 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype) } self.attrs = { 'x_num_col_dims': 2, 'y_num_col_dims': 2, } - result = np.dot(self.inputs['X'].reshape(3 * 4, 4 * 3), - self.inputs['Y'].reshape(2 * 6, 1 * 2 * 3)) + result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9), + self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3)) result = result.reshape(3, 4, 1, 2, 3) self.outputs = {'Out': result} diff --git a/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py b/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py index a43cd8f80dd..227e6cc28fb 100644 --- a/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py @@ -13,8 +13,6 @@ # limitations under the License. 
NEED_TO_FIX_OP_LIST = [ - 'elementwise_mul', - 'elementwise_div', 'fused_elemwise_activation', 'bilinear_tensor_product', 'conv2d_transpose', @@ -23,7 +21,6 @@ NEED_TO_FIX_OP_LIST = [ 'lstmp', 'margin_rank_loss', 'matmul', - 'mul', 'scatter', 'soft_relu', 'squared_l2_distance', diff --git a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py index 44e0c7b5e90..0f5918544a3 100644 --- a/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py @@ -46,7 +46,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [ 'max_pool3d_with_index', \ 'minus', \ 'modified_huber_loss', \ - 'mul', \ 'nce', \ 'pool2d', \ 'pool3d', \ -- GitLab
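The pattern behind the change, in brief: tests whose inputs cannot reach the required size (such as the scalar-broadcast cases, where Y has shape [1]) are marked with the skip_check_grad_ci decorator, while the remaining tests simply enlarge their inputs (for example, a 100-element broadcast axis), so elementwise_mul, elementwise_div, and mul no longer need entries in check_shape_white_list.py or op_accuracy_white_list.py. Below is a minimal sketch of that pattern, assuming the OpTest harness and skip_check_grad_ci helper imported in the modified test files; the class name is hypothetical, while the decorator arguments, shapes, and check_output call follow the patch.

import numpy as np
from op_test import OpTest, skip_check_grad_ci


@skip_check_grad_ci(
    reason="[skip shape check] Use y_shape(1) to test broadcast.")
class TestDivByScalarSketch(OpTest):  # hypothetical name, mirrors TestElementwiseDivOp_scalar
    def setUp(self):
        self.op_type = "elementwise_div"
        # X is enlarged to 20 x 3 x 4; Y keeps shape [1] to exercise the
        # scalar broadcast, which is why the grad shape check is skipped.
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [20, 3, 4]).astype(np.float64),
            'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64),
        }
        self.outputs = {'Out': self.inputs['X'] / self.inputs['Y']}

    def test_check_output(self):
        self.check_output()

The new shapes in TestMulOp2 can be sanity-checked the same way the test builds its reference output: with x_num_col_dims=2 and y_num_col_dims=2, X of shape (3, 4, 2, 9) flattens to (12, 18) and Y of shape (3, 6, 1, 2, 3) flattens to (18, 6), so the matrix product is well defined and reshapes back to (3, 4, 1, 2, 3). A standalone NumPy check of that arithmetic (not part of the patch; variable names are illustrative):

import numpy as np

x = np.random.random((3, 4, 2, 9))
y = np.random.random((3, 6, 1, 2, 3))
# x_num_col_dims=2: keep the first two dims as rows, flatten the rest into columns.
x2d = x.reshape(3 * 4, 2 * 9)        # (12, 18)
# y_num_col_dims=2: flatten the first two dims into rows, the rest into columns.
y2d = y.reshape(3 * 6, 1 * 2 * 3)    # (18, 6)
out = np.dot(x2d, y2d).reshape(3, 4, 1, 2, 3)
print(out.shape)                     # (3, 4, 1, 2, 3)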