Commit 4c987a60 authored by zhupengyang, committed by hong19860320

fix input shape of op tests (#21682)

* fix input shape of op tests for elementwise_sub,
gather, pad2d, transpose, softmax, scale,
elementwise_max, hierarchical_sigmoid, reshape2,
sign, squeeze, reduce_sum, sum, squeeze2,
unsqueeze, unsqueeze2, cast, reverse

test=develop

* fix cast, elementwise_mul, gather, scale, sign,
softmax, transpose

test=develop
Parent f64d0066
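
Every enlarged shape in this diff ends up with at least 100 elements (for example (3, 4) becomes (3, 40), (10) becomes (100), and batch_size goes from 4 to 15 with feature_size 8). Below is a minimal sketch of the kind of minimum-size check that would motivate such changes; it assumes the op-test framework rejects inputs with fewer than 100 elements, and MIN_INPUT_NUMEL / check_numel are hypothetical names used only for illustration, not the real OpTest implementation.

```python
import numpy as np

# Hypothetical sketch: assume the op-test framework flags tests whose main
# input tensor holds fewer than 100 elements. MIN_INPUT_NUMEL and
# check_numel are illustrative names, not the real OpTest implementation.
MIN_INPUT_NUMEL = 100


def check_numel(name, arr):
    # Count the elements of the input and reject anything under the threshold.
    numel = np.asarray(arr).size
    if numel < MIN_INPUT_NUMEL:
        raise ValueError("input '%s' has only %d elements, need >= %d"
                         % (name, numel, MIN_INPUT_NUMEL))


# The old TestElementwiseMaxOp_scalar shape (2, 3, 4) gives 24 elements and
# would fail this check; the new shape (2, 3, 20) gives 120 elements and passes.
check_numel('X', np.random.uniform(-5, 5, [2, 3, 20]).astype("float32"))
```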
@@ -49,7 +49,7 @@ class TestElementwiseOp(OpTest):
 class TestElementwiseMaxOp_scalar(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.random_integers(-5, 5, [2, 3, 4]).astype("float32")
+        x = np.random.random_integers(-5, 5, [2, 3, 20]).astype("float32")
         y = np.array([0.5]).astype("float32")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
@@ -58,9 +58,9 @@ class TestElementwiseMaxOp_scalar(TestElementwiseOp):
 class TestElementwiseMaxOp_Vector(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.random((32, )).astype("float32")
-        sgn = np.random.choice([-1, 1], (32, )).astype("float32")
-        y = x + sgn * np.random.uniform(0.1, 1, (32, )).astype("float32")
+        x = np.random.random((100, )).astype("float32")
+        sgn = np.random.choice([-1, 1], (100, )).astype("float32")
+        y = x + sgn * np.random.uniform(0.1, 1, (100, )).astype("float32")
         self.inputs = {'X': x, 'Y': y}
         self.outputs = {'Out': np.maximum(self.inputs['X'], self.inputs['Y'])}
@@ -68,7 +68,7 @@ class TestElementwiseMaxOp_Vector(TestElementwiseOp):
 class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 20)).astype(np.float32)
         sgn = np.random.choice([-1, 1], (2, )).astype(np.float32)
         y = x[:, 0, 0] + sgn * \
             np.random.uniform(1, 2, (2, )).astype(np.float32)
@@ -84,7 +84,7 @@ class TestElementwiseMaxOp_broadcast_0(TestElementwiseOp):
 class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (2, 3, 20)).astype(np.float32)
         sgn = np.random.choice([-1, 1], (3, )).astype(np.float32)
         y = x[0, :, 0] + sgn * \
             np.random.uniform(1, 2, (3, )).astype(np.float32)
@@ -100,7 +100,7 @@ class TestElementwiseMaxOp_broadcast_1(TestElementwiseOp):
 class TestElementwiseMaxOp_broadcast_2(TestElementwiseOp):
     def setUp(self):
         self.op_type = "elementwise_max"
-        x = np.random.uniform(0.5, 1, (2, 3, 4)).astype(np.float32)
+        x = np.random.uniform(0.5, 1, (10, 3, 4)).astype(np.float32)
         sgn = np.random.choice([-1, 1], (4, )).astype(np.float32)
         y = x[0, 0, :] + sgn * \
             np.random.uniform(1, 2, (4, )).astype(np.float32)

@@ -22,8 +22,8 @@ class TestElementwiseOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_sub"
         self.inputs = {
-            'X': np.random.uniform(0.1, 1, [2, 3]).astype("float32"),
-            'Y': np.random.uniform(0.1, 1, [2, 3]).astype("float32")
+            'X': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32"),
+            'Y': np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype("float32")
         }
         self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']}

@@ -51,7 +51,7 @@ class TestCase1(TestGatherOp):
         """
        For one dimension input
         """
-        self.x_shape = (10)
+        self.x_shape = (100)
         self.x_type = "float32"
         self.index = [1, 3, 5]
         self.index_type = "int32"

@@ -147,7 +147,7 @@ class TestHSigmoidOp(OpTest):
         self.op_type = "hierarchical_sigmoid"
         num_classes = 6
         feature_size = 8
-        batch_size = 4
+        batch_size = 15
         x = np.random.random((batch_size, feature_size)).astype("float32") * 2
         w = np.random.random(
             (num_classes - 1, feature_size)).astype("float32") * 2

@@ -56,7 +56,7 @@ class TestPad2dOp(OpTest):
         self.check_grad(['X'], 'Out', max_relative_error=0.006)
     def initTestCase(self):
-        self.shape = (2, 3, 4, 4)
+        self.shape = (2, 3, 4, 5)
         self.paddings = [0, 1, 2, 3]
         self.mode = "constant"
         self.data_format = "NCHW"
@@ -65,7 +65,7 @@ class TestPad2dOp(OpTest):
 class TestCase1(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 3, 4, 4)
+        self.shape = (2, 3, 4, 5)
         self.paddings = [0, 1, 2, 3]
         self.mode = "reflect"
         self.data_format = "NCHW"
@@ -73,7 +73,7 @@ class TestCase1(TestPad2dOp):
 class TestCase2(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 3, 4, 4)
+        self.shape = (2, 3, 4, 5)
         self.paddings = [0, 1, 2, 3]
         self.mode = "edge"
         self.data_format = "NCHW"
@@ -81,7 +81,7 @@ class TestCase2(TestPad2dOp):
 class TestCase3(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 4, 4, 2)
+        self.shape = (2, 4, 4, 4)
         self.paddings = [0, 1, 2, 3]
         self.mode = "reflect"
         self.data_format = "NHWC"
@@ -89,7 +89,7 @@ class TestCase3(TestPad2dOp):
 class TestCase4(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 4, 4, 2)
+        self.shape = (2, 4, 4, 4)
         self.paddings = [0, 1, 2, 3]
         self.mode = "edge"
         self.data_format = "NHWC"
@@ -97,7 +97,7 @@ class TestCase4(TestPad2dOp):
 class TestCase5(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 4, 4, 2)
+        self.shape = (2, 4, 4, 4)
         self.paddings = [0, 1, 2, 3]
         self.mode = "constant"
         self.pad_value = 1.2
@@ -106,7 +106,7 @@ class TestCase5(TestPad2dOp):
 class TestCase6(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 4, 4, 2)
+        self.shape = (2, 4, 4, 4)
         self.paddings = [0, 1, 2, 3]
         self.mode = "constant"
         self.pad_value = 1.2
@@ -116,7 +116,7 @@ class TestCase6(TestPad2dOp):
 class TestCase7(TestPad2dOp):
     def initTestCase(self):
-        self.shape = (2, 3, 4, 4)
+        self.shape = (2, 3, 4, 5)
         self.paddings = [0, 1, 2, 3]
         self.mode = "reflect"
         self.data_format = "NCHW"

@@ -169,7 +169,7 @@ class TestAnyOpWithKeepDim(OpTest):
 class Test1DReduce(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
-        self.inputs = {'X': np.random.random(20).astype("float64")}
+        self.inputs = {'X': np.random.random(120).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
     def test_check_output(self):

@@ -35,9 +35,9 @@ class TestReshapeOp(OpTest):
         }
     def init_data(self):
-        self.ori_shape = (2, 25)
-        self.new_shape = (5, 10)
-        self.infered_shape = (5, 10)
+        self.ori_shape = (2, 60)
+        self.new_shape = (12, 10)
+        self.infered_shape = (12, 10)
     def test_check_output(self):
@@ -49,7 +49,7 @@ class TestReshapeOp(OpTest):
 class TestReshapeOpDimInfer1(TestReshapeOp):
     def init_data(self):
-        self.ori_shape = (5, 10)
+        self.ori_shape = (5, 25)
         self.new_shape = (5, -1, 5)
         self.infered_shape = (5, -1, 5)

@@ -21,7 +21,7 @@ from op_test import OpTest
 class TestReverseOp(OpTest):
     def initTestCase(self):
-        self.x = np.random.random((3, 4)).astype('float32')
+        self.x = np.random.random((3, 40)).astype('float32')
         self.axis = [0]
     def setUp(self):
@@ -43,25 +43,25 @@ class TestReverseOp(OpTest):
 class TestCase0(TestReverseOp):
     def initTestCase(self):
-        self.x = np.random.random((3, 4)).astype('float32')
+        self.x = np.random.random((3, 40)).astype('float32')
         self.axis = [1]
 class TestCase1(TestReverseOp):
     def initTestCase(self):
-        self.x = np.random.random((3, 4)).astype('float32')
+        self.x = np.random.random((3, 40)).astype('float32')
         self.axis = [0, 1]
 class TestCase2(TestReverseOp):
     def initTestCase(self):
-        self.x = np.random.random((3, 4, 5)).astype('float32')
+        self.x = np.random.random((3, 4, 10)).astype('float32')
         self.axis = [0, 2]
 class TestCase3(TestReverseOp):
     def initTestCase(self):
-        self.x = np.random.random((3, 4, 5)).astype('float32')
+        self.x = np.random.random((3, 4, 10)).astype('float32')
         self.axis = [1, 2]

@@ -39,9 +39,9 @@ class TestSqueezeOp(OpTest):
         self.check_grad(["X"], "Out")
     def init_test_case(self):
-        self.ori_shape = (1, 3, 1, 5)
+        self.ori_shape = (1, 3, 1, 40)
         self.axes = (0, 2)
-        self.new_shape = (3, 5)
+        self.new_shape = (3, 40)
     def init_attrs(self):
         self.attrs = {"axes": self.axes}

@@ -38,9 +38,9 @@ class TestSqueezeOp(OpTest):
         self.check_grad(["X"], "Out")
     def init_test_case(self):
-        self.ori_shape = (1, 3, 1, 5)
+        self.ori_shape = (1, 3, 1, 40)
         self.axes = (0, 2)
-        self.new_shape = (3, 5)
+        self.new_shape = (3, 40)
     def init_attrs(self):
         self.attrs = {"axes": self.axes}
@@ -49,9 +49,9 @@ class TestSqueezeOp(OpTest):
 # Correct: There is mins axis.
 class TestSqueezeOp1(TestSqueezeOp):
     def init_test_case(self):
-        self.ori_shape = (1, 3, 1, 5)
+        self.ori_shape = (1, 3, 1, 40)
         self.axes = (0, -2)
-        self.new_shape = (3, 5)
+        self.new_shape = (3, 40)
 # Correct: No axes input.

@@ -27,9 +27,9 @@ class TestSumOp(OpTest):
         self.init_kernel_type()
         self.use_mkldnn = False
         self.init_kernel_type()
-        x0 = np.random.random((3, 4)).astype(self.dtype)
-        x1 = np.random.random((3, 4)).astype(self.dtype)
-        x2 = np.random.random((3, 4)).astype(self.dtype)
+        x0 = np.random.random((3, 40)).astype(self.dtype)
+        x1 = np.random.random((3, 40)).astype(self.dtype)
+        x2 = np.random.random((3, 40)).astype(self.dtype)
         self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
         y = x0 + x1 + x2
         self.outputs = {'Out': y}

@@ -46,19 +46,19 @@ class TestTransposeOp(OpTest):
         self.check_grad(['X'], 'Out')
     def initTestCase(self):
-        self.shape = (3, 4)
+        self.shape = (3, 40)
         self.axis = (1, 0)
 class TestCase0(TestTransposeOp):
     def initTestCase(self):
-        self.shape = (3, )
+        self.shape = (100, )
         self.axis = (0, )
 class TestCase1(TestTransposeOp):
     def initTestCase(self):
-        self.shape = (3, 4, 5)
+        self.shape = (3, 4, 10)
         self.axis = (0, 2, 1)

@@ -39,9 +39,9 @@ class TestUnsqueezeOp(OpTest):
         self.check_grad(["X"], "Out")
     def init_test_case(self):
-        self.ori_shape = (3, 5)
+        self.ori_shape = (3, 40)
         self.axes = (1, 2)
-        self.new_shape = (3, 1, 1, 5)
+        self.new_shape = (3, 1, 1, 40)
     def init_attrs(self):
         self.attrs = {"axes": self.axes}

@@ -36,9 +36,9 @@ class TestUnsqueezeOp(OpTest):
         self.check_grad(["X"], "Out")
     def init_test_case(self):
-        self.ori_shape = (3, 5)
+        self.ori_shape = (3, 40)
         self.axes = (1, 2)
-        self.new_shape = (3, 1, 1, 5)
+        self.new_shape = (3, 1, 1, 40)
     def init_attrs(self):
         self.attrs = {"axes": self.axes}