Commit 85ba5275 authored by zhupengyang, committed by Tao Luo

all cases use large shape (#22065)

affine_channel, affine_grid, bilinear_interp, bilinear_tensor_product,
clip, crop, crop_tensor, cumsum, data_norm, expand, expand_as
Parent 7fb817d4
@@ -62,15 +62,15 @@ class TestAffineChannelOp(OpTest):
         self.check_grad(['X'], 'Out', no_grad_set=set(['Scale', 'Bias']))
     def init_test_case(self):
-        self.shape = [2, 8, 12, 12]
-        self.C = 8
+        self.shape = [2, 100, 12, 12]
+        self.C = 100
         self.layout = 'NCHW'
 class TestAffineChannelNHWC(TestAffineChannelOp):
     def init_test_case(self):
-        self.shape = [2, 12, 12, 16]
-        self.C = 16
+        self.shape = [2, 12, 12, 100]
+        self.C = 100
         self.layout = 'NHWC'
     def test_check_grad_stopgrad_dx(self):
@@ -82,8 +82,8 @@ class TestAffineChannelNHWC(TestAffineChannelOp):
 class TestAffineChannel2D(TestAffineChannelOp):
     def init_test_case(self):
-        self.shape = [8, 32]
-        self.C = 32
+        self.shape = [8, 100]
+        self.C = 100
         self.layout = 'NCHW'
     def test_check_grad_stopgrad_dx(self):
......
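For context, the affine_channel cases above check a per-channel scale and shift. A minimal NumPy sketch of that computation for the NCHW layout (names and shapes mirror the updated test; the formula is the standard per-channel affine, not copied from this diff):

```python
import numpy as np

# Per-channel affine: every channel c is scaled by scale[c] and shifted by bias[c].
x = np.random.random((2, 100, 12, 12)).astype("float64")
scale = np.random.random(100).astype("float64")
bias = np.random.random(100).astype("float64")
out = x * scale.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)
```

For the NHWC cases the same scale and bias simply broadcast over the last axis instead.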
@@ -66,8 +66,8 @@ class TestAffineGridOp(OpTest):
 class TestAffineGridOpCase1(TestAffineGridOp):
     def initTestCase(self):
-        self.theta_shape = (3, 2, 3)
-        self.output_shape = np.array([3, 2, 5, 7]).astype("int32")
+        self.theta_shape = (20, 2, 3)
+        self.output_shape = np.array([20, 2, 5, 7]).astype("int32")
         self.dynamic_shape = True
......
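The affine_grid case maps each 2x3 matrix in `theta` onto a sampling grid over the output height and width. A rough NumPy sketch of that mapping; the normalization of coordinates to [-1, 1] is an assumption about the reference, not something shown in this diff:

```python
import numpy as np

def affine_grid_ref(theta, out_shape):
    # theta: (N, 2, 3); out_shape: (N, C, H, W) as in the test's output_shape
    n, _, h, w = out_shape
    base = np.ones((h, w, 3))
    base[..., 0] = np.linspace(-1, 1, w)[None, :]   # x coordinates
    base[..., 1] = np.linspace(-1, 1, h)[:, None]   # y coordinates
    # each output location gets an (x, y) source coordinate: (N, H, W, 2)
    return np.einsum('hwk,njk->nhwj', base, theta)

grid = affine_grid_ref(np.random.random((20, 2, 3)), (20, 2, 5, 7))
```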
@@ -144,7 +144,7 @@ class TestBilinearInterpOp(OpTest):
     def init_test_case(self):
         self.interp_method = 'bilinear'
-        self.input_shape = [2, 3, 4, 4]
+        self.input_shape = [2, 3, 5, 5]
         self.out_h = 2
         self.out_w = 2
         self.scale = 0.
@@ -248,7 +248,7 @@ class TestBilinearInterpActualShape(TestBilinearInterpOp):
 class TestBilinearInterpDataLayout(TestBilinearInterpOp):
     def init_test_case(self):
         self.interp_method = 'bilinear'
-        self.input_shape = [2, 4, 4, 3]
+        self.input_shape = [2, 5, 5, 3]
         self.out_h = 2
         self.out_w = 2
         self.scale = 0.
@@ -438,7 +438,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
     def init_test_case(self):
         self.interp_method = 'bilinear'
-        self.input_shape = [2, 3, 4, 4]
+        self.input_shape = [2, 3, 5, 5]
         self.out_h = 3
         self.out_w = 3
         self.scale = 0.
......
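The bilinear_interp cases resize an NCHW tensor to (out_h, out_w). A loop-based NumPy sketch of bilinear resizing, assuming align-corners style ratios (the real op supports more layouts and modes than shown here):

```python
import numpy as np

def bilinear_resize(x, out_h, out_w):
    # x: (N, C, H, W) -> (N, C, out_h, out_w)
    n, c, h, w = x.shape
    ratio_h = (h - 1.0) / (out_h - 1.0) if out_h > 1 else 0.0
    ratio_w = (w - 1.0) / (out_w - 1.0) if out_w > 1 else 0.0
    out = np.zeros((n, c, out_h, out_w), dtype=x.dtype)
    for i in range(out_h):
        for j in range(out_w):
            hf, wf = i * ratio_h, j * ratio_w
            h0, w0 = int(hf), int(wf)
            h1, w1 = min(h0 + 1, h - 1), min(w0 + 1, w - 1)
            dh, dw = hf - h0, wf - w0
            # weighted sum of the four neighbouring pixels
            out[:, :, i, j] = ((1 - dh) * (1 - dw) * x[:, :, h0, w0] +
                               dh * (1 - dw) * x[:, :, h1, w0] +
                               (1 - dh) * dw * x[:, :, h0, w1] +
                               dh * dw * x[:, :, h1, w1])
    return out

y = bilinear_resize(np.random.random((2, 3, 5, 5)), 2, 2)
```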
@@ -23,9 +23,9 @@ class TestBilinearTensorProductOp(OpTest):
     def setUp(self):
         self.op_type = "bilinear_tensor_product"
         batch_size = 6
-        size0 = 5
-        size1 = 4
-        size2 = 5
+        size0 = 30
+        size1 = 20
+        size2 = 100
         a = np.random.random((batch_size, size0)).astype("float64")
         b = np.random.random((batch_size, size1)).astype("float64")
         w = np.random.random((size2, size0, size1)).astype("float64")
......
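bilinear_tensor_product combines each row of `a` and `b` through one weight slice per output channel. A minimal sketch of the expected output for the enlarged sizes (the bias term the full test adds is omitted here):

```python
import numpy as np

batch_size, size0, size1, size2 = 6, 30, 20, 100
a = np.random.random((batch_size, size0)).astype("float64")
b = np.random.random((batch_size, size1)).astype("float64")
w = np.random.random((size2, size0, size1)).astype("float64")

# out[n, k] = a[n] @ w[k] @ b[n], one bilinear form per output channel k
out = np.einsum('ni,kij,nj->nk', a, w, b)   # shape (6, 100)
```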
@@ -43,7 +43,7 @@ class TestClipOp(OpTest):
         self.check_grad(['X'], 'Out')
     def initTestCase(self):
-        self.shape = (4, 4)
+        self.shape = (10, 10)
         self.max = 0.7
         self.min = 0.1
......
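The clip case only bounds every element into [min, max]; in NumPy terms:

```python
import numpy as np

x = np.random.random((10, 10)).astype("float64")
out = np.clip(x, 0.1, 0.7)   # matches self.min = 0.1, self.max = 0.7
```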
@@ -66,7 +66,7 @@ class TestCropOp(OpTest):
         }
     def initTestCase(self):
-        self.x_shape = (8, 8)
+        self.x_shape = (10, 10)
         self.crop_shape = (2, 2)
         self.offsets = [1, 2]
@@ -86,8 +86,8 @@ class TestCase1(TestCropOp):
 class TestCase2(TestCropOp):
     def initTestCase(self):
-        self.x_shape = (4, 8)
-        self.crop_shape = [4, 8]
+        self.x_shape = (15, 8)
+        self.crop_shape = [15, 8]
         self.offsets = [0, 0]
@@ -101,15 +101,15 @@ class TestCase3(TestCropOp):
 class TestCase4(TestCropOp):
     def initTestCase(self):
-        self.x_shape = (4, 4)
-        self.crop_shape = [4, 4]
+        self.x_shape = (10, 10)
+        self.crop_shape = [10, 10]
         self.offsets = [0, 0]
         self.crop_by_input = True
 class TestCase5(TestCropOp):
     def initTestCase(self):
-        self.x_shape = (3, 4, 5)
+        self.x_shape = (3, 4, 10)
         self.crop_shape = [2, 2, 3]
         self.offsets = [1, 0, 2]
         self.offset_by_input = True
......
@@ -72,7 +72,7 @@ class TestCropTensorOp(OpTest):
         self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}
     def initTestCase(self):
-        self.x_shape = (8, 8)
+        self.x_shape = (10, 10)
         self.crop_shape = [2, 2]
         self.offsets = [1, 2]
@@ -169,7 +169,7 @@ class TestCropTensorOpTensorAttr(OpTest):
         self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}
     def initTestCase(self):
-        self.x_shape = (8, 8)
+        self.x_shape = (10, 10)
         self.crop_shape = (2, 2)
         self.offsets = [1, 2]
         self.shape_attr = [0, 0]
......
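Both the crop and crop_tensor cases above compare against the same idea: slice a `crop_shape` window out of `X` starting at `offsets` (the tensor-attr variant merely feeds the shape in through an input tensor). A minimal sketch of that reference:

```python
import numpy as np

def crop_ref(x, offsets, crop_shape):
    # take a crop_shape window out of x, starting at offsets
    slices = tuple(slice(o, o + s) for o, s in zip(offsets, crop_shape))
    return x[slices]

x = np.random.random((10, 10)).astype("float64")
out = crop_ref(x, [1, 2], (2, 2))   # shape (2, 2)
```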
@@ -82,7 +82,7 @@ class TestSumOp4(OpTest):
 class TestSumOp5(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
-        self.inputs = {'X': np.random.random((5, 6)).astype("float64")}
+        self.inputs = {'X': np.random.random((5, 20)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}
     def test_check_output(self):
@@ -95,7 +95,7 @@ class TestSumOp5(OpTest):
 class TestSumOp7(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
-        self.inputs = {'X': np.random.random((6)).astype("float64")}
+        self.inputs = {'X': np.random.random((100)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}
     def test_check_output(self):
@@ -109,7 +109,7 @@ class TestSumOp8(OpTest):
     def setUp(self):
         self.op_type = "cumsum"
         self.attrs = {'axis': 2, "exclusive": True}
-        a = np.random.random((5, 6, 3)).astype("float64")
+        a = np.random.random((5, 6, 4)).astype("float64")
         self.inputs = {'X': a}
         self.outputs = {
             'Out': np.concatenate(
......
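The cumsum cases accumulate along one axis; `TestSumOp8` additionally sets `exclusive=True`, which shifts the running sum so the first slice along that axis is zero. A NumPy sketch of that exclusive variant:

```python
import numpy as np

a = np.random.random((5, 6, 4)).astype("float64")
# exclusive cumsum along axis 2: prepend zeros, drop the last slice
out = np.concatenate(
    (np.zeros((5, 6, 1), dtype=a.dtype), a[:, :, :-1].cumsum(axis=2)),
    axis=2)
assert out.shape == a.shape
```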
@@ -234,12 +234,11 @@ class TestDataNormOpWithSlotDim(OpTest):
         self.use_mkldnn = False
         epsilon = 0.00001
         slot_dim = 1
-        x_shape = [2, 3]
-        scale_shape = [3]
+        x_shape = [2, 50]
+        scale_shape = [50]
         tp = np.float32
-        x_val = np.array([[-0.35702616, 0.0, -0.08306625],
-                          [0.41199666, 0.0, -0.10180971]]).astype(tp)
+        x_val = np.random.uniform(-1, 1, x_shape).astype(tp)
         batch_size = np.ones(scale_shape).astype(tp)
         batch_size *= 1e4
         batch_sum = np.zeros(scale_shape).astype(tp)
@@ -248,8 +247,8 @@ class TestDataNormOpWithSlotDim(OpTest):
         y = np.array(x_val)
-        mean = np.array([[0, 0, 0], [0, 0, 0]]).astype(tp)
-        scale = np.array([[1, 1, 1], [1, 1, 1]]).astype(tp)
+        mean = np.zeros(x_shape).astype(tp)
+        scale = np.ones(x_shape).astype(tp)
         self.inputs = {
             "X": x_val,
......
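data_norm normalizes each feature with running batch statistics. The constants above are chosen so the expected output equals the input: with `batch_sum` all zeros and `batch_size` at 1e4 (and, assuming the part of the test not shown here also sets `batch_square_sum` to 1e4), the derived mean is 0 and the derived scale is 1. A hedged sketch of that arithmetic, using the usual data_norm statistics:

```python
import numpy as np

x_shape, scale_shape = [2, 50], [50]
x_val = np.random.uniform(-1, 1, x_shape).astype(np.float32)
batch_size = np.ones(scale_shape, dtype=np.float32) * 1e4
batch_sum = np.zeros(scale_shape, dtype=np.float32)
batch_square_sum = np.ones(scale_shape, dtype=np.float32) * 1e4  # assumed, as in the full test

mean = batch_sum / batch_size                    # -> zeros
scale = np.sqrt(batch_size / batch_square_sum)   # -> ones
y = (x_val - mean) * scale                       # equals x_val here
```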
@@ -51,8 +51,8 @@ class TestExpandAsOpRank1(OpTest):
 class TestExpandAsOpRank2(OpTest):
     def setUp(self):
         self.op_type = "expand_as"
-        x = np.random.rand(2, 3).astype("float64")
-        target_tensor = np.random.rand(4, 6).astype("float64")
+        x = np.random.rand(10, 12).astype("float64")
+        target_tensor = np.random.rand(20, 24).astype("float64")
         self.inputs = {'X': x, 'target_tensor': target_tensor}
         self.attrs = {}
         bcast_dims = bcast(x, target_tensor)
@@ -69,8 +69,8 @@ class TestExpandAsOpRank2(OpTest):
 class TestExpandAsOpRank3(OpTest):
     def setUp(self):
         self.op_type = "expand_as"
-        x = np.random.rand(2, 3, 3).astype("float64")
-        target_tensor = np.random.rand(4, 6, 6).astype("float64")
+        x = np.random.rand(2, 3, 20).astype("float64")
+        target_tensor = np.random.rand(4, 6, 40).astype("float64")
         self.inputs = {'X': x, 'target_tensor': target_tensor}
         self.attrs = {}
         bcast_dims = bcast(x, target_tensor)
@@ -87,8 +87,8 @@ class TestExpandAsOpRank3(OpTest):
 class TestExpandAsOpRank4(OpTest):
     def setUp(self):
         self.op_type = "expand_as"
-        x = np.random.rand(1, 1, 3, 16).astype("float64")
-        target_tensor = np.random.rand(4, 6, 6, 32).astype("float64")
+        x = np.random.rand(1, 1, 7, 16).astype("float64")
+        target_tensor = np.random.rand(4, 6, 14, 32).astype("float64")
         self.inputs = {'X': x, 'target_tensor': target_tensor}
         self.attrs = {}
         bcast_dims = bcast(x, target_tensor)
......
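The expand_as cases derive per-axis tile counts from the ratio of target shape to input shape (that is what the `bcast` helper in the test computes) and compare against a NumPy tile. A minimal sketch for the rank-2 case:

```python
import numpy as np

x = np.random.rand(10, 12).astype("float64")
target_tensor = np.random.rand(20, 24).astype("float64")

# per-axis repetition factors: target dim / input dim -> [2, 2]
bcast_dims = [t // s for t, s in zip(target_tensor.shape, x.shape)]
out = np.tile(x, bcast_dims)   # shape (20, 24), the expected output
```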
@@ -45,7 +45,7 @@ class TestExpandOpRank1(OpTest):
 class TestExpandOpRank2_Corner(TestExpandOpRank1):
     def init_data(self):
-        self.ori_shape = [12]
+        self.ori_shape = [120]
         self.expand_times = [2]
@@ -57,13 +57,13 @@ class TestExpandOpRank2(TestExpandOpRank1):
 class TestExpandOpRank3_Corner(TestExpandOpRank1):
     def init_data(self):
-        self.ori_shape = (2, 4, 5)
+        self.ori_shape = (2, 10, 5)
         self.expand_times = (1, 1, 1)
 class TestExpandOpRank3(TestExpandOpRank1):
     def init_data(self):
-        self.ori_shape = (2, 4, 5)
+        self.ori_shape = (2, 4, 15)
         self.expand_times = (2, 1, 4)
@@ -92,7 +92,7 @@ class TestExpandOpRank1_tensor_attr(OpTest):
         self.outputs = {'Out': output}
     def init_data(self):
-        self.ori_shape = [12]
+        self.ori_shape = [100]
         self.expand_times = [2]
         self.infer_expand_times = [-1]
@@ -132,7 +132,7 @@ class TestExpandOpRank1_tensor(OpTest):
         self.outputs = {'Out': output}
     def init_data(self):
-        self.ori_shape = [12]
+        self.ori_shape = [100]
         self.expand_times = [2]
     def test_check_output(self):
......
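expand repeats the input `expand_times` times along each axis, so the expected output is again a tile; for example, for the updated `TestExpandOpRank3`:

```python
import numpy as np

ori = np.random.random((2, 4, 15)).astype("float64")
out = np.tile(ori, (2, 1, 4))   # expand_times = (2, 1, 4) -> shape (4, 4, 60)
```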