Commit f2f91307 authored by zhupengyang, committed by Tao Luo

all cases use large shape (#22102)

enhanced ops: acos, asin, brelu, ceil, cos, elu, floor, hard_shrink,
hard_sigmoid, hard_swish, relu6, round, rsqrt, sin, softshrink,
cos_sim, cross_entropy, im2sequence, kldiv_loss
Parent 9f7d90d2
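The change follows one pattern throughout: each test's randomly generated input is enlarged from a small shape (e.g. [4, 4] or [2, 3]) to roughly [10, 12], while the NumPy reference computation stays the same. A minimal NumPy-only sketch of that pattern, using the hard_shrink case from the first hunk below (the OpTest scaffolding around it is omitted):

    import numpy as np

    threshold = 0.5
    # before: x = np.random.uniform(-1, 1, [4, 4]) -- only 16 elements
    # after: a larger shape, scaled so values fall on both sides of the threshold
    x = np.random.uniform(-1, 1, [10, 12]).astype("float32") * 10

    # hard_shrink reference: zero out values inside [-threshold, threshold]
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0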
@@ -165,7 +165,7 @@ class TestHardShrink(TestActivation):
         self.init_dtype()
         threshold = 0.5
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
         out = np.copy(x)
         out[(out >= -threshold) & (out <= threshold)] = 0
@@ -185,7 +185,7 @@ class TestSoftShrink(TestActivation):
         self.init_dtype()
         lambda_val = 0.1
-        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
         out = np.copy(x)
         out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
             out - lambda_val)
@@ -222,7 +222,7 @@ class TestRsqrt(TestActivation):
         self.op_type = "rsqrt"
         self.init_dtype()
-        x = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
+        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
         out = 1.0 / np.sqrt(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -261,7 +261,7 @@ class TestCeil(TestActivation):
         self.op_type = "ceil"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.ceil(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -277,7 +277,7 @@ class TestFloor(TestActivation):
         self.op_type = "floor"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.floor(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -295,7 +295,7 @@ class TestCos(TestActivation):
         self.op_type = "cos"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.cos(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -312,7 +312,7 @@ class TestAcos(TestActivation):
         self.op_type = "acos"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
         out = np.arccos(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -329,7 +329,7 @@ class TestSin(TestActivation):
         self.op_type = "sin"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.sin(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -346,7 +346,7 @@ class TestAsin(TestActivation):
         self.op_type = "asin"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
         out = np.arcsin(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -363,7 +363,7 @@ class TestRound(TestActivation):
         self.op_type = "round"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.round(x)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -433,7 +433,7 @@ class TestBRelu(TestActivation):
         self.op_type = "brelu"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
         t_min = 1.0
         t_max = 4.0
         # The same with TestAbs
@@ -458,7 +458,7 @@ class TestRelu6(TestActivation):
         self.op_type = "relu6"
         self.init_dtype()
-        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
+        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
         threshold = 6.0
         # The same with TestAbs
         x[np.abs(x) < 0.005] = 0.02
@@ -480,7 +480,7 @@ class TestHardSwish(TestActivation):
         self.op_type = 'hard_swish'
         self.init_dtype()
-        x = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
         threshold = 6.0
         scale = 6.0
         offset = 3.0
@@ -508,7 +508,7 @@ class TestSoftRelu(TestActivation):
         threshold = 2.0
         # The same reason with TestAbs
         x[np.abs(x - threshold) < 0.005] = threshold + 0.02
-        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
+        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
         t = np.copy(x)
         t[t < -threshold] = -threshold
         t[t > threshold] = threshold
@@ -529,7 +529,7 @@ class TestELU(TestActivation):
         self.op_type = "elu"
         self.init_dtype()
-        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
         alpha = 1.
         out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
         # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
@@ -731,11 +731,11 @@ class TestThresholdedRelu(TestActivation):
         self.init_dtype()
         threshold = 0.25
-        self.relative_error = 0.005
+        self.delta = 0.005
         X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
         # Same reason as TestAbs
-        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
+        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
         out = (X > threshold) * X
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
@@ -753,19 +753,17 @@ class TestHardSigmoid(TestActivation):
         self.op_type = "hard_sigmoid"
         self.init_dtype()
-        self.relative_error = 0.002
-        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
+        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
         slope = 0.2
         offset = 0.5
         lower_threshold = -offset / slope
         upper_threshold = (1 - offset) / slope
+        self.delta = 0.005
         # Same reason as TestAbs
-        X[np.abs(X - lower_threshold) < self.relative_error] = \
-            lower_threshold + 0.2
-        X[np.abs(X - upper_threshold) < self.relative_error] = \
-            upper_threshold - 0.2
+        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
+        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02
         temp = X * slope + offset
         out = np.maximum(0.0, np.minimum(1.0, temp))
@@ -776,7 +774,7 @@ class TestHardSigmoid(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.002)
+        self.check_grad(['X'], 'Out')
 class TestSwish(TestActivation):
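For context on the hard_sigmoid hunks above: the reference formula is unchanged, only the sampling is adjusted. A NumPy-only sketch of what the test computes and why samples are pushed away from the two thresholds (a reading of the diff, not part of the commit):

    import numpy as np

    slope, offset = 0.2, 0.5
    x = np.random.uniform(-5, 5, [10, 12]).astype("float32")

    # hard_sigmoid reference: clip(slope * x + offset, 0, 1). Its gradient is
    # discontinuous at x = -offset / slope and x = (1 - offset) / slope, so the
    # test moves samples away from those points before the finite-difference
    # gradient check, which is also why the tightened max_relative_error is dropped.
    out = np.maximum(0.0, np.minimum(1.0, x * slope + offset))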
......
@@ -55,8 +55,8 @@ class TestCosSimOp2(TestCosSimOp):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((6, 5)).astype("float32"),
-            'Y': np.random.random((1, 5)).astype("float32")
+            'X': np.random.random((6, 100)).astype("float32"),
+            'Y': np.random.random((1, 100)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -73,8 +73,8 @@ class TestCosSimOp3(TestCosSimOp):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((6, 5, 2)).astype("float32"),
-            'Y': np.random.random((6, 5, 2)).astype("float32")
+            'X': np.random.random((6, 5, 4)).astype("float32"),
+            'Y': np.random.random((6, 5, 4)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
@@ -91,8 +91,8 @@ class TestCosSimOp4(TestCosSimOp):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((6, 5, 2)).astype("float32"),
-            'Y': np.random.random((1, 5, 2)).astype("float32")
+            'X': np.random.random((6, 5, 20)).astype("float32"),
+            'Y': np.random.random((1, 5, 20)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
......
@@ -146,7 +146,7 @@ class TestCrossEntropyOp3(TestCrossEntropyOp):
     def init_bs_class_num(self):
         self.batch_size = 5
-        self.class_num = 17
+        self.class_num = 27
     def test_check_grad(self):
         self.check_grad(
......
@@ -170,7 +170,7 @@ class TestBlockExpandOpCase2(TestBlockExpandOp):
 class TestBlockExpandOpCase3(TestBlockExpandOp):
     def config(self):
-        self.batch_size = 2
+        self.batch_size = 6
         self.img_channels = 1
         self.img_height = 4
         self.img_width = 5
@@ -183,7 +183,7 @@ class TestBlockExpandOpCase3(TestBlockExpandOp):
 class TestBlockExpandOpCase4(TestBlockExpandOp):
     def config(self):
-        self.batch_size = 2
+        self.batch_size = 6
         self.img_channels = 2
         self.img_height = 3
         self.img_width = 3
......
@@ -73,7 +73,7 @@ class TestKLDivLossOp3(TestKLDivLossOp):
 class TestKLDivLossOp4(TestKLDivLossOp):
     def initTestCase(self):
-        self.x_shape = (5, 7)
+        self.x_shape = (5, 20)
         self.reduction = 'sum'
......