diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 80af8dd5339499961f5f4d8615cd047ca233b47b..a4a8b76d07f7760591744bd35beb5154286943c0 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -165,7 +165,7 @@ class TestHardShrink(TestActivation):
         self.init_dtype()
 
         threshold = 0.5
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
         out = np.copy(x)
         out[(out >= -threshold) & (out <= threshold)] = 0
 
@@ -185,7 +185,7 @@ class TestSoftShrink(TestActivation):
         self.init_dtype()
 
         lambda_val = 0.1
-        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
         out = np.copy(x)
         out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
             out - lambda_val)
@@ -222,7 +222,7 @@ class TestRsqrt(TestActivation):
         self.op_type = "rsqrt"
         self.init_dtype()
 
-        x = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
+        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
         out = 1.0 / np.sqrt(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -261,7 +261,7 @@ class TestCeil(TestActivation):
         self.op_type = "ceil"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.ceil(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -277,7 +277,7 @@ class TestFloor(TestActivation):
         self.op_type = "floor"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.floor(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -295,7 +295,7 @@ class TestCos(TestActivation):
         self.op_type = "cos"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.cos(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -312,7 +312,7 @@ class TestAcos(TestActivation):
         self.op_type = "acos"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
         out = np.arccos(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -329,7 +329,7 @@ class TestSin(TestActivation):
         self.op_type = "sin"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.sin(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -346,7 +346,7 @@ class TestAsin(TestActivation):
         self.op_type = "asin"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
         out = np.arcsin(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -363,7 +363,7 @@ class TestRound(TestActivation):
         self.op_type = "round"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
         out = np.round(x)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
@@ -433,7 +433,7 @@ class TestBRelu(TestActivation):
         self.op_type = "brelu"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
         t_min = 1.0
         t_max = 4.0
         # The same with TestAbs
@@ -458,7 +458,7 @@ class TestRelu6(TestActivation):
         self.op_type = "relu6"
         self.init_dtype()
 
-        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
+        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
         threshold = 6.0
         # The same with TestAbs
         x[np.abs(x) < 0.005] = 0.02
@@ -480,7 +480,7 @@ class TestHardSwish(TestActivation):
         self.op_type = 'hard_swish'
         self.init_dtype()
 
-        x = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
         threshold = 6.0
         scale = 6.0
         offset = 3.0
@@ -508,7 +508,7 @@ class TestSoftRelu(TestActivation):
         threshold = 2.0
         # The same reason with TestAbs
         x[np.abs(x - threshold) < 0.005] = threshold + 0.02
-        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
+        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
         t = np.copy(x)
         t[t < -threshold] = -threshold
         t[t > threshold] = threshold
@@ -529,7 +529,7 @@ class TestELU(TestActivation):
         self.op_type = "elu"
         self.init_dtype()
 
-        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
+        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
         alpha = 1.
         out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
         # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
@@ -731,11 +731,11 @@ class TestThresholdedRelu(TestActivation):
         self.init_dtype()
 
         threshold = 0.25
-        self.relative_error = 0.005
+        self.delta = 0.005
         X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
 
         # Same reason as TestAbs
-        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
+        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
         out = (X > threshold) * X
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
@@ -753,19 +753,17 @@ class TestHardSigmoid(TestActivation):
         self.op_type = "hard_sigmoid"
         self.init_dtype()
 
-        self.relative_error = 0.002
-
-        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
+        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
         slope = 0.2
         offset = 0.5
         lower_threshold = -offset / slope
         upper_threshold = (1 - offset) / slope
 
+        self.delta = 0.005
+
         # Same reason as TestAbs
-        X[np.abs(X - lower_threshold) < self.relative_error] = \
-            lower_threshold + 0.2
-        X[np.abs(X - upper_threshold) < self.relative_error] = \
-            upper_threshold - 0.2
+        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
+        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02
 
         temp = X * slope + offset
         out = np.maximum(0.0, np.minimum(1.0, temp))
@@ -776,7 +774,7 @@ class TestHardSigmoid(TestActivation):
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out', max_relative_error=0.002)
+        self.check_grad(['X'], 'Out')
 
 
 class TestSwish(TestActivation):
diff --git a/python/paddle/fluid/tests/unittests/test_cos_sim_op.py b/python/paddle/fluid/tests/unittests/test_cos_sim_op.py
index cee91de510458be2d30d4eb9e629170a6775204e..46f8420be0102961dffb673c448c1cbc7de79aaf 100644
--- a/python/paddle/fluid/tests/unittests/test_cos_sim_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cos_sim_op.py
@@ -55,8 +55,8 @@ class TestCosSimOp2(TestCosSimOp):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((6, 5)).astype("float32"),
-            'Y': np.random.random((1, 5)).astype("float32")
+            'X': np.random.random((6, 100)).astype("float32"),
+            'Y': np.random.random((1, 100)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -73,8 +73,8 @@ class TestCosSimOp3(TestCosSimOp):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((6, 5, 2)).astype("float32"),
-            'Y': np.random.random((6, 5, 2)).astype("float32")
+            'X': np.random.random((6, 5, 4)).astype("float32"),
+            'Y': np.random.random((6, 5, 4)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
@@ -91,8 +91,8 @@ class TestCosSimOp4(TestCosSimOp):
     def setUp(self):
         self.op_type = "cos_sim"
         self.inputs = {
-            'X': np.random.random((6, 5, 2)).astype("float32"),
-            'Y': np.random.random((1, 5, 2)).astype("float32")
+            'X': np.random.random((6, 5, 20)).astype("float32"),
+            'Y': np.random.random((1, 5, 20)).astype("float32")
         }
         expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
         expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py
index 9018003d71ff902bfbb4eb819db1ab469365ede6..ba39b072303fed5375d6ad3949d549a38902e6f3 100644
--- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py
@@ -146,7 +146,7 @@ class TestCrossEntropyOp3(TestCrossEntropyOp):
 
     def init_bs_class_num(self):
         self.batch_size = 5
-        self.class_num = 17
+        self.class_num = 27
 
     def test_check_grad(self):
         self.check_grad(
diff --git a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py
index fbe948f43ae9b4d0773c75af2e8930fea85d3101..a0fb2788d54310aed8ed63230e63dddd80caba7a 100644
--- a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py
+++ b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py
@@ -170,7 +170,7 @@ class TestBlockExpandOpCase2(TestBlockExpandOp):
 
 class TestBlockExpandOpCase3(TestBlockExpandOp):
     def config(self):
-        self.batch_size = 2
+        self.batch_size = 6
         self.img_channels = 1
         self.img_height = 4
         self.img_width = 5
@@ -183,7 +183,7 @@ class TestBlockExpandOpCase3(TestBlockExpandOp):
 
 class TestBlockExpandOpCase4(TestBlockExpandOp):
     def config(self):
-        self.batch_size = 2
+        self.batch_size = 6
         self.img_channels = 2
         self.img_height = 3
         self.img_width = 3
diff --git a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
index 416af0ef785ccbf26d3df1498c36cc24ccc0d749..a19b4d9c13a9e646da405babfbac98f7ed15f217 100644
--- a/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_kldiv_loss_op.py
@@ -73,7 +73,7 @@ class TestKLDivLossOp3(TestKLDivLossOp):
 
 class TestKLDivLossOp4(TestKLDivLossOp):
     def initTestCase(self):
-        self.x_shape = (5, 7)
+        self.x_shape = (5, 20)
         self.reduction = 'sum'
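A note on the pattern running through these hunks: the input tensors grow from tiny shapes such as `[4, 4]` or `[2, 3]` to `[10, 12]` (and similarly for the cos_sim, im2sequence and kldiv_loss cases), and several inputs are nudged away from points where the activation is not differentiable, as the existing `# Same reason as TestAbs` comments indicate. The sketch below illustrates that second idea with plain NumPy, outside the Paddle `OpTest` machinery; relu6 is used only as an example, and the `0.005` band and `0.02` offset mirror the values appearing in the diff.

```python
import numpy as np

np.random.seed(0)

threshold = 6.0  # relu6 has kinks at x = 0 and x = threshold
x = np.random.uniform(-1, 10, [10, 12]).astype("float32")

# A numeric gradient check perturbs x by a small epsilon. If a sample sits
# within that band of a kink, the finite-difference estimate straddles two
# linear pieces and disagrees with the analytic gradient, so such samples
# are pushed safely to one side before the reference output is computed.
x[np.abs(x) < 0.005] = 0.02
x[np.abs(x - threshold) < 0.005] = threshold + 0.02

# Reference output: relu6(x) = min(max(x, 0), threshold)
out = np.minimum(np.maximum(x, 0.0), threshold)
```

In the same spirit, TestAcos and TestAsin narrow the sampling range to (-0.95, 0.95): the derivatives of arccos and arcsin contain a 1/sqrt(1 - x^2) factor, which blows up near x = ±1 and makes the gradient check unstable over the old (-1, 1) range.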