Commit 48f5f6bd authored by qijun

refine some operators' python unittests

Parent 41271f03
@@ -18,21 +18,6 @@ class TestExp(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.007)


-class TestRelu(OpTest):
-    def setUp(self):
-        self.op_type = "relu"
-        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
-        x = np.sign(x) * np.exp(np.abs(x))
-        self.inputs = {'X': x}
-        self.outputs = {'Y': np.maximum(self.inputs['X'], 0)}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
-
-
 class TestSigmoid(OpTest):
     def setUp(self):
         self.op_type = "sigmoid"
@@ -81,8 +66,12 @@ class TestSqrt(OpTest):
 class TestAbs(OpTest):
     def setUp(self):
         self.op_type = "abs"
-        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
-        x = np.sign(x) * np.exp(np.abs(x))
+        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
+        # Because we set delta = 0.005 in calculating the numeric gradient,
+        # if x is too small, such as 0.002, x_neg will be -0.003 and
+        # x_pos will be 0.007, so the numeric gradient is inaccurate.
+        # We should avoid this.
+        x[np.abs(x) < 0.005] = 0.02
         self.inputs = {'X': x}
         self.outputs = {'Y': np.abs(self.inputs['X'])}
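The comment above is the heart of this commit: with delta = 0.005, a central difference straddles the kink of abs at 0 whenever |x| < delta, so the numeric gradient can be badly off. A minimal NumPy sketch of that failure mode, independent of the OpTest harness, using the values from the comment:

    import numpy as np

    delta = 0.005
    x = 0.002                                # closer to the kink at 0 than delta
    x_neg, x_pos = x - delta, x + delta      # -0.003 and 0.007, as the comment says
    numeric = (np.abs(x_pos) - np.abs(x_neg)) / (2 * delta)
    print(numeric)                           # 0.4, but the analytic gradient is 1.0

Shifting every such sample to 0.02 keeps both x - delta and x + delta on the same side of the kink, so the central difference is exact again; the relocated TestRelu below reuses the identical trick for relu's kink at 0.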
@@ -93,41 +82,14 @@ class TestAbs(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.007)


-class TestReciprocal(OpTest):
-    def setUp(self):
-        self.op_type = "reciprocal"
-        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
-        self.outputs = {'Y': np.reciprocal(self.inputs['X'])}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.01)
-
-
-class TestLog(OpTest):
-    def setUp(self):
-        self.op_type = "log"
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
-        }
-        self.outputs = {'Y': np.log(self.inputs['X'])}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
-
-
-class TestSquare(OpTest):
-    def setUp(self):
-        self.op_type = "square"
-        self.inputs = {
-            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
-        }
-        self.outputs = {'Y': np.square(self.inputs['X'])}
-
+class TestRelu(OpTest):
+    def setUp(self):
+        self.op_type = "relu"
+        x = np.random.uniform(-1, 1, [11, 17]).astype("float32")
+        # The same reason as TestAbs
+        x[np.abs(x) < 0.005] = 0.02
+        self.inputs = {'X': x}
+        self.outputs = {'Y': np.maximum(self.inputs['X'], 0)}
+
     def test_check_output(self):
         self.check_output()
@@ -140,10 +102,13 @@ class TestBRelu(OpTest):
     def setUp(self):
         self.op_type = "brelu"
         x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
-        x = 2 * np.sign(x) * np.exp(np.abs(x))
-        self.inputs = {'X': x}
-        t_min = 0
+        t_min = 1
         t_max = 4
+        # The same reason as TestAbs
+        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
+        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
+        self.inputs = {'X': x}
         self.attrs = {'t_min': t_min, 't_max': t_max}
         t = np.copy(x)
         t[t < t_min] = t_min
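brelu clamps its input to [t_min, t_max], so it is non-differentiable at both thresholds, and setUp now nudges any sample that lands within delta of either boundary. A sketch of the same preprocessing outside the harness; np.clip is used here for the forward pass and is equivalent to the copy-and-assign in the test:

    import numpy as np

    t_min, t_max, delta = 1.0, 4.0, 0.005
    x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
    x[np.abs(x - t_min) < delta] = t_min + 0.02   # step off the lower kink
    x[np.abs(x - t_max) < delta] = t_max + 0.02   # step off the upper kink
    y = np.clip(x, t_min, t_max)                  # brelu forward pass
    assert not np.any(np.abs(x - t_min) < delta)  # no sample near either kink
    assert not np.any(np.abs(x - t_max) < delta)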
@@ -160,10 +125,12 @@ class TestBRelu(OpTest):
 class TestSoftRelu(OpTest):
     def setUp(self):
         self.op_type = "soft_relu"
-        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
-        x = 2 * np.sign(x) * np.exp(np.abs(x))
+        x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
+        threshold = 2
+        # The same reason as TestAbs
+        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
+        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
         self.inputs = {'X': x}
-        threshold = 4
         self.attrs = {'threshold': threshold}
         t = np.copy(x)
         t[t < -threshold] = -threshold
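soft_relu clips the input to [-threshold, threshold] before applying softplus, so its kinks sit at ±threshold; with threshold = 2 and samples drawn from [-3, 3], both kinks are reachable, which is why both nudges are needed. A sketch of the reference computation, assuming this clip-then-softplus definition (the outputs line itself falls outside the hunk shown):

    import numpy as np

    threshold, delta = 2.0, 0.005
    x = np.random.uniform(-3, 3, [4, 4]).astype("float32")
    x[np.abs(x - threshold) < delta] = threshold + 0.02    # off the upper kink
    x[np.abs(x + threshold) < delta] = -threshold + 0.02   # off the lower kink
    t = np.clip(x, -threshold, threshold)
    y = np.log(1.0 + np.exp(t))                            # softplus of the clipped input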
@@ -177,6 +144,49 @@ class TestSoftRelu(OpTest):
         self.check_grad(['X'], 'Y', max_relative_error=0.02)


+class TestReciprocal(OpTest):
+    def setUp(self):
+        self.op_type = "reciprocal"
+        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
+        self.outputs = {'Y': np.reciprocal(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.01)
+
+
+class TestLog(OpTest):
+    def setUp(self):
+        self.op_type = "log"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+        }
+        self.outputs = {'Y': np.log(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+
+
+class TestSquare(OpTest):
+    def setUp(self):
+        self.op_type = "square"
+        self.inputs = {
+            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+        }
+        self.outputs = {'Y': np.square(self.inputs['X'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+
+
 class TestPow(OpTest):
     def setUp(self):
         self.op_type = "pow"
...
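For context, a sketch of what a check_grad-style comparison does conceptually; the real OpTest harness is not shown in this diff, so the helper below is illustrative rather than Paddle's implementation. It perturbs each element by ±delta, re-evaluates a scalar output, and compares the numeric gradient against the analytic one under a relative-error bound:

    import numpy as np

    def numeric_gradient(f, x, delta=0.005):
        """Central-difference gradient of a scalar-valued f at x, element by element."""
        grad = np.zeros_like(x)
        flat_x, flat_g = x.reshape(-1), grad.reshape(-1)  # views into x and grad
        for i in range(flat_x.size):
            orig = flat_x[i]
            flat_x[i] = orig + delta
            pos = f(x)
            flat_x[i] = orig - delta
            neg = f(x)
            flat_x[i] = orig                              # restore the element
            flat_g[i] = (pos - neg) / (2 * delta)
        return grad

    x = np.random.uniform(-1, 1, [4, 4]).astype("float64")
    x[np.abs(x) < 0.005] = 0.02                   # the trick introduced by this commit
    num = numeric_gradient(lambda v: np.abs(v).sum(), x)
    ana = np.sign(x)                              # analytic gradient of sum(|x|)
    assert np.max(np.abs(num - ana) / np.abs(ana)) < 0.007

Without the nudging line, any element closer to 0 than delta would make the assertion flaky, which is exactly what the relaxed max_relative_error values in these tests are guarding against.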