Commit e41a71ce authored by fengjiayi

fix errors

Parent e0be63bf
@@ -386,7 +386,8 @@ def square_error_cost(input, label, **kwargs):
     square_out = helper.create_tmp_variable(dtype=input.dtype)
     helper.append_op(
-        type='square', inputs={'X': [minus_out]}, outputs={'Y': [square_out]})
+        type='square', inputs={'X': [minus_out]},
+        outputs={'Out': [square_out]})
     return square_out
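For reference, square_error_cost chains a minus op with the square op patched above, so it evaluates to (input - label)^2 elementwise. A minimal NumPy sketch of those numerics (illustrative only; the LayerHelper plumbing is elided and the function name here is hypothetical):

```python
import numpy as np

def square_error_cost_ref(input, label):
    # What the 'minus' op produces, followed by what the 'square' op
    # now writes to its renamed 'Out' slot.
    minus_out = input - label
    square_out = np.square(minus_out)
    return square_out

x = np.array([1.0, 2.0, 3.0])
y = np.array([1.5, 2.0, 2.0])
print(square_error_cost_ref(x, y))  # [0.25 0.   1.  ]
```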
@@ -604,7 +605,7 @@ def sequence_pool(input, pool_type, **kwargs):
         sqrt : out.data = [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2),
                6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2)
         max : out.data = [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1)
-
+
     Args:
         input(variable): The input variable which is a LoDTensor.
         pool_type (string): The pooling type of sequence_pool.
@@ -616,7 +617,7 @@ def sequence_pool(input, pool_type, **kwargs):
     Examples:
-
+
         .. code-block:: python

          x = fluid.layers.data(name='x', shape=[7, 1],
                                dtype='float32', lod_level=1)
          avg_x = fluid.layers.sequence_pool(input=x, pool_type='average')
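The pooled values quoted in the docstring can be checked with a small NumPy sketch of the documented semantics (the LoD offsets are assumed from the example; this is not Paddle's implementation):

```python
import numpy as np

def sequence_pool_ref(data, lod, pool_type):
    # Pool each LoD segment [lod[i], lod[i+1]) of a flat array.
    outs = []
    for start, end in zip(lod[:-1], lod[1:]):
        seg = data[start:end]
        if pool_type == 'average':
            outs.append(seg.mean())
        elif pool_type == 'sqrt':
            outs.append(seg.sum() / np.sqrt(len(seg)))
        elif pool_type == 'max':
            outs.append(seg.max())
    return np.array(outs)

x = np.array([1, 3, 2, 4, 6, 5, 1], dtype=np.float32)
lod = [0, 2, 5, 7]  # three sequences: [1,3], [2,4,6], [5,1]
print(sequence_pool_ref(x, lod, 'sqrt'))  # ~[2.83 6.93 4.24]
print(sequence_pool_ref(x, lod, 'max'))   # [3. 6. 5.]
```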
@@ -654,7 +655,7 @@ def sequence_first_step(input, **kwargs):
         out.dim = [3, 1]
         with condition len(x.lod[-1]) - 1 == out.dims[0]
         out.data = [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1)
-
+
     Args:
         input(variable): The input variable which is a LoDTensor.
@@ -664,7 +665,7 @@ def sequence_first_step(input, **kwargs):
     Examples:
-
+
         .. code-block:: python

          x = fluid.layers.data(name='x', shape=[7, 1],
                                dtype='float32', lod_level=1)
          x_first_step = fluid.layers.sequence_first_step(input=x)
@@ -687,7 +688,7 @@ def sequence_last_step(input, **kwargs):
         out.dim = [3, 1]
         with condition len(x.lod[-1]) - 1 == out.dims[0]
         out.data = [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1)
-
+
     Args:
         input(variable): The input variable which is a LoDTensor.
@@ -697,7 +698,7 @@ def sequence_last_step(input, **kwargs):
     Examples:
-
+
        .. code-block:: python

          x = fluid.layers.data(name='x', shape=[7, 1],
                                dtype='float32', lod_level=1)
          x_last_step = fluid.layers.sequence_last_step(input=x)
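sequence_first_step and sequence_last_step simply select one element per LoD segment; the docstring values above fall out of the same sketch (same assumed LoD layout as before):

```python
import numpy as np

def sequence_step_ref(data, lod, which):
    # Pick the first or last element of every LoD segment.
    idx = [start if which == 'first' else end - 1
           for start, end in zip(lod[:-1], lod[1:])]
    return data[idx]

x = np.array([1, 3, 2, 4, 6, 5, 1], dtype=np.float32)
lod = [0, 2, 5, 7]
print(sequence_step_ref(x, lod, 'first'))  # [1. 2. 5.]
print(sequence_step_ref(x, lod, 'last'))   # [3. 6. 1.]
```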
@@ -1132,7 +1133,7 @@ def reduce_sum(input, dim=None, keep_dim=False):
     Returns:
         Variable: The reduced Tensor variable.
-
+
     Examples:
         .. code-block:: python
@@ -1176,7 +1177,7 @@ def reduce_mean(input, dim=None, keep_dim=False):
     Returns:
         Variable: The reduced Tensor variable.
-
+
     Examples:
         .. code-block:: python
...
@@ -10,13 +10,13 @@ class TestExp(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.exp(self.inputs['X'])}
+        self.outputs = {'Out': np.exp(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestSigmoid(OpTest):
@@ -25,13 +25,13 @@ class TestSigmoid(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
+        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)


 class TestLogSigmoid(OpTest):
@@ -40,13 +40,13 @@ class TestLogSigmoid(OpTest):
         self.inputs = {
             'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.log(1 / (1 + np.exp(-self.inputs['X'])))}
+        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)


 class TestTanh(OpTest):
@@ -55,13 +55,13 @@ class TestTanh(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.tanh(self.inputs['X'])}
+        self.outputs = {'Out': np.tanh(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestTanhShrink(OpTest):
@@ -70,13 +70,13 @@ class TestTanhShrink(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
         }
-        self.outputs = {'Y': self.inputs['X'] - np.tanh(self.inputs['X'])}
+        self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)


 class TestHardShrink(OpTest):
@@ -90,13 +90,13 @@ class TestHardShrink(OpTest):
         t = np.copy(x)
         t[(t >= -threshold) & (t <= threshold)] = 0
-        self.outputs = {'Y': t}
+        self.outputs = {'Out': t}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.005)
+        self.check_grad(['X'], 'Out', max_relative_error=0.005)


 class TestSoftShrink(OpTest):
@@ -110,13 +110,13 @@ class TestSoftShrink(OpTest):
         y = np.copy(self.inputs['X'])
         y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
             y - lambda_val)
-        self.outputs = {'Y': y}
+        self.outputs = {'Out': y}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestSqrt(OpTest):
@@ -125,13 +125,13 @@ class TestSqrt(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.sqrt(self.inputs['X'])}
+        self.outputs = {'Out': np.sqrt(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestAbs(OpTest):
@@ -144,13 +144,13 @@ class TestAbs(OpTest):
         # we should avoid this
         x[np.abs(x) < 0.005] = 0.02
         self.inputs = {'X': x}
-        self.outputs = {'Y': np.abs(self.inputs['X'])}
+        self.outputs = {'Out': np.abs(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestCeil(OpTest):
@@ -158,13 +158,13 @@ class TestCeil(OpTest):
         self.op_type = "ceil"
         x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
         self.inputs = {'X': x}
-        self.outputs = {'Y': np.ceil(self.inputs['X'])}
+        self.outputs = {'Out': np.ceil(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestFloor(OpTest):
@@ -173,13 +173,13 @@ class TestFloor(OpTest):
         x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
         self.inputs = {'X': x}
         # numpy floor need +1
-        self.outputs = {'Y': np.floor(self.inputs['X']) + 1.0}
+        self.outputs = {'Out': np.floor(self.inputs['X']) + 1.0}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestRound(OpTest):
@@ -187,13 +187,13 @@ class TestRound(OpTest):
         self.op_type = "round"
         x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
         self.inputs = {'X': x}
-        self.outputs = {'Y': np.round(self.inputs['X'])}
+        self.outputs = {'Out': np.round(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestRelu(OpTest):
@@ -203,13 +203,13 @@ class TestRelu(OpTest):
         # The same reason with TestAbs
         x[np.abs(x) < 0.005] = 0.02
         self.inputs = {'X': x}
-        self.outputs = {'Y': np.maximum(self.inputs['X'], 0)}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], 0)}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestBRelu(OpTest):
@@ -227,13 +227,13 @@ class TestBRelu(OpTest):
         t = np.copy(x)
         t[t < t_min] = t_min
         t[t > t_max] = t_max
-        self.outputs = {'Y': t}
+        self.outputs = {'Out': t}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)


 class TestRelu6(OpTest):
@@ -248,14 +248,14 @@ class TestRelu6(OpTest):
         self.inputs = {'X': x}
         self.attrs = {'threshold': threshold}
         self.outputs = {
-            'Y': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
+            'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
         }

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)


 class TestSoftRelu(OpTest):
class TestSoftRelu(OpTest): class TestSoftRelu(OpTest):
...@@ -271,13 +271,13 @@ class TestSoftRelu(OpTest): ...@@ -271,13 +271,13 @@ class TestSoftRelu(OpTest):
t = np.copy(x) t = np.copy(x)
t[t < -threshold] = -threshold t[t < -threshold] = -threshold
t[t > threshold] = threshold t[t > threshold] = threshold
self.outputs = {'Y': np.log((np.exp(t) + 1))} self.outputs = {'Out': np.log((np.exp(t) + 1))}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Y', max_relative_error=0.02) self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestELU(OpTest): class TestELU(OpTest):
...@@ -290,27 +290,27 @@ class TestELU(OpTest): ...@@ -290,27 +290,27 @@ class TestELU(OpTest):
self.inputs = {'X': x} self.inputs = {'X': x}
self.attrs = {'alpha': alpha} self.attrs = {'alpha': alpha}
self.outputs = { self.outputs = {
'Y': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) 'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Y', max_relative_error=0.02) self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestReciprocal(OpTest): class TestReciprocal(OpTest):
def setUp(self): def setUp(self):
self.op_type = "reciprocal" self.op_type = "reciprocal"
self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")} self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
self.outputs = {'Y': np.reciprocal(self.inputs['X'])} self.outputs = {'Out': np.reciprocal(self.inputs['X'])}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Y', max_relative_error=0.01) self.check_grad(['X'], 'Out', max_relative_error=0.01)
class TestLog(OpTest): class TestLog(OpTest):
@@ -319,13 +319,13 @@ class TestLog(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.log(self.inputs['X'])}
+        self.outputs = {'Out': np.log(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestSquare(OpTest):
@@ -334,13 +334,13 @@ class TestSquare(OpTest):
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         }
-        self.outputs = {'Y': np.square(self.inputs['X'])}
+        self.outputs = {'Out': np.square(self.inputs['X'])}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestPow(OpTest):
@@ -348,13 +348,13 @@ class TestPow(OpTest):
         self.op_type = "pow"
         self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
         self.attrs = {'factor': 3.0}
-        self.outputs = {'Y': np.power(self.inputs['X'], 3)}
+        self.outputs = {'Out': np.power(self.inputs['X'], 3)}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)


 class TestSTanh(OpTest):
@@ -366,13 +366,13 @@ class TestSTanh(OpTest):
         scale_a = 2.0 / 3.0
         scale_b = 1.7159
         self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
-        self.outputs = {'Y': scale_b * np.tanh(self.inputs['X'] * scale_a)}
+        self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestSoftplus(OpTest):
@@ -381,13 +381,13 @@ class TestSoftplus(OpTest):
         self.inputs = {
             'X': np.random.uniform(-1, 1, [11, 17]).astype("float64")
         }
-        self.outputs = {'Y': np.log(1 + np.exp(self.inputs['X']))}
+        self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestSoftsign(OpTest):
@@ -397,14 +397,14 @@ class TestSoftsign(OpTest):
             'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
         }
         self.outputs = {
-            'Y': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
+            'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
         }

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)


 class TestThresholdedRelu(OpTest):
@@ -419,13 +419,13 @@ class TestThresholdedRelu(OpTest):
         self.inputs = {'X': X}
         self.attrs = {'threshold': threshold}
-        self.outputs = {'Y': (X > threshold) * X}
+        self.outputs = {'Out': (X > threshold) * X}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=self.relative_error)
+        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


 class TestHardSigmoid(OpTest):
@@ -447,13 +447,13 @@ class TestHardSigmoid(OpTest):
             upper_threshold - 0.2
         temp = X * slope + offset
-        self.outputs = {'Y': np.maximum(0.0, np.minimum(1.0, temp))}
+        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.002)
+        self.check_grad(['X'], 'Out', max_relative_error=0.002)


 class TestSwish(OpTest):
@@ -462,13 +462,13 @@ class TestSwish(OpTest):
         X = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
         self.inputs = {'X': X}
         self.attrs = {'beta': 2.3}
-        self.outputs = {'Y': X * expit(self.attrs['beta'] * X)}
+        self.outputs = {'Out': X * expit(self.attrs['beta'] * X)}

     def test_check_output(self):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)


 if __name__ == "__main__":
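Every test class in this file now follows one pattern: reference outputs keyed by 'Out' and gradient checks against 'Out'. Condensed into a single sketch (mirroring TestExp above; the `from op_test import OpTest` import path is assumed from the test directory layout of the time):

```python
import numpy as np
from op_test import OpTest  # shared harness for operator tests (assumed path)

class TestExpPattern(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.inputs = {'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")}
        # The reference-output key must match the op's renamed output: 'Out', not 'Y'.
        self.outputs = {'Out': np.exp(self.inputs['X'])}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # Gradient checking targets the renamed 'Out' as well.
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
```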
...
@@ -7,7 +7,7 @@ def fc(X, W, Y):
     ret_v = core.Net.create()
     ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation"))
-    ret_v.append_op(Operator("sigmoid", X="pre_activation", Y=Y))
+    ret_v.append_op(Operator("sigmoid", X="pre_activation", Out=Y))
     ret_v.complete_add_op(True)
     return ret_v
@@ -30,7 +30,7 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}.
 Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
 Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
 Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
-Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Y[fc.out]}.
+Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Out[fc.out]}.
 '''
         self.assertEqual(expected, "\n" + str(net))
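For context, a hedged sketch of how the fc helper above is driven (import paths assumed from Paddle's fluid-era test layout; the expected string in this test confirms the resulting net):

```python
import paddle.v2.fluid.core as core      # assumed import path
from paddle.v2.fluid.op import Operator  # assumed import path

# Rebuild the two-op net from fc(): mul feeds sigmoid, whose output
# slot is now named "Out" instead of "Y".
net = core.Net.create()
net.append_op(Operator("mul", X="X", Y="W", Out="pre_activation"))
net.append_op(Operator("sigmoid", X="pre_activation", Out="fc.out"))
net.complete_add_op(True)
print(net)  # should list Op(sigmoid) with outputs:{Out[fc.out]}
```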
...