Commit 05bbe4e1 authored by JiabinYang

test=develop, add simple rnn test

Parent e5a33062
@@ -324,7 +324,7 @@ class SimpleRNNCell(layers.Layer):
             outputs={"Out": tmp_i2h},
             attrs={"x_num_col_dims": 1,
                    "y_num_col_dims": 1})
-        # print("mul op 1")
         self._helper.append_op(
             type="mul",
             inputs={"X": pre_hidden,
@@ -332,7 +332,7 @@ class SimpleRNNCell(layers.Layer):
             outputs={"Out": tmp_h2h},
             attrs={"x_num_col_dims": 1,
                    "y_num_col_dims": 1})
-        # print("mul op 2")
         self._helper.append_op(
             type="elementwise_add",
             inputs={'X': tmp_h2h,
@@ -340,21 +340,6 @@ class SimpleRNNCell(layers.Layer):
             outputs={'Out': hidden},
             attrs={'axis': -1,
                    'use_mkldnn': False})
-        # print("elementwise op 1")
-        # self._helper.append_op(
-        #     type='print',
-        #     inputs={'In': hidden},
-        #     attrs={
-        #         'first_n': -1,
-        #         'summarize': -1,
-        #         'message': None or "",
-        #         'print_tensor_name': True,
-        #         'print_tensor_type': True,
-        #         'print_tensor_shape': True,
-        #         'print_tensor_lod': True,
-        #         'print_phase': 'BOTH'
-        #     })
         hidden = self._helper.append_activation(hidden)
         self._helper.append_op(
@@ -364,14 +349,12 @@ class SimpleRNNCell(layers.Layer):
             outputs={"Out": out},
             attrs={"x_num_col_dims": 1,
                    "y_num_col_dims": 1})
-        # print("mul op 3")
         self._helper.append_op(
             type="softmax",
             inputs={"X": out},
             outputs={"Out": softmax_out},
             attrs={"use_cudnn": False})
-        # print("softmax op 1")
         self._helper.append_op(
             type='reduce_sum',
@@ -380,5 +363,5 @@ class SimpleRNNCell(layers.Layer):
             attrs={'dim': None,
                    'keep_dim': False,
                    'reduce_all': True})
-        # print("reduce_sum op 1")
         return reduce_out, hidden
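For reference, the op sequence above (two mul ops, an elementwise_add, the appended activation, a third mul, softmax, and a reduce_sum) amounts to one step of a softmax-output RNN cell. Below is a minimal NumPy sketch of the same math, not the Paddle implementation itself; the weight argument names mirror the cell's _i2h_w, _h2h_w, and _h2o_w parameters, and the tanh activation is an assumption, since the diff does not show which activation the helper appends.

    import numpy as np

    def simple_rnn_step(x, pre_hidden, i2h_w, h2h_w, h2o_w):
        # the two mul ops: input-to-hidden and hidden-to-hidden projections
        tmp_i2h = x.dot(i2h_w)
        tmp_h2h = pre_hidden.dot(h2h_w)
        # elementwise_add, then the appended activation (tanh assumed here)
        hidden = np.tanh(tmp_h2h + tmp_i2h)
        # hidden-to-output projection followed by softmax over the last axis
        out = hidden.dot(h2o_w)
        exp = np.exp(out - out.max(axis=-1, keepdims=True))
        softmax_out = exp / exp.sum(axis=-1, keepdims=True)
        # reduce_sum with reduce_all=True collapses everything to a scalar
        reduce_out = softmax_out.sum()
        return reduce_out, hidden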
@@ -245,7 +245,6 @@ class TestImperative(unittest.TestCase):
             dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
             dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
             dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
-            # print("dy_grad is {}".format(dy_grad))
         with new_program_scope():
             print("im here")
@@ -262,10 +261,10 @@ class TestImperative(unittest.TestCase):
                     outs[3].name, param_grads[0][1].name,
                     param_grads[1][1].name, param_grads[2][1].name
                 ])
         self.assertTrue(np.allclose(dy_out, static_out))
         self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
         self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
         self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
 if __name__ == '__main__':
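The assertions at the end of the test follow the standard imperative-versus-static parity pattern: run the same SimpleRNNCell once in dygraph mode and once as a static program, then compare the forward output and each parameter gradient with np.allclose. A minimal sketch of that comparison pattern is below; check_rnn_parity is a hypothetical helper name for illustration, not part of the test.

    import numpy as np

    def check_rnn_parity(dy_results, static_results, atol=1e-8):
        # Hypothetical helper: each argument is a list of arrays in the
        # order [out, grad_h2o, grad_h2h, grad_i2h], one list computed
        # imperatively and one from the equivalent static program.
        for dy, st in zip(dy_results, static_results):
            assert np.allclose(dy, st, atol=atol), "dygraph/static mismatch"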