Commit e5a33062 authored by JiabinYang

test=develop, add simple rnn test

Parent 44c46e93
@@ -315,7 +315,8 @@ class SimpleRNNCell(layers.Layer):
         out = self._helper.create_variable_for_type_inference(self._dype)
         softmax_out = self._helper.create_variable_for_type_inference(
             self._dtype)
+        reduce_out = self._helper.create_variable_for_type_inference(
+            self._dtype)
         self._helper.append_op(
             type="mul",
             inputs={"X": input,
@@ -323,7 +324,7 @@ class SimpleRNNCell(layers.Layer):
             outputs={"Out": tmp_i2h},
             attrs={"x_num_col_dims": 1,
                    "y_num_col_dims": 1})
-        print("mul op 1")
+        # print("mul op 1")
         self._helper.append_op(
             type="mul",
             inputs={"X": pre_hidden,
@@ -331,7 +332,7 @@ class SimpleRNNCell(layers.Layer):
             outputs={"Out": tmp_h2h},
             attrs={"x_num_col_dims": 1,
                    "y_num_col_dims": 1})
-        print("mul op 2")
+        # print("mul op 2")
         self._helper.append_op(
             type="elementwise_add",
             inputs={'X': tmp_h2h,
@@ -339,35 +340,22 @@ class SimpleRNNCell(layers.Layer):
             outputs={'Out': hidden},
             attrs={'axis': -1,
                    'use_mkldnn': False})
-        print("elementwise op 1")
-        self._helper.append_op(
-            type='print',
-            inputs={'In': hidden},
-            attrs={
-                'first_n': -1,
-                'summarize': -1,
-                'message': None or "",
-                'print_tensor_name': True,
-                'print_tensor_type': True,
-                'print_tensor_shape': True,
-                'print_tensor_lod': True,
-                'print_phase': 'BOTH'
-            })
+        # print("elementwise op 1")
+        # self._helper.append_op(
+        #     type='print',
+        #     inputs={'In': hidden},
+        #     attrs={
+        #         'first_n': -1,
+        #         'summarize': -1,
+        #         'message': None or "",
+        #         'print_tensor_name': True,
+        #         'print_tensor_type': True,
+        #         'print_tensor_shape': True,
+        #         'print_tensor_lod': True,
+        #         'print_phase': 'BOTH'
+        #     })
         hidden = self._helper.append_activation(hidden)
-        self._helper.append_op(
-            type='print',
-            inputs={'In': hidden},
-            attrs={
-                'first_n': -1,
-                'summarize': -1,
-                'message': None or "",
-                'print_tensor_name': True,
-                'print_tensor_type': True,
-                'print_tensor_shape': True,
-                'print_tensor_lod': True,
-                'print_phase': 'BOTH'
-            })
         self._helper.append_op(
             type="mul",
@@ -376,13 +364,21 @@ class SimpleRNNCell(layers.Layer):
             outputs={"Out": out},
             attrs={"x_num_col_dims": 1,
                    "y_num_col_dims": 1})
-        print("mul op 3")
+        # print("mul op 3")
         self._helper.append_op(
             type="softmax",
             inputs={"X": out},
             outputs={"Out": softmax_out},
             attrs={"use_cudnn": False})
-        print("softmax op 1")
-        return softmax_out, hidden
+        # print("softmax op 1")
+        self._helper.append_op(
+            type='reduce_sum',
+            inputs={'X': softmax_out},
+            outputs={'Out': reduce_out},
+            attrs={'dim': None,
+                   'keep_dim': False,
+                   'reduce_all': True})
+        # print("reduce_sum op 1")
+        return reduce_out, hidden
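
For reference, the ops appended above make up one step of the cell: two mul ops and an elementwise_add build the raw hidden state, append_activation applies the cell's nonlinearity, a further mul plus softmax produce the output, and the newly added reduce_sum collapses that output to a scalar that can serve directly as a backward target. A minimal NumPy sketch of that computation (not part of the change; tanh is assumed for the activation, and w_i2h, w_h2h, w_h2o are hypothetical arrays standing in for the cell parameters):

import numpy as np

def simple_rnn_cell_step(x, pre_hidden, w_i2h, w_h2h, w_h2o):
    # mul + mul + elementwise_add, then the appended activation (tanh assumed)
    hidden = np.tanh(x.dot(w_i2h) + pre_hidden.dot(w_h2h))
    # mul into the output projection, then softmax over the last axis
    logits = hidden.dot(w_h2o)
    softmax_out = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)
    # reduce_sum with reduce_all=True collapses everything to one scalar
    reduce_out = softmax_out.sum()
    return reduce_out, hidden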
@@ -80,7 +80,7 @@ class SimpleRNN(fluid.imperative.Layer):
             fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))

     def forward(self, inputs):
-        out = list()
+        outs = list()
         pre_hiddens = list()
         init_hidden = fluid.layers.tensor.create_parameter(
@@ -94,10 +94,10 @@ class SimpleRNN(fluid.imperative.Layer):
             input = fluid.layers.slice(
                 inputs, axes=[1], starts=[i], ends=[i + 1])
             input = fluid.layers.reshape(input, shape=[1, 3])
-            pre_hidden, out_softmax = self._cell(input, pre_hidden)
-            out.append(out_softmax)
+            out_softmax, pre_hidden = self._cell(input, pre_hidden)
+            outs.append(out_softmax)

-        return out, pre_hiddens
+        return outs, pre_hiddens


 class TestImperative(unittest.TestCase):
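
The forward changes above only rename out to outs and flip the unpacking order to match the cell's new (reduce_out, hidden) return value; the loop still unrolls the [1, 4, 3] input one step at a time via slice and reshape. A self-contained NumPy illustration of that unroll (shapes taken from the test, the cell replaced by a stand-in function, and the constant-0.1 initialization assumed for both the initial hidden state and the weights):

import numpy as np

def cell_step(x, pre_hidden, w):
    # stand-in for SimpleRNNCell: returns (scalar output, next hidden state)
    hidden = np.tanh(x.dot(w) + pre_hidden.dot(w))
    return hidden.sum(), hidden

np_inp = np.arange(1.0, 13.0, dtype=np.float32).reshape(1, 4, 3)
hidden = np.full((1, 3), 0.1, dtype=np.float32)   # plays the role of init_hidden
w = np.full((3, 3), 0.1, dtype=np.float32)        # plays the role of the cell weights
outs = []
for i in range(4):
    step = np_inp[:, i:i + 1, :].reshape(1, 3)    # slice on axis 1, then reshape to [1, 3]
    out, hidden = cell_step(step, hidden, w)
    outs.append(out)
# outs[3], the last step's output, is what the test differentiates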
@@ -235,15 +235,17 @@ class TestImperative(unittest.TestCase):
                            [10.0, 11.0, 12.0]])
         np_inp = np_inp.reshape((1, 4, 3))
         np_inp = np_inp.astype(np.float32)
-        # with fluid.imperative.guard():
-        #     var_inp = fluid.imperative.base.to_variable(np_inp)
-        #     var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
-        #     simple_rnn = SimpleRNN()
-        #     outs, pre_hiddens = simple_rnn.forward(var_inp)
-        #     dy_out = outs[3]._numpy()
-        #     outs[3]._backward()
-        #     dy_grad = simple_rnn._cell._i2h_w._gradient()
-        #     print("dy_grad is {}".format(dy_grad))
+        with fluid.imperative.guard():
+            var_inp = fluid.imperative.base.to_variable(np_inp)
+            var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
+            simple_rnn = SimpleRNN()
+            outs, pre_hiddens = simple_rnn.forward(var_inp)
+            dy_out = outs[3]._numpy()
+            outs[3]._backward()
+            dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
+            dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
+            dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
+            # print("dy_grad is {}".format(dy_grad))

         with new_program_scope():
             print("im here")
@@ -251,20 +253,19 @@ class TestImperative(unittest.TestCase):
                 name="inp", shape=[1, 4, 3], append_batch_size=False)
             simple_rnn = SimpleRNN()
             outs, pre_hiddens = simple_rnn(inp)
-            param_grads = fluid.backward.append_backward(
-                outs[3],
-                parameter_list=[
-                    simple_rnn._cell._i2h_w.name, simple_rnn._cell._h2h_w.name,
-                    simple_rnn._cell._h2o_w.name
-                ])
+            param_grads = fluid.backward.append_backward(outs[3])
             exe = fluid.Executor(fluid.CPUPlace())
             exe.run(fluid.default_startup_program())
-            # print("param_grads is : {} ".format(param_grads))
-            static_out, static_grad = exe.run(
+            static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
                 feed={inp.name: np_inp},
-                fetch_list=[outs[3].name, param_grads[2][1].name])
-            # self.assertTrue(np.allclose(dy_out, static_out))
-            # self.assertTrue(np.allclose(dy_grad, static_grad))
+                fetch_list=[
+                    outs[3].name, param_grads[0][1].name,
+                    param_grads[1][1].name, param_grads[2][1].name
+                ])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
+        self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
+        self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))


 if __name__ == '__main__':
...
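
On the static side, dropping the explicit parameter_list means fluid.backward.append_backward(outs[3]) returns a (parameter, gradient variable) pair for each trainable parameter reached by the backward pass, and the fetch_list picks the gradient half of the first three pairs for comparison against the imperative gradients. A tiny plain-Python illustration of that structure (the names below are made up; in the real list each element holds Variables, which is why the test reads param_grads[i][1].name):

# Illustrative only: append_backward returns [(param, param_grad), ...];
# the "@GRAD" suffix follows fluid's naming convention for gradient variables.
param_grads = [
    ("w_0", "w_0@GRAD"),
    ("w_1", "w_1@GRAD"),
    ("w_2", "w_2@GRAD"),
]
for idx, (param_name, grad_name) in enumerate(param_grads):
    print("{}: {} -> {}".format(idx, param_name, grad_name))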