Commit e5a33062 authored by JiabinYang

test=develop, add simple rnn test

Parent 44c46e93
@@ -315,7 +315,8 @@ class SimpleRNNCell(layers.Layer):
out = self._helper.create_variable_for_type_inference(self._dtype)
softmax_out = self._helper.create_variable_for_type_inference(
self._dtype)
reduce_out = self._helper.create_variable_for_type_inference(
self._dtype)
self._helper.append_op(
type="mul",
inputs={"X": input,
@@ -323,7 +324,7 @@ class SimpleRNNCell(layers.Layer):
outputs={"Out": tmp_i2h},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
print("mul op 1")
# print("mul op 1")
self._helper.append_op(
type="mul",
inputs={"X": pre_hidden,
@@ -331,7 +332,7 @@ class SimpleRNNCell(layers.Layer):
outputs={"Out": tmp_h2h},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
print("mul op 2")
# print("mul op 2")
self._helper.append_op(
type="elementwise_add",
inputs={'X': tmp_h2h,
@@ -339,35 +340,22 @@ class SimpleRNNCell(layers.Layer):
outputs={'Out': hidden},
attrs={'axis': -1,
'use_mkldnn': False})
print("elementwise op 1")
self._helper.append_op(
type='print',
inputs={'In': hidden},
attrs={
'first_n': -1,
'summarize': -1,
'message': None or "",
'print_tensor_name': True,
'print_tensor_type': True,
'print_tensor_shape': True,
'print_tensor_lod': True,
'print_phase': 'BOTH'
})
# print("elementwise op 1")
# self._helper.append_op(
# type='print',
# inputs={'In': hidden},
# attrs={
# 'first_n': -1,
# 'summarize': -1,
# 'message': None or "",
# 'print_tensor_name': True,
# 'print_tensor_type': True,
# 'print_tensor_shape': True,
# 'print_tensor_lod': True,
# 'print_phase': 'BOTH'
# })
hidden = self._helper.append_activation(hidden)
self._helper.append_op(
type='print',
inputs={'In': hidden},
attrs={
'first_n': -1,
'summarize': -1,
'message': None or "",
'print_tensor_name': True,
'print_tensor_type': True,
'print_tensor_shape': True,
'print_tensor_lod': True,
'print_phase': 'BOTH'
})
self._helper.append_op(
type="mul",
@@ -376,13 +364,21 @@ class SimpleRNNCell(layers.Layer):
outputs={"Out": out},
attrs={"x_num_col_dims": 1,
"y_num_col_dims": 1})
print("mul op 3")
# print("mul op 3")
self._helper.append_op(
type="softmax",
inputs={"X": out},
outputs={"Out": softmax_out},
attrs={"use_cudnn": False})
print("softmax op 1")
# print("softmax op 1")
return softmax_out, hidden
self._helper.append_op(
type='reduce_sum',
inputs={'X': softmax_out},
outputs={'Out': reduce_out},
attrs={'dim': None,
'keep_dim': False,
'reduce_all': True})
# print("reduce_sum op 1")
return reduce_out, hidden
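The hunk above assembles the cell's forward pass op by op (mul, mul, elementwise_add, activation, mul, softmax, reduce_sum). As a rough guide to what those appended ops compute, here is a minimal NumPy sketch that is not part of the commit; the weight names, shapes, and the tanh activation are assumptions for illustration only.

# Illustrative sketch of SimpleRNNCell.forward; weights/activation are assumed.
import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

def simple_rnn_cell_step(x, pre_hidden, i2h_w, h2h_w, h2o_w):
    tmp_i2h = x @ i2h_w                    # "mul" op 1: input-to-hidden
    tmp_h2h = pre_hidden @ h2h_w           # "mul" op 2: hidden-to-hidden
    hidden = np.tanh(tmp_h2h + tmp_i2h)    # "elementwise_add" + activation (tanh assumed)
    softmax_out = softmax(hidden @ h2o_w)  # "mul" op 3 followed by "softmax"
    reduce_out = softmax_out.sum()         # "reduce_sum" with reduce_all=True
    return reduce_out, hidden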
@@ -80,7 +80,7 @@ class SimpleRNN(fluid.imperative.Layer):
fluid.ParamAttr(initializer=fluid.initializer.Constant(value=0.1)))
def forward(self, inputs):
out = list()
outs = list()
pre_hiddens = list()
init_hidden = fluid.layers.tensor.create_parameter(
@@ -94,10 +94,10 @@ class SimpleRNN(fluid.imperative.Layer):
input = fluid.layers.slice(
inputs, axes=[1], starts=[i], ends=[i + 1])
input = fluid.layers.reshape(input, shape=[1, 3])
pre_hidden, out_softmax = self._cell(input, pre_hidden)
out.append(out_softmax)
out_softmax, pre_hidden = self._cell(input, pre_hidden)
outs.append(out_softmax)
return out, pre_hiddens
return outs, pre_hiddens
class TestImperative(unittest.TestCase):
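For readers skimming the diff, a hypothetical NumPy unrolling of SimpleRNN.forward above might look like the sketch below: slice one timestep at a time, thread the previous hidden state forward, and collect every output. The (1, 4, 3) input shape matches the test data, cell_step stands in for the cell sketch shown earlier, and collecting pre_hiddens on each step is an assumption.

# Hypothetical unrolled forward pass mirroring SimpleRNN.forward.
import numpy as np

def simple_rnn_forward(inputs, init_hidden, cell_step, weights):
    # inputs is assumed to be shaped (1, seq_len, 3), like the test data below
    outs, pre_hiddens = [], []
    pre_hidden = init_hidden
    for i in range(inputs.shape[1]):
        x = inputs[:, i, :].reshape(1, 3)  # the slice + reshape steps
        out, pre_hidden = cell_step(x, pre_hidden, *weights)
        outs.append(out)
        pre_hiddens.append(pre_hidden)
    return outs, pre_hiddens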
@@ -235,15 +235,17 @@ class TestImperative(unittest.TestCase):
[10.0, 11.0, 12.0]])
np_inp = np_inp.reshape((1, 4, 3))
np_inp = np_inp.astype(np.float32)
# with fluid.imperative.guard():
# var_inp = fluid.imperative.base.to_variable(np_inp)
# var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
# simple_rnn = SimpleRNN()
# outs, pre_hiddens = simple_rnn.forward(var_inp)
# dy_out = outs[3]._numpy()
# outs[3]._backward()
# dy_grad = simple_rnn._cell._i2h_w._gradient()
# print("dy_grad is {}".format(dy_grad))
with fluid.imperative.guard():
var_inp = fluid.imperative.base.to_variable(np_inp)
var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
simple_rnn = SimpleRNN()
outs, pre_hiddens = simple_rnn.forward(var_inp)
dy_out = outs[3]._numpy()
outs[3]._backward()
dy_grad_h2o = simple_rnn._cell._h2o_w._gradient()
dy_grad_h2h = simple_rnn._cell._h2h_w._gradient()
dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
# print("dy_grad is {}".format(dy_grad))
with new_program_scope():
print("im here")
@@ -251,20 +253,19 @@ class TestImperative(unittest.TestCase):
name="inp", shape=[1, 4, 3], append_batch_size=False)
simple_rnn = SimpleRNN()
outs, pre_hiddens = simple_rnn(inp)
param_grads = fluid.backward.append_backward(
outs[3],
parameter_list=[
simple_rnn._cell._i2h_w.name, simple_rnn._cell._h2h_w.name,
simple_rnn._cell._h2o_w.name
])
param_grads = fluid.backward.append_backward(outs[3])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
# print("param_grads is : {} ".format(param_grads))
static_out, static_grad = exe.run(
static_out, static_grad_h2o, static_grad_h2h, static_grad_i2h = exe.run(
feed={inp.name: np_inp},
fetch_list=[outs[3].name, param_grads[2][1].name])
# self.assertTrue(np.allclose(dy_out, static_out))
# self.assertTrue(np.allclose(dy_grad, static_grad))
fetch_list=[
outs[3].name, param_grads[0][1].name,
param_grads[1][1].name, param_grads[2][1].name
])
self.assertTrue(np.allclose(dy_out, static_out))
self.assertTrue(np.allclose(dy_grad_h2o, static_grad_h2o))
self.assertTrue(np.allclose(dy_grad_h2h, static_grad_h2h))
self.assertTrue(np.allclose(dy_grad_i2h, static_grad_i2h))
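The assertions above are the point of the test: the dygraph (imperative) output and each parameter gradient must agree with their static-graph counterparts. A tiny, hypothetical helper expressing the same check pattern, for illustration only:

# Hypothetical helper; dy_results / static_results are matching numpy arrays.
import numpy as np

def results_match(dy_results, static_results):
    return all(np.allclose(dy, st) for dy, st in zip(dy_results, static_results))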
if __name__ == '__main__':