# Paddle trainer configuration exercising the simple recurrent layers:
# a plain RNN, an LSTM, and a GRU, each in both forward and reverse
# (reverse=True) direction over a 200-dim input sequence.
from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-4)

din = data_layer(name='data', size=200)

# Shared non-linear projection feeding every recurrent layer below.
hidden = fc_layer(input=din, size=200, act=SigmoidActivation())

# Forward and reverse plain recurrent layers.
rnn = recurrent_layer(input=hidden, act=SigmoidActivation())
rnn2 = recurrent_layer(input=hidden, act=SigmoidActivation(), reverse=True)

# lstmemory expects its input pre-projected to 4x the cell size
# (three gates plus the cell-input transform), hence size=200 * 4 with a
# linear, bias-free projection.
lstm1_param = fc_layer(
    input=hidden, size=200 * 4, act=LinearActivation(), bias_attr=False)
lstm1 = lstmemory(input=lstm1_param, act=SigmoidActivation())

lstm2_param = fc_layer(
    input=hidden, size=200 * 4, act=LinearActivation(), bias_attr=False)
lstm2 = lstmemory(input=lstm2_param, act=SigmoidActivation(), reverse=True)

# grumemory likewise expects a 3x projection (update gate, reset gate,
# candidate state).
gru1_param = fc_layer(
    input=hidden, size=200 * 3, act=LinearActivation(), bias_attr=False)
gru1 = grumemory(input=gru1_param, act=SigmoidActivation())

gru2_param = fc_layer(
    input=hidden, size=200 * 3, act=LinearActivation(), bias_attr=False)
gru2 = grumemory(input=gru2_param, act=SigmoidActivation(), reverse=True)

# Network outputs: the last step of each forward layer, the first step of
# each reverse layer.
outputs(
    last_seq(input=rnn),
    first_seq(input=rnn2),
    last_seq(input=lstm1),
    first_seq(input=lstm2),
    last_seq(input=gru1),
    first_seq(input=gru2))