#edit-mode: -*- python -*-
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.trainer_config_helpers import *

######################## data source ################################
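# process_subseq in rnn_data_provider.py is expected to yield nested
# sequences of word ids plus a per-sequence label, matching the "word"
# and "label" data layers defined below.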
define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
                        test_list=None,
                        module='rnn_data_provider',
                        obj='process_subseq')


settings(batch_size=2, learning_rate=0.01)

######################## network configuration ##############################
dict_dim = 10
word_dim = 8
hidden_dim = 8
label_dim = 3

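# Input word ids, one integer in [0, dict_dim) per position.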
data = data_layer(name="word", size=dict_dim)

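# Embed each word id into a word_dim-dimensional vector.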
emb = embedding_layer(input=data, size=word_dim)

# This hierarchical RNN is designed to be equivalent to the simple RNN in
# sequence_rnn_multi_input.conf

def outer_step(wid, x):
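    # The outer memory reads, at each outer step, the output of the layer
    # named "outer_rnn_state" (the last_seq below) from the previous step.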
    outer_mem = memory(name="outer_rnn_state", size=hidden_dim)
    def inner_step(y, wid):
        z = embedding_layer(input=wid, size=word_dim)
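        # boot_layer seeds the inner memory with the outer state, so each
        # subsequence starts the inner RNN from where the previous
        # subsequence ended.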
        inner_mem = memory(name="inner_rnn_state",
                           size=hidden_dim,
                           boot_layer=outer_mem)
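        # Giving this layer the same name as the inner memory closes the
        # recurrent loop: the memory reads this output at the next step.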
        out = fc_layer(input=[y, z, inner_mem],
                       size=hidden_dim,
                       act=TanhActivation(),
                       bias_attr=True,
                       name="inner_rnn_state")
        return out

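    # Unroll the inner RNN over one subsequence; x and wid are stepped
    # through element by element.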
    inner_rnn_output = recurrent_group(
        step=inner_step,
        name="inner",
        input=[x, wid])
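    # The last inner state of this subsequence, named "outer_rnn_state" so
    # that the outer memory above can pick it up.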
    last = last_seq(input=inner_rnn_output, name="outer_rnn_state")

    # "return last" should also work. But currently RecurrentGradientMachine
    # does not handle it correctly. Current implementation requires that
    # all the out links are from sequences. However, it does not report error
    # when the out links are not sequences.
    return inner_rnn_output

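# SubsequenceInput hands one whole subsequence to each step of the outer
# group, so outer_step receives sequences and can run an inner RNN on them.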
out = recurrent_group(
    name="outer",
    step=outer_step,
    input=[SubsequenceInput(data), SubsequenceInput(emb)])

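# Classify the nested sequence from its final hidden state.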
rep = last_seq(input=out)
prob = fc_layer(size=label_dim,
                input=rep,
                act=SoftmaxActivation(),
                bias_attr=True)

outputs(classification_cost(input=prob,
                            label=data_layer(name="label", size=label_dim)))