import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
from paddle.v2.fluid.layer_helper import LayerHelper


def lstm(x, c_pre_init, hidden_dim, forget_bias=None):
    """
    This function helps create an operator for the LSTM (Long Short Term
    Memory) cell that can be used inside an RNN.
    """
    helper = LayerHelper('lstm_unit', **locals())
    rnn = fluid.layers.StaticRNN()
    with rnn.step():
        c_pre = rnn.memory(init=c_pre_init)
        x_t = rnn.step_input(x)

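        # Concatenate the current input with the previous cell state and run
        # one FC layer that emits the pre-activations of all four LSTM gates
        # at once (hence size = hidden_dim * 4); the lstm_unit op below
        # splits and applies them internally.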
        before_fc = fluid.layers.concat(input=[x_t, c_pre], axis=1)
        after_fc = fluid.layers.fc(input=before_fc, size=hidden_dim * 4)

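        # Allocate output variables and append the raw 'lstm_unit' operator
        # by hand; it consumes the gate pre-activations and the previous
        # cell state and produces the new cell state C and hidden state H.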
        dtype = x.dtype
        c = helper.create_tmp_variable(dtype)
        h = helper.create_tmp_variable(dtype)

        helper.append_op(
            type='lstm_unit',
            inputs={"X": after_fc,
                    "C_prev": c_pre},
            outputs={"C": c,
                     "H": h},
            attrs={"forget_bias": forget_bias})

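        # Carry the new cell state into the next time step and collect the
        # hidden state as the step output.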
        rnn.update_memory(c_pre, c)
        rnn.output(h)

    return rnn()


def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
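    """Build the sentiment network: embedding -> one LSTM layer -> softmax
    classifier, returning the average cross-entropy cost and accuracy.
    """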
    data = fluid.layers.data(
        name="words",
        shape=[seq_len * batch_size, 1],
        append_batch_size=False,
        dtype="int64",
        lod_level=1)
    label = fluid.layers.data(
        name="label",
        shape=[batch_size, 1],
        append_batch_size=False,
        dtype="int64")

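    # Embed the word ids, then reshape the flat sequence batch to
    # [batch_size, seq_len, emb_dim] and transpose it to time-major
    # [seq_len, batch_size, emb_dim] so the StaticRNN can slice one step
    # at a time.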
    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
    emb = fluid.layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
    emb = fluid.layers.transpose(x=emb, axis=[1, 0, 2])

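    # Zero-initialized cell state; stop_gradient=False so it participates
    # in the backward pass through the RNN memory.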
    c_pre_init = fluid.layers.fill_constant(
        dtype=emb.dtype, shape=[batch_size, emb_dim], value=0.0)
    c_pre_init.stop_gradient = False
    layer_1_out = lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)
    layer_1_out = fluid.layers.transpose(x=layer_1_out, axis=[1, 0, 2])

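    # Softmax classifier on top of the LSTM outputs, trained with
    # cross-entropy against the sentiment labels.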
    prediction = fluid.layers.fc(input=layer_1_out,
                                 size=class_dim,
                                 act="softmax")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)

    avg_cost = fluid.layers.mean(x=cost)
    adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
    adam_optimizer.minimize(avg_cost)
    acc = fluid.layers.accuracy(input=prediction, label=label)

    return avg_cost, acc


def to_lodtensor(data, place):
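    """Pack a batch of variable-length sequences into one LoDTensor.

    The LoD (level-of-detail) info is the list of cumulative offsets
    [0, len(seq_0), len(seq_0) + len(seq_1), ...] marking where each
    sequence begins and ends in the flattened data.
    """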
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = fluid.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res


def chop_data(data, chop_len=80, batch_size=50):
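    # Keep only sequences of at least chop_len tokens, truncate them to
    # chop_len, and take the first batch_size, so every fed batch matches
    # the fixed shapes the graph was built with.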
    data = [(x[0][:chop_len], x[1]) for x in data if len(x[0]) >= chop_len]

    return data[:batch_size]


def prepare_feed_data(data, place):
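    # Convert a chopped batch into the 'words' LoDTensor and 'label' tensor
    # expected by the feed dict.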
    tensor_words = to_lodtensor([x[0] for x in data], place)

    label = np.array([x[1] for x in data]).astype("int64")
    label = label.reshape([len(label), 1])
    tensor_label = fluid.LoDTensor()
    tensor_label.set(label, place)

    return tensor_words, tensor_label


def main():
    BATCH_SIZE = 100
    PASS_NUM = 5

    word_dict = paddle.dataset.imdb.word_dict()
    print "load word dict successfully"
    dict_dim = len(word_dict)
    class_dim = 2

    cost, acc = lstm_net(dict_dim=dict_dim, class_dim=class_dim)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=BATCH_SIZE * 10),
        batch_size=BATCH_SIZE)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    for pass_id in range(PASS_NUM):
        for data in train_data():
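            # Chop each reader batch down to the fixed seq_len/batch_size
            # (80 x 50 by default) that lstm_net was compiled with.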
            chopped_data = chop_data(data)
            tensor_words, tensor_label = prepare_feed_data(chopped_data, place)

            outs = exe.run(fluid.default_main_program(),
                           feed={"words": tensor_words,
                                 "label": tensor_label},
                           fetch_list=[cost, acc])
            cost_val = np.array(outs[0])
            acc_val = np.array(outs[1])

            print("cost=" + str(cost_val) + " acc=" + str(acc_val))
            if acc_val > 0.7:
                exit(0)
    exit(1)


if __name__ == '__main__':
    main()