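# test_word2vec.py
#
# Trains a small feed-forward N-gram language model with a word2vec-style
# shared embedding table on the imikolov dataset, using the legacy
# paddle.v2.fluid API.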
import paddle.v2 as paddle
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.core as core
import paddle.v2.fluid.optimizer as optimizer

from paddle.v2.fluid.framework import Program
from paddle.v2.fluid.executor import Executor

import numpy as np

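# Two separate Programs: startup_program collects the parameter-initialization
# ops, main_program collects the forward, backward, and optimization ops.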
startup_program = Program()
main_program = Program()

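# Model hyperparameters: 32-d word embeddings, a 256-unit hidden layer,
# 5-word windows from the reader, batches of 32, and sparse embedding updates.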
embed_size = 32
hidden_size = 256
N = 5
batch_size = 32
is_sparse = True

word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)

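# One int64 data layer per position in the 5-gram: four context words plus
# the next word to be predicted.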
first_word = layers.data(
    name='firstw',
    shape=[1],
    data_type='int64',
    main_program=main_program,
    startup_program=startup_program)
second_word = layers.data(
    name='secondw',
    shape=[1],
    data_type='int64',
    main_program=main_program,
    startup_program=startup_program)
third_word = layers.data(
    name='thirdw',
    shape=[1],
    data_type='int64',
    main_program=main_program,
    startup_program=startup_program)
forth_word = layers.data(
    name='forthw',
    shape=[1],
    data_type='int64',
    main_program=main_program,
    startup_program=startup_program)
next_word = layers.data(
    name='nextw',
    shape=[1],
    data_type='int64',
    main_program=main_program,
    startup_program=startup_program)

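# Embed each context word with the same lookup table: giving every embedding
# layer the parameter name 'shared_w' makes them share one weight matrix.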
embed_first = layers.embedding(
    input=first_word,
    size=[dict_size, embed_size],
    data_type='float32',
    is_sparse=is_sparse,
    param_attr={'name': 'shared_w'},
    main_program=main_program,
    startup_program=startup_program)
embed_second = layers.embedding(
    input=second_word,
    size=[dict_size, embed_size],
    data_type='float32',
    is_sparse=is_sparse,
    param_attr={'name': 'shared_w'},
    main_program=main_program,
    startup_program=startup_program)

embed_third = layers.embedding(
    input=third_word,
    size=[dict_size, embed_size],
    data_type='float32',
    is_sparse=is_sparse,
    param_attr={'name': 'shared_w'},
    main_program=main_program,
    startup_program=startup_program)
embed_forth = layers.embedding(
    input=forth_word,
    size=[dict_size, embed_size],
    data_type='float32',
    is_sparse=is_sparse,
    param_attr={'name': 'shared_w'},
    main_program=main_program,
    startup_program=startup_program)

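# Concatenate the four context embeddings, pass them through a sigmoid hidden
# layer, and predict the next word with a softmax over the vocabulary.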
concat_embed = layers.concat(
    input=[embed_first, embed_second, embed_third, embed_forth],
    axis=1,
    main_program=main_program,
    startup_program=startup_program)

hidden1 = layers.fc(input=concat_embed,
                    size=hidden_size,
                    act='sigmoid',
                    main_program=main_program,
                    startup_program=startup_program)
predict_word = layers.fc(input=hidden1,
                         size=dict_size,
                         act='softmax',
                         main_program=main_program,
                         startup_program=startup_program)
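# Cross-entropy loss against the true next word, averaged over the batch.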
cost = layers.cross_entropy(
    input=predict_word,
    label=next_word,
    main_program=main_program,
    startup_program=startup_program)
avg_cost = layers.mean(
    x=cost, main_program=main_program, startup_program=startup_program)

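# Plain SGD: minimize() appends the backward and parameter-update ops to
# main_program; startup_program is passed so that any optimizer-created
# variables get their initializers there.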
sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
opts = sgd_optimizer.minimize(avg_cost, startup_program)

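# Batched reader over imikolov 5-grams, executed on CPU.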
train_reader = paddle.batch(
    paddle.dataset.imikolov.train(word_dict, N), batch_size)

place = core.CPUPlace()
exe = Executor(place)

# FIXME: remove the early exit below once
# https://github.com/PaddlePaddle/Paddle/issues/5434 is fixed.
exit(0)

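# Run the startup program once to initialize parameters.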
exe.run(startup_program)
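# Training loop: each sample is a 5-word tuple, so regroup the batch
# column-wise, wrap every column in an int64 LoDTensor of shape (batch, 1),
# and feed the tensors by data-layer name.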
PASS_NUM = 100
for pass_id in range(PASS_NUM):
    for data in train_reader():
        input_data = [[data_idx[idx] for data_idx in data] for idx in xrange(5)]
        input_data = map(lambda x: np.array(x).astype("int64"), input_data)
        input_data = map(lambda x: np.expand_dims(x, axis=1), input_data)

        first_data = input_data[0]
        first_tensor = core.LoDTensor()
        first_tensor.set(first_data, place)

        second_data = input_data[1]
        second_tensor = core.LoDTensor()
        second_tensor.set(second_data, place)

        third_data = input_data[2]
        third_tensor = core.LoDTensor()
        third_tensor.set(third_data, place)

        forth_data = input_data[3]
        forth_tensor = core.LoDTensor()
        forth_tensor.set(forth_data, place)

        next_data = input_data[4]
        next_tensor = core.LoDTensor()
        next_tensor.set(next_data, place)

        outs = exe.run(main_program,
                       feed={
                           'firstw': first_tensor,
                           'secondw': second_tensor,
                           'thirdw': third_tensor,
                           'forthw': forth_tensor,
                           'nextw': next_tensor
                       },
                       fetch_list=[avg_cost])
        out = np.array(outs[0])
        if out[0] < 10.0:
            exit(0)  # average cost dropped below 10.0; treat the test as passed.
exit(1)