#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
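
"""Word2vec example for the Fluid high-level API: an N-gram neural language
model is trained on the imikolov dataset with fluid.Trainer and then queried
with fluid.Inferencer."""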

import paddle
import paddle.fluid as fluid
import numpy as np
import math
import sys
from functools import partial

PASS_NUM = 100  # number of training passes (not used below; the trainer runs a single epoch)
EMBED_SIZE = 32  # dimensionality of each word embedding
HIDDEN_SIZE = 256  # size of the hidden fully-connected layer
N = 5  # window size: 4 context words are used to predict the 5th
BATCH_SIZE = 32  # mini-batch size for training and testing


def create_random_lodtensor(lod, place, low, high):
    # Data elements are drawn uniformly from the inclusive range [low, high].
    data = np.random.randint(low, high + 1, [lod[-1], 1]).astype("int64")
    res = fluid.LoDTensor()
    res.set(data, place)
    res.set_lod([lod])
    return res


# Build the word -> id dictionary from the imikolov training corpus.
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)


def inference_program(is_sparse):
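    """Build the N-gram network: embed four context words with a shared
    embedding table ('shared_w'), concatenate the embeddings, pass them
    through a sigmoid hidden layer, and predict the next word with softmax."""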
    first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
    second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
    third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
    forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')

    embed_first = fluid.layers.embedding(
        input=first_word,
        size=[dict_size, EMBED_SIZE],
        dtype='float32',
        is_sparse=is_sparse,
        param_attr='shared_w')
    embed_second = fluid.layers.embedding(
        input=second_word,
        size=[dict_size, EMBED_SIZE],
        dtype='float32',
        is_sparse=is_sparse,
        param_attr='shared_w')
    embed_third = fluid.layers.embedding(
        input=third_word,
        size=[dict_size, EMBED_SIZE],
        dtype='float32',
        is_sparse=is_sparse,
        param_attr='shared_w')
    embed_forth = fluid.layers.embedding(
        input=forth_word,
        size=[dict_size, EMBED_SIZE],
        dtype='float32',
        is_sparse=is_sparse,
        param_attr='shared_w')

    concat_embed = fluid.layers.concat(
        input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
    hidden1 = fluid.layers.fc(input=concat_embed,
                              size=HIDDEN_SIZE,
                              act='sigmoid')
    predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax')
    return predict_word


def train_program(is_sparse):
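    """Attach the cross-entropy training loss to the inference network and
    return the average cost to be minimized."""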
    # The declaration of 'next_word' must come after the call to inference_program();
    # otherwise the data input order of the train program would be [nextw, firstw,
    # secondw, thirdw, forthw], which is incorrect.
    predict_word = inference_program(is_sparse)
    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
    avg_cost = fluid.layers.mean(cost)
    return avg_cost


def train(use_cuda, train_program, save_path):
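    """Train the model with fluid.Trainer on the imikolov data, evaluating at
    the end of each epoch and saving the parameters once the loss is low
    enough."""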
    train_reader = paddle.batch(
        paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)
    test_reader = paddle.batch(
        paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    def event_handler(event):
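        # At the end of each epoch: evaluate on the test reader, save the
        # parameters and stop early once the loss drops below 5.0, and abort
        # if the loss has become NaN.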
        if isinstance(event, fluid.EndEpochEvent):
            outs = trainer.test(reader=test_reader)
            avg_cost = outs[0]
            print("loss= ", avg_cost)

            if avg_cost < 5.0:
                trainer.save_params(save_path)
                return
            if math.isnan(avg_cost):
                sys.exit("got NaN loss, training failed.")

    trainer = fluid.Trainer(
        train_program, fluid.optimizer.SGD(learning_rate=0.001), place=place)
    trainer.train(
        reader=train_reader, num_epochs=1, event_handler=event_handler)


def infer(use_cuda, inference_program, save_path):
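    """Load the saved parameters with fluid.Inferencer and run the network on
    four randomly drawn word ids, printing the predicted distribution over the
    dictionary."""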
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=save_path, place=place)

    # LoD in offset form: [0, 1] describes a single sequence containing one word id.
    lod = [0, 1]
    first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
    second_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
    third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
    fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)

    result = inferencer.infer({
        'firstw': first_word,
        'secondw': second_word,
        'thirdw': third_word,
        'forthw': fourth_word
    })
    print(np.array(result[0]))


def main(use_cuda, is_sparse):
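    """Train and then infer for one configuration; skip CUDA runs when Paddle
    was built without GPU support."""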
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    save_path = "word2vec.params"
    train(use_cuda, partial(train_program, is_sparse), save_path)
    infer(use_cuda, partial(inference_program, is_sparse), save_path)


if __name__ == '__main__':
    for use_cuda in (False, True):
        for is_sparse in (False, True):
            main(use_cuda=use_cuda, is_sparse=is_sparse)