#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import unittest
import os
import numpy as np
import math
import sys


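# Trains a small N-gram neural language model on the imikolov dataset:
# four context words are embedded, concatenated, and passed through a
# hidden layer to predict the fifth word.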
def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
    PASS_NUM = 100
    EMBED_SIZE = 32
    HIDDEN_SIZE = 256
    N = 5
    BATCH_SIZE = 32
    IS_SPARSE = is_sparse

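    # All four context words share one embedding table via the 'shared_w'
    # parameter attribute; their embeddings are concatenated and fed through
    # a sigmoid hidden layer into a softmax over the whole vocabulary.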
    def __network__(words):
        embed_first = fluid.layers.embedding(
            input=words[0],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')
        embed_second = fluid.layers.embedding(
            input=words[1],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')
        embed_third = fluid.layers.embedding(
            input=words[2],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')
        embed_forth = fluid.layers.embedding(
            input=words[3],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')

        concat_embed = fluid.layers.concat(
            input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
        hidden1 = fluid.layers.fc(input=concat_embed,
                                  size=HIDDEN_SIZE,
                                  act='sigmoid')
        predict_word = fluid.layers.fc(input=hidden1,
                                       size=dict_size,
                                       act='softmax')
        cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
        avg_cost = fluid.layers.mean(cost)
        return avg_cost, predict_word

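    # Build the vocabulary from the imikolov dataset; dict_size bounds both
    # the embedding table and the softmax output layer.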
    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

    first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
    second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
    third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
    forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')

    if not is_parallel:
        avg_cost, predict_word = __network__(
            [first_word, second_word, third_word, forth_word, next_word])
    else:
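        # ParallelDo replicates the network on every available place (device)
        # and the per-device costs are averaged afterwards.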
        places = get_places()
        pd = fluid.layers.ParallelDo(places)
        with pd.do():
            avg_cost, predict_word = __network__(
                list(
                    map(pd.read_input, [
                        first_word, second_word, third_word, forth_word,
                        next_word
                    ])))
            pd.write_output(avg_cost)

        avg_cost = fluid.layers.mean(pd())

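    # Plain SGD is sufficient here; the test only needs the cost to fall
    # below the threshold checked in train_loop.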
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)

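    # paddle.dataset.imikolov.train yields N-word tuples; paddle.batch groups
    # them into mini-batches of BATCH_SIZE.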
    train_reader = paddle.batch(
        paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(
        feed_list=[first_word, second_word, third_word, forth_word, next_word],
        place=place)

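    # Runs the optimization loop; training stops early (and the inference
    # model is saved) once the average cost drops below 5.0, and the test
    # fails if the cost never gets there or becomes NaN.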
    def train_loop(main_program):
        exe.run(fluid.default_startup_program())

        for pass_id in range(PASS_NUM):
            for data in train_reader():
                avg_cost_np = exe.run(main_program,
                                      feed=feeder.feed(data),
                                      fetch_list=[avg_cost])
                if avg_cost_np[0] < 5.0:
                    if save_dirname is not None:
                        fluid.io.save_inference_model(save_dirname, [
                            'firstw', 'secondw', 'thirdw', 'forthw'
                        ], [predict_word], exe)
                    return
                if math.isnan(float(avg_cost_np[0])):
                    sys.exit("got NaN loss, training failed.")

        raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0]))

    if is_local:
        train_loop(fluid.default_main_program())
    else:
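        # Distributed mode: the cluster layout (pserver endpoints, trainer
        # count, this node's role) is read from environment variables, and the
        # program is transpiled into parameter-server and trainer programs.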
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

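    # Load and run the saved model inside a fresh scope so its variables do
    # not clash with anything in the global scope.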
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program
        # desc, the feed_target_names (the names of the variables that will be
        # fed data using feed operators), and the fetch_targets (the variables
        # that we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Set up the inputs by creating 4 LoDTensors, one per context word.
        # Here each word is simply an index used to look up the corresponding
        # word vector, so the shape of each word (base_shape) should be [1].
        # The recursive_sequence_lengths, the length-based level of detail
        # (LoD) of each LoDTensor, should be [[1]], meaning there is one level
        # of detail containing a single sequence of one word.
        # Note that recursive_sequence_lengths must be a list of lists.
        recursive_seq_lens = [[1]]
        base_shape = [1]
        # The range of random integers is [low, high]
        first_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
        second_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
        third_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
        fourth_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'forthw'

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: first_word,
                              feed_target_names[1]: second_word,
                              feed_target_names[2]: third_word,
                              feed_target_names[3]: fourth_word
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)


def main(use_cuda, is_sparse, is_parallel):
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

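    # The inference model is only saved in the non-parallel case; infer()
    # returns immediately when save_dirname is None.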
    if not is_parallel:
        save_dirname = "word2vec.inference.model"
    else:
        save_dirname = None

    train(use_cuda, is_sparse, is_parallel, save_dirname)
    infer(use_cuda, save_dirname)


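# By default only a minimal subset of the generated tests runs; set the
# FULL_TEST environment variable to a truthy value to run every combination.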
FULL_TEST = os.getenv('FULL_TEST',
                      '0').lower() in ['true', '1', 't', 'y', 'yes', 'on']
SKIP_REASON = "Only run the minimum number of tests on the CI server, to keep CI fast"


class W2VTest(unittest.TestCase):
    pass


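# Generates one test method per (use_cuda, is_sparse, is_parallel)
# combination and attaches it to W2VTest.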
def inject_test_method(use_cuda, is_sparse, is_parallel):
    fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu", "sparse"
                                        if is_sparse else "dense", "parallel"
                                        if is_parallel else "normal")

    def __impl__(*args, **kwargs):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(
                    use_cuda=use_cuda,
                    is_sparse=is_sparse,
                    is_parallel=is_parallel)

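    # Always run the sparse variants (on CUDA builds, only their GPU flavor);
    # every other combination is skipped unless FULL_TEST is enabled.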
    if (not fluid.core.is_compiled_with_cuda() or use_cuda) and is_sparse:
        fn = __impl__
    else:
        # skip the other tests when on the CI server
        fn = unittest.skipUnless(
            condition=FULL_TEST, reason=SKIP_REASON)(__impl__)

    setattr(W2VTest, fn_name, fn)


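# Register tests for every combination of device, embedding sparsity, and
# parallelism.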
for use_cuda in (False, True):
    for is_sparse in (False, True):
        for is_parallel in (False, True):
            inject_test_method(use_cuda, is_sparse, is_parallel)

if __name__ == '__main__':
    unittest.main()