#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import unittest
import os
import numpy as np
import math
import sys


def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True):
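    # Build an N-gram word2vec network, train it on the imikolov dataset,
    # and optionally save an inference model to save_dirname.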
    PASS_NUM = 100  # maximum number of passes over the training data
    EMBED_SIZE = 32  # dimensionality of each word embedding
    HIDDEN_SIZE = 256  # width of the hidden fully-connected layer
    N = 5  # n-gram window size: four context words predict the fifth
    BATCH_SIZE = 32
    IS_SPARSE = is_sparse

    def __network__(words):
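        # Each of the four context words is embedded through the same shared
        # table ('shared_w'); is_sparse selects sparse gradient updates for
        # the embedding parameter.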
        embed_first = fluid.layers.embedding(
            input=words[0],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')
        embed_second = fluid.layers.embedding(
            input=words[1],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')
        embed_third = fluid.layers.embedding(
            input=words[2],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')
        embed_forth = fluid.layers.embedding(
            input=words[3],
            size=[dict_size, EMBED_SIZE],
            dtype='float32',
            is_sparse=IS_SPARSE,
            param_attr='shared_w')

        concat_embed = fluid.layers.concat(
            input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
        hidden1 = fluid.layers.fc(input=concat_embed,
                                  size=HIDDEN_SIZE,
                                  act='sigmoid')
        predict_word = fluid.layers.fc(input=hidden1,
                                       size=dict_size,
                                       act='softmax')
        cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
        avg_cost = fluid.layers.mean(cost)
        return avg_cost, predict_word

    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

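    # One int64 input per position of the 5-word window: the first four are
    # the context words and 'nextw' is the label to predict.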
    first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
    second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
    third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
    forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')

    if not is_parallel:
        avg_cost, predict_word = __network__(
            [first_word, second_word, third_word, forth_word, next_word])
    else:
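        # ParallelDo replicates the network on every available device and
        # splits each mini-batch across them; the per-device costs are
        # gathered by pd() and averaged below.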
        places = get_places()
        pd = fluid.layers.ParallelDo(places)
        with pd.do():
            avg_cost, predict_word = __network__(
                list(
                    map(pd.read_input, [
                        first_word, second_word, third_word, forth_word,
                        next_word
                    ])))
            pd.write_output(avg_cost)

        avg_cost = fluid.layers.mean(pd())

    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)

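    # imikolov yields N-word training windows; paddle.batch groups them into
    # mini-batches of BATCH_SIZE.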
    train_reader = paddle.batch(
        paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(
        feed_list=[first_word, second_word, third_word, forth_word, next_word],
        place=place)

    def train_loop(main_program):
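        # Run up to PASS_NUM passes, stopping early (and saving an inference
        # model if requested) once the average cost drops below 5.0.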
        exe.run(fluid.default_startup_program())

        for pass_id in range(PASS_NUM):
            for data in train_reader():
                avg_cost_np = exe.run(main_program,
                                      feed=feeder.feed(data),
                                      fetch_list=[avg_cost])
                if avg_cost_np[0] < 5.0:
                    if save_dirname is not None:
                        fluid.io.save_inference_model(save_dirname, [
                            'firstw', 'secondw', 'thirdw', 'forthw'
                        ], [predict_word], exe)
                    return
                if math.isnan(float(avg_cost_np[0])):
                    sys.exit("got NaN loss, training failed.")

        raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0]))

    if is_local:
        train_loop(fluid.default_main_program())
    else:
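        # Distributed mode: read the cluster layout from environment
        # variables, then let DistributeTranspiler split the program into
        # parameter-server and trainer sides.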
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(use_cuda, save_dirname=None):
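    # Load the model saved by train() and run one forward pass on randomly
    # generated word indices.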
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program
        # desc, the feed_target_names (the names of variables that will be
        # fed data using feed operators), and the fetch_targets (variables
        # that we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Set up the inputs by creating 4 LoDTensors representing 4 words.
        # Each word here is simply an index used to look up the corresponding
        # word vector, so the shape of each word (base_shape) should be [1].
        # The recursive_sequence_lengths, the length-based level of detail
        # (LoD) of each LoDTensor, should be [[1]], meaning there is one
        # level of detail holding a single sequence of one word.
        # Note that recursive_sequence_lengths must be a list of lists.
        recursive_seq_lens = [[1]]
        base_shape = [1]
        # The range of random integers is [low, high]
        first_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
        second_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
        third_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)
        fourth_word = fluid.create_random_int_lodtensor(
            recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'forthw'

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: first_word,
                              feed_target_names[1]: second_word,
                              feed_target_names[2]: third_word,
                              feed_target_names[3]: fourth_word
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].recursive_sequence_lengths())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)


def main(use_cuda, is_sparse, is_parallel):
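    # Skip CUDA runs when the binary was built without CUDA support.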
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    if not is_parallel:
        save_dirname = "word2vec.inference.model"
    else:
        save_dirname = None

    train(use_cuda, is_sparse, is_parallel, save_dirname)
    infer(use_cuda, save_dirname)


FULL_TEST = os.getenv('FULL_TEST',
                      '0').lower() in ['true', '1', 't', 'y', 'yes', 'on']
SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster"


class W2VTest(unittest.TestCase):
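    # Test methods are injected below by inject_test_method().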
    pass


def inject_test_method(use_cuda, is_sparse, is_parallel):
    fn_name = "test_{0}_{1}_{2}".format("cuda" if use_cuda else "cpu", "sparse"
                                        if is_sparse else "dense", "parallel"
                                        if is_parallel else "normal")

    def __impl__(*args, **kwargs):
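        # Run main() inside fresh Program and Scope objects so the injected
        # tests do not share global state.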
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(
                    use_cuda=use_cuda,
                    is_sparse=is_sparse,
                    is_parallel=is_parallel)

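    # Sparse variants always run; the remaining combinations are exercised
    # only when FULL_TEST is enabled.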
    if (not fluid.core.is_compiled_with_cuda() or use_cuda) and is_sparse:
        fn = __impl__
    else:
        # skip the other tests when running on the CI server
        fn = unittest.skipUnless(
            condition=FULL_TEST, reason=SKIP_REASON)(__impl__)

    setattr(W2VTest, fn_name, fn)


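# Inject one test per (use_cuda, is_sparse, is_parallel) combination.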
for use_cuda in (False, True):
    for is_sparse in (False, True):
        for is_parallel in (False, True):
            inject_test_method(use_cuda, is_sparse, is_parallel)

if __name__ == '__main__':
    unittest.main()