#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import paddle
import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places
import unittest
import os
import numpy as np
import math
import sys

paddle.enable_static()


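# Map a target name ("cuda", "xpu" or "cpu") to the corresponding fluid place.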
def get_place(target):
    if target == "cuda":
        return fluid.CUDAPlace(0)
    elif target == "xpu":
        return fluid.XPUPlace(0)
    elif target == "cpu":
        return fluid.CPUPlace()
    else:
        raise ValueError(
            "Target `{0}` is not in the supported list: `cuda`, `xpu` and `cpu`.".
            format(target))


def train(target,
          is_sparse,
          is_parallel,
          save_dirname,
          is_local=True,
          use_bf16=False,
          pure_bf16=False):
    PASS_NUM = 100
    EMBED_SIZE = 32
    HIDDEN_SIZE = 256
    N = 5
    BATCH_SIZE = 32
    IS_SPARSE = is_sparse

    def __network__(words):
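        # N-gram language model: embed the four context words with a shared
        # embedding table, concatenate them, and predict the next word.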
        embed_first = fluid.layers.embedding(input=words[0],
                                             size=[dict_size, EMBED_SIZE],
                                             dtype='float32',
                                             is_sparse=IS_SPARSE,
                                             param_attr='shared_w')
        embed_second = fluid.layers.embedding(input=words[1],
                                              size=[dict_size, EMBED_SIZE],
                                              dtype='float32',
                                              is_sparse=IS_SPARSE,
                                              param_attr='shared_w')
        embed_third = fluid.layers.embedding(input=words[2],
                                             size=[dict_size, EMBED_SIZE],
                                             dtype='float32',
                                             is_sparse=IS_SPARSE,
                                             param_attr='shared_w')
        embed_forth = fluid.layers.embedding(input=words[3],
                                             size=[dict_size, EMBED_SIZE],
                                             dtype='float32',
                                             is_sparse=IS_SPARSE,
                                             param_attr='shared_w')

        concat_embed = fluid.layers.concat(
            input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
        hidden1 = fluid.layers.fc(input=concat_embed,
                                  size=HIDDEN_SIZE,
                                  act='sigmoid')
        predict_word = fluid.layers.fc(input=hidden1,
                                       size=dict_size,
                                       act='softmax')
        cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
        avg_cost = fluid.layers.mean(cost)
        return avg_cost, predict_word

    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

    first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
    second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
    third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
    forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')

    if not is_parallel:
        avg_cost, predict_word = __network__(
            [first_word, second_word, third_word, forth_word, next_word])
    else:
        raise NotImplementedError()

    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
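    # Optionally wrap SGD with bfloat16 auto mixed precision; softmax and
    # concat are kept in fp32 via the custom list below.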
    if use_bf16:
        sgd_optimizer = paddle.static.amp.bf16.decorate_bf16(
            sgd_optimizer,
            amp_lists=paddle.static.amp.bf16.AutoMixedPrecisionListsBF16(
                custom_fp32_list={'softmax', 'concat'}, ),
            use_bf16_guard=False,
            use_pure_bf16=pure_bf16)

    sgd_optimizer.minimize(avg_cost, fluid.default_startup_program())

    train_reader = paddle.batch(paddle.dataset.imikolov.train(word_dict, N),
                                BATCH_SIZE)

    place = get_place(target)
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(
        feed_list=[first_word, second_word, third_word, forth_word, next_word],
        place=place)

    def train_loop(main_program):
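        # Run the startup program, then train until the average cost drops
        # below 5.0 (saving the inference model) or fail on NaN/non-convergence.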
        exe.run(fluid.default_startup_program())
        if pure_bf16:
            sgd_optimizer.amp_init(exe.place)

        for pass_id in range(PASS_NUM):
            for data in train_reader():
                avg_cost_np = exe.run(main_program,
                                      feed=feeder.feed(data),
                                      fetch_list=[avg_cost])
                if avg_cost_np[0] < 5.0:
                    if save_dirname is not None and not pure_bf16:
                        fluid.io.save_inference_model(
                            save_dirname,
                            ['firstw', 'secondw', 'thirdw', 'forthw'],
                            [predict_word], exe)
                    return
                if math.isnan(float(avg_cost_np[0])):
                    sys.exit("got NaN loss, training failed.")

        raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0]))

    if is_local:
        train_loop(fluid.default_main_program())
    else:
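        # Distributed mode: read pserver/trainer endpoints from environment
        # variables and transpile the program for the current training role.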
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


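# Load the saved inference model, feed four random words, and cross-check the
# executor output against the native inference engine.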
def infer(target, save_dirname=None):
    if save_dirname is None:
        return

    place = get_place(target)
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
        # is simply an index to look up for the corresponding word vector and hence
        # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
        # which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
        # meaning there is only one level of detail and there is only one sequence of
        # one word on this level.
        # Note that recursive_sequence_lengths should be a list of lists.
        recursive_seq_lens = [[1]]
        base_shape = [1]
        # The range of random integers is [low, high]
        first_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                       base_shape,
                                                       place,
                                                       low=0,
                                                       high=dict_size - 1)
        second_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                        base_shape,
                                                        place,
                                                        low=0,
                                                        high=dict_size - 1)
        third_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                       base_shape,
                                                       place,
                                                       low=0,
                                                       high=dict_size - 1)
        fourth_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                        base_shape,
                                                        place,
                                                        low=0,
                                                        high=dict_size - 1)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'forthw'

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: first_word,
                              feed_target_names[1]: second_word,
                              feed_target_names[2]: third_word,
                              feed_target_names[3]: fourth_word
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)

        def to_infer_tensor(lod_tensor):
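            # Wrap a LoDTensor as a PaddleTensor for the native inference engine.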
            infer_tensor = fluid.core.PaddleTensor()
            infer_tensor.lod = lod_tensor.lod()
            infer_tensor.data = fluid.core.PaddleBuf(np.array(lod_tensor))
            infer_tensor.shape = lod_tensor.shape()
            infer_tensor.dtype = fluid.core.PaddleDType.INT64
            return infer_tensor

        infer_inputs = [first_word, second_word, third_word, fourth_word]
        infer_inputs = [to_infer_tensor(t) for t in infer_inputs]

        infer_config = fluid.core.NativeConfig()
        infer_config.model_dir = 'word2vec.inference.model'
        if target == "cuda":
            infer_config.use_gpu = True
            infer_config.device = 0
            infer_config.fraction_of_gpu_memory = 0.15
        elif target == "xpu":
            infer_config.use_xpu = True
        compiled_program = fluid.compiler.CompiledProgram(inference_program)
        compiled_program._with_inference_optimize(infer_config)
        assert compiled_program._is_inference is True
        infer_outputs = exe.run(compiled_program, feed=infer_inputs)
        np_data = np.array(results[0])
        infer_out = infer_outputs[0].data.float_data()
        for a, b in zip(np_data[0], infer_out):
            assert np.isclose(a, b, rtol=5e-5), "a: {}, b: {}".format(a, b)


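# Skip configurations the current build cannot run, then train and infer.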
def main(target, is_sparse, is_parallel, use_bf16, pure_bf16):
    if target == "cuda" and not fluid.core.is_compiled_with_cuda():
        return
    if target == "xpu" and not fluid.core.is_compiled_with_xpu():
        return

    if use_bf16 and not fluid.core.is_compiled_with_mkldnn():
        return

    if not is_parallel:
        save_dirname = "word2vec.inference.model"
    else:
        save_dirname = None

    if target == "xpu":
        # This model cannot be trained with xpu temporarily,
        # so only inference is turned on.
        train("cpu", is_sparse, is_parallel, save_dirname)
    else:
        train(target,
              is_sparse,
              is_parallel,
              save_dirname,
              use_bf16=use_bf16,
              pure_bf16=pure_bf16)
    infer(target, save_dirname)


FULL_TEST = os.getenv('FULL_TEST',
                      '0').lower() in ['true', '1', 't', 'y', 'yes', 'on']
SKIP_REASON = "Only run the minimum number of tests on the CI server, to make CI faster"


class W2VTest(unittest.TestCase):
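    # Test methods are attached dynamically by inject_test_method below.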
    pass


def inject_test_method(target,
                       is_sparse,
                       is_parallel,
                       use_bf16=False,
                       pure_bf16=False):
    fn_name = "test_{0}_{1}_{2}{3}".format(
        target, "sparse" if is_sparse else "dense",
        "parallel" if is_parallel else "normal",
        "_purebf16" if pure_bf16 else "_bf16" if use_bf16 else "")

    def __impl__(*args, **kwargs):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(target, is_sparse, is_parallel, use_bf16, pure_bf16)

    if (not fluid.core.is_compiled_with_cuda()
            or target == "cuda") and is_sparse:
        fn = __impl__
    else:
        # skip the other test when on CI server
        fn = unittest.skipUnless(condition=FULL_TEST,
                                 reason=SKIP_REASON)(__impl__)

    setattr(W2VTest, fn_name, fn)


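# Inject one test method per (target, is_sparse, is_parallel) combination;
# bf16 variants are added for CPU only.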
for target in ("cuda", "cpu", "xpu"):
    for is_sparse in (False, True):
        for is_parallel in (False, ):
            inject_test_method(target, is_sparse, is_parallel)
inject_test_method("cpu", False, False, True)
inject_test_method("cpu", False, False, True, True)

if __name__ == '__main__':
    unittest.main()