#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
import unittest
import os
import numpy as np
import math
import sys
import tempfile

paddle.enable_static()


def get_place(target):
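    """Map a target name ('cuda', 'xpu' or 'cpu') to the corresponding fluid place."""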
    if target == "cuda":
        return fluid.CUDAPlace(0)
    elif target == "xpu":
        return fluid.XPUPlace(0)
    elif target == "cpu":
        return fluid.CPUPlace()
    else:
        raise ValueError(
            "Target `{0}` is not in the supported list: `cuda`, `xpu` and `cpu`.".
            format(target))


def train(target,
          is_sparse,
          is_parallel,
          save_dirname,
          is_local=True,
          use_bf16=False,
          pure_bf16=False):
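    # Training hyperparameters: up to PASS_NUM passes over N-gram samples
    # (N - 1 context words plus the next word), EMBED_SIZE-dim embeddings,
    # a HIDDEN_SIZE-unit hidden layer and batches of BATCH_SIZE.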
    PASS_NUM = 100
    EMBED_SIZE = 32
    HIDDEN_SIZE = 256
    N = 5
    BATCH_SIZE = 32
    IS_SPARSE = is_sparse

    def __network__(words):
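        # The four context words share one embedding table ('shared_w'); the
        # embeddings are concatenated, passed through a sigmoid FC layer, and a
        # softmax layer over the dictionary predicts the next word (words[4]).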
        embed_first = fluid.layers.embedding(input=words[0],
                                             size=[dict_size, EMBED_SIZE],
                                             dtype='float32',
                                             is_sparse=IS_SPARSE,
                                             param_attr='shared_w')
        embed_second = fluid.layers.embedding(input=words[1],
                                              size=[dict_size, EMBED_SIZE],
                                              dtype='float32',
                                              is_sparse=IS_SPARSE,
                                              param_attr='shared_w')
        embed_third = fluid.layers.embedding(input=words[2],
                                             size=[dict_size, EMBED_SIZE],
                                             dtype='float32',
                                             is_sparse=IS_SPARSE,
                                             param_attr='shared_w')
        embed_forth = fluid.layers.embedding(input=words[3],
                                             size=[dict_size, EMBED_SIZE],
                                             dtype='float32',
                                             is_sparse=IS_SPARSE,
                                             param_attr='shared_w')

        concat_embed = fluid.layers.concat(
            input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
        hidden1 = fluid.layers.fc(input=concat_embed,
                                  size=HIDDEN_SIZE,
                                  act='sigmoid')
        predict_word = fluid.layers.fc(input=hidden1,
                                       size=dict_size,
                                       act='softmax')
        cost = fluid.layers.cross_entropy(input=predict_word, label=words[4])
        avg_cost = paddle.mean(cost)
        return avg_cost, predict_word

    word_dict = paddle.dataset.imikolov.build_dict()
    dict_size = len(word_dict)

    first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
    second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
    third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
    forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
    next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')

    if not is_parallel:
        avg_cost, predict_word = __network__(
            [first_word, second_word, third_word, forth_word, next_word])
    else:
        raise NotImplementedError()

    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    if use_bf16:
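        # Wrap the SGD optimizer for bfloat16 AMP training; softmax and concat
        # are kept in fp32 via the custom list, and pure_bf16 enables the pure
        # bf16 mode (initialized via amp_init in train_loop below).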
        sgd_optimizer = paddle.static.amp.bf16.decorate_bf16(
            sgd_optimizer,
            amp_lists=paddle.static.amp.bf16.AutoMixedPrecisionListsBF16(
                custom_fp32_list={'softmax', 'concat'}, ),
            use_bf16_guard=False,
            use_pure_bf16=pure_bf16)

    sgd_optimizer.minimize(avg_cost, fluid.default_startup_program())

    train_reader = paddle.batch(paddle.dataset.imikolov.train(word_dict, N),
                                BATCH_SIZE)

    place = get_place(target)
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(
        feed_list=[first_word, second_word, third_word, forth_word, next_word],
        place=place)

    def train_loop(main_program):
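        # Run SGD passes until the average cost drops below 5.0, then save the
        # inference model (skipped for pure bf16) and return; abort on NaN loss
        # or raise if the cost never converges.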
        exe.run(fluid.default_startup_program())
        if pure_bf16:
            sgd_optimizer.amp_init(exe.place)

        for pass_id in range(PASS_NUM):
            for data in train_reader():
                avg_cost_np = exe.run(main_program,
                                      feed=feeder.feed(data),
                                      fetch_list=[avg_cost])
                if avg_cost_np[0] < 5.0:
                    if save_dirname is not None and not pure_bf16:
                        fluid.io.save_inference_model(
                            save_dirname,
                            ['firstw', 'secondw', 'thirdw', 'forthw'],
                            [predict_word], exe)
                    return
                if math.isnan(float(avg_cost_np[0])):
                    sys.exit("got NaN loss, training failed.")

        raise AssertionError(
            "Cost is too large: {0:2.2}".format(avg_cost_np[0]))

    if is_local:
        train_loop(fluid.default_main_program())
    else:
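        # Distributed mode: read the parameter-server/trainer topology from
        # environment variables and transpile the program for this node's role.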
        port = os.getenv("PADDLE_PSERVER_PORT", "6174")
        pserver_ips = os.getenv("PADDLE_PSERVER_IPS")  # ip,ip...
        eplist = []
        for ip in pserver_ips.split(","):
            eplist.append(':'.join([ip, port]))
        pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
        trainers = int(os.getenv("PADDLE_TRAINERS"))
        current_endpoint = os.getenv("POD_IP") + ":" + port
        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        t = fluid.DistributeTranspiler()
        t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
        if training_role == "PSERVER":
            pserver_prog = t.get_pserver_program(current_endpoint)
            pserver_startup = t.get_startup_program(current_endpoint,
                                                    pserver_prog)
            exe.run(pserver_startup)
            exe.run(pserver_prog)
        elif training_role == "TRAINER":
            train_loop(t.get_trainer_program())


def infer(target, save_dirname=None):
    if save_dirname is None:
        return

    place = get_place(target)
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Set up inputs by creating 4 LoDTensors representing 4 words. Here each word
        # is simply an index used to look up the corresponding word vector, so the
        # shape of each word (base_shape) should be [1]. The recursive_sequence_lengths,
        # which is the length-based level of detail (lod) of each LoDTensor, should be
        # [[1]], meaning there is only one level of detail and only one sequence of
        # one word on that level.
        # Note that recursive_sequence_lengths should be a list of lists.
        recursive_seq_lens = [[1]]
        base_shape = [1]
        # The range of random integers is [low, high]
        first_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                       base_shape,
                                                       place,
                                                       low=0,
                                                       high=dict_size - 1)
        second_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                        base_shape,
                                                        place,
                                                        low=0,
                                                        high=dict_size - 1)
        third_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                       base_shape,
                                                       place,
                                                       low=0,
                                                       high=dict_size - 1)
        fourth_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                        base_shape,
                                                        place,
                                                        low=0,
                                                        high=dict_size - 1)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'forthw'

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: first_word,
                              feed_target_names[1]: second_word,
                              feed_target_names[2]: third_word,
                              feed_target_names[3]: fourth_word
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)

        def to_infer_tensor(lod_tensor):
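            # Convert a LoDTensor into a PaddleTensor that can be fed to the
            # NativeConfig inference engine below.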
            infer_tensor = fluid.core.PaddleTensor()
            infer_tensor.lod = lod_tensor.lod()
            infer_tensor.data = fluid.core.PaddleBuf(np.array(lod_tensor))
            infer_tensor.shape = lod_tensor.shape()
            infer_tensor.dtype = fluid.core.PaddleDType.INT64
            return infer_tensor

        infer_inputs = [first_word, second_word, third_word, fourth_word]
        infer_inputs = [to_infer_tensor(t) for t in infer_inputs]

        infer_config = fluid.core.NativeConfig()
        infer_config.model_dir = save_dirname
        if target == "cuda":
            infer_config.use_gpu = True
            infer_config.device = 0
            infer_config.fraction_of_gpu_memory = 0.15
        elif target == "xpu":
            infer_config.use_xpu = True
        compiled_program = fluid.compiler.CompiledProgram(inference_program)
        compiled_program._with_inference_optimize(infer_config)
        assert compiled_program._is_inference is True
        infer_outputs = exe.run(compiled_program, feed=infer_inputs)
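        # The executor results and the inference-engine outputs should match
        # element-wise within tolerance.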
        np_data = np.array(results[0])
        infer_out = infer_outputs[0].data.float_data()
        for a, b in zip(np_data[0], infer_out):
            assert np.isclose(a, b, rtol=5e-5), "a: {}, b: {}".format(a, b)


def main(target, is_sparse, is_parallel, use_bf16, pure_bf16):
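    # Skip configurations the current build cannot run; otherwise train, run
    # inference from the saved model, and clean up the temporary directory.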
    if target == "cuda" and not fluid.core.is_compiled_with_cuda():
        return
    if target == "xpu" and not fluid.core.is_compiled_with_xpu():
        return

    if use_bf16 and not fluid.core.is_compiled_with_mkldnn():
        return

    temp_dir = tempfile.TemporaryDirectory()
    if not is_parallel:
        save_dirname = os.path.join(temp_dir.name, "word2vec.inference.model")
    else:
        save_dirname = None

    if target == "xpu":
        # This model cannot be trained on XPU for now, so train on CPU
        # and only exercise inference on XPU.
        train("cpu", is_sparse, is_parallel, save_dirname)
    else:
        train(target,
              is_sparse,
              is_parallel,
              save_dirname,
              use_bf16=use_bf16,
              pure_bf16=pure_bf16)
    infer(target, save_dirname)
    temp_dir.cleanup()


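# FULL_TEST toggles the complete test matrix; by default CI runs only a minimal
# subset of the injected tests (see inject_test_method below).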
FULL_TEST = os.getenv('FULL_TEST',
                      '0').lower() in ['true', '1', 't', 'y', 'yes', 'on']
SKIP_REASON = "Only run a minimum number of tests in CI, to make CI faster"


class W2VTest(unittest.TestCase):
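    # Test methods are injected dynamically by inject_test_method below.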
    pass


def inject_test_method(target,
                       is_sparse,
                       is_parallel,
                       use_bf16=False,
                       pure_bf16=False):
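    # Build a uniquely named test method for this (target, sparsity, parallel,
    # bf16) combination and attach it to W2VTest.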
    fn_name = "test_{0}_{1}_{2}{3}".format(
        target, "sparse" if is_sparse else "dense",
        "parallel" if is_parallel else "normal",
        "_purebf16" if pure_bf16 else "_bf16" if use_bf16 else "")

    def __impl__(*args, **kwargs):
        prog = fluid.Program()
        startup_prog = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.scope_guard(scope):
            with fluid.program_guard(prog, startup_prog):
                main(target, is_sparse, is_parallel, use_bf16, pure_bf16)

    if (not fluid.core.is_compiled_with_cuda()
            or target == "cuda") and is_sparse:
        fn = __impl__
    else:
        # skip the other tests when running on the CI server
        fn = unittest.skipUnless(condition=FULL_TEST,
                                 reason=SKIP_REASON)(__impl__)

    setattr(W2VTest, fn_name, fn)


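# Generate the default test matrix; two explicit bf16 CPU variants are added
# below.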
for target in ("cuda", "cpu", "xpu"):
    for is_sparse in (False, True):
        for is_parallel in (False, ):
            inject_test_method(target, is_sparse, is_parallel)
inject_test_method("cpu", False, False, True)
inject_test_method("cpu", False, False, True, True)

if __name__ == '__main__':
    unittest.main()