#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
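
# Tests for the parameter-server DistributeTranspiler: each case builds a
# small single-node program, transpiles it, and asserts the exact op
# sequences and parameter shards that end up in the trainer and pserver
# programs.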

import functools
import gc
import math
import unittest

import numpy as np

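# Surface reference cycles: DEBUG_COLLECTABLE makes the collector report the
# objects it finds to be collectable while the tests tear programs down.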
gc.set_debug(gc.DEBUG_COLLECTABLE)

import paddle
from paddle import fluid


class TranspilerTest(unittest.TestCase):
    def setUp(self):
        self.trainer_id = 0
        self.trainers = 2
        self.pservers = 2
        # NOTE: we do not actually bind this port
        self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175"
        self.pserver1_ep = "127.0.0.1:6174"
        self.pserver2_ep = "127.0.0.1:6175"
        self.sync_mode = True
        self.transpiler = None

    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        sgd_optimizer.minimize(avg_cost)

    def get_main_program(self):
        main = fluid.Program()
        main.random_seed = 1
        with fluid.program_guard(main):
            self.net_conf()
        self.origin_prog = main.clone()
        return main

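    # Transpile the main program and return the trainer-side main and
    # startup programs.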
    def get_trainer(self, config=None, sync_mode=True):
        src = fluid.default_startup_program().clone()

        t = self._transpiler_instance(config, sync_mode=sync_mode)

        trainer_main = t.get_trainer_program(wait_port=False)
        trainer_startup = fluid.default_startup_program()

        assert src.num_blocks == 1
        assert trainer_startup.num_blocks == src.num_blocks

        return trainer_main, trainer_startup

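    # Transpile and return the pserver program for endpoint ep, plus the
    # startup program that initializes that pserver's parameter shards.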
    def get_pserver(self, ep, config=None, sync_mode=True):
        t = self._transpiler_instance(config, sync_mode)
        pserver = t.get_pserver_program(ep)
        startup = t.get_startup_program(ep, pserver)
        return pserver, startup

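    # The transpiler instance is cached: transpile() rewrites the program,
    # so it must run only once per test even when both pserver programs and
    # the trainer program are requested.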
    def _transpiler_instance(self, config=None, sync_mode=True):
        if not self.transpiler:
            main = self.get_main_program()
            self.transpiler = (
                paddle.distributed.transpiler.DistributeTranspiler(
                    config=config
                )
            )
            self.transpiler.transpile(
                self.trainer_id,
                program=main,
                pservers=self.pserver_eps,
                trainers=self.trainers,
                sync_mode=sync_mode,
            )

        return self.transpiler

    def transpiler_test_impl(self):
        pass

    def test_transpiler(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                self.transpiler_test_impl()
        # NOTE: run gc.collect() to release pybind-side objects and prevent
        # random double-deallocation when they are inherited in Python.
        del self.transpiler
        del main
        del startup
        gc.collect()

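# A minimal sketch of the transpiler flow the harness above drives (the
# endpoints reuse the values from TranspilerTest.setUp):
#
#   t = paddle.distributed.transpiler.DistributeTranspiler()
#   t.transpile(0, program=main, pservers="127.0.0.1:6174,127.0.0.1:6175",
#               trainers=2, sync_mode=True)
#   trainer_prog = t.get_trainer_program(wait_port=False)   # on each trainer
#   pserver_prog = t.get_pserver_program("127.0.0.1:6174")  # on one pserver
#   startup_prog = t.get_startup_program("127.0.0.1:6174", pserver_prog)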

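# TestBasicModel checks the default slicing policy: the 1000x1000 fc_w
# weight is split row-wise into fc_w.block0 and fc_w.block1 (500x1000 each),
# one shard per pserver, with gradients routed through send/recv plus
# barrier ops.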
class TestBasicModel(TranspilerTest):
    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        trainer, trainer_startup = self.get_trainer()

        # the split blocks of fc_w should appear in the trainer startup program
        self.assertTrue("fc_w.block0" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w.block1" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w" in trainer_startup.global_block().vars)
        self.assertTrue("fc_b" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w@GRAD" not in trainer_startup.global_block().vars)
        self.assertTrue("fc_b@GRAD" not in trainer_startup.global_block().vars)

        src = [op.type for op in trainer_startup.global_block().ops]
        dst = [
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
        ]

        self.assertEqual(src, dst)

        self.assertEqual(
            [op.type for op in trainer.global_block().ops],
            [
                'mul',
                'elementwise_add',
                'elementwise_sub',
                'square',
                'mean',
                'fill_constant',
                'mean_grad',
                'square_grad',
                'elementwise_sub_grad',
                'elementwise_add_grad',
                'send',
                'mul_grad',
                'split_byref',
                'send',
                'send_barrier',
                'recv',
                'recv',
                'fetch_barrier',
                'concat',
            ],
        )

        self.assertEqual(len(pserver.blocks), 3)
        # block0: listen_and_serv
        self.assertEqual(
            [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]
        )
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]
        )
        # confirm startup program
        self.assertEqual(
            [op.type for op in startup.global_block().ops],
            ["fill_constant", "fill_constant", "uniform_random"],
        )
        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))


class TestBasicModelWithLargeBlockSize(TranspilerTest):
    def transpiler_test_impl(self):
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()
        config.min_block_size = 1048576

        pserver, startup = self.get_pserver(self.pserver1_ep, config)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep, config)

        trainer, _ = self.get_trainer(config)

        self.assertEqual(
            [op.type for op in trainer.global_block().ops],
            [
                'mul',
                'elementwise_add',
                'elementwise_sub',
                'square',
                'mean',
                'fill_constant',
                'mean_grad',
                'square_grad',
                'elementwise_sub_grad',
                'elementwise_add_grad',
                'send',
                'mul_grad',
                'send',
                'send_barrier',
                'recv',
                'recv',
                'fetch_barrier',
            ],
        )

        self.assertEqual(len(pserver.blocks), 2)
        # block0: listen_and_serv
        self.assertEqual(
            [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]
        )
        # block1: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]
        )
        # confirm startup program
        self.assertEqual(
            [op.type for op in startup.global_block().ops],
            ["fill_constant", "fill_constant"],
        )
        # with min_block_size=1048576, fc_w is not split across pservers
        fc_w_var = startup2.global_block().var("fc_w")
        self.assertEqual(fc_w_var.shape, (1000, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))


class TestNoSliceVar(TranspilerTest):
    def setUp(self):
        super().setUp()

    def transpiler_test_impl(self):
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()
        config.slice_var_up = False

        _, startup = self.get_pserver(self.pserver1_ep, config)
        _, startup2 = self.get_pserver(self.pserver2_ep, config)

        if "fc_w" in startup.global_block().vars:
            fc_w_var = startup.global_block().vars["fc_w"]
        elif "fc_w" in startup2.global_block().vars:
            fc_w_var = startup2.global_block().vars["fc_w"]

        self.assertEqual(fc_w_var.shape, (1000, 1000))


class TestLRDecay(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=paddle.optimizer.lr.ExponentialDecay(
                learning_rate=1.0,
                gamma=0.1,
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

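        # 4 blocks: 0 listen_and_serv, 1 the ExponentialDecay schedule, and
        # 2~3 one optimize block per parameter shard on this pserver.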
        self.assertEqual(len(pserver.blocks), 4)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(
            lr_decay_ops,
            [
                "increment",
                "cast",
                "fill_constant",
                "elementwise_div",
                "floor",
                "fill_constant",
                "elementwise_pow",
                "fill_constant",
                "elementwise_mul",
            ],
        )


class TestFakeInit(TranspilerTest):
    def net_conf(self):
        dict_size, embedding_size, neg_num = 10000, 8, 5

        input_word = paddle.static.data(
            name="input_word", shape=[-1, 1], dtype='int64', lod_level=1
        )
        true_word = paddle.static.data(
            name='true_label', shape=[-1, 1], dtype='int64', lod_level=1
        )
        neg_word = paddle.static.data(
            name="neg_label", shape=[-1, 1], dtype='int64', lod_level=1
        )
        inputs = [input_word, true_word, neg_word]

        init_width = 0.5 / embedding_size
        input_emb = fluid.layers.embedding(
            input=inputs[0],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb',
                initializer=paddle.nn.initializer.Uniform(
                    -init_width, init_width
                ),
            ),
        )

        true_emb_w = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w',
                initializer=paddle.nn.initializer.Constant(value=0.0),
            ),
        )

        true_emb_b = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b',
                initializer=paddle.nn.initializer.Constant(value=0.0),
            ),
        )

        neg_word_reshape = paddle.reshape(inputs[2], shape=[-1, 1])
        neg_word_reshape.stop_gradient = True

        neg_emb_w = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0),
        )

        neg_emb_w_re = paddle.reshape(
            neg_emb_w, shape=[-1, neg_num, embedding_size]
        )

        neg_emb_b = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0),
        )

        neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])

        true_logits = paddle.add(
            paddle.sum(
                paddle.multiply(input_emb, true_emb_w),
                axis=1,
                keepdim=True,
            ),
            true_emb_b,
        )

        input_emb_re = paddle.reshape(input_emb, shape=[-1, 1, embedding_size])

        neg_matmul = paddle.matmul(input_emb_re, neg_emb_w_re, transpose_y=True)
        neg_matmul_re = paddle.reshape(neg_matmul, shape=[-1, neg_num])
        neg_logits = paddle.add(neg_matmul_re, neg_emb_b_vec)
        # nce loss
        label_ones = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, 1], value=1.0, dtype='float32'
        )
        label_zeros = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, neg_num], value=0.0, dtype='float32'
        )

        true_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
            true_logits, label_ones
        )
        neg_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
            neg_logits, label_zeros
        )
        cost = paddle.add(
            paddle.sum(true_xent, axis=1),
            paddle.sum(neg_xent, axis=1),
        )
        avg_cost = paddle.mean(cost)

        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=paddle.optimizer.lr.ExponentialDecay(
                learning_rate=1.0,
                gamma=0.1,
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        trainer, startup = self.get_trainer()

        fake_init_ops = []
        for op in startup.global_block().ops:
            if op.type == "fake_init":
                fake_init_ops.append(op)

        self.assertEqual(len(fake_init_ops), 3)


class TestDecayedAdagrad(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.DecayedAdagrad(learning_rate=0.1)
        opt.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()


class TestFtrl(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.Ftrl(learning_rate=0.1)
        opt.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()


class TestLRDecayConditional(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.piecewise_decay(
                [10000, 20000], [1.0, 0.5, 1.0]
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        serv_op = pserver.blocks[0].ops[0]
        sub_blocks = []
        optimize_blocks = []
        for b in serv_op.all_attrs()["optimize_blocks"]:
            optimize_blocks.append(b.idx)
        for b in pserver.blocks:
            if b.idx not in optimize_blocks:
                sub_blocks.append(b.idx)

        self.assertEqual(len(pserver.blocks), 7)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(
            lr_decay_ops,
            [
                "increment",
                "cast",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "conditional_block",
            ],
        )
        # test the condition blocks
        for b in sub_blocks:
            if b == 0:
                continue
            block = pserver.blocks[b]
            self.assertEqual([op.type for op in block.ops], ["assign"])


class TestL2Decay(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(
                name='fc_w', regularizer=paddle.regularizer.L2Decay()
            ),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)

        def filter(param):
            return param.name == "fc_w"

        clip = paddle.nn.ClipGradByValue(0.1, need_clip=filter)
        sgd_optimizer.minimize(avg_cost, grad_clip=clip)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 3)
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            ["sum", "scale", "clip", "sgd"],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[2].ops],
            ["sum", "scale", "clip", "scale", "sum", "sgd"],
        )
        # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer


class TestL2DecayWithPiecewise(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        base_lr = 1.0
        bd = [1, 10, 20, 30]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        sgd_optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr
            ),
            momentum=0.9,
            regularization=paddle.regularizer.L2Decay(1e-4),
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 9)
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            [
                "increment",
                "cast",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "conditional_block",
            ],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[7].ops],
            ["sum", "scale", "scale", "sum", "momentum"],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[8].ops],
            ["sum", "scale", "scale", "sum", "momentum"],
        )


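# With slice_var_up disabled, whole parameters are placed on pservers; a
# pserver that receives no parameter keeps an empty optimize block.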
class TestEmptyPserverOptimizeBlocks(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        # only one parameter
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=False,
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=1.0)
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()
        config.slice_var_up = False

        pserver, startup = self.get_pserver(ep=self.pserver2_ep, config=config)

        self.assertEqual(len(pserver.blocks), 2)
        self.assertEqual(len(pserver.blocks[1].ops), 0)


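# Base network for the lookup-table tests: title and brand ids share one
# embedding table ('shared_w'), while profile ids use a separate
# 'profile_emb' table, so both the shared (optionally distributed) table and
# a plain sparse table are exercised.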
class TestDistLookupTableBase(TranspilerTest):
    def network_with_table(self, is_sparse, is_distributed):
        self.table_size = 1000
        self.emb_size = 64
        self.lookup_table_name = 'shared_w'

        def emb_pool(ids, table_name, is_distributed):
            emb = fluid.layers.embedding(
                input=ids,
                size=[self.table_size, self.emb_size],
                dtype='float32',
                param_attr=table_name,
                is_sparse=is_sparse,
                is_distributed=is_distributed,
            )
            pool = paddle.static.nn.sequence_lod.sequence_pool(
                input=emb, pool_type='average'
            )
            return pool

        title_ids = paddle.static.data(
            name='title_ids', shape=[-1, 1], dtype='int64', lod_level=1
        )
        brand_ids = paddle.static.data(
            name='brand_ids', shape=[-1, 1], dtype='int64', lod_level=1
        )
        profile_ids = paddle.static.data(
            name='profile_ids', shape=[-1, 1], dtype='int64', lod_level=1
        )
        title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed)
        brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed)
        profile_emb = emb_pool(profile_ids, "profile_emb", False)
        fc0 = paddle.concat([title_emb, brand_emb, profile_emb], axis=1)
        predict = paddle.static.nn.fc(
            x=fc0,
            size=2,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )

        label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
        cost = paddle.nn.functional.cross_entropy(
            input=predict, label=label, reduction='none', use_softmax=False
        )
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)


class TestLocalLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestDistLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 3 optimize for table sgd
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops], ["sum", "sgd"]
        )
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual(
            [op.type for op in pserver1.blocks[4].ops],
            ["lookup_sparse_table_read"],
        )
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'split_ids',
            'prefetch',
            'merge_ids',
            'sequence_pool',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_ids',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        startup_ops = [
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'uniform_random',
            'recv',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
            'fake_init',
        ]
        self.assertEqual(
            [op.type for op in trainer_startup.blocks[0].ops], startup_ops
        )


class TestAsyncLocalLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["adam", "scale", "scale"],
        )
        # 3 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'recv',
            'recv',
Q
qiaolongfei 已提交
985 986 987 988
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestAsyncDistLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()

        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["adam", "scale", "scale"],
        )
        # 3 optimize for table sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops], ["sgd"])
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual(
            [op.type for op in pserver1.blocks[4].ops],
            ["lookup_sparse_table_read"],
        )
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'split_ids',
            'prefetch',
            'merge_ids',
            'sequence_pool',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_ids',
            'send',
            'recv',
            'recv',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        startup_ops = [
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
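        # The three sparse embedding tables (emb, emb_w, emb_b) live on the
        # pservers, so the trainer startup program replaces their
        # initializers with fake_init ops.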
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'uniform_random',
            'recv',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
            'fake_init',
        ]
        self.assertEqual(
            [op.type for op in trainer_startup.blocks[0].ops], startup_ops
        )


class TestDistLookupTableSliceSize(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()
        pserver1, _ = self.get_pserver(self.pserver1_ep, config)

        self.assertTrue(self.transpiler.has_distributed_lookup_table)
        lookup_table_var = pserver1.global_block().vars[
            self.transpiler.table_name
        ]
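        # The 1000-row table is sliced evenly across the 2 pservers:
        # ceil(1000 / 2) = 500 rows per endpoint.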
        row_size = lookup_table_var.shape[0]
        calc_row_size = int(math.ceil(self.table_size / self.pservers))
        self.assertEqual(row_size, calc_row_size)


class TestDistArgsInProgram(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

        self.assertTrue(trainer._is_distributed)
        self.assertTrue(trainer._is_chief)
        self.assertEqual(
            trainer._distributed_lookup_table, self.lookup_table_name
        )
        self.assertEqual(
            trainer._endpoints, [self.pserver1_ep, self.pserver2_ep]
        )


class TestRMSPropOptimizer(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        self.assertEqual(len(pserver.blocks), 3)
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            ["sum", "scale", "rmsprop"],
        )
        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        moment_var = startup.global_block().var("momentum_1")
        self.assertEqual(moment_var.shape, (500, 1000))


class TestLoadSliceVar(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, _ = self.get_pserver(self.pserver1_ep)
        pserver2, _ = self.get_pserver(self.pserver2_ep)

        vars_ps1 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver1_ep
        )
        vars_ps2 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver2_ep
        )

        self.assertTrue(vars_ps1)
        self.assertTrue(vars_ps2)

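        # For every variable, the element counts of the slices held by the
        # two pservers must add up to the element count of the origin var.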
        for idx in range(len(vars_ps1)):
            total_numel = 0
            ps1_numel, ps2_numel = 0, 0

            ps1_var = vars_ps1[idx]

            if not ps1_var.is_slice:
                total_numel = functools.reduce(
                    lambda x, y: x * y, vars_ps1[idx].origin.shape
                )
                ps1_numel = functools.reduce(
                    lambda x, y: x * y, vars_ps1[idx].slice.shape
                )
            else:
                ps2_var = None
                for var in vars_ps2:
                    if var.origin.name == ps1_var.origin.name:
                        ps2_var = var
                        break

                total_numel = functools.reduce(
                    lambda x, y: x * y, ps1_var.origin.shape
                )
                ps1_numel = functools.reduce(
                    lambda x, y: x * y, ps1_var.slice.shape
                )
                ps2_numel = functools.reduce(
                    lambda x, y: x * y, ps2_var.slice.shape
                )

            self.assertEqual(total_numel, ps1_numel + ps2_numel)


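# "nccl2" mode rewrites only the startup program: the test asserts that its
# last op is gen_nccl_id and that it defines the NCCLID variable.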
class TestNCCL2Transpile(TranspilerTest):
    def test_nccl2_transpile(self):
        if fluid.core.is_compiled_with_cuda():  # test nccl2 only with cuda
            main = fluid.Program()
            startup = fluid.Program()
            with fluid.program_guard(main, startup):
                self.net_conf()

            config = paddle.distributed.transpiler.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.wait_port = False
            t = paddle.distributed.transpiler.DistributeTranspiler(
                config=config
            )
            t.transpile(
                0,
                trainers="127.0.0.1:6174,127.0.0.1:6175",
                current_endpoint="127.0.0.1:6174",
                startup_program=startup,
            )
            print([op.type for op in startup.global_block().ops])
            self.assertEqual(startup.global_block().ops[-1].type, "gen_nccl_id")
            self.assertIsNotNone(startup.global_block().vars.get("NCCLID"))
            gc.collect()
        else:
            pass


# test for remote prefetch
class TestRemoteLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


# test for remote prefetch
class TestRemoteNce(TestDistLookupTableBase):
    def network_with_table(self, is_sparse, is_distributed):
        num_total_classes = 20
        sampler = "uniform"
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')

        input = paddle.static.data(
            name="input", shape=[-1, 10], dtype="float32"
        )
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")

        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='nce_w',
                initializer=paddle.nn.initializer.Constant(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 1],
                dtype='float32',
                name='nce_b',
                initializer=paddle.nn.initializer.Constant(),
            )
        )

        cost = paddle.static.nn.nce(
            input=input,
            label=label,
            num_total_classes=num_total_classes,
            sampler=sampler,
            custom_dist=nid_freq_arr.tolist(),
            sample_weight=None,
            param_attr='nce_w',
            bias_attr='nce_b',
            seed=1,
            num_neg_samples=5,
            is_sparse=is_sparse,
        )
        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

        out_vars = ["nce_w"]
        in_vars = ["nce_b"]

        recv_var_names = []

        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                for var in op.output("Out"):
                    recv_var_names.append(var)

        for out_var in out_vars:
            self.assertFalse(out_var in recv_var_names)
        for in_var in in_vars:
            self.assertTrue(in_var in recv_var_names)


# test for remote prefetch
class TestRemoteHsigmoid(TestDistLookupTableBase):
    def network_with_table(self, is_sparse, is_distributed):
        num_total_classes = 3

        input = paddle.static.data(name="input", shape=[-1, 1], dtype="float32")
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
        path_table = paddle.static.data(
            name='path_table', shape=[-1, 3], dtype='int64'
        )
        path_code = paddle.static.data(
            name='path_code', shape=[-1, 3], dtype='int64'
        )
        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='hs_w',
                initializer=paddle.nn.initializer.Constant(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[3, 1],
                dtype='float32',
                name='hs_b',
                initializer=paddle.nn.initializer.Constant(),
            )
        )

        emb = fluid.layers.embedding(
            input=input,
            is_sparse=is_sparse,
            size=[3, 3],
            param_attr=fluid.ParamAttr(
                initializer=paddle.nn.initializer.Normal(
                    std=1 / math.sqrt(num_total_classes)
                )
            ),
        )

        loss = paddle.nn.HSigmoidLoss(
            feature_size=emb.shape[1],
            num_classes=num_total_classes,
            is_custom=True,
            is_sparse=is_sparse,
        )

        cost = loss(
1445 1446 1447 1448 1449
            input=emb,
            label=label,
            path_table=path_table,
            path_code=path_code,
        )

        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.SGD(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()
        params_to_check = []
        for op in trainer.blocks[0].ops:
            if op.type == "hierarchical_sigmoid":
                params_to_check = [op.input("W")[0], op.input("Bias")[0]]
                # "num_classes" is assumed here: the original listed "epmap"
                # twice, leaving the == 3 branch below unreachable
                for name in ["epmap", "table_names", "num_classes"]:
                    assert op.has_attr(name)
                    if name == "epmap":
                        assert op.attr(name)[0] == '127.0.0.1:6174'
                    elif name == "table_names":
                        assert op.attr(name)[0] == 'hierarchical_sigmoid_0.w_0'
                    else:
                        assert op.attr(name) == 3
            elif op.type == "lookup_table":
                params_to_check.append(op.input("W")[0])
            else:
                pass
        op_count = 0
        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                assert len(op.output("Out")) == 1
                assert op.output("Out")[0] == 'hierarchical_sigmoid_0.b_0'
                op_count += 1
        assert op_count == 1


if __name__ == "__main__":
    unittest.main()