#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import gc
import math
import unittest

import numpy as np

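# Print objects the collector finds collectable, so leaked program or
# transpiler objects show up in the test output.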
gc.set_debug(gc.DEBUG_COLLECTABLE)

import paddle
import paddle.fluid as fluid


class TranspilerTest(unittest.TestCase):
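    """Harness shared by the tests below: build a small fc regression net,
    run the distribute transpiler over it, and expose the resulting trainer
    and pserver programs for inspection."""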
    def setUp(self):
        self.trainer_id = 0
        self.trainers = 2
        self.pservers = 2
        # NOTE: we do not actually bind this port
        self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175"
        self.pserver1_ep = "127.0.0.1:6174"
        self.pserver2_ep = "127.0.0.1:6175"
        self.sync_mode = True
        self.transpiler = None

    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        sgd_optimizer.minimize(avg_cost)

    def get_main_program(self):
        main = fluid.Program()
        main.random_seed = 1
        with fluid.program_guard(main):
            self.net_conf()
        self.origin_prog = main.clone()
        return main

    def get_trainer(self, config=None, sync_mode=True):
        src = fluid.default_startup_program().clone()

        t = self._transpiler_instance(config, sync_mode=sync_mode)

        trainer_main = t.get_trainer_program(wait_port=False)
        trainer_startup = fluid.default_startup_program()

        assert src.num_blocks == 1
        assert trainer_startup.num_blocks == src.num_blocks

        return trainer_main, trainer_startup

    def get_pserver(self, ep, config=None, sync_mode=True):
        t = self._transpiler_instance(config, sync_mode)
        pserver = t.get_pserver_program(ep)
        startup = t.get_startup_program(ep, pserver)
        return pserver, startup

    def _transpiler_instance(self, config=None, sync_mode=True):
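        # The transpiler instance is cached so that get_pserver() and
        # get_trainer() both see the same transpiled program.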
        if not self.transpiler:
            main = self.get_main_program()
            self.transpiler = fluid.DistributeTranspiler(config=config)
            self.transpiler.transpile(
                self.trainer_id,
                program=main,
                pservers=self.pserver_eps,
                trainers=self.trainers,
                sync_mode=sync_mode,
            )

        return self.transpiler

    def transpiler_test_impl(self):
        pass

    def test_transpiler(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                self.transpiler_test_impl()
        # NOTE: run gc.collect to eliminate pybind side objects to
        # prevent random double-deallocate when inherited in python.
        del self.transpiler
        del main
        del startup
        gc.collect()


class TestBasicModel(TranspilerTest):
    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        trainer, trainer_startup = self.get_trainer()

        # split var blocks should be in startup program
        self.assertTrue("fc_w.block0" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w.block1" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w" in trainer_startup.global_block().vars)
        self.assertTrue("fc_b" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w@GRAD" not in trainer_startup.global_block().vars)
        self.assertTrue("fc_b@GRAD" not in trainer_startup.global_block().vars)

        src = [op.type for op in trainer_startup.global_block().ops]
        dst = [
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
        ]

        self.assertEqual(src, dst)

        self.assertEqual(
            [op.type for op in trainer.global_block().ops],
            [
                'mul',
                'elementwise_add',
                'elementwise_sub',
                'square',
                'mean',
                'fill_constant',
                'mean_grad',
                'square_grad',
                'elementwise_sub_grad',
                'elementwise_add_grad',
                'send',
                'mul_grad',
                'split_byref',
                'send',
                'send_barrier',
                'recv',
                'recv',
                'fetch_barrier',
                'concat',
            ],
        )

        self.assertEqual(len(pserver.blocks), 3)
        # block0: listen_and_serv
        self.assertEqual(
            [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]
        )
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]
        )
        # confirm startup program
        self.assertEqual(
            [op.type for op in startup.global_block().ops],
            ["fill_constant", "fill_constant", "uniform_random"],
        )
        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))


class TestBasicModelWithLargeBlockSize(TranspilerTest):
    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
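        # min_block_size is counted in elements; 2**20 is larger than fc_w's
        # 1000 * 1000 elements, so no parameter gets sliced into blocks here.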
        config.min_block_size = 1048576

        pserver, startup = self.get_pserver(self.pserver1_ep, config)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep, config)

        trainer, _ = self.get_trainer(config)

        self.assertEqual(
            [op.type for op in trainer.global_block().ops],
            [
                'mul',
                'elementwise_add',
                'elementwise_sub',
                'square',
                'mean',
                'fill_constant',
                'mean_grad',
                'square_grad',
                'elementwise_sub_grad',
                'elementwise_add_grad',
                'send',
                'mul_grad',
                'send',
                'send_barrier',
                'recv',
                'recv',
                'fetch_barrier',
            ],
        )

        self.assertEqual(len(pserver.blocks), 2)
        # block0: listen_and_serv
        self.assertEqual(
            [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]
        )
        # block1: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]
        )
        # confirm startup program
        self.assertEqual(
            [op.type for op in startup.global_block().ops],
            ["fill_constant", "fill_constant"],
        )
        # with min_block_size = 1048576, fc_w is below the slicing threshold,
        # so it is kept whole on one pserver
        fc_w_var = startup2.global_block().var("fc_w")
        self.assertEqual(fc_w_var.shape, (1000, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))


class TestNoSliceVar(TranspilerTest):
    def setUp(self):
        super().setUp()

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
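        # slice_var_up=False keeps every variable whole, so each parameter
        # lands on exactly one of the two pservers.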
        config.slice_var_up = False

        _, startup = self.get_pserver(self.pserver1_ep, config)
        _, startup2 = self.get_pserver(self.pserver2_ep, config)

        if "fc_w" in startup.global_block().vars:
            fc_w_var = startup.global_block().vars["fc_w"]
        elif "fc_w" in startup2.global_block().vars:
            fc_w_var = startup2.global_block().vars["fc_w"]

        self.assertEqual(fc_w_var.shape, (1000, 1000))


class TestLRDecay(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True,
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 4)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(
            lr_decay_ops,
            [
                "increment",
                "cast",
                "fill_constant",
                "elementwise_div",
                "floor",
                "fill_constant",
                "elementwise_pow",
                "fill_constant",
                "elementwise_mul",
            ],
        )


class TestFakeInit(TranspilerTest):
    def net_conf(self):
        dict_size, embedding_size, neg_num = 10000, 8, 5

        input_word = paddle.static.data(
            name="input_word", shape=[-1, 1], dtype='int64', lod_level=1
        )
        true_word = paddle.static.data(
            name='true_label', shape=[-1, 1], dtype='int64', lod_level=1
        )
        neg_word = paddle.static.data(
            name="neg_label", shape=[-1, 1], dtype='int64', lod_level=1
        )
        inputs = [input_word, true_word, neg_word]

        init_width = 0.5 / embedding_size
        input_emb = fluid.layers.embedding(
            input=inputs[0],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb',
                initializer=fluid.initializer.Uniform(-init_width, init_width),
            ),
        )

        true_emb_w = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w', initializer=fluid.initializer.Constant(value=0.0)
            ),
        )

        true_emb_b = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b', initializer=fluid.initializer.Constant(value=0.0)
            ),
        )

        neg_word_reshape = paddle.reshape(inputs[2], shape=[-1, 1])
        neg_word_reshape.stop_gradient = True

        neg_emb_w = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0),
        )

        neg_emb_w_re = paddle.reshape(
            neg_emb_w, shape=[-1, neg_num, embedding_size]
        )

        neg_emb_b = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0),
        )

        neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])

        true_logits = paddle.add(
            paddle.sum(
                paddle.multiply(input_emb, true_emb_w),
                axis=1,
                keepdim=True,
            ),
            true_emb_b,
        )

        input_emb_re = paddle.reshape(input_emb, shape=[-1, 1, embedding_size])

        neg_matmul = paddle.matmul(input_emb_re, neg_emb_w_re, transpose_y=True)
        neg_matmul_re = paddle.reshape(neg_matmul, shape=[-1, neg_num])
        neg_logits = paddle.add(neg_matmul_re, neg_emb_b_vec)
        # nce loss
        label_ones = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, 1], value=1.0, dtype='float32'
        )
        label_zeros = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, neg_num], value=0.0, dtype='float32'
        )

        true_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
            true_logits, label_ones
        )
        neg_xent = paddle.nn.functional.binary_cross_entropy_with_logits(
            neg_logits, label_zeros
        )
        cost = paddle.add(
            paddle.sum(true_xent, axis=1),
            paddle.sum(neg_xent, axis=1),
        )
        avg_cost = paddle.mean(cost)

        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True,
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        trainer, startup = self.get_trainer()

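        # The sparse embedding tables (emb, emb_w, emb_b) are initialized on
        # the pservers, so the trainer startup program should hold a fake_init
        # op for each of them instead of a real initializer.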
        fake_init_ops = []
        for op in startup.global_block().ops:
            if op.type == "fake_init":
                fake_init_ops.append(op)

        self.assertEqual(len(fake_init_ops), 3)


class TestDecayedAdagrad(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.DecayedAdagrad(learning_rate=0.1)
        opt.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()


class TestFtrl(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.Ftrl(learning_rate=0.1)
        opt.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()


class TestLRDecayConditional(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.piecewise_decay(
                [10000, 20000], [1.0, 0.5, 1.0]
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        serv_op = pserver.blocks[0].ops[0]
        sub_blocks = []
        optimize_blocks = []
        for b in serv_op.all_attrs()["optimize_blocks"]:
            optimize_blocks.append(b.idx)
        for b in pserver.blocks:
            if b.idx not in optimize_blocks:
                sub_blocks.append(b.idx)

        self.assertEqual(len(pserver.blocks), 7)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(
            lr_decay_ops,
            [
                "increment",
                "cast",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "conditional_block",
            ],
        )
        # test the condition blocks
        for b in sub_blocks:
            if b == 0:
                continue
            block = pserver.blocks[b]
            self.assertEqual([op.type for op in block.ops], ["assign"])


class TestL2Decay(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(
                name='fc_w', regularizer=fluid.regularizer.L2Decay()
            ),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)

        def filter(param):
            return param.name == "fc_w"

        clip = paddle.nn.ClipGradByValue(0.1, need_clip=filter)
        sgd_optimizer.minimize(avg_cost, grad_clip=clip)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 3)
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            ["sum", "scale", "clip", "sgd"],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[2].ops],
            ["sum", "scale", "clip", "scale", "sum", "sgd"],
        )
        # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer


class TestL2DecayWithPiecewise(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        base_lr = 1.0
        bd = [1, 10, 20, 30]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        sgd_optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr
            ),
            momentum=0.9,
            regularization=fluid.regularizer.L2Decay(1e-4),
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 9)
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            [
                "increment",
                "cast",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "conditional_block",
            ],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[7].ops],
            ["sum", "scale", "scale", "sum", "momentum"],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[8].ops],
            ["sum", "scale", "scale", "sum", "momentum"],
        )


class TestEmptyPserverOptimizeBlocks(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        # only one parameter
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=False,
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=1.0)
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        pserver, startup = self.get_pserver(ep=self.pserver2_ep, config=config)

        self.assertEqual(len(pserver.blocks), 2)
        self.assertEqual(len(pserver.blocks[1].ops), 0)


class TestDistLookupTableBase(TranspilerTest):
    def network_with_table(self, is_sparse, is_distributed):
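        # title and brand ids share one lookup table (shared_w) while the
        # profile ids use a separate table (profile_emb), so the transpiler
        # has to handle both shared and private tables.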
        self.table_size = 1000
        self.emb_size = 64
        self.lookup_table_name = 'shared_w'

        def emb_pool(ids, table_name, is_distributed):
            emb = fluid.layers.embedding(
                input=ids,
                size=[self.table_size, self.emb_size],
                dtype='float32',
                param_attr=table_name,
                is_sparse=is_sparse,
                is_distributed=is_distributed,
            )
            pool = fluid.layers.sequence_pool(input=emb, pool_type='average')
            return pool

        title_ids = paddle.static.data(
            name='title_ids', shape=[-1, 1], dtype='int64', lod_level=1
        )
        brand_ids = paddle.static.data(
            name='brand_ids', shape=[-1, 1], dtype='int64', lod_level=1
        )
        profile_ids = paddle.static.data(
            name='profile_ids', shape=[-1, 1], dtype='int64', lod_level=1
        )
        title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed)
        brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed)
        profile_emb = emb_pool(profile_ids, "profile_emb", False)
        fc0 = fluid.layers.concat(
            input=[title_emb, brand_emb, profile_emb], axis=1
        )
        predict = paddle.static.nn.fc(
            x=fc0,
            size=2,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )

        label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
        cost = paddle.nn.functional.cross_entropy(
            input=predict, label=label, reduction='none', use_softmax=False
        )
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)


class TestLocalLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestDistLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 3 optimize for table sgd
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops], ["sum", "sgd"]
        )
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual(
            [op.type for op in pserver1.blocks[4].ops],
            ["lookup_sparse_table_read"],
        )
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'split_ids',
            'prefetch',
            'merge_ids',
            'sequence_pool',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_ids',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        startup_ops = [
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'uniform_random',
            'recv',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
            'fake_init',
        ]
        self.assertEqual(
            [op.type for op in trainer_startup.blocks[0].ops], startup_ops
        )


class TestAsyncLocalLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["adam", "scale", "scale"],
        )
        # 3 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'recv',
            'recv',
Q
qiaolongfei 已提交
981 982 983 984
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestAsyncDistLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()

        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["adam", "scale", "scale"],
        )
        # 3 optimize for table sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops], ["sgd"])
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual(
            [op.type for op in pserver1.blocks[4].ops],
            ["lookup_sparse_table_read"],
        )
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'split_ids',
            'prefetch',
            'merge_ids',
            'sequence_pool',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_ids',
            'send',
            'recv',
            'recv',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        startup_ops = [
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'uniform_random',
            'recv',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
            'fake_init',
        ]
        self.assertEqual(
            [op.type for op in trainer_startup.blocks[0].ops], startup_ops
        )


class TestDistLookupTableSliceSize(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        pserver1, _ = self.get_pserver(self.pserver1_ep, config)

        self.assertTrue(self.transpiler.has_distributed_lookup_table)
        lookup_table_var = pserver1.global_block().vars[
            self.transpiler.table_name
        ]
        row_size = lookup_table_var.shape[0]
        calc_row_size = int(math.ceil(self.table_size / self.pservers))
        self.assertEqual(row_size, calc_row_size)


class TestDistArgsInProgram(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

        self.assertTrue(trainer._is_distributed)
        self.assertTrue(trainer._is_chief)
        self.assertEqual(
            trainer._distributed_lookup_table, self.lookup_table_name
        )
        self.assertEqual(
            trainer._endpoints, [self.pserver1_ep, self.pserver2_ep]
        )


class TestRMSPropOptimizer(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        self.assertEqual(len(pserver.blocks), 3)
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            ["sum", "scale", "rmsprop"],
        )
        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        moment_var = startup.global_block().var("momentum_1")
        self.assertEqual(moment_var.shape, (500, 1000))


class TestLoadSliceVar(TranspilerTest):
    def net_conf(self):
        x = paddle.static.data(name='x', shape=[-1, 1000], dtype='float32')
        y_predict = paddle.static.nn.fc(
            x,
            size=1000,
            weight_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32')
        cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, _ = self.get_pserver(self.pserver1_ep)
        pserver2, _ = self.get_pserver(self.pserver2_ep)

        vars_ps1 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver1_ep
        )
        vars_ps2 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver2_ep
        )

        self.assertTrue(vars_ps1)
        self.assertTrue(vars_ps2)

        for idx in range(len(vars_ps1)):
            total_numel = 0
            ps1_numel, ps2_numel = 0, 0

            ps1_var = vars_ps1[idx]

            if not ps1_var.is_slice:
1185 1186 1187 1188 1189 1190
                total_numel = functools.reduce(
                    lambda x, y: x * y, vars_ps1[idx].origin.shape
                )
                ps1_numel = functools.reduce(
                    lambda x, y: x * y, vars_ps1[idx].slice.shape
                )
1191 1192 1193 1194 1195 1196 1197
            else:
                ps2_var = None
                for var in vars_ps2:
                    if var.origin.name == ps1_var.origin.name:
                        ps2_var = var
                        break

                total_numel = functools.reduce(
                    lambda x, y: x * y, ps1_var.origin.shape
                )
                ps1_numel = functools.reduce(
                    lambda x, y: x * y, ps1_var.slice.shape
                )
                ps2_numel = functools.reduce(
                    lambda x, y: x * y, ps2_var.slice.shape
                )

            self.assertEqual(total_numel, ps1_numel + ps2_numel)


class TestNCCL2Transpile(TranspilerTest):
    def test_nccl2_transpile(self):
        if fluid.core.is_compiled_with_cuda():  # test nccl2 only with cuda
            main = fluid.Program()
            startup = fluid.Program()
            with fluid.program_guard(main, startup):
                self.net_conf()

            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
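            # nccl2 mode rewrites only the startup program; the asserts below
            # check that it appends a gen_nccl_id op and an NCCLID variable.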
            config.wait_port = False
            t = fluid.DistributeTranspiler(config=config)
            t.transpile(
                0,
                trainers="127.0.0.1:6174,127.0.0.1:6175",
                current_endpoint="127.0.0.1:6174",
                startup_program=startup,
            )
            print([op.type for op in startup.global_block().ops])
            self.assertEqual(startup.global_block().ops[-1].type, "gen_nccl_id")
            self.assertIsNotNone(startup.global_block().vars.get("NCCLID"))
            gc.collect()
        else:
            pass


# test for remote prefetch
class TestRemoteLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


# test for remote prefetch
class TestRemoteNce(TestDistLookupTableBase):
    def network_with_table(self, is_sparse, is_distributed):

        num_total_classes = 20
        sampler = "uniform"
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')

        input = paddle.static.data(
            name="input", shape=[-1, 10], dtype="float32"
        )
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")

        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='nce_w',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 1],
                dtype='float32',
                name='nce_b',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )

        cost = paddle.static.nn.nce(
            input=input,
            label=label,
            num_total_classes=num_total_classes,
            sampler=sampler,
            custom_dist=nid_freq_arr.tolist(),
            sample_weight=None,
            param_attr='nce_w',
            bias_attr='nce_b',
            seed=1,
            num_neg_samples=5,
            is_sparse=is_sparse,
        )
        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

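        # nce_w is fetched with remote prefetch, so it must not show up as a
        # recv target; nce_b is still pulled back whole through recv.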
        out_vars = ["nce_w"]
        in_vars = ["nce_b"]

        recv_var_names = []

        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                for var in op.output("Out"):
                    recv_var_names.append(var)

        for out_var in out_vars:
            self.assertFalse(out_var in recv_var_names)
        for in_var in in_vars:
            self.assertTrue(in_var in recv_var_names)


# test for remote prefetch
class TestRemoteHsigmoid(TestDistLookupTableBase):
    def network_with_table(self, is_sparse, is_distributed):

        num_total_classes = 3

        input = paddle.static.data(name="input", shape=[-1, 1], dtype="float32")
        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64")
        path_table = paddle.static.data(
            name='path_table', shape=[-1, 3], dtype='int64'
        )
        path_code = paddle.static.data(
            name='path_code', shape=[-1, 3], dtype='int64'
        )
        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='hs_w',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[3, 1],
                dtype='float32',
                name='hs_b',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )

        emb = fluid.layers.embedding(
            input=input,
            is_sparse=is_sparse,
            size=[3, 3],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Normal(
                    scale=1 / math.sqrt(num_total_classes)
                )
            ),
        )

        loss = paddle.nn.HSigmoidLoss(
            feature_size=emb.shape[1],
            num_classes=num_total_classes,
            is_custom=True,
            is_sparse=is_sparse,
        )

        cost = loss(
            input=emb,
            label=label,
            path_table=path_table,
            path_code=path_code,
        )

        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.SGD(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()
        params_to_check = []
        for op in trainer.blocks[0].ops:
            if op.type == "hierarchical_sigmoid":
                params_to_check = [op.input("W")[0], op.input("Bias")[0]]
                for name in ["epmap", "table_names", "num_classes"]:
                    assert op.has_attr(name)
                    if name == "epmap":
                        assert op.attr(name)[0] == '127.0.0.1:6174'
                    elif name == "table_names":
                        assert op.attr(name)[0] == 'hierarchical_sigmoid_0.w_0'
                    else:
                        assert op.attr(name) == 3
            elif op.type == "lookup_table":
                params_to_check.append(op.input("W")[0])
            else:
                pass
        op_count = 0
        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                assert len(op.output("Out")) == 1
                assert op.output("Out")[0] == 'hierarchical_sigmoid_0.b_0'
                op_count += 1
        assert op_count == 1


if __name__ == "__main__":
    unittest.main()