#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

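# These tests exercise fluid.DistributeTranspiler, which rewrites a
# single-process Program into per-role programs for distributed training:
# a trainer program (gaining send/recv/barrier ops around the backward
# pass) and a parameter-server program (a listen_and_serv block plus one
# optimize block per parameter shard). Each test below transpiles a small
# network and asserts on the resulting op lists, block counts, and
# variable shapes.
#
# Typical usage, as mirrored by the TranspilerTest harness (sketch):
#
#     t = fluid.DistributeTranspiler()
#     t.transpile(trainer_id, program=main, pservers=endpoints, trainers=2)
#     trainer_prog = t.get_trainer_program()
#     pserver_prog = t.get_pserver_program(current_endpoint)
#     pserver_startup = t.get_startup_program(current_endpoint, pserver_prog)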
import math

import functools
import unittest
import numpy as np

import gc

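# DEBUG_COLLECTABLE makes the garbage collector report objects it finds
# collectable, which helps surface transpiled Program objects kept alive
# by reference cycles during test teardown.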
gc.set_debug(gc.DEBUG_COLLECTABLE)

import paddle
import paddle.fluid as fluid


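# Base harness for all transpiler tests: net_conf() builds a 1000-wide fc
# regression net, _transpiler_instance() transpiles it once per test (the
# instance is cached), and subclasses override net_conf() and
# transpiler_test_impl() to inspect the trainer/pserver programs.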
class TranspilerTest(unittest.TestCase):
    def setUp(self):
        self.trainer_id = 0
        self.trainers = 2
        self.pservers = 2
        # NOTE: we do not actually bind this port
        self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175"
        self.pserver1_ep = "127.0.0.1:6174"
        self.pserver2_ep = "127.0.0.1:6175"
        self.sync_mode = True
        self.transpiler = None

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        sgd_optimizer.minimize(avg_cost)

    def get_main_program(self):
        main = fluid.Program()
        main.random_seed = 1
        with fluid.program_guard(main):
            self.net_conf()
        self.origin_prog = main.clone()
        return main

    def get_trainer(self, config=None, sync_mode=True):
        src = fluid.default_startup_program().clone()

        t = self._transpiler_instance(config, sync_mode=sync_mode)

        trainer_main = t.get_trainer_program(wait_port=False)
        trainer_startup = fluid.default_startup_program()

        assert src.num_blocks == 1
        assert trainer_startup.num_blocks == src.num_blocks

        return trainer_main, trainer_startup

    def get_pserver(self, ep, config=None, sync_mode=True):
        t = self._transpiler_instance(config, sync_mode)
        pserver = t.get_pserver_program(ep)
        startup = t.get_startup_program(ep, pserver)
        return pserver, startup

    def _transpiler_instance(self, config=None, sync_mode=True):
        if not self.transpiler:
            main = self.get_main_program()
            self.transpiler = fluid.DistributeTranspiler(config=config)
            self.transpiler.transpile(
                self.trainer_id,
                program=main,
                pservers=self.pserver_eps,
                trainers=self.trainers,
                sync_mode=sync_mode,
            )

        return self.transpiler

    def transpiler_test_impl(self):
        pass

    def test_transpiler(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                self.transpiler_test_impl()
        # NOTE: run gc.collect to release pybind-side objects and prevent a
        # random double-deallocation when they are inherited in Python.
        del self.transpiler
        del main
        del startup
        gc.collect()


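# With the default config the (1000, 1000) fc_w is sliced into two
# (500, 1000) blocks, one per pserver, and the trainer program gains
# send/send_barrier/recv/fetch_barrier/concat ops around the backward pass.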
class TestBasicModel(TranspilerTest):
    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        trainer, trainer_startup = self.get_trainer()

        # split var blocks should be present in the trainer startup program
        self.assertTrue("fc_w.block0" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w.block1" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w" in trainer_startup.global_block().vars)
        self.assertTrue("fc_b" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w@GRAD" not in trainer_startup.global_block().vars)
        self.assertTrue("fc_b@GRAD" not in trainer_startup.global_block().vars)

        src = [op.type for op in trainer_startup.global_block().ops]
        dst = [
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
        ]

        self.assertEqual(src, dst)

        self.assertEqual(
            [op.type for op in trainer.global_block().ops],
            [
                'mul',
                'elementwise_add',
                'elementwise_sub',
                'square',
                'mean',
                'fill_constant',
                'mean_grad',
                'square_grad',
                'elementwise_sub_grad',
                'elementwise_add_grad',
                'send',
                'mul_grad',
                'split_byref',
                'send',
                'send_barrier',
                'recv',
                'recv',
                'fetch_barrier',
                'concat',
            ],
        )

        self.assertEqual(len(pserver.blocks), 3)
        # block0: listen_and_serv
        self.assertEqual(
            [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]
        )
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]
        )
        # confirm startup program
        self.assertEqual(
            [op.type for op in startup.global_block().ops],
            ["fill_constant", "fill_constant", "uniform_random"],
        )
        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))


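# A min_block_size of 1048576 elements exceeds every parameter here, so
# nothing is sliced: the trainer needs no split_byref/concat ops and fc_w
# keeps its full (1000, 1000) shape on a single pserver.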
class TestBasicModelWithLargeBlockSize(TranspilerTest):
    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.min_block_size = 1048576

        pserver, startup = self.get_pserver(self.pserver1_ep, config)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep, config)

        trainer, _ = self.get_trainer(config)

        self.assertEqual(
            [op.type for op in trainer.global_block().ops],
            [
                'mul',
                'elementwise_add',
                'elementwise_sub',
                'square',
                'mean',
                'fill_constant',
                'mean_grad',
                'square_grad',
                'elementwise_sub_grad',
                'elementwise_add_grad',
                'send',
                'mul_grad',
                'send',
                'send_barrier',
                'recv',
                'recv',
                'fetch_barrier',
            ],
        )

        self.assertEqual(len(pserver.blocks), 2)
        # block0: listen_and_serv
        self.assertEqual(
            [op.type for op in pserver.blocks[0].ops], ["listen_and_serv"]
        )
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops], ["sum", "scale", "sgd"]
        )
        # confirm startup program
        self.assertEqual(
            [op.type for op in startup.global_block().ops],
            ["fill_constant", "fill_constant"],
        )
        # with the large min_block_size, fc_w is not split and keeps its full shape
        fc_w_var = startup2.global_block().var("fc_w")
        self.assertEqual(fc_w_var.shape, (1000, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))


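# slice_var_up=False disables parameter slicing entirely: each parameter
# is placed whole on one of the pservers, so fc_w keeps its (1000, 1000)
# shape on whichever endpoint received it.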
class TestNoSliceVar(TranspilerTest):
    def setUp(self):
        super().setUp()

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        _, startup = self.get_pserver(self.pserver1_ep, config)
        _, startup2 = self.get_pserver(self.pserver2_ep, config)

        if "fc_w" in startup.global_block().vars:
            fc_w_var = startup.global_block().vars["fc_w"]
        elif "fc_w" in startup2.global_block().vars:
            fc_w_var = startup2.global_block().vars["fc_w"]

        self.assertEqual(fc_w_var.shape, (1000, 1000))


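# A learning-rate schedule is itself a small sub-graph: exponential_decay
# gives the pserver a dedicated block (4 blocks in total) whose ops
# recompute the learning rate from the global step counter.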
class TestLRDecay(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True,
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 4)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(
            lr_decay_ops,
            [
                "increment",
                "cast",
                "fill_constant",
                "elementwise_div",
                "floor",
                "fill_constant",
                "elementwise_pow",
                "fill_constant",
                "elementwise_mul",
            ],
        )


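# For a sparse embedding table the trainer never holds the full parameter,
# so the transpiler replaces the table initializers in the trainer startup
# program with fake_init ops (three of them here: emb, emb_w, emb_b).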
class TestFakeInit(TranspilerTest):
    def net_conf(self):
        dict_size, embedding_size, neg_num = 10000, 8, 5

        input_word = fluid.layers.data(
            name="input_word", shape=[1], dtype='int64', lod_level=1
        )
        true_word = fluid.layers.data(
            name='true_label', shape=[1], dtype='int64', lod_level=1
        )
        neg_word = fluid.layers.data(
            name="neg_label", shape=[1], dtype='int64', lod_level=1
        )
        inputs = [input_word, true_word, neg_word]

        init_width = 0.5 / embedding_size
        input_emb = fluid.layers.embedding(
            input=inputs[0],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb',
                initializer=fluid.initializer.Uniform(-init_width, init_width),
            ),
        )

        true_emb_w = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w', initializer=fluid.initializer.Constant(value=0.0)
            ),
        )

        true_emb_b = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b', initializer=fluid.initializer.Constant(value=0.0)
            ),
        )

        neg_word_reshape = paddle.reshape(inputs[2], shape=[-1, 1])
        neg_word_reshape.stop_gradient = True

        neg_emb_w = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0),
        )

        neg_emb_w_re = paddle.reshape(
            neg_emb_w, shape=[-1, neg_num, embedding_size]
        )

        neg_emb_b = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0),
        )

        neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num])

        true_logits = fluid.layers.elementwise_add(
            fluid.layers.reduce_sum(
                fluid.layers.elementwise_mul(input_emb, true_emb_w),
                dim=1,
                keep_dim=True,
            ),
            true_emb_b,
        )

        input_emb_re = paddle.reshape(input_emb, shape=[-1, 1, embedding_size])

        neg_matmul = fluid.layers.matmul(
            input_emb_re, neg_emb_w_re, transpose_y=True
        )
        neg_matmul_re = paddle.reshape(neg_matmul, shape=[-1, neg_num])
        neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
        # nce loss
        label_ones = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, 1], value=1.0, dtype='float32'
        )
        label_zeros = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, neg_num], value=0.0, dtype='float32'
        )

        true_xent = fluid.layers.sigmoid_cross_entropy_with_logits(
            true_logits, label_ones
        )
        neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits(
            neg_logits, label_zeros
        )
        cost = fluid.layers.elementwise_add(
            fluid.layers.reduce_sum(true_xent, dim=1),
            fluid.layers.reduce_sum(neg_xent, dim=1),
        )
        avg_cost = fluid.layers.reduce_mean(cost)

        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True,
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        trainer, startup = self.get_trainer()

        fake_init_ops = []
        for op in startup.global_block().ops:
            if op.type == "fake_init":
                fake_init_ops.append(op)

        self.assertEqual(len(fake_init_ops), 3)


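# TestDecayedAdagrad and TestFtrl below are smoke tests: they only verify
# that these optimizers survive transpilation without raising.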
class TestDecayedAdagrad(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.DecayedAdagrad(learning_rate=0.1)
        opt.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()


class TestFtrl(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        opt = fluid.optimizer.Ftrl(learning_rate=0.1)
        opt.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()


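# piecewise_decay lowers to conditional_block ops, so the pserver program
# grows extra sub-blocks (7 blocks in total) beyond its optimize blocks;
# each conditional sub-block should contain only an assign op.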
class TestLRDecayConditional(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.piecewise_decay(
                [10000, 20000], [1.0, 0.5, 1.0]
            )
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        serv_op = pserver.blocks[0].ops[0]
        sub_blocks = []
        optimize_blocks = []
        for b in serv_op.all_attrs()["optimize_blocks"]:
            optimize_blocks.append(b.idx)
        for b in pserver.blocks:
            if b.idx not in optimize_blocks:
                sub_blocks.append(b.idx)

        self.assertEqual(len(pserver.blocks), 7)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(
            lr_decay_ops,
            [
                "increment",
                "cast",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "conditional_block",
            ],
        )
        # test the conditional sub-blocks
        for b in sub_blocks:
            if b == 0:
                continue
            block = pserver.blocks[b]
            self.assertEqual([op.type for op in block.ops], ["assign"])


class TestL2Decay(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(
                name='fc_w', regularizer=fluid.regularizer.L2Decay()
            ),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)

        def filter(param):
            return param.name == "fc_w"

        clip = fluid.clip.GradientClipByValue(0.1, need_clip=filter)
        sgd_optimizer.minimize(avg_cost, grad_clip=clip)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 3)
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            ["sum", "scale", "clip", "sgd"],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[2].ops],
            ["sum", "scale", "clip", "scale", "sum", "sgd"],
        )
        # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer


class TestL2DecayWithPiecewise(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        base_lr = 1.0
        bd = [1, 10, 20, 30]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        sgd_optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr
            ),
            momentum=0.9,
            regularization=fluid.regularizer.L2Decay(1e-4),
        )
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 9)
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            [
                "increment",
                "cast",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "fill_constant",
                "less_than",
                "logical_not",
                "logical_and",
                "logical_and",
                "conditional_block",
                "fill_constant",
                "conditional_block",
            ],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[7].ops],
            ["sum", "scale", "scale", "sum", "momentum"],
        )
        self.assertEqual(
            [op.type for op in pserver.blocks[8].ops],
            ["sum", "scale", "scale", "sum", "momentum"],
        )


class TestEmptyPserverOptimizeBlocks(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        # only one parameter
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=False,
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=1.0)
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        pserver, startup = self.get_pserver(ep=self.pserver2_ep, config=config)

        self.assertEqual(len(pserver.blocks), 2)
        self.assertEqual(len(pserver.blocks[1].ops), 0)


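# Shared fixture for the lookup-table tests: the title and brand
# embeddings share one table 'shared_w' (optionally distributed), a third
# embedding uses its own table 'profile_emb', and the pooled results feed
# a small classifier optimized with Adam.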
class TestDistLookupTableBase(TranspilerTest):
    def network_with_table(self, is_sparse, is_distributed):
        self.table_size = 1000
        self.emb_size = 64
        self.lookup_table_name = 'shared_w'

        def emb_pool(ids, table_name, is_distributed):
            emb = fluid.layers.embedding(
                input=ids,
                size=[self.table_size, self.emb_size],
                dtype='float32',
                param_attr=table_name,
                is_sparse=is_sparse,
                is_distributed=is_distributed,
            )
            pool = fluid.layers.sequence_pool(input=emb, pool_type='average')
            return pool

        title_ids = fluid.layers.data(
            name='title_ids', shape=[1], dtype='int64', lod_level=1
        )
        brand_ids = fluid.layers.data(
            name='brand_ids', shape=[1], dtype='int64', lod_level=1
        )
        profile_ids = fluid.layers.data(
            name='profile_ids', shape=[1], dtype='int64', lod_level=1
        )
        title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed)
        brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed)
        profile_emb = emb_pool(profile_ids, "profile_emb", False)
        fc0 = fluid.layers.concat(
            input=[title_emb, brand_emb, profile_emb], axis=1
        )
        predict = fluid.layers.fc(
            input=fc0,
            size=2,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )

        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        cost = fluid.layers.cross_entropy(input=predict, label=label)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)


class TestLocalLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will be scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        # 3 optimize for the second table (profile_emb) adam
        # NOTE: if param is not selected rows, the grad will be scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


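# Fully distributed table: the trainer fetches rows remotely through
# split_ids -> prefetch -> merge_ids, while the pserver gains an sgd
# optimize block for the table, a lookup_sparse_table_read block serving
# prefetch requests, and a save block for checkpointing its table shard.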
class TestDistLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for another dense param with adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 3 optimize for the distributed table with sgd
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops], ["sum", "sgd"]
        )
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual(
            [op.type for op in pserver1.blocks[4].ops],
            ["lookup_sparse_table_read"],
        )
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'split_ids',
            'prefetch',
            'merge_ids',
            'sequence_pool',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_ids',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        startup_ops = [
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'uniform_random',
            'recv',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
            'fake_init',
        ]
        self.assertEqual(
            [op.type for op in trainer_startup.blocks[0].ops], startup_ops
        )


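# Async variants: with sync_mode=False the per-trainer gradient "sum"
# aggregation and the send_barrier/fetch_barrier ops disappear from the
# transpiled programs, as the op lists below reflect.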
class TestAsyncLocalLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will be scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["adam", "scale", "scale"],
        )
        # 3 optimize for table adam
        # NOTE: if param is not selected rows, the grad will be scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'recv',
            'recv',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


class TestAsyncDistLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()

        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["adam", "scale", "scale"],
        )
        # 2 optimize for another dense param with adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["adam", "scale", "scale"],
        )
        # 3 optimize for table sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops], ["sgd"])
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual(
            [op.type for op in pserver1.blocks[4].ops],
            ["lookup_sparse_table_read"],
        )
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'split_ids',
            'prefetch',
            'merge_ids',
            'sequence_pool',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_ids',
            'send',
            'recv',
            'recv',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        startup_ops = [
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'fill_constant',
            'uniform_random',
            'uniform_random',
            'recv',
            'recv',
            'recv',
            'fetch_barrier',
            'concat',
            'fake_init',
        ]
        self.assertEqual(
            [op.type for op in trainer_startup.blocks[0].ops], startup_ops
        )


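# The distributed table is range-partitioned by rows: each pserver holds
# ceil(table_size / pserver_count) rows of 'shared_w'.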
class TestDistLookupTableSliceSize(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        pserver1, _ = self.get_pserver(self.pserver1_ep, config)

        self.assertTrue(self.transpiler.has_distributed_lookup_table)
        lookup_table_var = pserver1.global_block().vars[
            self.transpiler.table_name
        ]
        row_size = lookup_table_var.shape[0]
        calc_row_size = int(math.ceil(self.table_size / self.pservers))
        self.assertEqual(row_size, calc_row_size)


class TestDistArgsInProgram(TestDistLookupTableBase):
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

        self.assertTrue(trainer._is_distributed)
        self.assertTrue(trainer._is_chief)
        self.assertEqual(
            trainer._distributed_lookup_table, self.lookup_table_name
        )
        self.assertEqual(
            trainer._endpoints, [self.pserver1_ep, self.pserver2_ep]
        )


class TestRMSPropOptimizer(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        self.assertEqual(len(pserver.blocks), 3)
        # block1~2: optimize pass
        self.assertEqual(
            [op.type for op in pserver.blocks[1].ops],
            ["sum", "scale", "rmsprop"],
        )
        # the variable fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        moment_var = startup.global_block().var("momentum_1")
        self.assertEqual(moment_var.shape, (500, 1000))


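# Slice bookkeeping: for every parameter, the element counts of the
# slices stored on the two pservers must add up exactly to the numel of
# the original parameter.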
class TestLoadSliceVar(TranspilerTest):
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(name='fc_w'),
            bias_attr=fluid.ParamAttr(name='fc_b'),
        )
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = paddle.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, _ = self.get_pserver(self.pserver1_ep)
        pserver2, _ = self.get_pserver(self.pserver2_ep)

        vars_ps1 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver1_ep
        )
        vars_ps2 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver2_ep
        )

        self.assertTrue(vars_ps1)
        self.assertTrue(vars_ps2)

        for idx in range(len(vars_ps1)):
            total_numel = 0
            ps1_numel, ps2_numel = 0, 0

            ps1_var = vars_ps1[idx]

            if not ps1_var.is_slice:
                total_numel = functools.reduce(
                    lambda x, y: x * y, vars_ps1[idx].origin.shape
                )
                ps1_numel = functools.reduce(
                    lambda x, y: x * y, vars_ps1[idx].slice.shape
                )
            else:
                ps2_var = None
                for var in vars_ps2:
                    if var.origin.name == ps1_var.origin.name:
                        ps2_var = var
                        break

                total_numel = functools.reduce(
                    lambda x, y: x * y, ps1_var.origin.shape
                )
                ps1_numel = functools.reduce(
                    lambda x, y: x * y, ps1_var.slice.shape
                )
                ps2_numel = functools.reduce(
                    lambda x, y: x * y, ps2_var.slice.shape
                )

            self.assertEqual(total_numel, ps1_numel + ps2_numel)


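# nccl2 mode produces no pserver program; the transpiler only rewrites
# the startup program, appending a gen_nccl_id op and an NCCLID variable
# used to bootstrap peer-to-peer communication.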
class TestNCCL2Transpile(TranspilerTest):
    def test_nccl2_transpile(self):
        if fluid.core.is_compiled_with_cuda():  # test nccl2 only with cuda
            main = fluid.Program()
            startup = fluid.Program()
            with fluid.program_guard(main, startup):
                self.net_conf()

            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.wait_port = False
            t = fluid.DistributeTranspiler(config=config)
            t.transpile(
                0,
                trainers="127.0.0.1:6174,127.0.0.1:6175",
                current_endpoint="127.0.0.1:6174",
                startup_program=startup,
            )
            print([op.type for op in startup.global_block().ops])
            self.assertEqual(startup.global_block().ops[-1].type, "gen_nccl_id")
            self.assertIsNotNone(startup.global_block().vars.get("NCCLID"))
            gc.collect()


# test for remote prefetch
class TestRemoteLookupTable(TestDistLookupTableBase):
    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual(
            [op.type for op in pserver1.blocks[1].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will be scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[2].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        # 3 optimize for the second table (profile_emb) adam
        # NOTE: if param is not selected rows, the grad will be scaled to grad / trainer_num
        self.assertEqual(
            [op.type for op in pserver1.blocks[3].ops],
            ["sum", "scale", "adam", "scale", "scale"],
        )

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        ops = [
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'lookup_table',
            'sequence_pool',
            'concat',
            'mul',
            'elementwise_add',
            'cross_entropy2',
            'mean',
            'fill_constant',
            'mean_grad',
            'cross_entropy_grad2',
            'elementwise_add_grad',
            'send',
            'mul_grad',
            'send',
            'concat_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'split_selected_rows',
            'send',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sequence_pool_grad',
            'lookup_table_grad',
            'sum',
            'split_selected_rows',
            'send',
            'send_barrier',
            'recv',
            'recv',
            'fetch_barrier',
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)


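# The two remote-prefetch tests below check that a prefetched parameter
# (nce_w / the hsigmoid weight) is no longer pulled with a recv op, while
# the remaining parameter (nce_b / the hsigmoid bias) still is.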
# test for remote prefetch
class TestRemoteNce(TestDistLookupTableBase):
    def network_with_table(self, is_sparse, is_distributed):

        num_total_classes = 20
        sampler = "uniform"
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')

        input = fluid.layers.data(name="input", shape=[10], dtype="float32")
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")

        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='nce_w',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 1],
                dtype='float32',
                name='nce_b',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )

        cost = fluid.layers.nce(
            input=input,
            label=label,
            num_total_classes=num_total_classes,
            sampler=sampler,
            custom_dist=nid_freq_arr.tolist(),
            sample_weight=None,
            param_attr='nce_w',
            bias_attr='nce_b',
            seed=1,
            num_neg_samples=5,
            is_sparse=is_sparse,
        )
        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

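        # nce_w is served via remote prefetch, so it must not appear among
        # the recv outputs; nce_b is still fetched with a recv op.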
        out_vars = ["nce_w"]
        in_vars = ["nce_b"]

        recv_var_names = []

        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                for var in op.output("Out"):
                    recv_var_names.append(var)

        for out_var in out_vars:
            self.assertFalse(out_var in recv_var_names)
        for in_var in in_vars:
            self.assertTrue(in_var in recv_var_names)


# test for remote prefetch
class TestRemoteHsigmoid(TestDistLookupTableBase):
    def network_with_table(self, is_sparse, is_distributed):

        num_total_classes = 3

        input = fluid.layers.data(name="input", shape=[1], dtype="float32")
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        path_table = fluid.layers.data(
            name='path_table', shape=[3], dtype='int64'
        )
        path_code = fluid.layers.data(
            name='path_code', shape=[3], dtype='int64'
        )
        w_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[num_total_classes, 10],
                dtype='float32',
                name='hs_w',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )
        b_param = (
            fluid.default_main_program()
            .global_block()
            .create_parameter(
                shape=[3, 1],
                dtype='float32',
                name='hs_b',
                initializer=fluid.initializer.ConstantInitializer(),
            )
        )

        emb = fluid.layers.embedding(
            input=input,
            is_sparse=is_sparse,
            size=[3, 3],
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Normal(
                    scale=1 / math.sqrt(num_total_classes)
                )
            ),
        )

        loss = paddle.nn.HSigmoidLoss(
            feature_size=emb.shape[1],
            num_classes=num_total_classes,
            is_custom=True,
            is_sparse=is_sparse,
        )

        cost = loss(
            input=emb,
            label=label,
            path_table=path_table,
            path_code=path_code,
        )
        avg_cost = paddle.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.SGD(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os

        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()
        params_to_check = list()
        for op in trainer.blocks[0].ops:
            if op.type == "hierarchical_sigmoid":
                params_to_check = [op.input("W")[0], op.input("Bias")[0]]
                # the third attr mirrors num_total_classes (3) defined above
                for name in ["epmap", "table_names", "num_classes"]:
                    assert op.has_attr(name)
                    if name == "epmap":
                        assert op.attr(name)[0] == '127.0.0.1:6174'
                    elif name == "table_names":
                        assert op.attr(name)[0] == 'hierarchical_sigmoid_0.w_0'
                    else:
                        assert op.attr(name) == 3
            elif op.type == "lookup_table":
                params_to_check.append(op.input("W")[0])
            else:
                pass
        op_count = 0
        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                assert len(op.output("Out")) == 1
                assert op.output("Out")[0] == 'hierarchical_sigmoid_0.b_0'
                op_count += 1
        assert op_count == 1


if __name__ == "__main__":
    unittest.main()