#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
DIST_UT_PORT = 0

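# NOTE: RUN_STEP is the number of training steps each trainer runs before its
# losses are compared, DEFAULT_BATCH_SIZE is the batch size used when a test
# does not override it, and DIST_UT_PORT is the starting port for the
# pserver/trainer endpoints (0 means "pick free ports"; it can be seeded via
# the PADDLE_DIST_UT_PORT environment variable, see TestDistBase.setUp below).
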
def print_to_out(out_losses):
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


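# The runner classes below are executed inside the subprocesses launched by
# TestDistBase: losses are pickled to stdout (print_to_out) and progress logs
# are pickled to stderr (print_to_err), and the parent test process unpickles
# stdout to compare results.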
class TestDistRunnerBase(object):
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: import fluid until runtime, or else forking processes will cause error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

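    # NOTE: run_gpu_fleet_api_trainer trains with the collective fleet API
    # (DistributedStrategy + PaddleCloudRoleMaker). It expects the device and
    # rank information (CUDA_VISIBLE_DEVICES and the PADDLE_* variables) to be
    # set by the launching test, see _get_nccl2_trainer_cmd in TestDistBase.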
    def run_gpu_fleet_api_trainer(self, args):
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True
        if args.sync_batch_norm:
            dist_strategy.sync_batch_norm = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        # temporary workaround to pass the python35 gcc8 CI
        # FIXME(gongweibao, wangxi): need to fix the fleet api program order
        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

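    # NOTE: run_trainer covers the static-graph paths: with update_method
    # "pserver" it transpiles the program for a parameter-server job, with
    # "nccl2"/"nccl2_reduce_layer" it runs the nccl2 transpile, and otherwise
    # it just runs the default main program locally.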
    def run_trainer(self, args):
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done with nccl2 mode")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        # FIXME(gongwb): wait for pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


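# TestParallelDyGraphRunnerBase is the dygraph counterpart of
# TestDistRunnerBase: subclasses implement get_model() and run_one_loop(),
# and the trainer optionally wraps the model with dygraph DataParallel when
# update_method is "nccl2".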
class TestParallelDyGraphRunnerBase(object):
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

    def run_trainer(self, args):
        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)


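# A concrete dist-test script is expected to subclass one of the runner bases,
# implement get_model(), and hand the class to runtime_main() so the script
# can be launched as a pserver or trainer subprocess. A minimal sketch (the
# class and model names here are hypothetical, not part of this file):
#
#     class TestDistFooRunner(TestDistRunnerBase):
#         def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1,
#                       single_device=False, use_dgc=False):
#             # build the network and return (test_program, avg_cost,
#             # train_reader, test_reader, batch_acc, predict)
#             ...
#
#     if __name__ == "__main__":
#         runtime_main(TestDistFooRunner)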
def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument('--save_model', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)
    parser.add_argument('--sync_batch_norm', action='store_true')

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)


import paddle.compat as cpt
import socket
from contextlib import closing

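# TestDistBase drives the actual unit tests: it launches the runner script as
# local, pserver+trainer, or nccl2 subprocesses, collects the pickled losses
# from their stdout, and asserts that the distributed losses match the local
# ones step by step.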

class TestDistBase(unittest.TestCase):
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): I added this stupid argument to enable testing
        # allreduce layers, which users can call (layers.allreduce) to
        # accumulate tensors anywhere. Find a better way to do this test and
        # reduce the checks on this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._save_model = False
        self._setup_config()

        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        else:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2

        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   gpus="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": gpus,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # do not use dgc on a single card
        if len(gpus) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until the trainer processes terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer log files
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self._save_model:
            tr_cmd += " --save_model"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id % 2),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id % 2)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"
            if hasattr(self, '_sync_batch_norm') and self._sync_batch_norm:
                tr_cmd += " --sync_batch_norm"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env
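    # NOTE: besides the command-line flags, each nccl2/fleet trainer also
    # receives its device and rank through the environment
    # (CUDA_VISIBLE_DEVICES and the PADDLE_* variables populated above).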

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log, log_name):
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
            if DIST_UT_PORT == 0:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port())
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30s to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,nccl_helper=10,grpc_client=10,grpc_server=10,request_handler_impl=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):
        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    True,
                    check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    False,
                    check_error_log,
                    log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):
        # need to enable p2p or shm, otherwise multi-card mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                gpus="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                gpus="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)
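
# A typical test case built on TestDistBase overrides _setup_config() to pick
# the update mode and then calls check_with_place() with its runner script.
# A minimal sketch (the test class and the "dist_foo.py" script are
# hypothetical, not part of this file):
#
#     class TestDistFoo2x2(TestDistBase):
#         def _setup_config(self):
#             self._sync_mode = True
#             self._use_reduce = False
#
#         def test_dist_train(self):
#             self.check_with_place("dist_foo.py", delta=1e-5)
#
#     if __name__ == '__main__':
#         unittest.main()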