#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import time

import ast
import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import random
import numpy as np

import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
DIST_UT_PORT = 0


def print_to_out(out_losses):
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)
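
# A minimal sketch of the consumer side of the helpers above (illustrative
# variable names only): the parent test process launches a trainer script with
# subprocess, captures its stdout, and unpickles the loss list written by
# print_to_out(), just as _run_local()/_run_cluster() below do.
#
#     proc = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE)
#     out, _ = proc.communicate()
#     out_losses = pickle.loads(out)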


class TestDistRunnerBase(object):
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: import fluid until runtime, or else forking processes will cause error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_pipeline_trainer(self, args):
        self.lr = args.lr

        dist_strategy = DistributedStrategy()
        test_program, avg_cost, train_reader, test_reader, batch_acc, predict, data_loader = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        eprint(type(self).__name__, "device_id: %d." % device_id)
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        data_loader.set_sample_list_generator(train_reader, place)
        data_loader.start()
        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss = exe.run(fluid.default_main_program(), fetch_list=[avg_cost])
            loss = loss[0] if loss else None
            out_losses.append(loss)
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            # NOTE: feed_var_list is not returned by get_model here, so collect
            # the data variables from the origin program before saving.
            feed_var_list = [
                var for var in fleet._origin_program.global_block().vars.values()
                if var.is_data
            ]
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

    def run_gpu_fleet_api_trainer(self, args):
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True
        if args.sync_batch_norm:
            dist_strategy.sync_batch_norm = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        # tmp add this code to pass python35 gcc8 CI
        # Fixme(gongweibao, wangxi), need fix fleet api program order
        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

    def run_trainer(self, args):
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done. with nccl2 mode")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        # FIXME(gongwb):wait pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.fuse_all_reduce is not None:
            sys.stderr.write('fuse_all_reduce={}'.format(args.fuse_all_reduce))
            build_stra.fuse_all_reduce_ops = args.fuse_all_reduce

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase(object):
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

    def _get_data(self, batch, args):
        if args.update_method != "local":
            new_batch = []
            for offset, item in enumerate(batch):
                if offset % 2 == args.trainer_id:
                    new_batch.append(item)
            return new_batch
        else:
            return batch
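
    # For example, with two trainers a batch [s0, s1, s2, s3] is interleaved by
    # the rule above (offset % 2 == trainer_id): trainer 0 gets [s0, s2] and
    # trainer 1 gets [s1, s3].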

    def run_trainer(self, args):

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = self._get_data(data, args)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                loss.backward()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)

    def run_trainer_with_spawn(self, args):
        # 1. enable dygraph
        paddle.disable_static()

        # 2. init seed
        seed = 90
        paddle.static.default_startup_program().random_seed = seed
        paddle.static.default_main_program().random_seed = seed
        np.random.seed(seed)
        random.seed(seed)
        # get trainer id
        args.trainer_id = paddle.distributed.get_rank()

        # 3. init parallel env
        if args.update_method == "nccl2":
            paddle.distributed.init_parallel_env()

        # 4. train model
        model, train_reader, opt = self.get_model()
        if args.update_method == "nccl2":
            model = paddle.DataParallel(model)

        out_losses = []
        for step_id, data in enumerate(train_reader()):
            data = self._get_data(data, args)
            if step_id == RUN_STEP:
                break
            loss = self.run_one_loop(model, opt, data)
            out_losses.append(loss.numpy())

            loss.backward()

            opt.minimize(loss)
            model.clear_gradients()
        return out_losses
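
    # A minimal sketch of how a spawn-based test might drive this entry point
    # (assumptions: the runner class name is hypothetical and
    # paddle.distributed.spawn is used to launch one process per card):
    #
    #     paddle.distributed.spawn(
    #         func=TestHypotheticalDygraphRunner().run_trainer_with_spawn,
    #         args=(args, ),
    #         nprocs=2)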

    def run_gpu_fleet_api_trainer(self, args):
        import paddle.distributed.fleet as fleet
        import paddle.distributed.fleet.base.role_maker as role_maker
        # 1. enable dygraph
        paddle.disable_static()

        # 2. init seed
        seed = 90
        paddle.static.default_startup_program().random_seed = seed
        paddle.static.default_main_program().random_seed = seed
        np.random.seed(seed)
        random.seed(seed)
        # get trainer id
        args.trainer_id = paddle.distributed.get_rank()

        # 3. init parallel env
        if args.update_method == "nccl2":
            fleet.init(is_collective=True)

        # 4. train model
        model, train_reader, opt = self.get_model()
        if args.update_method == "nccl2":
            opt = fleet.distributed_optimizer(opt)
            model = fleet.distributed_model(model)

        out_losses = []
        for step_id, data in enumerate(train_reader()):
            data = self._get_data(data, args)
            if step_id == RUN_STEP:
                break
            loss = self.run_one_loop(model, opt, data)
            out_losses.append(loss.numpy())

            loss.backward()

            opt.step()
            opt.clear_grad()
        print_to_out(out_losses)


def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--use_pipeline', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument('--save_model', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)
    parser.add_argument('--sync_batch_norm', action='store_true')
    parser.add_argument(
        '--fuse_all_reduce',
        required=False,
        type=ast.literal_eval,
        default=None)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    elif args.use_pipeline:
        model.run_pipeline_trainer(args)
    else:
        model.run_trainer(args)
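
# Illustrative sketch only (hypothetical module and class names): a concrete
# test model script built on this harness defines a child class and hands it to
# runtime_main() when the file is executed as a subprocess by TestDistBase:
#
#     # dist_hypothetical_model.py
#     class TestDistHypotheticalModel2x2(TestDistRunnerBase):
#         def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1,
#                       single_device=False, use_dgc=False):
#             ...
#
#     if __name__ == "__main__":
#         runtime_main(TestDistHypotheticalModel2x2)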


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._pipeline_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): I added this stupid argument to enable
        # testing allreduce layers, which users can call layers.allreduce
        # to accumulate tensors at anywhere. Find a better way to do this
        # test, reduce check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._save_model = False
        self._fuse_all_reduce = None
        self._setup_config()

        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        else:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2

        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   gpus="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --update_method local --lr %f" % (model,
                                                                      self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
805
            cmd += " --use_cuda"
W
Wu Yi 已提交
806
            env_local = {
807
                "CUDA_VISIBLE_DEVICES": gpus,
W
Wu Yi 已提交
808 809 810
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
811 812 813
        else:
            env_local = {'CPU_NUM': '1'}

814 815 816 817
        # not use dgc in single card
        if len(gpus) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self._save_model:
            tr_cmd += " --save_model"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "FLAGS_selected_gpus": "{}".format(0),
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._pipeline_mode:
            tr_cmd += " --use_pipeline"
        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._fuse_all_reduce is not None:
            tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce)

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"
            if hasattr(self, '_sync_batch_norm') and self._sync_batch_norm:
                tr_cmd += " --sync_batch_norm"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log, log_name):
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
            if DIST_UT_PORT == 0:
                # NOTE(wangxi). hallreduce test must use 4cards after nccl>=2.7
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port())
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _run_pipeline(self, model, envs, check_error_log, log_name):
        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            tr_env['CUDA_VISIBLE_DEVICES'] = "0,1"
            tr_env['NCCL_SHM_DISABLE'] = '1'
            tr_env['FLAGS_selected_gpus'] = str(i)
            tr_env['FLAGS_cudnn_deterministic'] = '0'
            print("tr_cmd:{}, env: {}".format(tr_cmd, tr_env))

            tr_pipe = open("/tmp/" + "tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30s to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,gen_nccl_id_op_help=10,nccl_helper=10,grpc_client=10," \
                "grpc_server=10,request_handler_impl=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):

        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    True,
                    check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
1158 1159 1160 1161 1162
                    model_file,
                    required_envs,
                    False,
                    check_error_log,
                    log_name=log_name)
        elif self._pipeline_mode:
            tr0_losses, tr1_losses = self._run_pipeline(
                model_file, required_envs, check_error_log, log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            if self._pipeline_mode:
                dist_loss = np.array([tr1_loss])
            else:
                dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)
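
    # Illustrative sketch only (hypothetical test class and model-file names):
    # a concrete distributed unit test usually just flips the flags it needs in
    # _setup_config() and then calls check_with_place():
    #
    #     class TestDistHypotheticalSync2x2(TestDistBase):
    #         def _setup_config(self):
    #             self._sync_mode = True
    #             self._use_reduce = False
    #
    #         def test_dist_train(self):
    #             self.check_with_place(
    #                 "dist_hypothetical_model.py", delta=1e-5)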

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):

        # need open p2p or shm otherwise multi cards mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                gpus="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                gpus="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)