#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2


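# Trainer/pserver subprocesses report results to the parent test process by
# pickling them to stdout; the parent recovers them with pickle.loads() on
# the captured output.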
def print_to_out(out_losses):
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
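    # Progress logs are pickled to stderr, stamped with the local time and
    # the reporting class, so they stay out of the pickled result stream
    # that goes to stdout.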
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class TestDistRunnerBase(object):
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: delay importing fluid until runtime, or else forking processes will cause errors.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_gpu_fleet_api_trainer(self, args):
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
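        # The tiny fusion thresholds below are presumably meant to force the
        # gradient-fusion code path to trigger even on this small model.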
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
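            # Interleave each batch across trainers: trainer k keeps the
            # samples at offsets where offset % 2 == trainer_id, so the two
            # trainers train on disjoint halves of every batch.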
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)

    def run_trainer(self, args):
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
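            # Hierarchical allreduce splits a global allreduce into an
            # intra-node stage and an inter-node stage;
            # hallreduce_inter_nranks sets the inter-node group size.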
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done. with nccl2 mode")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME: force-disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
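            # multi_batch_merge_pass repeats forward/backward over several
            # batches before a single parameter update, emulating a larger
            # batch size without extra memory.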
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # local run (including nccl2_reduce_layer local runs): single trainer
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        if args.use_dgc:
            # fuse_all_reduce_ops requires that gradients not be sparse
            build_stra.fuse_all_reduce_ops = False

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase(object):
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

    def run_trainer(self, args):

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
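                # DataParallel accumulates gradients locally during
                # backward(); apply_collective_grads() all-reduces them
                # across trainers before the optimizer step.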
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)


def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this flag exists only to test allreduce
        # layers (users can call layers.allreduce to accumulate tensors
        # anywhere). Find a better way to run this test and reduce the
        # number of places that check this argument.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._setup_config()
        self._after_setup_config()

    def _find_free_port(self):
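        # Bind to port 0 so the OS picks a free port, and remember the ports
        # already handed out so two endpoints never collide.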
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   gpus="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": gpus,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # DGC is only enabled when running on more than one card
        if len(gpus) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until both trainer processes terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close the trainer and pserver log files
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log, log_name):
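        # Hierarchical allreduce is exercised with 4 trainers (presumably
        # two groups of hallreduce_inter_nranks=2), so build 4 endpoints.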
        if self._use_hallreduce:
            self._ps_endpoints = ""
            for i in range(0, 4):
                self._ps_endpoints += "127.0.0.1:%s," % (self._find_free_port())
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto-adapt to the GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 5sec to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):
        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    True,
                    check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    False,
                    check_error_log,
                    log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
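            # With synchronous updates both trainers should track the
            # single-process baseline, so compare the local loss against
            # the average of the two trainers' losses.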
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):
        # NCCL P2P or SHM must be enabled, otherwise multi-card mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                gpus="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                gpus="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)