# test_dist_base.py
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
DIST_UT_PORT = 0
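# DIST_UT_PORT above is the base port used to build test endpoints; TestDistBase.setUp
# seeds it from the PADDLE_DIST_UT_PORT environment variable when that is set and
# bumps it as ports are handed out.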


def print_to_out(out_losses):
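    # Pickle the collected per-step losses to stdout; the parent test process
    # (see TestDistBase) unpickles this output to compare runs.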
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class TestDistRunnerBase(object):
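    # Base runner for static-graph distributed tests: subclasses implement
    # get_model(), and runtime_main() drives an instance of this class as a
    # pserver or trainer inside a spawned subprocess.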
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: import fluid until runtime, or else forking processes will cause error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_gpu_fleet_api_trainer(self, args):
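        # Trainer entry used when --gpu_fleet_api is passed: the model is built
        # through the collective fleet API rather than the transpiler path.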
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        # temporary workaround added to pass the python35 gcc8 CI
        # Fixme(gongweibao, wangxi), need fix fleet api program order
        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

    def run_trainer(self, args):
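        # Trainer entry for the transpiler-based paths; handles the local,
        # pserver and nccl2 update methods.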
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done. with nccl2 mode")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        # FIXME(gongwb): wait for pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        if args.use_dgc:
            # fuse_all_reduce_ops require that gradients should not be sparse types
            build_stra.fuse_all_reduce_ops = False

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
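            # With reader allocation enabled, each trainer keeps only the
            # samples whose offset matches its trainer_id.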
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase(object):
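    # Dygraph (imperative) counterpart of TestDistRunnerBase: subclasses provide
    # get_model() and run_one_loop() for a single forward/backward step.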
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

    def run_trainer(self, args):

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)


def runtime_main(test_class):
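    # Entry point executed inside each spawned subprocess: parse the role and
    # update-method flags, then dispatch to the matching run_* method.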
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument('--save_model', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
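    # Drives the actual unit tests: launches local, pserver or nccl2 training
    # subprocesses and compares their per-step losses in check_with_place.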
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): I added this stupid argument to enable
        # testing allreduce layers, which users can call layers.allreduce
        # to accumulate tensors at anywhere. Find a better way to do this
        # test, reduce check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._save_model = False
        self._setup_config()

        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        else:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2

        self._after_setup_config()

    def _find_free_port(self):
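        # Bind to port 0 so the OS picks a free port, retrying until a port not
        # already handed out to this test instance is found.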
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   gpus="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": gpus,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # dgc is only enabled when running on more than one card
        if len(gpus) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
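        # Two pserver and two trainer subprocesses are launched; each trainer's
        # pickled stdout is returned as its list of per-step losses.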
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
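        # Build the command line and environment for one nccl2 trainer process,
        # appending flags for the collective options enabled on this test.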
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self._save_model:
            tr_cmd += " --save_model"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id % 2),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id % 2)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log, log_name):
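        # The pserver endpoint list is reused (or regenerated with four ports for
        # hierarchical allreduce) as nccl2 worker endpoints, and one trainer
        # subprocess is launched per endpoint.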
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
            if DIST_UT_PORT == 0:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port())
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30 seconds, to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,nccl_helper=10,grpc_client=10,grpc_server=10,request_handler_impl=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):
        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    True,
                    check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    False,
                    check_error_log,
                    log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):
        # P2P or SHM must be enabled, otherwise multi-card mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                gpus="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                gpus="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)