#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
DIST_UT_PORT = 0


def print_to_out(out_losses):
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class TestDistRunnerBase(object):
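    """Base runner for the static-graph distributed training test programs.

    Child classes implement get_model(); runtime_main() parses the command
    line and dispatches to run_pserver(), run_gpu_fleet_api_trainer() or
    run_trainer() according to the requested role and update method.
    """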
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: import fluid until runtime, or else forking processes will cause error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_gpu_fleet_api_trainer(self, args):
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

    def run_trainer(self, args):
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done with nccl2 mode")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        # FIXME(gongwb):wait pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        if args.use_dgc:
            # fuse_all_reduce_ops require that gradients should not be sparse types
            build_stra.fuse_all_reduce_ops = False

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
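            # When reader allocation is enabled, each trainer keeps only the
            # samples whose offset matches its trainer_id, so the two
            # trainers consume disjoint halves of every batch.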
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase(object):
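    """Base runner for the dygraph (imperative) data-parallel test programs.

    Child classes implement get_model() and run_one_loop(); run_trainer()
    wraps the model with DataParallel when the nccl2 update method is used.
    """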
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "run_one_loop should be implemented by the child classes.")

    def run_trainer(self, args):

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)


def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument('--save_model', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)
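
# A minimal usage sketch (illustrative only; the class name DemoDistRunner
# below is hypothetical and not defined anywhere in this module):
#
#     class DemoDistRunner(TestDistRunnerBase):
#         def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1,
#                       single_device=False, use_dgc=False):
#             # build the program and return (test_program, avg_cost,
#             # train_reader, test_reader, batch_acc, predict)
#             ...
#
#     if __name__ == "__main__":
#         runtime_main(DemoDistRunner)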


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
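    """unittest base class for the distributed training tests.

    It launches local, parameter-server and NCCL2 trainer subprocesses built
    from a model file, loads the pickled per-step losses they print to
    stdout, and compares them step by step. Concrete tests implement
    _setup_config() and call check_with_place().
    """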
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument is only here to enable testing
        # allreduce layers, which users can call via layers.allreduce to
        # accumulate tensors anywhere. Find a better way to do this test and
        # remove the need to check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._save_model = False
        self._setup_config()

        global DIST_UT_PORT
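        # Endpoints come either from free ports discovered at runtime or from
        # a fixed, incrementing base port given by PADDLE_DIST_UT_PORT.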
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        else:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2

        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   gpus="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": gpus,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # do not use dgc when running on a single card
        if len(gpus) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self._save_model:
            tr_cmd += " --save_model"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id % 2),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id % 2)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log, log_name):
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
            if DIST_UT_PORT == 0:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port())
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30s to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,nccl_helper=10,grpc_client=10,grpc_server=10,request_handler_impl=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):
        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    True,
                    check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    False,
                    check_error_log,
                    log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):
        # need open p2p or shm otherwise multi cards mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                gpus="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                gpus="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)
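
# A minimal usage sketch (illustrative only; the names TestDistDemo2x2 and
# "dist_demo_model.py" below are hypothetical):
#
#     class TestDistDemo2x2(TestDistBase):
#         def _setup_config(self):
#             self._sync_mode = True
#             self._use_reduce = False
#
#         def test_dist_train(self):
#             self.check_with_place("dist_demo_model.py", delta=1e-5)
#
#     if __name__ == "__main__":
#         unittest.main()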