#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import time

import unittest
import os
import sys
import signal
import subprocess

import six
import argparse
import pickle
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
DIST_UT_PORT = 0

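
# Helpers for the parent/child process protocol used throughout this file:
# a child process pickles its per-step losses to stdout (print_to_out), which
# the parent test recovers with pickle.loads(); timestamped progress logs go
# to stderr (print_to_err) for debugging.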
def print_to_out(out_losses):
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


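# Base runner for the static-graph (fluid) distributed tests. Concrete test
# models subclass it, implement get_model(), and are driven by runtime_main()
# below, which dispatches to run_pserver / run_trainer /
# run_gpu_fleet_api_trainer according to the parsed command line flags.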
class TestDistRunnerBase(object):
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: import fluid until runtime, or else forking processes will cause error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_gpu_fleet_api_trainer(self, args):
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
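            # interleave the batch across trainers: each trainer keeps the
            # items whose offset % 2 equals its trainer_id, so the two
            # trainers consume disjoint halves of every global batch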
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))

    def run_trainer(self, args):
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done with nccl2 mode.")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        # FIXME(gongwb): wait for pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        if args.use_dgc:
            # fuse_all_reduce_ops require that gradients should not be sparse types
            build_stra.fuse_all_reduce_ops = False

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


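# Base runner for the dygraph (imperative) data-parallel tests. Subclasses
# implement get_model() and run_one_loop(); run_trainer() wraps the model in
# dygraph.parallel.DataParallel when the nccl2 update method is selected.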
class TestParallelDyGraphRunnerBase(object):
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "run_one_loop should be implemented by child classes.")

    def run_trainer(self, args):

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)


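# Entry point for the child processes spawned by TestDistBase. Each concrete
# test model script is expected to end with runtime_main(<ModelClass>); the
# flags below are the ones assembled by TestDistBase when it builds the
# pserver and trainer command lines.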
def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)
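

# A minimal sketch of how this harness is typically wired up (the model class
# and file name below are hypothetical; a real test model must return the
# tuple expected by TestDistRunnerBase.get_model):
#
#     # dist_simple_model.py
#     class SimpleDistModel(TestDistRunnerBase):
#         def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1,
#                       single_device=False, use_dgc=False):
#             # build the network here and return:
#             #   test_program, avg_cost, train_reader, test_reader,
#             #   batch_acc, predict
#             ...
#
#     if __name__ == "__main__":
#         runtime_main(SimpleDistModel)
#
# The corresponding unit test then subclasses TestDistBase and calls
# check_with_place("dist_simple_model.py").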


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
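    """Compares a test model run locally against the same model run as a
    two-trainer distributed job (pserver or nccl2), using the pickled
    per-step losses emitted by the child processes.
    """
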
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument only exists to test allreduce
        # layers (users can call layers.allreduce to accumulate tensors
        # anywhere). Find a better way to test this and avoid checking the
        # argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._setup_config()

        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        else:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2

        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   gpus="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": gpus,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # do not use dgc on a single card
        if len(gpus) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until both trainer processes terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close the trainer and pserver log pipes
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

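    # Builds the command line and environment for a single nccl2 trainer rank:
    # the rank is pinned to one GPU (CUDA_VISIBLE_DEVICES, or FLAGS_selected_gpus
    # in mp_mode) and the PADDLE_TRAINER_* variables describe the whole job.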
    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

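    # Launches every nccl2 trainer rank as a subprocess and collects the
    # pickled losses; the ps_endpoints string is reused as the nccl2 worker
    # endpoint list, and hierarchical allreduce widens the job to four workers.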
    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log, log_name):
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
            if DIST_UT_PORT == 0:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port())
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # fail fast if an rpc hangs
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,nccl_helper=10,grpc_client=10,grpc_server=10,request_handler_impl=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):
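        """Run `model_file` locally and as a two-trainer distributed job, then
        check that the per-step average of the two trainers' losses matches
        the local loss to within `delta`.
        """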
        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    True,
                    check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    False,
                    check_error_log,
                    log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):
        # need to enable NCCL P2P or SHM, otherwise multi-card mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                gpus="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                gpus="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)