#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import ast
import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import random
import numpy as np
import time

import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
DIST_UT_PORT = 0


def print_to_out(out_losses):
    sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class TestDistRunnerBase(object):
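    """Base class for static-graph distributed test runners.

    Subclasses implement get_model(); runtime_main() then dispatches to the
    run_* method selected by command-line flags. A minimal sketch (the
    subclass and script names below are hypothetical):

        class TestDistMnist(TestDistRunnerBase):
            def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1,
                          single_device=False, use_dgc=False):
                # build the network and return (test_program, avg_cost,
                # train_reader, test_reader, batch_acc, predict)
                ...

        if __name__ == "__main__":
            runtime_main(TestDistMnist)
    """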
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1,
                       hogwild_mode=False):
        # NOTE: import fluid at runtime, or else forking processes will cause errors.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_pipeline_trainer(self, args):
        self.lr = args.lr

        dist_strategy = DistributedStrategy()
        test_program, avg_cost, train_reader, test_reader, batch_acc, predict, data_loader = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        eprint(type(self).__name__, "device_id: %d." % device_id)
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        data_loader.set_sample_list_generator(train_reader, place)
        data_loader.start()
        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss = exe.run(fluid.default_main_program(), fetch_list=[avg_cost])
            loss = loss[0] if loss else None
            out_losses.append(loss)
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            # NOTE: this trainer builds no feeder, so collect the feed var
            # names straight from the main program.
            feeded_var_names = [
                var.name
                for var in fluid.default_main_program().global_block()
                .vars.values() if var.is_data
            ]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

    def run_use_fleet_api_20_trainer(self, args):
        """
        1. remove codes for DistributedStrategy and leave the DistributedStrategy part to get_model()
        2. to run with fleet 2.0 api, set flags _use_fleet_api and _use_fleet_api_20 to True
        3. for now, not support test for model save
        """
        assert args.update_method == "nccl2" or "bkcl"
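        # A concrete test selects this path via its _setup_config(), e.g.
        # (hypothetical sketch):
        #     self._use_fleet_api = True
        #     self._use_fleet_api_20 = True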

        self.lr = args.lr
        print_to_err("use_fleet 2.0", "fleet.node_num:")

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size)

        if fluid.core.is_compiled_with_cuda():
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif fluid.core.is_compiled_with_xpu():
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            raise ValueError(
                "fleet api requires paddlepaddle-gpu or paddlepaddle-xpu.")

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var
            for var in fluid.default_main_program().global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
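            # When use_reader_alloc is set, keep every other sample so the
            # two trainers consume disjoint halves of each batch.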
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(fluid.default_main_program(),
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")
        print_to_err(type(self).__name__, "dist losses: {}".format(out_losses))

        sys.stdout.buffer.write(pickle.dumps(out_losses))

    def run_use_fleet_api_trainer(self, args):
        assert args.update_method in ["nccl2", "bkcl"]

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True
        if args.sync_batch_norm:
            dist_strategy.sync_batch_norm = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("use_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
            self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        if fluid.core.is_compiled_with_cuda():
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif fluid.core.is_compiled_with_xpu():
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            raise ValueError(
                "fleet api requires paddlepaddle-gpu or paddlepaddle-xpu.")

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        # Temporary workaround to pass the python35 gcc8 CI.
        # FIXME(gongweibao, wangxi): fix the fleet api program order.
        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer")
            else:
                model_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_persistables_2")
                model_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_persistables_2")
                infer_save_dir_fluid = os.path.join(model_save_dir,
                                                    "fluid_infer_2")
                infer_save_dir_fleet = os.path.join(model_save_dir,
                                                    "fleet_infer_2")
            fluid.io.save_persistables(exe, model_save_dir_fluid,
                                       fleet._origin_program)
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(infer_save_dir_fluid,
                                          feeded_var_names, [avg_cost], exe,
                                          fleet._origin_program)
            fleet.save_inference_model(exe, infer_save_dir_fleet,
                                       feeded_var_names, [avg_cost])

    def run_trainer(self, args):
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
374
            print_to_err(
375 376
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
T
tangwei12 已提交
377 378 379 380 381 382 383 384 385
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild)

T
typhoonzero 已提交
386
            trainer_prog = t.get_trainer_program()
387
            print_to_err(
388 389
                type(self).__name__,
                "get trainer program done with pserver mode.")
W
Wu Yi 已提交
390
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
W
Wu Yi 已提交
391 392 393
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
394
            config.nccl_comm_num = args.nccl_comm_num
395 396 397
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
398
            print_to_err(
399 400
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
W
Wu Yi 已提交
401 402 403 404 405 406 407
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
408
            print_to_err(
409 410
                type(self).__name__,
                "get trainer program done. with nccl2 mode")
W
Wu Yi 已提交
411
            trainer_prog = fluid.default_main_program()
T
typhoonzero 已提交
412
        else:
413
            print_to_err(
414 415
                type(self).__name__,
                "do nothing about main program, just use it")
T
typhoonzero 已提交
416
            trainer_prog = fluid.default_main_program()
417
            print_to_err(type(self).__name__, "use main program done.")
T
typhoonzero 已提交
418

419 420 421
        # FIXME(gongwb): wait for pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.fuse_all_reduce is not None:
            sys.stderr.write('fuse_all_reduce={}'.format(args.fuse_all_reduce))
            build_stra.fuse_all_reduce_ops = args.fuse_all_reduce

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase(object):
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

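    # _get_data shards each batch across the two trainers by sample parity:
    # trainer 0 keeps items at even offsets and trainer 1 the odd ones, so
    # the trainers consume disjoint halves of every batch.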
    def _get_data(self, batch, args):
        if args.update_method != "local":
            new_batch = []
            for offset, item in enumerate(batch):
                if offset % 2 == args.trainer_id:
                    new_batch.append(item)
            return new_batch
        else:
            return batch

    def run_trainer(self, args):

        seed = 90
        if fluid.core.is_compiled_with_cuda():
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif fluid.core.is_compiled_with_xpu():
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            assert ("Only support CUDAPlace or XPUPlace for now.")
538 539 540 541

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2" or args.update_method == "bkcl":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(
                    model,
                    strategy,
                    find_unused_parameters=args.find_unused_parameters)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = self._get_data(data, args)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                loss.backward()

                opt.minimize(loss)
                if not args.accumulate_gradient:
                    model.clear_gradients()
        print_to_out(out_losses)

    def run_trainer_with_spawn(self, args):
        # 1. enable dygraph
        paddle.disable_static()

        # 2. init seed
        seed = 90
        paddle.static.default_startup_program().random_seed = seed
        paddle.static.default_main_program().random_seed = seed
        np.random.seed(seed)
        random.seed(seed)
        # get trainer id
        args.trainer_id = paddle.distributed.get_rank()

        # 3. init parallel env
        if args.update_method == "nccl2":
            paddle.distributed.init_parallel_env()

        # 4. train model
        model, train_reader, opt = self.get_model()
        if args.update_method == "nccl2":
            model = paddle.DataParallel(
                model, find_unused_parameters=args.find_unused_parameters)

        out_losses = []
        for step_id, data in enumerate(train_reader()):
            data = self._get_data(data, args)
            if step_id == RUN_STEP:
                break
            loss = self.run_one_loop(model, opt, data)
            out_losses.append(loss.numpy())

            loss.backward()

            opt.minimize(loss)
            model.clear_gradients()
        return out_losses

    def run_use_fleet_api_trainer(self, args):
        import paddle.distributed.fleet as fleet
        import paddle.distributed.fleet.base.role_maker as role_maker
        # 1. enable dygraph
        paddle.disable_static()

        # 2. init seed
        seed = 90
        paddle.static.default_startup_program().random_seed = seed
        paddle.static.default_main_program().random_seed = seed
        np.random.seed(seed)
        random.seed(seed)
        # get trainer id
        args.trainer_id = paddle.distributed.get_rank()

        # set strategy
        strategy = fleet.DistributedStrategy()
        if args.find_unused_parameters:
            strategy.find_unused_parameters = True

        # 3. init parallel env
        if args.update_method in ["nccl2", "bkcl"]:
            fleet.init(is_collective=True, strategy=strategy)

        # 4. train model
        model, train_reader, opt = self.get_model()
        if args.update_method in ["nccl2", "bkcl"]:
            opt = fleet.distributed_optimizer(opt)
            model = fleet.distributed_model(model)

        out_losses = []
        for step_id, data in enumerate(train_reader()):
            data = self._get_data(data, args)
            if step_id == RUN_STEP:
                break
            loss = self.run_one_loop(model, opt, data)
            out_losses.append(loss.numpy())

            loss.backward()

            opt.step()
            if not args.accumulate_gradient:
                opt.clear_grad()
        print_to_out(out_losses)

def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "bkcl", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--use_pipeline', action='store_true')
    parser.add_argument('--use_fleet_api', action='store_true')
    parser.add_argument('--use_fleet_api_20', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_xpu', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--accumulate_gradient', action='store_true')
    parser.add_argument('--find_unused_parameters', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument('--save_model', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)
    parser.add_argument('--sync_batch_norm', action='store_true')
    parser.add_argument(
        '--fuse_all_reduce',
        required=False,
        type=ast.literal_eval,
        default=None)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.use_fleet_api:
        model.run_use_fleet_api_trainer(args)
    elif args.use_fleet_api_20:
        model.run_use_fleet_api_20_trainer(args)
    elif args.use_pipeline:
        model.run_pipeline_trainer(args)
    else:
        model.run_trainer(args)
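

# Example invocation of a runner script built on runtime_main (illustrative;
# the script name, ports and flags below are hypothetical):
#
#   python dist_mnist.py --role trainer \
#       --endpoints 127.0.0.1:6170,127.0.0.1:6171 \
#       --trainer_id 0 --current_endpoint 127.0.0.1:6170 \
#       --trainers 2 --update_method pserver --lr 0.001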


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
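    """Runs local vs. distributed training and asserts that losses match.

    A typical concrete test is a small subclass (hypothetical sketch; the
    model file is a runner script built on runtime_main):

        class TestDistMnist2x2(TestDistBase):
            def _setup_config(self):
                self._sync_mode = True
                self._use_reduce = False

            def test_dist_train(self):
                self.check_with_place("dist_mnist.py", delta=1e-5)
    """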
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self.__use_xpu = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
            self.__use_xpu = False
        elif self._enforce_place == "XPU":
            self.__use_cuda = False
            self.__use_xpu = True
            self._use_dgc = False
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._bkcl_mode = False
        self._pipeline_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument enables testing allreduce layers,
        # which users can call via layers.allreduce to accumulate tensors
        # anywhere. Find a better way to run this test and reduce the checks
        # of this argument scattered everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._use_fleet_api = False
        self._use_fleet_api_20 = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._save_model = False
        self._fuse_all_reduce = None
        self._accumulate_gradient = False
        self._find_unused_parameters = False
        self._setup_config()

        global DIST_UT_PORT
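        # PADDLE_DIST_UT_PORT (if set, e.g. by CI) pins the starting port;
        # otherwise free ports are picked at random for each test.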
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        else:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2

        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
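            # Binding to port 0 lets the OS pick a free ephemeral port.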
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self,
                      model_file,
                      check_error_log,
                      required_envs,
                      log_name=""):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open(log_name + "_ps0_err.log", "wb")
        ps1_pipe = open(log_name + "_ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1,
                   log_name="",
                   devices="0"):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --update_method local --lr %f" % (model,
                                                                     self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": devices,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        elif self.__use_xpu:
            cmd += " --use_xpu"
            env_local = {
                "FLAGS_selected_xpus": devices,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # do not use dgc on a single card
        if len(devices) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        if self._accumulate_gradient:
            cmd += " --accumulate_gradient"

        if self._find_unused_parameters:
            cmd += " --find_unused_parameters"

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open(log_name + "_local.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open(log_name + "_tr0_err.log", "wb")
        tr1_pipe = open(log_name + "_tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                 (self._python_interp, model, self._ps_endpoints,
                  trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self._save_model:
            tr_cmd += " --save_model"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "FLAGS_selected_gpus": "{}".format(0),
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        # TODO(liuyuhui): XPU_VISIBLE_DEVICES is not working right now,
        # will update it after Baidu Kunlun partners' support.
        elif self.__use_xpu:
            tr_cmd += " --use_xpu"
            env.update({
                "FLAGS_selected_xpus": "{}".format(trainer_id),
                #"XPU_VISIBLE_DEVICES": "{}".format(trainer_id + 1),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
                "GLOG_v": "2",
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._accumulate_gradient:
            tr_cmd += " --accumulate_gradient"

        if self._find_unused_parameters:
            tr_cmd += " --find_unused_parameters"

        if self._pipeline_mode:
            tr_cmd += " --use_pipeline"
        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._fuse_all_reduce is not None:
            tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce)

        if self._use_fleet_api:
            tr_cmd += " --use_fleet_api_20" if self._use_fleet_api_20 else " --use_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"
            if hasattr(self, '_sync_batch_norm') and self._sync_batch_norm:
                tr_cmd += " --sync_batch_norm"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, update_method, check_error_log,
                           log_name):
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
            if DIST_UT_PORT == 0:
                # NOTE(wangxi): the hierarchical allreduce test must use 4
                # cards since nccl>=2.7
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port())
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open(log_name + "_tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])
    def _run_pipeline(self, model, envs, check_error_log, log_name):
        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            tr_env['CUDA_VISIBLE_DEVICES'] = "0,1"
            tr_env['NCCL_SHM_DISABLE'] = '1'
            tr_env['FLAGS_selected_gpus'] = str(i)
            tr_env['FLAGS_cudnn_deterministic'] = '0'
            print("tr_cmd:{}, env: {}".format(tr_cmd, tr_env))

            tr_pipe = open("/tmp/" + "tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30s to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = \
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10," \
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10," \
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,gen_nccl_id_op_help=10,nccl_helper=10,grpc_client=10," \
                "grpc_server=10,request_handler_impl=10"
            required_envs["GLOG_logtostderr"] = "1"

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={},
                         log_name=""):

        required_envs = self._get_required_envs(check_error_log, need_envs)

        local_losses \
            = self._run_local(model_file, required_envs,
                              check_error_log, log_name=log_name)

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    update_method="nccl2_reduce_layer",
                    check_error_log=check_error_log,
                    log_name=log_name)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    update_method='nccl2',
                    check_error_log=check_error_log,
                    log_name=log_name)
        elif self._bkcl_mode:
            tr0_losses, tr1_losses = self._run_cluster_nccl2(
                model_file,
                required_envs,
                update_method='bkcl',
                check_error_log=check_error_log,
                log_name=log_name)
        elif self._pipeline_mode:
            tr0_losses, tr1_losses = self._run_pipeline(
                model_file, required_envs, check_error_log, log_name=log_name)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            if self._pipeline_mode:
                dist_loss = np.array([tr1_loss])
            else:
                dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(self,
                                     model_file,
                                     delta=1e-3,
                                     check_error_log=False,
                                     need_envs={},
                                     log_name=""):

        # need to enable P2P or SHM, otherwise multi-card mode will hang
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                devices="0,1")

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                devices="0,1")

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)