#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import ast
import os
import pickle
import random
import socket
import subprocess
import sys
import tempfile
import time
import unittest
from contextlib import closing

import numpy as np

import paddle
from paddle import fluid
from paddle.distributed.fleet.meta_optimizers import (
    RawProgramOptimizer as RawProgram,
)
from paddle.fluid import compiler
from paddle.incubate.distributed.fleet import role_maker
from paddle.incubate.distributed.fleet.collective import (
    DistributedStrategy,
    fleet,
)

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2
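# DIST_UT_PORT is the process-wide base port for the simulated cluster; when
# the PADDLE_DIST_UT_PORT environment variable is set, TestDistBase.setUp()
# hands out consecutive ports starting from it instead of probing for free
# ones (see _find_free_port below).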
DIST_UT_PORT = 0


def print_to_out(out_losses):
    sys.stdout.buffer.write(pickle.dumps(out_losses))


def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    sys.stderr.buffer.write(pickle.dumps(print_str))
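

# NOTE: losses and log strings cross the process boundary as pickled bytes
# rather than plain text; the parent test process recovers them from the
# captured stream. A minimal sketch of the consuming side (this is what
# _run_local and _run_cluster below do):
#
#   out, err = proc.communicate()
#   losses = pickle.loads(out)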


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


def _insert_comm_op(opt, loss, build_strategy=None):
    opt = RawProgram(opt)
    role = paddle.distributed.fleet.base.role_maker.PaddleCloudRoleMaker(
        is_collective=True
    )
    strategy = paddle.distributed.fleet.DistributedStrategy()
    if build_strategy is not None:
        strategy.build_strategy = build_strategy
    opt._set_basic_info(loss, role, opt, strategy)

    # following code is a copy of RawProgramOptimizer.minimize except init_comm_group
    opt.endpoints = opt.role_maker._get_trainer_endpoints()
    opt.current_endpoint = opt.endpoints[opt.role_maker._worker_index()]
    opt.rank = opt.role_maker._worker_index()
    opt.nranks = opt.role_maker._worker_num()
    startup_program = paddle.static.default_startup_program()
    opt.startup_program = startup_program

    block = loss.block
    program = block.program
    opt.main_program = program

    optimize_ops, params_grads = opt.inner_opt.minimize(loss, startup_program)

    opt.main_program = program
    if opt.nranks > 1:
        opt._transpile_main_program(loss)
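

# A minimal sketch of how _insert_comm_op is meant to be called (the optimizer
# and loss names here are illustrative assumptions, not part of this file):
#
#   inner_opt = paddle.optimizer.SGD(learning_rate=0.1)
#   _insert_comm_op(inner_opt, avg_cost)  # minimizes avg_cost and inserts the
#                                         # collective comm ops, skipping only
#                                         # the comm-group initialization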


class TestDistRunnerBase:
    def get_model(
        self,
        batch_size=DEFAULT_BATCH_SIZE,
        lr=0.1,
        single_device=False,
        use_dgc=False,
        dist_strategy=None,
    ):
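        """Build the model under test.

        Child classes are expected to return the tuple (test_program,
        avg_cost, train_reader, test_reader, batch_acc, predict), plus a
        trailing data_loader in pipeline mode, as unpacked by the
        run_*_trainer methods below.
        """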
        raise NotImplementedError(
            "get_model should be implemented by child classes."
        )

    @staticmethod
    def get_transpiler(
        trainer_id,
        main_program,
        pserver_endpoints,
        trainers,
        sync_mode,
        dc_asgd=False,
        current_endpoint=None,
        nccl_comm_num=1,
        hogwild_mode=False,
    ):
        # NOTE: defer importing fluid until runtime; otherwise forked processes will fail.
        config = paddle.distributed.transpiler.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        config.runtime_split_send_recv = hogwild_mode

        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = paddle.distributed.transpiler.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            sync_mode=sync_mode,
            current_endpoint=current_endpoint,
        )
        return t
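    # The transpiler returned above is consumed per role (a note mirroring
    # run_pserver and run_trainer below):
    #   pserver_prog = t.get_pserver_program(current_endpoint)
    #   trainer_prog = t.get_trainer_program()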

    @staticmethod
    def get_lr_scheduler(program):
        lr_scheduler = None
        if hasattr(program, 'lr_scheduler'):
            from paddle.optimizer.lr import LRScheduler

            lr_scheduler = program.lr_scheduler
            assert isinstance(lr_scheduler, LRScheduler), "must be LRScheduler"
        return lr_scheduler

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize

        t = self.get_transpiler(
            trainer_id=args.trainer_id,
            main_program=fluid.default_main_program(),
            pserver_endpoints=args.endpoints,
            trainers=args.trainers,
            sync_mode=args.sync_mode,
            dc_asgd=args.dc_asgd,
            hogwild_mode=args.hogwild,
        )
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(
            args.current_endpoint, pserver_prog
        )

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_pipeline_trainer(self, args):
        self.lr = args.lr

        dist_strategy = DistributedStrategy()
        (
            test_program,
            avg_cost,
            train_reader,
            test_reader,
            batch_acc,
            predict,
            data_loader,
        ) = self.get_model(
            batch_size=args.batch_size, dist_strategy=dist_strategy
        )

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        eprint(type(self).__name__, "device_id: %d." % device_id)
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        data_loader.set_sample_list_generator(train_reader, place)
        data_loader.start()
        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []

        main_program = fluid.default_main_program()
        lr_scheduler = self.get_lr_scheduler(main_program)
        for i in range(RUN_STEP):
            loss = exe.run(main_program, fetch_list=[avg_cost])
            loss = loss[0] if loss else None
            out_losses.append(loss)
            print_to_err(type(self).__name__, "run step %d finished" % i)
            if lr_scheduler is not None:
                lr_scheduler.step()

        data_loader.reset()
        print_to_err(type(self).__name__, "trainer run finished")

        sys.stdout.buffer.write(pickle.dumps(out_losses))

    def run_use_fleet_api_20_trainer(self, args):
        """
        1. remove code for DistributedStrategy and leave the DistributedStrategy part to get_model()
        2. to run with fleet 2.0 api, set flags _use_fleet_api and _use_fleet_api_20 to True
        3. model saving is not supported for now
        """
        assert args.update_method in ["nccl2", "bkcl"]

        self.lr = args.lr
        print_to_err("use_fleet 2.0", "fleet.node_num:")

        (
            test_program,
            avg_cost,
            train_reader,
            test_reader,
            batch_acc,
            predict,
        ) = self.get_model(batch_size=args.batch_size)

        if fluid.core.is_compiled_with_cuda():
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif fluid.core.is_compiled_with_xpu():
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            raise ValueError(
                "fleet dygraph api requires paddlepaddle-xpu or paddlepaddle-gpu."
            )

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var
            for var in fluid.default_main_program().global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if (
                paddle.distributed.get_world_size() == 1
                and args.update_method == 'gloo'
            ):  # Gloo single mode
                return origin_batch

            elif args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in range(RUN_STEP):
            (loss,) = exe.run(
                fluid.default_main_program(),
                fetch_list=[avg_cost.name],
                feed=feeder.feed(get_data()),
            )
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")
        print_to_err(type(self).__name__, f"dist losses: {out_losses}")

        sys.stdout.buffer.write(pickle.dumps(out_losses))

    def run_use_fleet_api_trainer(self, args):
        assert args.update_method in ["nccl2", "bkcl"]

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1  # sic: attribute name as spelled in DistributedStrategy
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True
        if args.sync_batch_norm:
            dist_strategy.sync_batch_norm = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("use_fleet", "fleet.node_num:")
        # "fleet.node_id:", fleet.node_id(),
        # "fleet.trainer_num:", fleet.worker_num())

        (
            test_program,
            avg_cost,
            train_reader,
            test_reader,
            batch_acc,
            predict,
        ) = self.get_model(
            batch_size=args.batch_size, dist_strategy=dist_strategy
        )

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        if fluid.core.is_compiled_with_cuda():
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif fluid.core.is_compiled_with_xpu():
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            raise ValueError(
                "fleet dygraph api requires paddlepaddle-xpu or paddlepaddle-gpu."
            )

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var
            for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        eprint("feed_var_list:", feed_var_list)

        # Temporary workaround to pass the python35 gcc8 CI.
        # FIXME(gongweibao, wangxi): fix the fleet api program order.
        if feed_var_list[0].name == 'label':
            feed_var_list = feed_var_list[::-1]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in range(RUN_STEP):
            (loss,) = exe.run(
                dist_prog,
                fetch_list=[avg_cost.name],
                feed=feeder.feed(get_data()),
            )
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        sys.stdout.buffer.write(pickle.dumps(out_losses))

        if args.save_model:
            model_save_dir = "/tmp"
            if fleet.worker_index() == 0:
                model_save_dir_fluid = os.path.join(
                    model_save_dir, "fluid_persistables"
                )
                model_save_dir_fleet = os.path.join(
                    model_save_dir, "fleet_persistables"
                )
                infer_save_dir_fluid = os.path.join(
                    model_save_dir, "fluid_infer"
                )
                infer_save_dir_fleet = os.path.join(
                    model_save_dir, "fleet_infer"
                )
            else:
                model_save_dir_fluid = os.path.join(
                    model_save_dir, "fluid_persistables_2"
                )
                model_save_dir_fleet = os.path.join(
                    model_save_dir, "fleet_persistables_2"
                )
                infer_save_dir_fluid = os.path.join(
                    model_save_dir, "fluid_infer_2"
                )
                infer_save_dir_fleet = os.path.join(
                    model_save_dir, "fleet_infer_2"
                )
            paddle.distributed.io.save_persistables(
                exe, model_save_dir_fluid, fleet._origin_program
            )
            fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
            feeded_var_names = [var.name for var in feed_var_list]
            fluid.io.save_inference_model(
                infer_save_dir_fluid,
                feeded_var_names,
                [avg_cost],
                exe,
                fleet._origin_program,
            )
            fleet.save_inference_model(
                exe, infer_save_dir_fleet, feeded_var_names, [avg_cost]
            )

    def run_trainer(self, args):
        from io import StringIO

        old_stdout = sys.stdout
        sys.stdout = StringIO()

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.fuse_all_reduce is not None:
            sys.stderr.write(f'fuse_all_reduce={args.fuse_all_reduce}')
            build_stra.fuse_all_reduce_ops = args.fuse_all_reduce

        if args.hogwild:
            build_stra.async_mode = True

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = (
                fluid.BuildStrategy.ReduceStrategy.Reduce
            )
        else:
            build_stra.reduce_strategy = (
                fluid.BuildStrategy.ReduceStrategy.AllReduce
            )
        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if (
            args.update_method == "nccl2"
            or args.update_method == "nccl2_reduce_layer"
        ):
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # local or pserver mode: a single trainer
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            (
                test_program,
                avg_cost,
                train_reader,
                test_reader,
                batch_acc,
                predict,
            ) = self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            (
                test_program,
                avg_cost,
                train_reader,
                test_reader,
                batch_acc,
                predict,
            ) = self.get_model(
                batch_size=args.batch_size,
                use_dgc=args.use_dgc,
                build_strategy=build_stra,
            )
        else:
            (
                test_program,
                avg_cost,
                train_reader,
                test_reader,
                batch_acc,
                predict,
            ) = self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode",
            )
            t = self.get_transpiler(
                trainer_id=args.trainer_id,
                main_program=fluid.default_main_program(),
                pserver_endpoints=args.endpoints,
                trainers=args.trainers,
                sync_mode=args.sync_mode,
                dc_asgd=args.dc_asgd,
                hogwild_mode=args.hogwild,
            )

            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.",
            )
        elif (
            args.update_method == "nccl2"
            or args.update_method == "nccl2_reduce_layer"
        ):
            # transpile for nccl2
            config = paddle.distributed.transpiler.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = (
                    args.hallreduce_inter_nranks
                )
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode",
            )
            nccl2_t = paddle.distributed.transpiler.DistributeTranspiler(
                config=config
            )
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint,
            )
            print_to_err(
                type(self).__name__, "get trainer program done. with nccl2 mode"
            )
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it",
            )
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        # FIXME(gongwb): wait for pserver initialization.
        time.sleep(1)

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(
            trainer_prog, build_strategy=build_stra
        )
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var
            for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        lr_scheduler = self.get_lr_scheduler(trainer_prog)
        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in range(RUN_STEP):
            (loss,) = exe.run(
                binary, fetch_list=[avg_cost.name], feed=feeder.feed(get_data())
            )
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
            if lr_scheduler is not None:
                lr_scheduler.step()

        print_to_err(type(self).__name__, "trainer run finished\n")
        # print_to_err(type(self).__name__, "out_losses")

        sys.stdout = old_stdout
        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase:
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes."
        )

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "run_one_loop should be implemented by child classes."
        )

    def _get_data(self, batch, args):
        if (
            paddle.distributed.get_world_size() == 1
            and args.update_method == 'gloo'
        ):  # Gloo single mode
            return batch
        elif args.update_method != "local":
            new_batch = []

            # NOTE(@xiongkun03) args.diff_batch means the batch lengths differ
            # across ranks: e.g. with batch = [2,3,4,5], the first rank gets
            # [2] and the second rank gets [3,4,5].
            # this path is used by the sparse_embedding_differ_length test
            if hasattr(args, "diff_batch") and args.diff_batch:
                assert (
                    len(batch) > 2
                ), "in differ_batch mode, len(batch) must be > 2."
                if paddle.distributed.get_rank() == 0:
                    new_batch.append(batch[0])
                elif paddle.distributed.get_rank() == 1:
                    new_batch.extend(list(batch[1:]))
                else:
                    raise NotImplementedError(
                        "TestParallelDyGraphRunnerBase does not support world_size > 2"
                    )
                return new_batch
            else:
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
        else:
            return batch
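    # Example of the split above (illustrative): with batch = [b0, b1, b2, b3]
    # and two trainers, trainer 0 receives [b0, b2] and trainer 1 receives
    # [b1, b3]; in diff_batch mode trainer 0 gets [b0] and trainer 1 gets
    # [b1, b2, b3].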

    def run_trainer(self, args):
        seed = 90
        if args.update_method == 'gloo':
            place = fluid.CPUPlace()
        elif fluid.core.is_compiled_with_cuda():
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        elif fluid.core.is_compiled_with_xpu():
            device_id = int(os.getenv("FLAGS_selected_xpus", "0"))
            place = fluid.XPUPlace(device_id)
        else:
            raise RuntimeError(
                "Only CUDAPlace, XPUPlace or CPU (Gloo) are supported for now."
            )

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method in ("nccl2", "bkcl", "hccl", "cncl"):
                strategy = paddle.distributed.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                paddle.distributed.init_parallel_env()
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2",
                )
                model = paddle.DataParallel(
                    model,
                    strategy,
                    find_unused_parameters=args.find_unused_parameters,
                )
                print_to_err(type(self).__name__, "model built in dygraph")

            elif args.update_method == "gloo":
                paddle.distributed.init_parallel_env()
                model = paddle.DataParallel(
                    model, find_unused_parameters=args.find_unused_parameters
                )

            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = self._get_data(data, args)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()),
                    )
                out_losses.append(loss.numpy())

                loss.backward()

                opt.minimize(loss)
                if not args.accumulate_gradient:
                    model.clear_gradients()
        print_to_out(out_losses)

    def run_trainer_with_spawn(self, args):
        # 1. enable dygraph
        paddle.disable_static()

        # 2. init seed
        seed = 90
        paddle.static.default_startup_program().random_seed = seed
        paddle.static.default_main_program().random_seed = seed
        np.random.seed(seed)
        random.seed(seed)
        # get trainer id
        paddle.distributed.parallel._get_global_parallel_env()
        args.trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))

        # 3. init parallel env
        if args.update_method in ["nccl2", "gloo"]:
            paddle.distributed.init_parallel_env()

        # 4. train model
        model, train_reader, opt = self.get_model()
        if args.update_method in ["nccl2", "gloo"]:
            model = paddle.DataParallel(
                model, find_unused_parameters=args.find_unused_parameters
            )

        out_losses = []
        for step_id, data in enumerate(train_reader()):
            data = self._get_data(data, args)
            if step_id == RUN_STEP:
                break
            loss = self.run_one_loop(model, opt, data)
            out_losses.append(loss.numpy())

            loss.backward()

            opt.minimize(loss)
            model.clear_gradients()
        return out_losses

    def run_use_fleet_api_trainer(self, args):
        from paddle.distributed import fleet

        # 1. enable dygraph
        paddle.disable_static()

        # 2. init seed
        seed = 90
        paddle.static.default_startup_program().random_seed = seed
        paddle.static.default_main_program().random_seed = seed
        np.random.seed(seed)
        random.seed(seed)
        # get trainer id
        paddle.distributed.parallel._get_global_parallel_env()
        args.trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))

        # set strategy
        strategy = fleet.DistributedStrategy()
        if args.find_unused_parameters:
            strategy.find_unused_parameters = True

        # 3. init parallel env
        if args.update_method in ["nccl2", "bkcl", "hccl"]:
            fleet.init(is_collective=True, strategy=strategy)

        # 4. train model
        model, train_reader, opt = self.get_model()
        if args.update_method in ["nccl2", "bkcl", "hccl"]:
            opt = fleet.distributed_optimizer(opt)
            model = fleet.distributed_model(model)

        out_losses = []
        for step_id, data in enumerate(train_reader()):
            data = self._get_data(data, args)
            if step_id == RUN_STEP:
                break
            loss = self.run_one_loop(model, opt, data)
            out_losses.append(loss.numpy())

            loss.backward()

            opt.step()
            if not args.accumulate_gradient:
                opt.clear_grad()
        print_to_out(out_losses)


def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer']
    )
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=[
            "pserver",
            "nccl2",
            "bkcl",
            "local",
            "nccl2_reduce_layer",
            "gloo",
            "hccl",
            "cncl",
        ],
    )
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--use_pipeline', action='store_true')
    parser.add_argument('--use_fleet_api', action='store_true')
    parser.add_argument('--use_fleet_api_20', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--diff_batch', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2
    )
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default=""
    )
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_cpu', action='store_true')
    parser.add_argument('--use_xpu', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--accumulate_gradient', action='store_true')
    parser.add_argument('--find_unused_parameters', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument('--hogwild', action='store_true')
    parser.add_argument('--save_model', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False
    )
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1
    )
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False,
    )
    parser.add_argument('--sync_batch_norm', action='store_true')
    parser.add_argument(
        '--fuse_all_reduce', required=False, type=ast.literal_eval, default=None
    )

    args = parser.parse_args()

    if args.update_method == 'gloo':
        paddle.set_device("cpu")

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.use_fleet_api:
        model.run_use_fleet_api_trainer(args)
    elif args.use_fleet_api_20:
        model.run_use_fleet_api_20_trainer(args)
    elif args.use_pipeline:
        model.run_pipeline_trainer(args)
    else:
        model.run_trainer(args)
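

# Typical usage in a concrete dist-test script (an illustrative sketch; the
# model class name is hypothetical):
#
#   class TestDistMnist2x2(TestDistRunnerBase):
#       def get_model(self, batch_size=DEFAULT_BATCH_SIZE, **kwargs):
#           ...  # build the network and return the tuple get_model() describes
#
#   if __name__ == "__main__":
#       runtime_main(TestDistMnist2x2)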


class TestDistBase(unittest.TestCase):
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self.__use_xpu = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
            self.__use_xpu = False
        elif self._enforce_place == "XPU":
            self.__use_cuda = False
            self.__use_xpu = True
            self._use_dgc = False
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._python_interp = sys.executable
        self._sync_mode = True
        self._hogwild_mode = False
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._bkcl_mode = False
        self._gloo_mode = False  # the gloo backend is supported now
        self._hccl_mode = False
        self._cncl_mode = False
        self._pipeline_mode = False
        self._mp_mode = False
        self._diff_batch = False
        # FIXME(typhoonzero): this argument enables testing allreduce
        # layers, which let users call layers.allreduce to accumulate
        # tensors anywhere. Find a better way to do this test, and remove
        # the need to check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._use_fleet_api = False
        self._use_fleet_api_20 = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._save_model = False
        self._fuse_all_reduce = None
        self._accumulate_gradient = False
        self._find_unused_parameters = False
        self._setup_config()

        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT == 0:
            self._ps_endpoints = "127.0.0.1:{},127.0.0.1:{}".format(
                self._find_free_port(),
                self._find_free_port(),
            )
        else:
            self._ps_endpoints = "127.0.0.1:{},127.0.0.1:{}".format(
                DIST_UT_PORT,
                DIST_UT_PORT + 1,
            )
            DIST_UT_PORT += 2
            self._dist_port = DIST_UT_PORT

        self._after_setup_config()

        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def _find_free_port(self):
        def __free_port():
            with closing(
                socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            ) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1]
                )
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(
        self, model_file, check_error_log, required_envs, log_name=""
    ):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % (
            self._python_interp,
            model_file,
            self._ps_endpoints,
            ps0_ep,
            self._trainers,
        )
        ps1_cmd = ps_cmd % (
            self._python_interp,
            model_file,
            self._ps_endpoints,
            ps1_ep,
            self._trainers,
        )

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        path0 = os.path.join(self.temp_dir.name, log_name + "_ps0_err.log")
        path1 = os.path.join(self.temp_dir.name, log_name + "_ps1_err.log")
        ps0_pipe = open(path0, "wb")
        ps1_pipe = open(path1, "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs,
        )
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs,
        )

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(
        self,
        model,
        envs,
        check_error_log=False,
        batch_size=DEFAULT_BATCH_SIZE,
        batch_merge_repeat=1,
        log_name="",
        devices="1",
    ):
        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " {} --role trainer --update_method local --lr {:f}".format(
            model,
            self._lr,
        )

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": devices,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0",
            }
        elif self.__use_xpu:
            cmd += " --use_xpu"
            env_local = {
                "FLAGS_selected_xpus": devices,
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0",
            }
        else:
            env_local = {'CPU_NUM': '1'}

        # dgc is not used on a single card
        if len(devices) > 1 and self._use_dgc:
            cmd += " --use_dgc"

        if self._accumulate_gradient:
            cmd += " --accumulate_gradient"

        if self._find_unused_parameters:
            cmd += " --find_unused_parameters"

        env_local.update(envs)
        print(f"local_cmd: {cmd}, env: {env_local}")

        if check_error_log:
            path = os.path.join(self.temp_dir.name, log_name + "_local.log")
            err_log = open(path, "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local,
            )
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local,
            )

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_local_gloo(
        self,
        model,
        envs,
        check_error_log=False,
        batch_size=DEFAULT_BATCH_SIZE,
        batch_merge_repeat=1,
        log_name="",
        devices="0",
    ):
        saved_endpoints = self._ps_endpoints
        self._ps_endpoints = self._ps_endpoints.split(',')[0]
        result = self._run_cluster_gloo(
            model, envs, 'gloo', check_error_log, log_name
        )
        self._ps_endpoints = saved_endpoints
        return result

    def _run_cluster(self, model, envs, check_error_log, log_name):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(
            model, check_error_log, envs, log_name=log_name
        )

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % (
            self._python_interp,
            model,
            self._ps_endpoints,
            0,
            ps0_ep,
            self._trainers,
            self._lr,
        )
        tr1_cmd = tr_cmd % (
            self._python_interp,
            model,
            self._ps_endpoints,
            1,
            ps1_ep,
            self._trainers,
            self._lr,
        )

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._hogwild_mode:
            tr0_cmd += " --hogwild"
            tr1_cmd += " --hogwild"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print(f"tr0_cmd: {tr0_cmd}, env: {env0}")
        print(f"tr1_cmd: {tr1_cmd}, env: {env1}")

        path0 = os.path.join(self.temp_dir.name, log_name + "_tr0_err.log")
        path1 = os.path.join(self.temp_dir.name, log_name + "_tr1_err.log")
        tr0_pipe = open(path0, "wb")
        tr1_pipe = open(path1, "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0,
        )
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1,
        )

        # Wait until both trainer processes terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close the trainer and pserver log pipes
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_gloo_trainer_cmd(
        self, model, ep, update_method, trainer_id, trainer_num
    ):
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % (
            self._python_interp,
            model,
            self._ps_endpoints,
            trainer_id,
            ep,
            update_method,
            self._lr,
        )

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        # assert self._use_reduce == False, "gloo not support _use_reduce"
        # assert self._use_reader_alloc == False, "gloo not support _use_reduce"
        if self._save_model:
            tr_cmd += " --save_model"
        if self._diff_batch:
            tr_cmd += " --diff_batch"
        self.__use_cuda = False
        self.__use_xpu = False
        assert not self.__use_cuda, "gloo does not support cuda"
        assert not self.__use_xpu, "gloo does not support xpu"
        tr_cmd += " --use_cpu"
        env.update(
            {
                "PADDLE_TRAINERS_NUM": f"{trainer_num}",
                "PADDLE_TRAINER_ID": f"{trainer_id}",
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
                "PADDLE_DISTRI_BACKEND": "gloo",
                "GLOG_v": "2",
            }
        )

        assert not self._use_dgc, "gloo does not support dgc"

        if self._accumulate_gradient:
            tr_cmd += " --accumulate_gradient"

        if self._find_unused_parameters:
            tr_cmd += " --find_unused_parameters"

        assert not self._pipeline_mode, "gloo does not support pipeline"

        if self._enable_backward_deps:  # build strategy, save it
            tr_cmd += " --enable_backward_deps"

        if self._fuse_all_reduce is not None:
            tr_cmd += f" --fuse_all_reduce {self._fuse_all_reduce}"

        assert not self._use_fleet_api, "gloo does not support the fleet api"
        assert not self._use_fleet_api_20, "gloo does not support the fleet api"
        return tr_cmd, env

    def _get_nccl2_trainer_cmd(
        self, model, ep, update_method, trainer_id, trainer_num
    ):
1378
        env = {}
1379 1380 1381 1382 1383 1384 1385
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

1386 1387 1388 1389 1390 1391 1392 1393 1394
        tr_cmd = tr_cmd % (
            self._python_interp,
            model,
            self._ps_endpoints,
            trainer_id,
            ep,
            update_method,
            self._lr,
        )
W
Wu Yi 已提交
1395 1396

        if self._use_reduce:
1397
            tr_cmd += " --use_reduce"
W
Wu Yi 已提交
1398
        if self._use_reader_alloc:
1399
            tr_cmd += " --use_reader_alloc"
1400 1401
        if self._save_model:
            tr_cmd += " --save_model"
W
Wu Yi 已提交
1402
        if self.__use_cuda:
1403
            tr_cmd += " --use_cuda"
1404 1405
            env.update(
                {
1406 1407 1408 1409
                    "FLAGS_selected_gpus": f"{0}",
                    "CUDA_VISIBLE_DEVICES": f"{trainer_id}",
                    "PADDLE_TRAINERS_NUM": f"{trainer_num}",
                    "PADDLE_TRAINER_ID": f"{trainer_id}",
                    "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                    "PADDLE_CURRENT_ENDPOINT": ep,
                }
            )
        # TODO(liuyuhui): XPU_VISIBLE_DEVICES is not working right now,
        # will update it once Baidu Kunlun partners add support.
        elif self.__use_xpu:
            tr_cmd += " --use_xpu"
            env.update(
                {
                    "FLAGS_selected_xpus": f"{trainer_id}",
                    # "XPU_VISIBLE_DEVICES": "{}".format(trainer_id + 1),
                    "PADDLE_TRAINERS_NUM": f"{trainer_num}",
                    "PADDLE_TRAINER_ID": f"{trainer_id}",
                    "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                    "PADDLE_CURRENT_ENDPOINT": ep,
                    "GLOG_v": "2",
                }
            )
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._accumulate_gradient:
            tr_cmd += " --accumulate_gradient"

        if self._find_unused_parameters:
            tr_cmd += " --find_unused_parameters"

        if self._pipeline_mode:
            tr_cmd += " --use_pipeline"
        if self._mp_mode:
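            # NOTE: mp mode rebuilds env from scratch here, dropping the
            # CUDA/PADDLE_* variables configured above.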
            env = {"FLAGS_selected_gpus": f"{trainer_id}"}

        if self._nccl_comm_num > 1:
            tr_cmd += f" --nccl_comm_num {self._nccl_comm_num}"

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._fuse_all_reduce is not None:
            tr_cmd += f" --fuse_all_reduce {self._fuse_all_reduce}"

        if self._use_fleet_api:
            tr_cmd += (
                " --use_fleet_api_20"
                if self._use_fleet_api_20
                else " --use_fleet_api"
            )
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"
            if hasattr(self, '_sync_batch_norm') and self._sync_batch_norm:
                tr_cmd += " --sync_batch_norm"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_gloo(
        self, model, envs, update_method, check_error_log, log_name
    ):
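        """Spawn one CPU trainer subprocess per endpoint over the gloo backend.

        Each trainer pickles its per-step losses to stdout (see print_to_out);
        a single result is returned for one trainer, otherwise a pair.
        """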
        assert update_method == "gloo", (
            "_run_cluster_gloo must have update_method: gloo, but get %s"
            % update_method
        )
        assert (
            not self._use_hallreduce
        ), "_run_cluster_gloo must have _use_hallreduce = false"

        worker_endpoints = self._ps_endpoints.split(",")

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_gloo_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num
            )
            tr_env.update(envs)
            tr_env["GLOG_vmodule"] = 'gloo_context=4'
            tr_env["GLOG_v"] = '3'
            print(
                "use_hallreduce:{} tr_cmd:{}, env: {}".format(
                    self._use_hallreduce, tr_cmd, tr_env
                )
            )

            path = os.path.join(
                self.temp_dir.name, log_name + f"_tr{i}_err.log"
            )
            tr_pipe = open(path, "wb")

            print_to_err(
                type(self).__name__,
                f"going to start process {i} with nccl2",
            )
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env,
            )

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write(f'trainer {i} stderr: {tr_err}\n')

        if trainer_num == 1:
            if check_error_log:
                print("outs[0]:", outs[0])
            return pickle.loads(outs[0])

        else:
            if check_error_log:
                print("outs[0]:", outs[0])
                print("outs[1]:", outs[1])
            return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _run_cluster_nccl2(
        self, model, envs, update_method, check_error_log, log_name
    ):
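        """Spawn the trainer subprocesses (two, or four when hallreduce is
        enabled) and return the unpickled losses of trainers 0 and 1.
        """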
        if self._use_hallreduce:
            self._ps_endpoints = ""

            global DIST_UT_PORT
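            # DIST_UT_PORT == 0 means no port range was pre-assigned to this
            # test process, so pick free ports on the fly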
            if DIST_UT_PORT == 0:
                # NOTE(wangxi): the hallreduce test must use 4 cards since nccl>=2.7
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (
                        self._find_free_port()
                    )
            else:
                for i in range(0, 4):
                    self._ps_endpoints += "127.0.0.1:%s," % (DIST_UT_PORT + i)
                DIST_UT_PORT += 4
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num
            )
            tr_env.update(envs)
            print(
                "use_hallreduce:{} tr_cmd:{}, env: {}".format(
                    self._use_hallreduce, tr_cmd, tr_env
                )
            )

            path = os.path.join(
                self.temp_dir.name, log_name + f"_tr{i}_err.log"
            )
            tr_pipe = open(path, "wb")

            print_to_err(
                type(self).__name__,
                f"going to start process {i} with nccl2",
            )
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env,
            )

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write(f'trainer {i} stderr: {tr_err}\n')

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])

        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _run_pipeline(self, model, envs, check_error_log, log_name):
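        """Run the pipeline-parallel case: two trainers sharing GPUs 0 and 1
        over the nccl2 update method; returns both trainers' losses.
        """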
        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num
            )
            tr_env.update(envs)
            tr_env['CUDA_VISIBLE_DEVICES'] = "0,1"
            tr_env['NCCL_SHM_DISABLE'] = '1'
            tr_env['FLAGS_selected_gpus'] = str(i)
            tr_env['FLAGS_cudnn_deterministic'] = '0'
            print(f"tr_cmd:{tr_cmd}, env: {tr_env}")

            path = os.path.join(self.temp_dir.name, f"tr{i}_err.log")
            tr_pipe = open(path, "wb")

            print_to_err(
                type(self).__name__,
                f"going to start process {i} with nccl2",
            )
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env,
            )

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write(f'trainer {i} stderr: {tr_err}\n')

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def _get_required_envs(self, check_error_log=False, need_envs={}):
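        """Assemble the base environment for spawned trainers; entries in
        `need_envs` override the defaults.
        """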
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 5sec to fail fast
            "FLAGS_rpc_retry_bind_port": "50",
            "FLAGS_cudnn_deterministic": "1",
            "FLAGS_rpc_disable_reuse_port": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1",
            "FLAGS_new_executor_static_build": "1",
        }

        if check_error_log:
            required_envs["GLOG_vmodule"] = (
                "fused_all_reduce_op_handle=10,all_reduce_op_handle=10,alloc_continuous_space_op=10,fuse_all_reduce_op_pass=10,"
                "alloc_continuous_space_for_grad_pass=10,fast_threaded_ssa_graph_executor=10,executor=10,operator=10,"
                "sparse_all_reduce_op_handle=10,gen_nccl_id_op=10,gen_nccl_id_op_help=10,nccl_helper=10,grpc_client=10,"
                "grpc_server=10,request_handler_impl=10,section_worker=10"
            )
            required_envs["GLOG_logtostderr"] = "1"

        # forward NVIDIA_TF32_OVERRIDE to the trainers only when it is actually set
        if os.getenv('NVIDIA_TF32_OVERRIDE') is not None:
            required_envs['NVIDIA_TF32_OVERRIDE'] = os.getenv(
                'NVIDIA_TF32_OVERRIDE'
            )

        required_envs.update(need_envs)
        return required_envs

    def check_with_place(
        self,
        model_file,
        delta=1e-3,
        check_error_log=False,
        need_envs={},
        log_name="",
    ):
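        """Entry point for the tests: compare a local run of `model_file`
        against a distributed run and assert the losses match within `delta`.
        """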
        # the dygraph (gloo/nccl2) and static graph paths currently share the
        # same checking routine, so dispatch unconditionally
        self.check_with_place_func(
            model_file=model_file,
            delta=delta,
            check_error_log=check_error_log,
            need_envs=need_envs,
            log_name=log_name,
        )

    def check_with_place_func(
        self,
        model_file,
        delta=1e-3,
        check_error_log=False,
        need_envs={},
        log_name="",
    ):
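        """Run the model on a single device, then with the configured
        distributed mode, and check the per-step losses agree within `delta`.
        """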
        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._gloo_mode:
            local_losses = self._run_local_gloo(
                model_file, required_envs, check_error_log, log_name=log_name
            )
        else:
            local_losses = self._run_local(
                model_file, required_envs, check_error_log, log_name=log_name
            )

        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    update_method="nccl2_reduce_layer",
                    check_error_log=check_error_log,
                    log_name=log_name,
                )
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file,
                    required_envs,
                    update_method='nccl2',
                    check_error_log=check_error_log,
                    log_name=log_name,
                )
        elif self._bkcl_mode:
            tr0_losses, tr1_losses = self._run_cluster_nccl2(
                model_file,
                required_envs,
                update_method='bkcl',
                check_error_log=check_error_log,
                log_name=log_name,
            )
        elif self._gloo_mode:
            # gloo mode, cpu only parallel train @xiongkun03
            tr0_losses, tr1_losses = self._run_cluster_gloo(
                model_file,
                required_envs,
                update_method='gloo',
                check_error_log=check_error_log,
                log_name=log_name,
            )
        elif self._hccl_mode:
            tr0_losses, tr1_losses = self._run_cluster_nccl2(
                model_file,
                required_envs,
                update_method='hccl',
                check_error_log=check_error_log,
                log_name=log_name,
            )
        elif self._cncl_mode:
            tr0_losses, tr1_losses = self._run_cluster_nccl2(
                model_file,
                required_envs,
                update_method='cncl',
                check_error_log=check_error_log,
                log_name=log_name,
            )
        elif self._pipeline_mode:
            tr0_losses, tr1_losses = self._run_pipeline(
                model_file, required_envs, check_error_log, log_name=log_name
            )
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log, log_name=log_name
            )

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            if self._pipeline_mode:
                dist_loss = np.array([tr1_loss])
            else:
                dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)

    def check_with_place_multi_cards(
        self,
        model_file,
        delta=1e-3,
        check_error_log=False,
        need_envs={},
        log_name="",
    ):
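        """Compare a two-card DGC run against a two-card baseline of the same
        model, step by step.
        """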

        # P2P or SHM must be enabled, otherwise multi-card mode will hang;
        # copy need_envs first so the shared default argument is not mutated
        need_envs = dict(need_envs)
        need_envs.update({"NCCL_P2P_DISABLE": "0", "NCCL_SHM_DISABLE": "0"})

        required_envs = self._get_required_envs(check_error_log, need_envs)

        if self._use_dgc:
            multi_cards_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_dgc_2cards",
                devices="0,1",
            )

            self._use_dgc = False
            base_losses = self._run_local(
                model_file,
                required_envs,
                check_error_log,
                log_name=log_name + "_base_2cards",
                devices="0,1",
            )

            self._use_dgc = True

            for step_id in range(RUN_STEP):
                base_loss = base_losses[step_id]
                multi_cards_loss = multi_cards_losses[step_id]
                print("=======", base_loss, ":", multi_cards_loss, "=======")
                self.assertAlmostEqual(base_loss, multi_cards_loss, delta=delta)