#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
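
"""Test utilities for PaddlePaddle distributed training.

Defines the runner base classes executed inside spawned worker processes
(parameter server, pserver/NCCL2 trainers, collective fleet trainer, and
dygraph trainer), the runtime_main command-line entry point for those
workers, and the TestDistBase harness that launches local and distributed
runs and compares their per-step losses.
"""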

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2


def my_print(class_name, log_str):
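    """Write a timestamped, tab-separated log line for class_name.

    The message is pickled and written to stderr so the parent test process
    can capture worker-side log output unambiguously.
    """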
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class TestDistRunnerBase(object):
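    """Base class for static-graph distributed test models.

    Subclasses implement get_model(); runtime_main() then drives them as a
    parameter server, a pserver/NCCL2 trainer, or a fleet collective trainer.
    """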
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1):
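        """Configure and run the distribute transpiler over main_program,
        returning the DistributeTranspiler instance."""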
        # NOTE: do not import fluid until runtime, or else forking processes
        # will cause errors.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
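        """Build the model, transpile it, and serve the parameter-server
        program; the test harness terminates this process when the trainers
        finish."""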
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize
        t = self.get_transpiler(args.trainer_id,
                                fluid.default_main_program(), args.endpoints,
                                args.trainers, args.sync_mode, args.dc_asgd)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        my_print(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        my_print(type(self).__name__, "run pserver main program done.")

    def run_gpu_fleet_api_trainer(self, args):
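        """Train through the collective fleet API (NCCL2 update method only)
        and write the pickled per-step losses to stdout."""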
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        my_print("gpu_fleet", "fleet.node_num:")
        #"fleet.node_id:", fleet.node_id(),
        #"fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        my_print(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            my_print(type(self).__name__, "run step %d finished" % i)
        my_print(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))

    def run_trainer(self, args):
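        """Train for RUN_STEP steps in local, pserver, or NCCL2 mode and
        write the pickled per-step losses to stdout."""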
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            my_print(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(args.trainer_id,
                                    fluid.default_main_program(),
                                    args.endpoints, args.trainers,
                                    args.sync_mode, args.dc_asgd)
            trainer_prog = t.get_trainer_program()
            my_print(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method in ("nccl2", "nccl2_reduce_layer"):
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            my_print(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            my_print(
                type(self).__name__,
                "get trainer program done with nccl2 mode.")
            trainer_prog = fluid.default_main_program()
        else:
            my_print(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            my_print(type(self).__name__, "use main program done.")

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        my_print(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method in ("nccl2", "nccl2_reduce_layer"):
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # local mode (including the nccl2_reduce_layer local run) and
            # pserver mode compile the program for a single trainer
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        my_print(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        my_print(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        my_print(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            my_print(type(self).__name__, "run step %d finished" % i)
        my_print(type(self).__name__, "trainer run finished")

        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))


class TestParallelDyGraphRunnerBase(object):
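    """Base class for dygraph (imperative mode) data-parallel test models."""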
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "run_one_loop should be implemented by child classes.")

    def run_trainer(self, args):
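        """Run RUN_STEP dygraph training steps, wrapping the model in
        DataParallel when the NCCL2 update method is used, and write the
        pickled per-step losses to stdout."""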

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                my_print(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                my_print(type(self).__name__, "model built in dygraph")
            out_losses = []
            my_print(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    my_print(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
            if six.PY2:
                print(pickle.dumps(out_losses))
            else:
                sys.stdout.buffer.write(pickle.dumps(out_losses))


def runtime_main(test_class):
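    """Command-line entry point for spawned worker processes: parse the role
    and topology flags, then dispatch to the matching run_* method of
    test_class."""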
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    # NOTE: argparse's type=bool treats any non-empty string as True; the
    # harness always passes "1" for this flag.
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
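    """Harness that runs a model file locally and in a distributed setup,
    then checks that the per-step losses agree within a given delta."""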
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._sync_mode = True
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument only exists to enable testing
        # allreduce layers (users can call layers.allreduce to accumulate
        # tensors anywhere). Find a better way to do this test and remove the
        # need to check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_hallreduce = False
        self._setup_config()
        self._after_setup_config()

    def _find_free_port(self):
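        """Ask the OS for a free port by binding to port 0, retrying until a
        port not previously handed out by this test instance is found."""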
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                my_print(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self, model_file, check_error_log, required_envs):
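        """Spawn both parameter-server processes and return their process
        handles together with their stderr log files."""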
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open("/tmp/ps0_err.log", "wb")
        ps1_pipe = open("/tmp/ps1_err.log", "wb")

        my_print(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        my_print(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1):
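        """Run the model file as a single local trainer process and return
        its unpickled per-step losses."""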

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": "0",
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open("/tmp/trainer.err.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log):
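        """Run a 2-pserver / 2-trainer cluster in pserver mode and return the
        unpickled per-step losses of both trainers."""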
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model,
                                                          check_error_log, envs)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")

        my_print(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        my_print(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close the trainer and pserver log files
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
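        """Assemble the command line and environment for a single NCCL2
        trainer process."""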
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log):
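        """Launch one NCCL2 trainer per endpoint (four when hierarchical
        allreduce is enabled, otherwise two) and return the losses of
        trainers 0 and 1."""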
        if self._use_hallreduce:
            self._ps_endpoints = ""
            for i in range(0, 4):
                self._ps_endpoints += "127.0.0.1:%s," % (self._find_free_port())
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open("/tmp/tr{}_err.log".format(i), "wb")

            my_print(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
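        """Entry point for tests: run model_file locally and in the
        configured distributed mode, then assert that the per-step losses
        agree within delta."""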
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30s, to fail fast
            "FLAGS_cudnn_deterministic": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        required_envs.update(need_envs)

        if check_error_log:
            required_envs["GLOG_v"] = "10"
            required_envs["GLOG_logtostderr"] = "1"

        local_losses = self._run_local(model_file, required_envs,
                                       check_error_log)
        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, True, check_error_log)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, False, check_error_log)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)
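

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the class and file names below are
# examples, not part of this module). A concrete test pairs a worker script
# that implements TestDistRunnerBase.get_model and calls runtime_main(...)
# with a TestDistBase subclass in the main test process:
#
#     class TestDistMnistNCCL2(TestDistBase):
#         def _setup_config(self):
#             self._sync_mode = True
#             self._nccl2_mode = True
#
#         def test_dist_train(self):
#             if fluid.core.is_compiled_with_cuda():
#                 self.check_with_place("dist_mnist.py", delta=1e-5)
# ---------------------------------------------------------------------------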