#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
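"""Base harness for PaddlePaddle distributed-training unit tests.

A concrete test subclasses one of the runner base classes, implements
get_model(), and calls runtime_main() so that it can be launched as a pserver
or trainer subprocess; TestDistBase then compares the per-step losses of a
local run against a distributed run.
"""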

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import argparse
import pickle
import random
import socket
from contextlib import closing

import six
import numpy as np

import paddle.compat as cpt
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
import paddle.fluid.incubate.fleet.base.role_maker as role_maker

# Both the local baseline and the distributed trainers run RUN_STEP steps on
# the same batch size so that per-step losses are directly comparable.
RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2


def print_to_out(out_losses):
    if six.PY2:
        print(pickle.dumps(out_losses))
    else:
        sys.stdout.buffer.write(pickle.dumps(out_losses))


# NOTE: subprocesses report to the harness through pipes: pickled losses go to
# stdout (read back with pickle.loads) and timestamped progress logs to stderr.
def print_to_err(class_name, log_str):
    localtime = time.asctime(time.localtime(time.time()))
    print_str = localtime + "\t" + class_name + "\t" + log_str
    if six.PY2:
        sys.stderr.write(pickle.dumps(print_str))
    else:
        sys.stderr.buffer.write(pickle.dumps(print_str))


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


class TestDistRunnerBase(object):
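    """Runner for static-graph models: child tests implement get_model() and
    this class drives it as a pserver or a local/pserver/nccl2 trainer."""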
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1):
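        # Builds the pserver-mode DistributeTranspiler; nccl2 mode builds its
        # own transpiler inside run_trainer.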
        # NOTE: delay importing fluid until runtime, or else forking processes
        # will cause errors.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
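        # Transpile for pserver mode, then block in the pserver main program;
        # the test harness terminates this process once the trainers finish.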
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize
        t = self.get_transpiler(args.trainer_id,
                                fluid.default_main_program(), args.endpoints,
                                args.trainers, args.sync_mode, args.dc_asgd)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        print_to_err(type(self).__name__, "run pserver startup program done.")
        exe.run(pserver_prog)
        print_to_err(type(self).__name__, "run pserver main program done.")

    def run_gpu_fleet_api_trainer(self, args):
        # Trainer variant that drives training through the collective fleet
        # API; it only supports the nccl2 update method.
        assert args.update_method == "nccl2"

        self.lr = args.lr

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        dist_strategy = DistributedStrategy()
        dist_strategy.exec_strategy = exec_strategy
        dist_strategy.fuse_memory_size = 1  # MB
        dist_strategy.fuse_laryer_size = 1
        if args.use_local_sgd:
            dist_strategy.use_local_sgd = True
        if args.ut4grad_allreduce:
            dist_strategy._ut4grad_allreduce = True

        role = role_maker.PaddleCloudRoleMaker(is_collective=True)
        fleet.init(role)
        print_to_err("gpu_fleet", "fleet.node_num:")
        #"fleet.node_id:", fleet.node_id(),
        #"fleet.trainer_num:", fleet.worker_num())

        test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, dist_strategy=dist_strategy)

        trainer_prog = fleet._origin_program
        dist_prog = fleet.main_program

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        eprint(type(self).__name__, "run worker startup program done.")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(dist_prog,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)

    def run_trainer(self, args):
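        # Build the model for the requested update method, transpile it if
        # needed, run RUN_STEP steps, and pickle the per-step losses to stdout.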
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.update_method == "pserver":
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with pserver mode")
            t = self.get_transpiler(args.trainer_id,
                                    fluid.default_main_program(),
                                    args.endpoints, args.trainers,
                                    args.sync_mode, args.dc_asgd)
            trainer_prog = t.get_trainer_program()
            print_to_err(
                type(self).__name__,
                "get trainer program done with pserver mode.")
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            if args.use_hallreduce:
                config.use_hierarchical_allreduce = True
                config.hierarchical_allreduce_inter_nranks = args.hallreduce_inter_nranks
            print_to_err(
                type(self).__name__,
                "begin to run transpile on trainer with nccl2 mode")
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            print_to_err(
                type(self).__name__,
                "get trainer program done with nccl2 mode.")
            trainer_prog = fluid.default_main_program()
        else:
            print_to_err(
                type(self).__name__,
                "do nothing about main program, just use it")
            trainer_prog = fluid.default_main_program()
            print_to_err(type(self).__name__, "use main program done.")

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        print_to_err(type(self).__name__, "run worker startup program done.")

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1

        build_stra = fluid.BuildStrategy()
        # FIXME force disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.enable_backward_deps:
            build_stra.enable_backward_optimizer_op_deps = True

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
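        # batch_merge_repeat > 1 inserts multi_batch_merge_pass at the front of
        # the pass list so that each batch is repeated num_repeats times.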
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # local mode, including the single-device run used by
            # nccl2_reduce_layer tests
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        print_to_err(type(self).__name__, "begin to compile with data parallel")
        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)
        print_to_err(type(self).__name__, "program compiled with data parallel")

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        # When --use_reader_alloc is set, each trainer keeps every other sample
        # of the batch (offset % 2 == trainer_id) to emulate a sharded reader.
        def get_data():
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        print_to_err(type(self).__name__, "begin to train on trainer")
        out_losses = []
        for i in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
            print_to_err(type(self).__name__, "run step %d finished" % i)
        print_to_err(type(self).__name__, "trainer run finished")

        print_to_out(out_losses)


class TestParallelDyGraphRunnerBase(object):
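    """Runner for dygraph (imperative) models; mirrors TestDistRunnerBase but
    drives training through run_one_loop() under fluid.dygraph.guard."""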
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

    def run_trainer(self, args):

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                print_to_err(
                    type(self).__name__,
                    "begin to prepare context in dygraph with nccl2")
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
                print_to_err(type(self).__name__, "model built in dygraph")
            out_losses = []
            print_to_err(type(self).__name__, "begin to run dygraph training")
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                if step_id % 10 == 0:
                    print_to_err(
                        type(self).__name__,
                        "loss at step %d: %f" % (step_id, loss.numpy()))
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
        print_to_out(out_losses)


def runtime_main(test_class):
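    """CLI entry point for the spawned subprocesses: parses the role/topology
    flags built by TestDistBase and dispatches to the matching runner."""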
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument('--enable_backward_deps', action='store_true')
    parser.add_argument('--use_hallreduce', action='store_true')
    parser.add_argument('--gpu_fleet_api', action='store_true')
    parser.add_argument('--use_local_sgd', action='store_true')
    parser.add_argument('--ut4grad_allreduce', action='store_true')
    parser.add_argument(
        '--hallreduce_inter_nranks', type=int, required=False, default=2)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    # NOTE: argparse's type=bool treats any non-empty string (e.g. "1") as
    # True, so this flag is passed as "--nccl2_reduce_layer_local_run 1".
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    elif args.gpu_fleet_api:
        model.run_gpu_fleet_api_trainer(args)
    else:
        model.run_trainer(args)
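

# A minimal sketch of how a concrete test is wired up (names below are
# illustrative only, not part of this module):
#
#     class TestDistMnist(TestDistRunnerBase):
#         def get_model(self, batch_size=DEFAULT_BATCH_SIZE, lr=0.1,
#                       single_device=False, use_dgc=False):
#             # build the network and return (test_program, avg_cost,
#             # train_reader, test_reader, batch_acc, predict)
#             ...
#
#     if __name__ == "__main__":
#         runtime_main(TestDistMnist)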


class TestDistBase(unittest.TestCase):
    """Launches a local run and a two-trainer distributed run of a model file
    as subprocesses and asserts the per-step losses agree within a delta."""
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._sync_mode = True
        self._enforce_place = None
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument enables testing allreduce layers
        # (users can call layers.allreduce to accumulate tensors anywhere);
        # find a better way to test this and reduce the checks of this
        # argument scattered everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._enable_backward_deps = False
        self._gpu_fleet_api = False
        self._use_local_sgd = False
        self._ut4grad_allreduce = False
        self._use_hallreduce = False
        self._setup_config()
        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                print_to_err(
                    type(self).__name__, "socket name: %s" % s.getsockname()[1])
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self, model_file, check_error_log, required_envs):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            required_envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            ps_cmd += " -m coverage run --branch -p"

        ps_cmd += " %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"

        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open("/tmp/ps0_err.log", "wb")
        ps1_pipe = open("/tmp/ps1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start pserver process 0")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        print_to_err(type(self).__name__, "going to start pserver process 1")
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1):

        cmd = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            cmd += " -m coverage run --branch -p"

        cmd += " %s --role trainer --lr %f" % (model, self._lr)

        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": "0",
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open("/tmp/trainer.err.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model,
                                                          check_error_log, envs)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"

        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")

        print_to_err(type(self).__name__, "going to start trainer process 0")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        print_to_err(type(self).__name__, "going to start trainer process 1")
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until both trainer processes terminate.
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close the trainer and pserver log pipes
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _get_nccl2_trainer_cmd(self, model, ep, update_method, trainer_id,
                               trainer_num):
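        # Assemble the command line and per-process environment (GPU selection
        # and PADDLE_* topology variables) for one nccl2 worker.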
        env = {}
        tr_cmd = "%s -u"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            tr_cmd += " -m coverage run --branch -p"

        tr_cmd += " %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"

        tr_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   trainer_id, ep, update_method, self._lr)

        if self._use_reduce:
            tr_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr_cmd += " --use_cuda"
            env.update({
                "CUDA_VISIBLE_DEVICES": "{}".format(trainer_id),
                "PADDLE_TRAINERS_NUM": "{}".format(trainer_num),
                "PADDLE_TRAINER_ID": "{}".format(trainer_id),
                "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
                "PADDLE_CURRENT_ENDPOINT": ep,
            })
        else:
            env.update({'CPU_NUM': '1'})

        if self._use_dgc:
            tr_cmd += " --use_dgc"

        if self._mp_mode:
            env = {"FLAGS_selected_gpus": "{}".format(trainer_id)}

        if self._nccl_comm_num > 1:
            tr_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._use_hallreduce:
            tr_cmd += " --use_hallreduce --hallreduce_inter_nranks 2"

        if self._enable_backward_deps:
            tr_cmd += " --enable_backward_deps"

        if self._gpu_fleet_api:
            tr_cmd += " --gpu_fleet_api"
            if self._use_local_sgd:
                tr_cmd += " --use_local_sgd"
            if self._ut4grad_allreduce:
                tr_cmd += " --ut4grad_allreduce"

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            env['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')

        return tr_cmd, env

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log):
        # Hierarchical allreduce needs more ranks, so generate 4 fresh
        # endpoints; plain nccl2 reuses the 2 default endpoints as workers.
        if self._use_hallreduce:
            self._ps_endpoints = ""
            for i in range(0, 4):
                self._ps_endpoints += "127.0.0.1:%s," % (self._find_free_port())
            self._ps_endpoints = self._ps_endpoints[:-1]

        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        trainer_num = len(worker_endpoints)

        procs = []
        pipes = []
        for i in range(0, trainer_num):
            tr_cmd, tr_env = self._get_nccl2_trainer_cmd(
                model, worker_endpoints[i], update_method, i, trainer_num)
            tr_env.update(envs)
            print("use_hallreduce:{} tr_cmd:{}, env: {}".format(
                self._use_hallreduce, tr_cmd, tr_env))

            tr_pipe = open("/tmp/tr{}_err.log".format(i), "wb")

            print_to_err(
                type(self).__name__,
                "going to start process {} with nccl2".format(i))
            tr_proc = subprocess.Popen(
                tr_cmd.strip().split(" "),
                stdout=subprocess.PIPE,
                stderr=tr_pipe,
                env=tr_env)

            procs.append(tr_proc)
            pipes.append(tr_pipe)

        outs = []
        for i in range(0, trainer_num):
            tr_out, tr_err = procs[i].communicate()
            outs.append(tr_out)
            pipes[i].close()
            sys.stderr.write('trainer {} stderr: {}\n'.format(i, tr_err))

        if check_error_log:
            print("outs[0]:", outs[0])
            print("outs[1]:", outs[1])
        return pickle.loads(outs[0]), pickle.loads(outs[1])

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
        # TODO(typhoonzero): should auto adapt GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "30000",  # 30 seconds, to fail fast
            "FLAGS_cudnn_deterministic": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1",
            "NCCL_SHM_DISABLE": "1"
        }

        required_envs.update(need_envs)

        if check_error_log:
            required_envs["GLOG_v"] = "10"
            required_envs["GLOG_logtostderr"] = "1"

        local_losses = self._run_local(model_file, required_envs,
                                       check_error_log)
        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, True, check_error_log)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, False, check_error_log)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log)

        # Compare the per-step local loss against the two trainers' losses
        # averaged together.
        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)