#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.dygraph as dygraph
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import DataParallel

RUN_STEP = 5
DEFAULT_BATCH_SIZE = 2


class TestDistRunnerBase(object):
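    """Base runner for the static-graph (fluid) distributed training tests.

    Subclasses implement get_model(); runtime_main() then drives run_pserver()
    or run_trainer() in a spawned process according to the --role argument,
    and the trainer writes its pickled per-step losses to stdout so the parent
    test process can read them back.
    """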
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None,
                       nccl_comm_num=1):
        # NOTE: delay importing fluid until runtime, or else forking processes will cause errors.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        if nccl_comm_num > 1:
            config.nccl_comm_num = nccl_comm_num
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize
        t = self.get_transpiler(args.trainer_id,
                                fluid.default_main_program(), args.endpoints,
                                args.trainers, args.sync_mode, args.dc_asgd)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        exe.run(pserver_prog)

    def run_trainer(self, args):
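        """Build and run the trainer side for RUN_STEP mini-batches.

        Depending on args.update_method ("pserver", "nccl2",
        "nccl2_reduce_layer", or "local"), the main program is transpiled
        accordingly, compiled with data parallelism, and the per-step losses
        are pickled to stdout for the launching test to read back.
        """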
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.mem_opt:
            fluid.memory_optimize(fluid.default_main_program(), skip_grads=True)
        if args.update_method == "pserver":
            t = self.get_transpiler(args.trainer_id,
                                    fluid.default_main_program(),
                                    args.endpoints, args.trainers,
                                    args.sync_mode, args.dc_asgd)
            trainer_prog = t.get_trainer_program()
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            config.nccl_comm_num = args.nccl_comm_num
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)

            trainer_prog = fluid.default_main_program()
        else:
            trainer_prog = fluid.default_main_program()

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1
        exec_strategy.allow_op_delay = False

        build_stra = fluid.BuildStrategy()
        # FIXME: force-disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # case args.update_method == "local" or "pserver"
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
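            # In distributed runs with --use_reader_alloc, each trainer keeps
            # every other sample (offset % 2 == trainer_id), so the two
            # trainers consume disjoint halves of each batch.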
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        out_losses = []
        for _ in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))


class TestParallelDyGraphRunnerBase(object):
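    """Base runner for the dygraph (imperative) data-parallel tests.

    Subclasses implement get_model() and run_one_loop(); run_trainer() wraps
    the model in dygraph.parallel.DataParallel when --update_method is nccl2
    and writes the pickled per-step losses to stdout.
    """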
    def get_model(self):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def run_one_loop(self, model, opt, data):
        raise NotImplementedError(
            "train_one_loop should be implemented by the child classes.")

    def run_trainer(self, args):
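        """Run RUN_STEP dygraph iterations on the selected GPU.

        Under nccl2 the loss is scaled across ranks and gradients are
        synchronized via apply_collective_grads() before opt.minimize().
        """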

        seed = 90
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)

        def _get_data(batch):
            if args.update_method != "local":
                new_batch = []
                for offset, item in enumerate(batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return batch

        with fluid.dygraph.guard(place):
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed
            np.random.seed(seed)
            import random
            random.seed(seed)
            model, train_reader, opt = self.get_model()
            nranks = len(args.endpoints.split(",")) if args.endpoints else 1

            if args.update_method == "nccl2":
                strategy = dygraph.parallel.ParallelStrategy()
                strategy.nranks = nranks
                strategy.local_rank = args.trainer_id
                strategy.trainer_endpoints = args.endpoints.split(",")
                strategy.current_endpoint = args.current_endpoint
                dygraph.parallel.prepare_context(strategy)
                model = dygraph.parallel.DataParallel(model, strategy)
            out_losses = []
            for step_id, data in enumerate(train_reader()):
                data = _get_data(data)
                if step_id == RUN_STEP:
                    break
                loss = self.run_one_loop(model, opt, data)
                out_losses.append(loss.numpy())

                # FIXME(Yancey1989): scale the loss inplace
                if args.update_method == "nccl2":
                    loss = model.scale_loss(loss)

                loss.backward()
                if args.update_method == "nccl2":
                    model.apply_collective_grads()

                opt.minimize(loss)
                model.clear_gradients()
            if six.PY2:
                print(pickle.dumps(out_losses))
            else:
                sys.stdout.buffer.write(pickle.dumps(out_losses))


def runtime_main(test_class):
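    """Entry point for the spawned worker processes.

    Parses the command-line flags emitted by TestDistBase and dispatches to
    run_pserver() or run_trainer() on an instance of test_class.
    """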
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--nccl_comm_num', type=int, required=False, default=1)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--mem_opt', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    else:
        model.run_trainer(args)


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
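    """Launches local, parameter-server, and NCCL2 training as subprocesses
    and compares their per-step losses.

    Subclasses choose the run mode in _setup_config() and call
    check_with_place() with their model script.
    """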
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._sync_mode = True
        self._enforce_place = None
        self._mem_opt = False
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument was added to enable testing
        # allreduce layers, which users can call via layers.allreduce to
        # accumulate tensors anywhere. Find a better way to write this test
        # and reduce the need to check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._dygraph = False
        self._nccl_comm_num = 1
        self._setup_config()
        self._after_setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self, model_file, check_error_log, required_envs):
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"
        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"
        if self._mem_opt:
            ps0_cmd += " --mem_opt"
            ps1_cmd += " --mem_opt"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open("/tmp/ps0_err.log", "wb")
        ps1_pipe = open("/tmp/ps1_err.log", "wb")

        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1):

        cmd = "%s %s --role trainer --lr %f" % (self._python_interp, model,
                                                self._lr)
        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": "0",
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open("/tmp/trainer.err.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model,
                                                          check_error_log, envs)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"
        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._mem_opt:
            tr0_cmd += " --mem_opt"
            tr1_cmd += " --mem_opt"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")

        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer and pserver log files
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        # print server log
        with open("/tmp/ps0_err.log", "r") as fn:
            sys.stderr.write("ps0 stderr: %s\n" % fn.read())
        with open("/tmp/ps1_err.log", "r") as fn:
            sys.stderr.write("ps1 stderr: %s\n" % fn.read())

        # print trainer log
        with open("/tmp/tr0_err.log", "r") as fn:
            sys.stderr.write('trainer 0 stderr: %s\n' % fn.read())
        with open("/tmp/tr1_err.log", "r") as fn:
            sys.stderr.write('trainer 1 stderr: %s\n' % fn.read())

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log):
        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        w0_ep, w1_ep = worker_endpoints
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        tr_cmd = "%s %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"
        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, w0_ep, update_method, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, w1_ep, update_method, self._lr)

        if self._mem_opt:
            tr0_cmd += " --mem_opt"
            tr1_cmd += " --mem_opt"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {
                "CUDA_VISIBLE_DEVICES": "0",
                # for test nccl2 layer
                "PADDLE_TRAINERS_NUM": "2",
                "PADDLE_TRAINER_ID": "0"
            }
            env1 = {
                "CUDA_VISIBLE_DEVICES": "1",
                "PADDLE_TRAINERS_NUM": "2",
                "PADDLE_TRAINER_ID": "1"
            }
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        if self._use_dgc:
            tr0_cmd += " --use_dgc"
            tr1_cmd += " --use_dgc"

        if self._nccl_comm_num > 1:
            tr0_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)
            tr1_cmd += " --nccl_comm_num {}".format(self._nccl_comm_num)

        if self._mp_mode:
            env0 = {"FLAGS_selected_gpus": "0"}
            env1 = {"FLAGS_selected_gpus": "1"}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd:{}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd:{}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")

        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer file
        tr0_pipe.close()
        tr1_pipe.close()

        # print log
        sys.stderr.write('trainer 0 stderr: %s\n' % tr0_err)
        sys.stderr.write('trainer 1 stderr: %s\n' % tr1_err)

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
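        """Run model_file locally and on a two-trainer cluster, then check
        that each step's averaged distributed loss matches the local loss
        within delta.
        """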
        # TODO(typhoonzero): should auto-adapt to the GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "FLAGS_cudnn_deterministic": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1"
        }

        required_envs.update(need_envs)

        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"

        local_losses = self._run_local(model_file, required_envs,
                                       check_error_log)
        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, True, check_error_log)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, False, check_error_log)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log)

        for step_id in range(RUN_STEP):
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)