#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
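
# Test harness for PaddlePaddle distributed training:
# TestDistRunnerBase runs the model inside a spawned pserver/trainer
# subprocess, runtime_main() is that subprocess's entry point, and
# TestDistBase launches the subprocesses and compares their per-step losses
# against a local single-process run.
#
# Typical usage (file and class names below are illustrative, not part of
# this module): a model script such as dist_example.py subclasses
# TestDistRunnerBase, implements get_model(), and calls runtime_main() under
# __main__; the unit test subclasses TestDistBase, sets flags in
# _setup_config(), and calls self.check_with_place("dist_example.py").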

from __future__ import print_function
import time

import unittest
import os
import sys
import signal
import subprocess
import six
import argparse
import pickle
import numpy as np

import paddle.fluid as fluid
from paddle.fluid import compiler

RUN_STEP = 10
DEFAULT_BATCH_SIZE = 2


class TestDistRunnerBase(object):
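    # Runs the model inside a spawned pserver/trainer subprocess; concrete
    # tests subclass this and implement get_model().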
    def get_model(self,
                  batch_size=DEFAULT_BATCH_SIZE,
                  lr=0.1,
                  single_device=False,
                  use_dgc=False):
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    @staticmethod
    def get_transpiler(trainer_id,
                       main_program,
                       pserver_endpoints,
                       trainers,
                       sync_mode,
                       dc_asgd=False,
                       current_endpoint=None):
        # NOTE: import fluid only at runtime, or else forking processes will cause an error.
        config = fluid.DistributeTranspilerConfig()
        config.enable_dc_asgd = dc_asgd
        config.sync_mode = sync_mode
        # config.runtime_split_send_recv = True
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            program=main_program,
            pservers=pserver_endpoints,
            trainers=trainers,
            current_endpoint=current_endpoint)
        return t

    def run_pserver(self, args):
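        # Build the pserver-side program via the transpiler and serve it
        # until the parent test process terminates this subprocess.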
        self.lr = args.lr
        self.get_model(batch_size=args.batch_size)
        # NOTE: pserver should not call memory optimize
        t = self.get_transpiler(args.trainer_id,
                                fluid.default_main_program(), args.endpoints,
                                args.trainers, args.sync_mode, args.dc_asgd)
        pserver_prog = t.get_pserver_program(args.current_endpoint)
        startup_prog = t.get_startup_program(args.current_endpoint,
                                             pserver_prog)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        exe.run(pserver_prog)

    def run_trainer(self, args):
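        # Build the trainer program for the selected update_method, run
        # RUN_STEP mini-batches, and emit the per-step losses on stdout.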
        self.lr = args.lr
        if args.nccl2_reduce_layer_local_run:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, single_device=True)
        elif args.use_dgc:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size, use_dgc=args.use_dgc)
        else:
            test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \
                self.get_model(batch_size=args.batch_size)

        if args.mem_opt:
            fluid.memory_optimize(fluid.default_main_program(), skip_grads=True)
        if args.update_method == "pserver":
            t = self.get_transpiler(args.trainer_id,
                                    fluid.default_main_program(),
                                    args.endpoints, args.trainers,
                                    args.sync_mode, args.dc_asgd)
            trainer_prog = t.get_trainer_program()
        elif args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            # transpile for nccl2
            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            nccl2_t = fluid.DistributeTranspiler(config=config)
            nccl2_t.transpile(
                args.trainer_id,
                program=fluid.default_main_program(),
                startup_program=fluid.default_startup_program(),
                trainers=args.endpoints,
                current_endpoint=args.current_endpoint)
            trainer_prog = fluid.default_main_program()
        else:
            trainer_prog = fluid.default_main_program()

        if args.use_cuda:
            device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
            place = fluid.CUDAPlace(device_id)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = 1
        exec_strategy.allow_op_delay = False

        build_stra = fluid.BuildStrategy()
        # FIXME: force-disable enable_inplace and memory_optimize
        build_stra.enable_inplace = False
        build_stra.memory_optimize = False

        if args.use_reduce:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
        else:
            build_stra.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

        pass_builder = None
        if args.batch_merge_repeat > 1:
            pass_builder = build_stra._finalize_strategy_and_create_passes()
            mypass = pass_builder.insert_pass(0, "multi_batch_merge_pass")
            mypass.set("num_repeats", args.batch_merge_repeat)

        if args.update_method == "nccl2" or args.update_method == "nccl2_reduce_layer":
            build_stra.num_trainers = len(args.endpoints.split(","))
            build_stra.trainer_id = args.trainer_id
        else:
            # local or pserver mode: run with a single trainer
            build_stra.num_trainers = 1
            build_stra.trainer_id = 0

        binary = compiler.CompiledProgram(trainer_prog).with_data_parallel(
            loss_name=avg_cost.name,
            build_strategy=build_stra,
            exec_strategy=exec_strategy)

        feed_var_list = [
            var for var in trainer_prog.global_block().vars.values()
            if var.is_data
        ]

        feeder = fluid.DataFeeder(feed_var_list, place)
        reader_generator = train_reader()

        def get_data():
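            # In distributed runs, shard each batch by trainer_id so the two
            # trainers consume disjoint halves of the data.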
            origin_batch = next(reader_generator)
            if args.update_method != "local" and args.use_reader_alloc:
                new_batch = []
                for offset, item in enumerate(origin_batch):
                    if offset % 2 == args.trainer_id:
                        new_batch.append(item)
                return new_batch
            else:
                return origin_batch

        out_losses = []
        for _ in six.moves.xrange(RUN_STEP):
            loss, = exe.run(binary,
                            fetch_list=[avg_cost.name],
                            feed=feeder.feed(get_data()))
            out_losses.append(loss[0])
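        # Serialize the per-step losses to stdout; the parent test process
        # reads them back with pickle.loads() to compare runs.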
        if six.PY2:
            print(pickle.dumps(out_losses))
        else:
            sys.stdout.buffer.write(pickle.dumps(out_losses))


def runtime_main(test_class):
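    # Entry point for the spawned subprocess: parse the role/config flags and
    # dispatch to run_pserver() or run_trainer() on the given test class.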
    parser = argparse.ArgumentParser(description='Run dist test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--update_method',
        type=str,
        default="local",
        choices=["pserver", "nccl2", "local", "nccl2_reduce_layer"])
    parser.add_argument('--trainer_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument(
        '--current_endpoint', type=str, required=False, default="")
    parser.add_argument('--sync_mode', action='store_true')
    parser.add_argument('--mem_opt', action='store_true')
    parser.add_argument('--use_cuda', action='store_true')
    parser.add_argument('--use_dgc', action='store_true')
    parser.add_argument('--use_reduce', action='store_true')
    parser.add_argument('--dc_asgd', action='store_true')
    parser.add_argument(
        '--use_reader_alloc', action='store_true', required=False)
    parser.add_argument('--batch_size', required=False, type=int, default=2)
    parser.add_argument('--lr', required=False, type=float, default=0.001)
    parser.add_argument(
        '--batch_merge_repeat', required=False, type=int, default=1)
    parser.add_argument(
        '--nccl2_reduce_layer_local_run',
        required=False,
        type=bool,
        default=False)

    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver" and args.update_method == "pserver":
        model.run_pserver(args)
    else:
        model.run_trainer(args)


import paddle.compat as cpt
import socket
from contextlib import closing


class TestDistBase(unittest.TestCase):
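    # Orchestrates the pserver/trainer subprocesses, collects their pickled
    # losses, and asserts they match a local single-process run.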
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def _after_setup_config(self):
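        # Resolve the device to run on; DGC requires CUDA, so it is disabled
        # whenever the test falls back to CPU.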
        if self._enforce_place == "CPU":
            self.__use_cuda = False
            self._use_dgc = False
        elif self._enforce_place == "GPU":
            self.__use_cuda = True
        else:
            if fluid.core.is_compiled_with_cuda():
                self.__use_cuda = True
            else:
                self.__use_cuda = False
                self._use_dgc = False

        if self._use_reduce:
            assert not self._use_dgc

    def setUp(self):
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._sync_mode = True
        self._enforce_place = None
        self._mem_opt = False
        self._use_reduce = False
        self._dc_asgd = False  # must use with async mode
        self._use_reader_alloc = True
        self._nccl2_mode = False
        self._mp_mode = False
        # FIXME(typhoonzero): this argument was added to enable testing
        # allreduce layers, since users can call layers.allreduce to
        # accumulate tensors anywhere. Find a better way to do this test and
        # reduce the need to check this argument everywhere.
        self._nccl2_reduce_layer = False
        self._lr = 0.001
        self._use_dgc = False
        self._setup_config()
        self._after_setup_config()

    def _find_free_port(self):
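        # Bind to port 0 so the OS picks a free port; remember ports already
        # handed out to avoid reusing one within this test.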
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def start_pserver(self, model_file, check_error_log, required_envs):
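        # Launch the two parameter-server subprocesses, redirecting their
        # stderr to /tmp log files for later inspection.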
        ps0_ep, ps1_ep = self._ps_endpoints.split(",")
        ps_cmd = "%s %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --update_method pserver"
        ps0_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps0_ep,
                   self._trainers)
        ps1_cmd = ps_cmd % \
                  (self._python_interp, model_file, self._ps_endpoints, ps1_ep,
                   self._trainers)

        if self._sync_mode:
            ps0_cmd += " --sync_mode"
            ps1_cmd += " --sync_mode"
        if self._mem_opt:
            ps0_cmd += " --mem_opt"
            ps1_cmd += " --mem_opt"

        print(ps0_cmd)
        print(ps1_cmd)
        ps0_pipe = open("/tmp/ps0_err.log", "wb")
        ps1_pipe = open("/tmp/ps1_err.log", "wb")

        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)

        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _run_local(self,
                   model,
                   envs,
                   check_error_log=False,
                   batch_size=DEFAULT_BATCH_SIZE,
                   batch_merge_repeat=1):
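        # Run the model in a single local process and return its per-step
        # losses (unpickled from the subprocess stdout).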

        cmd = "%s %s --role trainer --lr %f" % (self._python_interp, model,
                                                self._lr)
        if batch_size != DEFAULT_BATCH_SIZE:
            cmd += " --batch_size %d" % batch_size
        if batch_merge_repeat > 1:
            cmd += " --batch_merge_repeat %d" % batch_merge_repeat
        if self._nccl2_reduce_layer:
            cmd += " --nccl2_reduce_layer_local_run 1"

        if self.__use_cuda:
            cmd += " --use_cuda"
            env_local = {
                "CUDA_VISIBLE_DEVICES": "0",
                "PADDLE_TRAINERS_NUM": "1",
                "PADDLE_TRAINER_ID": "0"
            }
        else:
            env_local = {'CPU_NUM': '1'}

        env_local.update(envs)
        print("local_cmd: {}, env: {}".format(cmd, env_local))

        if check_error_log:
            err_log = open("/tmp/trainer.err.log", "wb")
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=err_log,
                env=env_local)
        else:
            local_proc = subprocess.Popen(
                cmd.split(" "),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env_local)

        local_out, local_err = local_proc.communicate()

        if check_error_log:
            err_log.close()

        sys.stderr.write('local_stderr: %s\n' % local_err)
        sys.stderr.write('local_stdout: %s\n' % pickle.loads(local_out))

        return pickle.loads(local_out)

    def _run_cluster(self, model, envs, check_error_log):
        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model,
                                                          check_error_log, envs)

        ps0_ep, ps1_ep = self._ps_endpoints.split(",")

        tr_cmd = "%s %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --trainers %d --update_method pserver --lr %f"
        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, ps0_ep, self._trainers, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, ps1_ep, self._trainers, self._lr)

        if self._sync_mode:
            tr0_cmd += " --sync_mode"
            tr1_cmd += " --sync_mode"
        if self._mem_opt:
            tr0_cmd += " --mem_opt"
            tr1_cmd += " --mem_opt"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {"CUDA_VISIBLE_DEVICES": "0"}
            env1 = {"CUDA_VISIBLE_DEVICES": "1"}
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd: {}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd: {}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")

        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        # Wait until both trainer processes terminate
        while True:
            stat0 = tr0_proc.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1_proc.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer and pserver log files
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        # print server log
        with open("/tmp/ps0_err.log", "r") as fn:
            sys.stderr.write("ps0 stderr: %s\n" % fn.read())
        with open("/tmp/ps1_err.log", "r") as fn:
            sys.stderr.write("ps1 stderr: %s\n" % fn.read())

        # print trainer logs
        with open("/tmp/tr0_err.log", "r") as fn:
            sys.stderr.write('trainer 0 stderr: %s\n' % fn.read())
        with open("/tmp/tr1_err.log", "r") as fn:
            sys.stderr.write('trainer 1 stderr: %s\n' % fn.read())

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def _run_cluster_nccl2(self, model, envs, nccl2_reduce_layer,
                           check_error_log):
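        # Launch two trainers that synchronize through NCCL2 directly; no
        # parameter servers are started in this mode.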
        # NOTE: we reuse ps_endpoints as nccl2 worker endpoints
        worker_endpoints = self._ps_endpoints.split(",")
        w0_ep, w1_ep = worker_endpoints
        if nccl2_reduce_layer:
            update_method = "nccl2_reduce_layer"
        else:
            update_method = "nccl2"

        tr_cmd = "%s %s --role trainer --endpoints %s --trainer_id %d --current_endpoint %s --update_method %s --lr %f"
        tr0_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   0, w0_ep, update_method, self._lr)
        tr1_cmd = tr_cmd % \
                  (self._python_interp, model, self._ps_endpoints,
                   1, w1_ep, update_method, self._lr)

        if self._mem_opt:
            tr0_cmd += " --mem_opt"
            tr1_cmd += " --mem_opt"
        if self._use_reduce:
            tr0_cmd += " --use_reduce"
            tr1_cmd += " --use_reduce"
        if self._use_reader_alloc:
            tr0_cmd += " --use_reader_alloc"
            tr1_cmd += " --use_reader_alloc"
        if self.__use_cuda:
            tr0_cmd += " --use_cuda"
            tr1_cmd += " --use_cuda"
            env0 = {
                "CUDA_VISIBLE_DEVICES": "0",
                # for test nccl2 layer
                "PADDLE_TRAINERS_NUM": "2",
                "PADDLE_TRAINER_ID": "0"
            }
            env1 = {
                "CUDA_VISIBLE_DEVICES": "1",
                "PADDLE_TRAINERS_NUM": "2",
                "PADDLE_TRAINER_ID": "1"
            }
        else:
            env0 = {'CPU_NUM': '1'}
            env1 = {'CPU_NUM': '1'}

        if self._use_dgc:
            tr0_cmd += " --use_dgc"
            tr1_cmd += " --use_dgc"
        if self._mp_mode:
            env0 = {"FLAGS_selected_gpus": "0"}
            env1 = {"FLAGS_selected_gpus": "1"}

        env0.update(envs)
        env1.update(envs)

        print("tr0_cmd:{}, env: {}".format(tr0_cmd, env0))
        print("tr1_cmd:{}, env: {}".format(tr1_cmd, env1))
        tr0_pipe = open("/tmp/tr0_err.log", "wb")
        tr1_pipe = open("/tmp/tr1_err.log", "wb")

        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=env0)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=env1)

        tr0_out, tr0_err = tr0_proc.communicate()
        tr1_out, tr1_err = tr1_proc.communicate()

        # close trainer log files
        tr0_pipe.close()
        tr1_pipe.close()

        # print log
        sys.stderr.write('trainer 0 stderr: %s\n' % tr0_err)
        sys.stderr.write('trainer 1 stderr: %s\n' % tr1_err)

        return pickle.loads(tr0_out), pickle.loads(tr1_out)

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
        # TODO(typhoonzero): should auto-adapt to the GPU count on the machine.
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_fraction_of_gpu_memory_to_use": "0.15",
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "FLAGS_cudnn_deterministic": "1",
            "http_proxy": "",
            "NCCL_P2P_DISABLE": "1"
        }

        required_envs.update(need_envs)

        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"

        local_losses\
            = self._run_local(model_file, required_envs,
                                check_error_log)
        if self._nccl2_mode:
            if self._nccl2_reduce_layer:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, True, check_error_log)
            else:
                tr0_losses, tr1_losses = self._run_cluster_nccl2(
                    model_file, required_envs, False, check_error_log)
        else:
            tr0_losses, tr1_losses = self._run_cluster(
                model_file, required_envs, check_error_log)

        for step_id in range(RUN_STEP):
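            # The distributed loss is the average of the two trainers' losses
            # and must match the local loss within `delta`.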
            local_loss = local_losses[step_id]
            tr0_loss = tr0_losses[step_id]
            tr1_loss = tr1_losses[step_id]
            dist_loss = (np.array([tr0_loss]) + np.array([tr1_loss])) / 2
            print("=======", local_loss, ":", dist_loss[0], "=======")
            self.assertAlmostEqual(local_loss, dist_loss[0], delta=delta)