#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from paddle.distributed.fleet.utils.ps_util import DistributedInfer
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.fluid as fluid
"""
    high level unit test for distribute fleet.
"""

import os
import sys
import subprocess

import six
import shutil
import numpy as np
import argparse
from contextlib import closing
import socket
import time
import tempfile
import unittest

import paddle
paddle.enable_static()

__all__ = ['FleetDistRunnerBase', 'TestFleetBase', 'runtime_main']
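
# Typical usage (a sketch; the module and class names are illustrative):
# a model file subclasses FleetDistRunnerBase and ends with
#
#     if __name__ == "__main__":
#         runtime_main(TestDistModel)
#
# while the unittest side subclasses TestFleetBase, overrides _setup_config,
# and calls self.check_with_place("dist_fleet_model.py").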

RUN_STEP = 5
LEARNING_RATE = 0.01
DIST_UT_PORT = 0


class FleetDistRunnerBase(object):
    """
        run_pserver,run_trainer : after init role, using transpiler split program
        net : implment by child class, the network of model
        do training : exe run program
    """

    def __init__(self):
        self._exe = None

    def build_role(self, args):

        if args.role.upper() == "PSERVER":
            role = role_maker.UserDefinedRoleMaker(
                is_collective=False,
                init_gloo=False,
                path=args.gloo_path,
                current_id=args.current_id,
                role=role_maker.Role.SERVER,
                worker_endpoints=args.trainer_endpoints.split(","),
                server_endpoints=args.endpoints.split(","))
        else:
            role = role_maker.UserDefinedRoleMaker(
                is_collective=False,
                init_gloo=False,
                path=args.gloo_path,
                current_id=args.current_id,
                role=role_maker.Role.WORKER,
                worker_endpoints=args.trainer_endpoints.split(","),
                server_endpoints=args.endpoints.split(","))
        self.role = role
        return role

    def build_strategy(self, args):
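        # Map the test mode onto a fleet DistributedStrategy:
        #   sync -> a_sync=False; async -> a_sync=True;
        #   geo  -> a_sync=True plus a_sync_configs["k_steps"];
        #   auto -> strategy.auto=True.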
        if args.mode == "sync":
            self.strategy = paddle.distributed.fleet.DistributedStrategy()
            self.strategy.a_sync = False
        elif args.mode == "async":
            self.strategy = paddle.distributed.fleet.DistributedStrategy()
            self.strategy.a_sync = True
        elif args.mode == "geo":
            self.strategy = paddle.distributed.fleet.DistributedStrategy()
            self.strategy.a_sync = True
            self.strategy.a_sync_configs = {
                "k_steps": args.geo_sgd_need_push_nums
            }
        elif args.mode == "auto":
            self.strategy = paddle.distributed.fleet.DistributedStrategy()
            self.strategy.auto = True

        self.dump_param = os.getenv("dump_param", "").split(",")
        self.dump_fields = os.getenv("dump_fields", "").split(",")
        self.dump_fields_path = os.getenv("dump_fields_path", "")
        debug = int(os.getenv("Debug", "0"))
        # TODO(update strategy to support dump params)
        if False:  # debug:
            self.strategy.set_debug_opt({
                "dump_param": self.dump_param,
                "dump_fields": self.dump_fields,
                "dump_fields_path": self.dump_fields_path
            })

        return self.strategy

    def build_optimizer(self, avg_cost, strategy):
        use_grad_clip = int(os.getenv('GRAD_CLIP', 0))
        grad_clip = None
        if use_grad_clip:
            # 1: clip_by_value; 2: clip_by_norm; 3: clip_by_global_norm
            if use_grad_clip == 1:
                grad_clip = paddle.nn.ClipGradByValue(min=-5.0, max=5.0)
            elif use_grad_clip == 2:
                grad_clip = paddle.nn.ClipGradByNorm(2.0)
            elif use_grad_clip == 3:
                grad_clip = paddle.nn.ClipGradByGlobalNorm(2.0)

        use_decay = int(os.getenv("USE_DECAY", "0"))
        if use_decay:
            scheduler = paddle.optimizer.lr.ExponentialDecay(
                learning_rate=LEARNING_RATE, gamma=0.999, verbose=True)
            optimizer = fluid.optimizer.SGD(scheduler, grad_clip=grad_clip)
            """
            # learning rate decay method before 2.0
            optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.exponential_decay(
                    learning_rate=LEARNING_RATE,
                    decay_steps=500,
                    decay_rate=0.969,
                    staircase=True))
            """
        else:
            optimizer = fluid.optimizer.SGD(LEARNING_RATE, grad_clip=grad_clip)
        optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
        optimizer.minimize(avg_cost)

    def run_pserver(self, args):
        fleet.init_server()
        fleet.run_server()

    def run_dataset_trainer(self, args):
        out = self.do_dataset_training(fleet)

    def run_pyreader_trainer(self, args):
        out = self.do_pyreader_training(fleet)

    def net(self, args, batch_size=4, lr=0.01):
        raise NotImplementedError(
            "net should be implemented by child classes.")

    def get_executor(self):
        if self._exe is None:
            device_env = os.getenv("DEVICE", 'cpu')
            if device_env == 'cpu':
                device = fluid.CPUPlace()
            elif device_env == 'gpu':
                device = fluid.CUDAPlace(0)
            else:
                raise ValueError(
                    "unsupported DEVICE env: {}".format(device_env))
            self._exe = fluid.Executor(device)
        return self._exe

    def do_dataset_training(self, fleet):
        raise NotImplementedError(
            "do_dataset_training should be implemented by child classes.")

    def do_pyreader_training(self, fleet):
        raise NotImplementedError(
            "do_pyreader_training should be implemented by child classes.")

    def do_distributed_testing(self, fleet):
        raise NotImplementedError(
            "do_distributed_testing should be implemented by child classes.")


class TestFleetBase(unittest.TestCase):
    """
        start_pserver,start_trainer : add start cmd to test
        run_cluster : using multi process to test distribute program
    """

    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def tearDown(self):
        t = time.time() - self.startTime
        print('%s: %.3f' % (self.__class__.__name__, t))

    def setUp(self):
        self.startTime = time.time()

        self._mode = "sync"
        self._reader = "pyreader"
        self._trainers = 2
        self._pservers = 2
        self._need_test = 0
        self._model_dir = ""
        self._port_set = set()

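        # PADDLE_DIST_UT_PORT lets CI pin a deterministic starting port; each
        # test then consumes four consecutive ports (2 pservers + 2 trainers).
        # Without it, free ports are picked ad hoc via _find_free_port().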
        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT + 2, DIST_UT_PORT + 3)
            DIST_UT_PORT += 4
        else:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
            self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())

        self._python_interp = sys.executable
        self._geo_sgd_need_push_nums = 5
        self._grad_clip_mode = 0
        self._setup_config()

    def _find_free_port(self):
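        # Bind to port 0 so the OS picks an unused ephemeral port; remember
        # ports already handed out in self._port_set to avoid duplicates.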
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def _start_pserver(self, cmd, required_envs):
        ps0_cmd, ps1_cmd = cmd.format(0), cmd.format(1)
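        # The remaining "{}" placeholder in cmd is the process index, which
        # becomes the --current_id of each pserver.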

        ps0_pipe = open(tempfile.gettempdir() + "/ps0_err.log", "wb+")
        ps1_pipe = open(tempfile.gettempdir() + "/ps1_err.log", "wb+")

        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)
        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _start_trainer(self, cmd, required_envs):
        tr0_cmd, tr1_cmd = cmd.format(0), cmd.format(1)

        tr0_pipe = open(tempfile.gettempdir() + "/tr0_err.log", "wb+")
        tr1_pipe = open(tempfile.gettempdir() + "/tr1_err.log", "wb+")

        tr0_out = open(tempfile.gettempdir() + "/tr0_stdout.log", "wb+")
        tr1_out = open(tempfile.gettempdir() + "/tr1_stdout.log", "wb+")

        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=tr0_out,
            stderr=tr0_pipe,
            env=required_envs)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=tr1_out,
            stderr=tr1_pipe,
            env=required_envs)

        return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe

    def _run_cluster(self, model, envs):
        env = {'GRAD_CLIP': str(self._grad_clip_mode)}
        python_path = self._python_interp
        gloo_path = tempfile.mkdtemp()
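        # Scratch directory for the gloo rendezvous files (passed to the
        # subprocesses as --gloo_path); cleaned up with shutil.rmtree below.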

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            python_path += " -m coverage run --branch -p"
        env.update(envs)

        tr_cmd = "{0} {1} --role trainer --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} --test {9}".format(
            python_path, model, self._ps_endpoints, self._tr_endpoints,
            self._trainers, self._mode, self._geo_sgd_need_push_nums,
            self._reader, gloo_path, self._need_test)
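        # For reference, tr_cmd.format(0) expands to roughly (the port
        # numbers are illustrative):
        #   python model.py --role trainer \
        #       --endpoints 127.0.0.1:36001,127.0.0.1:36002 \
        #       --trainer_endpoints 127.0.0.1:36003,127.0.0.1:36004 \
        #       --current_id 0 --trainers 2 --mode sync ...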

        ps_cmd = "{0} {1} --role pserver --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8} --test {9}".format(
            python_path, model, self._ps_endpoints, self._tr_endpoints,
            self._trainers, self._mode, self._geo_sgd_need_push_nums,
            self._reader, gloo_path, self._need_test)

        if self._model_dir:
            tr_cmd += " --model_dir {}".format(self._model_dir)
            ps_cmd += " --model_dir {}".format(self._model_dir)

        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env)
        tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env)

        # Wait until trainer process terminate
        while True:
            stat0 = tr0.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break

        while True:
            stat1 = tr1.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0.communicate()
        tr1_out, tr1_err = tr1.communicate()

        tr0_ret = tr0.returncode
        tr1_ret = tr1.returncode
        if tr0_ret != 0:
            print(
                "========================Error tr0_err begin==========================="
            )
            os.system("cat {}".format(tempfile.gettempdir() + "/tr0_err.log"))
            print(
                "========================Error tr0_err end==========================="
            )

        if tr1_ret != 0:
            print(
                "========================Error tr1_err begin==========================="
            )
            os.system("cat {}".format(tempfile.gettempdir() + "/tr1_err.log"))
            print(
                "========================Error tr1_err end==========================="
            )

        # close the stderr log files of the trainers and pservers
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        shutil.rmtree(gloo_path)
        self.assertEqual(tr0_ret, 0, "something wrong in tr0, please check")
        self.assertEqual(tr1_ret, 0, "something wrong in tr1, please check")
        return 0, 0

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "http_proxy": ""
        }

        required_envs.update(need_envs)

        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"

        tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
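        # NOTE: _run_cluster currently returns placeholder (0, 0) losses, so
        # `delta` is not used for a loss comparison yet; correctness is
        # enforced by the return-code assertions inside _run_cluster.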


def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run Fleet test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--trainer_endpoints', type=str, required=False, default="")
    parser.add_argument('--gloo_path', type=str, required=False, default="")
    parser.add_argument('--current_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--mode', type=str, required=False, default='geo')
    parser.add_argument(
        '--geo_sgd_need_push_nums', type=int, required=False, default=2)
    parser.add_argument('--reader', type=str, required=False, default='dataset')
    parser.add_argument('--test', type=int, required=False, default=0)
    parser.add_argument('--model_dir', type=str, required=False, default="")
    args = parser.parse_args()

    model = test_class()
    role = model.build_role(args)

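    # Inference-only path: restore a saved model and run distributed
    # testing without any training.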
    if args.test and args.model_dir != "":
        avg_cost = model.net(args, is_train=False)
        dist_infer = DistributedInfer()
        dist_infer.init_distributed_infer_env(
            exe=model.get_executor(),
            loss=model.avg_cost,
            role_maker=role,
            dirname=args.model_dir)
        if fleet.is_worker():
            with paddle.static.program_guard(
                    main_program=dist_infer.get_dist_infer_program()):
                model.do_distributed_testing(fleet)
                fleet.stop_worker()
                return

    fleet.init(role)
    strategy = model.build_strategy(args)
    avg_cost = model.net(args)
    model.build_optimizer(avg_cost, strategy)

    if args.role == "pserver":
        model.run_pserver(args)
    else:
        if args.reader == "dataset":
            model.run_dataset_trainer(args)
        else:
            model.run_pyreader_trainer(args)

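        # After training, rebuild the network as a fresh inference program
        # under a unique_name guard and run distributed testing on it.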
        if args.test:
            test_origin_program = paddle.static.Program()
            test_startup_program = paddle.static.Program()
            with paddle.static.program_guard(
                    main_program=test_origin_program,
                    startup_program=test_startup_program):
                with paddle.utils.unique_name.guard():
                    avg_cost = model.net(args, is_train=False)
            dist_infer = DistributedInfer(
                main_program=test_origin_program,
                startup_program=test_startup_program)
            with paddle.static.program_guard(
                    main_program=dist_infer.get_dist_infer_program()):
                model.do_distributed_testing(fleet)
        fleet.stop_worker()