#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
"""
    high level unit test for distribute fleet.
"""
import argparse
import os
import pickle
import subprocess
import sys
import time
import traceback
import math
import collections
import socket
from contextlib import closing

import six
import unittest
import numpy as np
import tempfile

import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory

__all__ = ['FleetDistRunnerBase', 'TestFleetBase', 'runtime_main']

RUN_STEP = 5
LEARNING_RATE = 0.01
DIST_UT_PORT = 0


class FleetDistRunnerBase(object):
    """
        run_pserver,run_trainer : after init role, using transpiler split program
        net : implment by child class, the network of model
        do training : exe run program
    """

    def generate_strategy(self, args):
        self.strategy = None
        if args.mode == "async":
            self.strategy = StrategyFactory.create_async_strategy()
        elif args.mode == "sync":
            self.strategy = StrategyFactory.create_sync_strategy()
        elif args.mode == "half_async":
            self.strategy = StrategyFactory.create_half_async_strategy()
        elif args.mode == "geo":
            self.strategy = StrategyFactory.create_geo_strategy(
                args.geo_sgd_need_push_nums)
        return self.strategy

    def run_pserver(self, args):
        if args.role.upper() != "PSERVER":
            raise ValueError("args role must be PSERVER")

        role = role_maker.UserDefinedRoleMaker(
            current_id=args.current_id,
            role=role_maker.Role.SERVER,
            worker_num=args.trainers,
            server_endpoints=args.endpoints.split(","))

        fleet.init(role)

        strategy = self.generate_strategy(args)

        avg_cost = self.net()

        use_grad_clip = int(os.getenv('GRAD_CLIP', 0))
        if use_grad_clip:
            # 1: clip_by_value; 2: clip_by_norm; 3: clip_by_global_norm
            if use_grad_clip == 1:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByValue(2.0))
            elif use_grad_clip == 2:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByNorm(2.0))
            elif use_grad_clip == 3:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByGlobalNorm(2.0))

        optimizer = fluid.optimizer.SGD(LEARNING_RATE)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

        fleet.init_server()
        fleet.run_server()

    def run_dataset_trainer(self, args):
        if args.role.upper() != "TRAINER":
            raise ValueError("args role must be TRAINER")

        role = role_maker.UserDefinedRoleMaker(
            current_id=args.current_id,
            role=role_maker.Role.WORKER,
            worker_num=args.trainers,
            server_endpoints=args.endpoints.split(","))

        fleet.init(role)

        strategy = self.generate_strategy(args)

        avg_cost = self.net()

        use_grad_clip = int(os.getenv('GRAD_CLIP', 0))
        if use_grad_clip:
            # 1: clip_by_value; 2: clip_by_norm; 3: clip_by_global_norm
            if use_grad_clip == 1:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByValue(2.0))
            elif use_grad_clip == 2:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByNorm(2.0))
            elif use_grad_clip == 3:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByGlobalNorm(2.0))

        optimizer = fluid.optimizer.SGD(LEARNING_RATE)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

        out = self.do_dataset_training(fleet)

    def run_pyreader_trainer(self, args):
        if args.role.upper() != "TRAINER":
            raise ValueError("args role must be TRAINER")

        role = role_maker.UserDefinedRoleMaker(
            current_id=args.current_id,
            role=role_maker.Role.WORKER,
            worker_num=args.trainers,
            server_endpoints=args.endpoints.split(","))

        fleet.init(role)

        strategy = self.generate_strategy(args)

        avg_cost = self.net()

        self.reader = fluid.io.PyReader(
            feed_list=self.feeds,
            capacity=64,
            iterable=False,
            use_double_buffer=False)
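
        # In a child class, do_pyreader_training typically decorates and
        # drives this reader; a minimal sketch (assuming `import paddle` and
        # a `sample_generator` defined by the child class -- both are
        # assumptions, not defined in this file):
        #
        #     self.reader.decorate_sample_list_generator(
        #         paddle.batch(sample_generator, batch_size=4))
        #     self.reader.start()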

        optimizer = fluid.optimizer.SGD(LEARNING_RATE)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

        out = self.do_pyreader_training(fleet)

    def net(self, batch_size=4, lr=0.01):
        raise NotImplementedError(
            "net should be implemented by child classes.")

    def do_dataset_training(self, fleet):
        raise NotImplementedError(
            "do_dataset_training should be implemented by child classes.")

    def do_pyreader_training(self, fleet):
        raise NotImplementedError(
            "do_pyreader_training should be implemented by child classes.")


class TestFleetBase(unittest.TestCase):
    """
        start_pserver,start_trainer : add start cmd to test
        run_cluster : using multi process to test distribute program
    """

    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def setUp(self):
        self._mode = "sync"
        self._reader = "pyreader"
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()

        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))

        if DIST_UT_PORT:
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            DIST_UT_PORT += 2
        else:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())

        self._python_interp = sys.executable
        self._geo_sgd_need_push_nums = 5
        self._grad_clip_mode = 0
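        # _grad_clip_mode: 0 disables clipping; 1/2/3 select
        # GradientClipByValue / ByNorm / ByGlobalNorm in the child
        # processes (exported via the GRAD_CLIP env var in _run_cluster).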
        self._setup_config()

    def _find_free_port(self):
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def _start_pserver(self, cmd, required_envs):
        ps0_cmd, ps1_cmd = cmd.format(0), cmd.format(1)

        ps0_pipe = open(tempfile.gettempdir() + "/ps0_err.log", "wb+")
        ps1_pipe = open(tempfile.gettempdir() + "/ps1_err.log", "wb+")

        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)
        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _start_trainer(self, cmd, required_envs):
        tr0_cmd, tr1_cmd = cmd.format(0), cmd.format(1)

        tr0_pipe = open(tempfile.gettempdir() + "/tr0_err.log", "wb+")
        tr1_pipe = open(tempfile.gettempdir() + "/tr1_err.log", "wb+")

        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=required_envs)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=required_envs)

        return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe

    def _run_cluster(self, model, envs):
        env = {'CPU_NUM': '1', 'GRAD_CLIP': str(self._grad_clip_mode)}
        env.update(envs)

        python_path = self._python_interp

        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            python_path += " -m coverage run --branch -p"

        tr_cmd = "{0} {1} --role trainer --endpoints {2} --current_id {{}} --trainers {3} --mode {4} --geo_sgd_need_push_nums {5} --reader {6}".format(
            python_path, model, self._ps_endpoints, self._trainers, self._mode,
            self._geo_sgd_need_push_nums, self._reader)

        ps_cmd = "{0} {1} --role pserver --endpoints {2} --current_id {{}} --trainers {3} --mode {4} --geo_sgd_need_push_nums {5} --reader {6}".format(
            python_path, model, self._ps_endpoints, self._trainers, self._mode,
            self._geo_sgd_need_push_nums, self._reader)

        # Run dist train to compare with local results
        ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env)
        tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env)

        # Wait until both trainer processes terminate
        while True:
            stat0 = tr0.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break

        tr0_out, tr0_err = tr0.communicate()
        tr1_out, tr1_err = tr1.communicate()

        # close the redirected stderr log files
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()

        ps0.terminate()
        ps1.terminate()

        # Debug helpers (disabled): dump the trainer stdout to log files.
        '''
        with open(tempfile.gettempdir() + "/tr0_out.log", "wb+") as wn:
            wn.write(tr0_out)
        with open(tempfile.gettempdir() + "/tr1_out.log", "wb+") as wn:
            wn.write(tr1_out)
        '''

        # Debug helpers (disabled): print the pserver stderr logs.
        '''
        with open(tempfile.gettempdir() + "/ps0_err.log", "r") as fn:
            sys.stderr.write("ps0 stderr: %s\n" % fn.read())
        with open(tempfile.gettempdir() + "/ps1_err.log", "r") as fn:
            sys.stderr.write("ps1 stderr: %s\n" % fn.read())
        '''

        # Debug helpers (disabled): print the trainer stderr logs.
        '''
        with open(tempfile.gettempdir() + "/tr0_err.log", "r") as fn:
            sys.stderr.write('trainer 0 stderr: %s\n' % fn.read())
        with open(tempfile.gettempdir() + "/tr1_err.log", "r") as fn:
            sys.stderr.write('trainer 1 stderr: %s\n' % fn.read())
        '''

        return 0, 0

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs={}):
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "http_proxy": ""
        }

        required_envs.update(need_envs)

        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"

        tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
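
# A sketch of how a concrete test case drives this harness (the class and
# model file names below are assumptions, not defined here):
#
#     class TestDistExample(TestFleetBase):
#         def _setup_config(self):
#             self._mode = "async"
#             self._reader = "pyreader"
#
#         def test_dist_train(self):
#             self.check_with_place(
#                 "dist_fleet_example.py", check_error_log=True)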


def runtime_main(test_class):
    parser = argparse.ArgumentParser(description='Run Fleet test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument('--current_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument(
        '--mode',
        type=str,
        required=False,
        default='geo',
        choices=['sync', 'async', 'half_async', 'geo'])
    parser.add_argument(
        '--geo_sgd_need_push_nums', type=int, required=False, default=2)
    parser.add_argument(
        '--reader',
        type=str,
        required=False,
        default='dataset',
        choices=['dataset', 'pyreader'])
    args = parser.parse_args()

    model = test_class()
    if args.role == "pserver":
        model.run_pserver(args)
    else:
        if args.reader == "dataset":
            model.run_dataset_trainer(args)
        else:
            model.run_pyreader_trainer(args)
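

# In the companion model script (the file and class names here are
# hypothetical), the harness entry point is wired up like this:
#
#     if __name__ == "__main__":
#         runtime_main(ExampleDistRunner)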