#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import abc

import paddle.fluid as fluid
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGD

from paddle.fluid.incubate.fleet.base.mode import Mode
from paddle.fluid.incubate.fleet.base.role_maker import MPISymetricRoleMaker
from paddle.fluid.incubate.fleet.base.role_maker import RoleMakerBase
from paddle.fluid.incubate.fleet.base.role_maker import UserDefinedRoleMaker
from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecision
from . import mode

__all__ = ['Fleet', 'DistributedOptimizer']
__all__ += mode.__all__


class Fleet(object):
    """
    Fleet is the base class, transpiler and pslib are implementation of Fleet.

    Args:
        mode(Mode): the implementation of Fleet's mode.

    Returns:
        None
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, mode):
        self._is_initialized = False
        self._mode = mode
        self._optimizer = None
        self._role_maker = None
        self._executor = None

    def is_first_worker(self):
        """
        Check whether the node is the first instance of worker.

        Returns:
            bool: True if this is the first node of worker,
                  False if not.
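
        Examples:
            A minimal sketch (assumes an initialized ``fleet`` instance from a
            concrete implementation such as transpiler or pslib):

            .. code-block:: python

                # for instance, only the first worker saves the model
                if fleet.is_first_worker():
                    print("this is the first worker")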
        """
        return self._role_maker.is_first_worker()

    def worker_index(self):
        """
        Get current worker index.

        Returns:
            int: node id
        """
        return self._role_maker.worker_index()

    def worker_num(self):
        """
        Get the total number of workers.

        Returns:
            int: number of workers
        """
        return self._role_maker.worker_num()

    def is_worker(self):
        """
        Check whether the node is an instance of worker.

        Returns:
            bool: True if this is a node of worker,
                  False if not.
        """
        return self._role_maker.is_worker()

    def worker_endpoints(self, to_string=False):
        """
        Get current worker endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: worker endpoints
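
        Examples:
            A minimal sketch (endpoint addresses are illustrative):

            .. code-block:: python

                eps = fleet.worker_endpoints()
                # e.g. ["127.0.0.1:1001", "127.0.0.1:1002"]
                ep_str = fleet.worker_endpoints(to_string=True)
                # e.g. "127.0.0.1:1001,127.0.0.1:1002"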
        """

        if to_string:
            return ",".join(self._role_maker.get_trainer_endpoints())
        else:
            return self._role_maker.get_trainer_endpoints()

    def server_num(self):
        """
        Get the total number of servers.

        Returns:
            int: server number
        """
        return len(self._role_maker.get_pserver_endpoints())

    def server_index(self):
        """
        Get current server index.

        Returns:
            int: node id
        """
        return self._role_maker.server_index()

    def server_endpoints(self, to_string=False):
        """
        Get current server endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: server endpoints
        """

        if to_string:
            return ",".join(self._role_maker.get_pserver_endpoints())
        else:
            return self._role_maker.get_pserver_endpoints()

    def is_server(self):
        """
        Check whether the node is an instance of server.

        Returns:
            bool: True if this is a node of server,
                  False if not.
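
        Examples:
            A minimal sketch of the usual entry-point dispatch (assumes an
            initialized ``fleet`` instance from a concrete implementation):

            .. code-block:: python

                if fleet.is_server():
                    fleet.init_server()
                    fleet.run_server()
                elif fleet.is_worker():
                    fleet.init_worker()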
        """
        return self._role_maker.is_server()

    def split_files(self, files):
        """
        Split files before distributed training, for example:

        example 1: files is [a, b, c, d, e] and trainer_num = 2, then trainer
                   0 gets [a, b, c] and trainer 1 gets [d, e].
        example 2: files is [a, b] and trainer_num = 3, then trainer 0 gets
                   [a], trainer 1 gets [b] and trainer 2 gets [].

        Args:
            files(list): the list of files to be read.

        Returns:
            list: the files that belong to this worker.
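
        Examples:
            A minimal usage sketch (file names are illustrative):

            .. code-block:: python

                # with 2 trainers, worker 0 receives ["a", "b", "c"]
                # and worker 1 receives ["d", "e"]
                my_files = fleet.split_files(["a", "b", "c", "d", "e"])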
        """
        if not isinstance(files, list):
            raise TypeError("files should be a list of files to be read.")

        trainer_id = self.worker_index()
        trainers = self.worker_num()

        remainder = len(files) % trainers
        # integer division keeps block sizes as ints under Python 3 as well
        blocksize = len(files) // trainers

        blocks = [blocksize] * trainers
        for i in range(remainder):
            blocks[i] += 1

        trainer_files = [[]] * trainers
        begin = 0
        for i in range(trainers):
            trainer_files[i] = files[begin:begin + blocks[i]]
            begin += blocks[i]

        return trainer_files[trainer_id]

    def init(self, role_maker=None):
        """
        Should be called only once in a user's python scripts. init()
        initializes the RoleMaker, which is used to identify the current
        node's role, e.g. worker or server.

        Args:
            role_maker(RoleMakerBase): subclass of RoleMakerBase.

        Returns:
            None
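
        Examples:
            A minimal sketch with a user-defined role maker (ids, roles and
            endpoints are illustrative; ``fleet`` is an instance of a concrete
            implementation):

            .. code-block:: python

                from paddle.fluid.incubate.fleet.base import role_maker

                role = role_maker.UserDefinedRoleMaker(
                    current_id=0,
                    role=role_maker.Role.WORKER,
                    worker_num=2,
                    server_endpoints=["127.0.0.1:36011", "127.0.0.1:36012"])
                fleet.init(role)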
        """
        self._executor = Executor(fluid.CPUPlace())

        if role_maker and not isinstance(role_maker, RoleMakerBase):
            raise TypeError("role_maker must be an instance of RoleMakerBase")
        elif role_maker is None:
            # fall back to the MPI-based role maker when none is given,
            # otherwise generate_role() below would be called on None
            role_maker = MPISymetricRoleMaker()

        self._role_maker = role_maker
        self._role_maker.generate_role()
        self._is_initialized = True

    def all_reduce_worker(self, input, output):
        """
        all reduce between workers, only support array of one dim.

        Args:
            input(list|numpy.array): array of one dim
            output(list|numpy.array): array of one dim
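
        Examples:
            A minimal sketch (array values are illustrative; the reduction is
            element-wise across all workers):

            .. code-block:: python

                import numpy as np

                input = np.array([1.0, 2.0, 3.0])
                output = np.zeros_like(input)
                fleet.all_reduce_worker(input, output)
                # output now holds the reduced values from all workers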
        """
        self._role_maker.all_reduce_worker(input, output)

    def barrier_worker(self):
        """
        barrier between workers
        """
        self._role_maker.barrier_worker()

    @abc.abstractmethod
    def init_worker(self):
        pass

    @abc.abstractmethod
    def init_server(self, model_dir=None):
        pass

    @abc.abstractmethod
    def run_server(self):
        pass

    @abc.abstractmethod
    def stop_worker(self):
        pass

    @abc.abstractmethod
    def distributed_optimizer(self, optimizer, strategy=None):
        pass

    @abc.abstractmethod
    def save_inference_model(self,
                             executor,
                             dirname,
                             feeded_var_names,
                             target_vars,
                             main_program=None,
                             export_for_deployment=True):
        pass

    @abc.abstractmethod
    def save_persistables(self, executor, dirname, main_program=None):
        pass


class DistributedOptimizer(object):
    """
    DistributedOptimizer is a wrapper for paddle.fluid.optimizer.
    A user should pass a paddle.fluid.optimizer instance to
    DistributedOptimizer, on which the minimize() interface is implemented.
    DistributedOptimizer is the starting point for a user who wants to
    run distributed training. The optimization information is stored in
    the Fleet() instance, which holds the global information about the
    current distributed training job.

    Args:
        optimizer(Optimizer): subclass of Optimizer.
        strategy(any): user-defined configuration for the Optimizer.

    Returns:
        None
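
    Examples:
        A minimal sketch of the intended usage (assumes an initialized
        ``fleet`` instance from a concrete Fleet implementation):

        .. code-block:: python

            optimizer = fluid.optimizer.SGD(learning_rate=0.01)
            optimizer = fleet.distributed_optimizer(optimizer)
            optimizer.minimize(loss)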

    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, optimizer, strategy=None):
        # SGD.__bases__ is (Optimizer,), so this check accepts any
        # paddle.fluid Optimizer subclass without importing Optimizer directly
        if not isinstance(optimizer, SGD.__bases__) \
                and not isinstance(optimizer, OptimizerWithMixedPrecision):
            raise TypeError("optimizer must be an instance of Optimizer")

        self._optimizer = optimizer
        self._strategy = strategy

    @abc.abstractmethod
    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        First part of `minimize`, do auto-diff to append backward ops for
        the current program.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables that should be ignored.
            callbacks (list|None): list of callables to run when appending backward
                operator for one parameter.

        Returns:
            list: list of (param, grad) pairs; grad is the output of backward.

        Examples:
            See examples in `apply_gradients`.
        """
        pass

    @abc.abstractmethod
    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pairs to do optimization with.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                loss = network()
                optimizer = fluid.optimizer.SGD(learning_rate=0.1)
                params_grads = optimizer.backward(loss)
                # you may append operations for params_grads here
                # ...
                optimizer.apply_gradients(params_grads)
        """
        pass

    @abc.abstractmethod
    def minimize(self,
                 losses,
                 scopes=None,
                 startup_programs=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add operations to minimize `loss` by updating `parameter_list`.

        This method combines the `backward()` and `apply_gradients()`
        interfaces into one call.

        Args:
            losses (Variable|Variable List): loss variables to run optimizations on.
            scopes (Scope|Scope List): scope instances.
            startup_programs (Program|Program List): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables that should be ignored.

        Returns:
            tuple: (optimize_ops, params_grads), which are the list of
            operators appended and the list of (param, grad) Variable pairs
            for optimization.
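
        Examples:
            A minimal sketch (assumes ``loss`` is built by a user-defined
            network and this object wraps a fluid optimizer):

            .. code-block:: python

                optimize_ops, params_grads = optimizer.minimize(loss)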
        """
        pass