#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import abc

from enum import Enum

from paddle.fluid.optimizer import SGD
from paddle.fluid.executor import Executor

from role_maker import RoleMakerBase
from role_maker import MPISymetricRoleMaker
from role_maker import UserDefinedRoleMaker


class Mode(Enum):
    """
    There are various modes for fleet; each is designed for a different
    distributed training implementation.
    """
    TRANSPILER = 1
    PSLIB = 2
    COLLECTIVE = 3


class Fleet(object):
    """
    Fleet is the base class; transpiler and pslib are implementations of Fleet.

    Args:
        mode(Mode): the mode in which this Fleet implementation runs.

    Returns:
        None
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, mode):
        assert isinstance(mode, Mode)
        self._is_initialized = False
        self._mode = mode
        self._optimizer = None
        self._role_maker = None
        self._executor = None

    def is_first_worker(self):
        """
        Check whether the node is the first instance of worker.

        Returns:
            bool: True if this is the first worker node,
                  False if not.
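
        Examples:
            .. code-block:: python

                # A sketch, assuming `fleet` is an initialized Fleet instance;
                # a common pattern for running one-off logic on a single node:
                if fleet.is_first_worker():
                    print("only the first worker runs this")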
        """
        return self._role_maker.is_first_worker()

    def worker_index(self):
        """
        Get current worker index.

        Returns:
            int: worker index
        """
        return self._role_maker.worker_index()

    def worker_num(self):
        """
        Get current total worker number.

        Returns:
            int: worker number
        """
        return len(self._role_maker.get_trainer_endpoints())

    def is_worker(self):
        """
        Check whether the node is an instance of worker.

        Returns:
            bool: True if this node is a worker,
                  False if not.
        """
        return self._role_maker.is_worker()

    def worker_endpoints(self, to_string=False):
        """
        Get current worker endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: worker endpoints
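
        Examples:
            .. code-block:: python

                # A sketch, assuming `fleet` is an initialized Fleet instance:
                eps = fleet.worker_endpoints()  # ["127.0.0.1:1001", "127.0.0.1:1002"]
                ep_str = fleet.worker_endpoints(to_string=True)  # "127.0.0.1:1001,127.0.0.1:1002"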
        """

        if to_string:
            return ",".join(self._role_maker.get_trainer_endpoints())
        else:
            return self._role_maker.get_trainer_endpoints()

    def server_num(self):
        """
        Get current total server number.

        Returns:
            int: server number
        """
        return len(self._role_maker.get_pserver_endpoints())

    def server_index(self):
        """
        Get current server index.

        Returns:
            int: server index
        """
        return self._role_maker.server_index()

    def server_endpoints(self, to_string=False):
        """
        Get current server endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: server endpoints
        """

        if to_string:
            return ",".join(self._role_maker.get_pserver_endpoints())
        else:
            return self._role_maker.get_pserver_endpoints()

    def is_server(self):
        """
        Check whether the node is an instance of server.

        Returns:
            bool: True if this node is a server,
                  False if not.
        """
        return self._role_maker.is_server()

    def split_files(self, files):
        """
        Split the file list before distributed training. For example, if
        files is [a, b, c, d, e] and trainer_num = 2, then trainer 0 gets
        [a, b, c] and trainer 1 gets [d, e].

        Args:
            files(list): the file list to be read.

        Returns:
            list: the files belonging to this worker.
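
        Examples:
            .. code-block:: python

                # A sketch, assuming fleet runs with 2 trainers: trainer 0
                # receives ["a", "b", "c"], trainer 1 receives ["d", "e"].
                my_files = fleet.split_files(["a", "b", "c", "d", "e"])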
        """
        file_num = len(files)
        trainer_id = self.worker_index()
        trainer_num = self.worker_num()
        if trainer_num > file_num:
            raise ValueError("trainer_num should be <= file_num : "
                             "%s > %s" % (trainer_num, file_num))
        start = 0
        end = 0
        # Distribute files as evenly as possible: the first
        # (file_num % trainer_num) trainers each receive one extra file.
        for i in range(0, trainer_id + 1):
            length = file_num // trainer_num + (i < (file_num % trainer_num))
            start = end
            end += length
        return files[start:end]

    def init(self, executor, role_maker=None):
        """
        Should be called only once in a user's python script. init()
        initializes the RoleMaker, which is used to identify the current
        node's role, e.g. worker, server, etc.

        Args:
            executor(Executor): The executor to run fleet.
            role_maker(RoleMakerBase): an instance of a RoleMakerBase subclass.

        Returns:
            None
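
        Examples:
            .. code-block:: python

                # A minimal sketch (assumes `import paddle.fluid as fluid` and
                # a `role` built with UserDefinedRoleMaker elsewhere):
                exe = fluid.Executor(fluid.CPUPlace())
                fleet.init(exe, role_maker=role)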
        """
        if not isinstance(executor, Executor):
            raise ValueError("executor must be an instance of Executor")

        if role_maker and not isinstance(role_maker, RoleMakerBase):
            raise ValueError("role_maker must be an instance of RoleMakerBase")

        if isinstance(role_maker, MPISymetricRoleMaker):
            self._role_maker = role_maker
            self._role_maker.generate_role()

        elif isinstance(role_maker, UserDefinedRoleMaker):
            self._role_maker = role_maker

        else:
            raise ValueError(
                "role_maker must be an instance of UserDefinedRoleMaker/MPISymetricRoleMaker"
            )

        self._is_initialized = True

    @abc.abstractmethod
    def init_worker(self):
        pass

    @abc.abstractmethod
    def init_server(self, model_dir=None):
        pass

    @abc.abstractmethod
    def run_server(self):
        pass

    @abc.abstractmethod
    def stop_worker(self):
        pass

    @abc.abstractmethod
    def stop(self):
        pass

    @abc.abstractmethod
    def distributed_optimizer(self, optimizer, strategy=None):
        pass

    @abc.abstractmethod
    def save_inference_model(self,
                             dirname,
                             feeded_var_names,
                             target_vars,
                             main_program=None,
                             export_for_deployment=True):
        pass

    @abc.abstractmethod
    def save_persistables(self, dirname, main_program=None):
        pass


class DistributedOptimizer(object):
    """
    DistributedOptimizer is a wrapper for paddle.fluid.optimizer.
    A user should pass a paddle.fluid.optimizer to DistributedOptimizer,
    whose minimize() function is implemented.
    DistributedOptimizer is the starting point for a user who wants to
    run distributed training. The optimization information is stored in
    a Fleet() instance, which holds the global information about the
    current distributed training job.

    Args:
        optimizer(Optimizer): subclass of Optimizer.
        strategy(any): user-defined configuration for the Optimizer.

    Returns:
        None
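
    Examples:
        .. code-block:: python

            # A sketch of the typical flow, assuming `network` builds the model
            # and `fleet` has already been initialized:
            loss = network()
            optimizer = fluid.optimizer.SGD(learning_rate=0.1)
            optimizer = fleet.distributed_optimizer(optimizer)
            optimizer.minimize(loss)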

    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, optimizer, strategy=None):
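        # SGD.__bases__ is (Optimizer,); isinstance() accepts a tuple of
        # classes, so this accepts any Optimizer subclass instance.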
        if not isinstance(optimizer, SGD.__bases__):
            raise ValueError("optimizer must be an instance of Optimizer")

        self._optimizer = optimizer
        self._strategy = strategy

    @abc.abstractmethod
    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        """
        First part of `minimize`, do auto-diff to append backward ops for
        the current program.

        Args:
            loss (Variable): loss variable to run optimizations.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables that should be ignored.
            callbacks (list|None): list of callables to run when appending backward
                operator for one parameter.

        Return:
            list: list of (param, grad) pair, grad is the output of backward.

        Examples:
            See examples in `apply_gradients`.
        """
        pass

    @abc.abstractmethod
    def apply_gradients(self, params_grads):
        """
        Second part of `minimize`, appending optimization operators for
        given `params_grads` pairs.

        Args:
            params_grads (list): list of (param, grad) pair to do optimization.

        Returns:
            list: A list of operators appended to the current program.

        Examples:
            .. code-block:: python

                loss = network()
                optimizer = fluid.optimizer.SGD(learning_rate=0.1)
                params_grads = optimizer.backward(loss)
                # you may append operations for params_grads here
                # ...
                optimizer.apply_gradients(params_grads)
        """
        pass

    @abc.abstractmethod
    def minimize(self,
                 losses,
                 scopes=None,
                 startup_programs=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add operations to minimize `loss` by updating `parameter_list`.

        This method combines `backward()` and
        `apply_gradients()` into one.

        Args:
            losses (Variable|Variable List): loss variables to run optimizations on.
            scopes (Scope|Scope List): scope instances.
            startup_programs (Program|Program List): startup programs for initializing
                parameters in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables that should be ignored.

        Returns:
            tuple: (optimize_ops, params_grads), which are the list of operators
            appended and the list of (param, grad) Variable pairs for optimization.
        """
        pass