#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import paddle.fluid as fluid
import paddle.fluid.io as io
import paddle.fluid.transpiler.distribute_transpiler as dist_transpiler

from paddle.fluid.incubate.fleet.base.fleet_base import Fleet
from paddle.fluid.incubate.fleet.base.fleet_base import Mode
from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer


class DistributedStrategy(object):
    """
    Configuration of the distributed strategy for collective training.
    At most one precision flag and one algorithmic-communication flag
    should be enabled; build() resolves conflicts and fills
    self.strategy_map.
    """

    def __init__(self):
        # precision configs
        self.use_fp16 = False
        self.use_fp32 = True
        # algorithmic communication
        self.local_sgd = False
        self.dgc = False
        # communication topology configs
        self.h_allreduce = False

    def build(self):
        self.strategy_map = {}
        # ensure only one precision config is enabled (FP32 wins on conflict)
        if self.use_fp32 and self.use_fp16:
            self.use_fp16 = False
        # ensure only one algorithmic-communication mode is enabled
        # (DGC wins on conflict)
        if self.local_sgd and self.dgc:
            self.local_sgd = False
        self.strategy_map["fp16"] = self.use_fp16
        self.strategy_map["fp32"] = self.use_fp32
        self.strategy_map["localsgd"] = self.local_sgd
        self.strategy_map["dgc"] = self.dgc
        self.strategy_map["h_allreduce"] = self.h_allreduce


class DistributedOptimizerFactory(object):
    def __init__(self):
        self.strategy_to_optimizer_map()

    def strategy_to_optimizer_map(self):
        pattern = {}
        pattern["fp16"] = ["FP16SGDOptimizer", "FP16LocalSGDOptimizer"]
        pattern["fp32"] = ["FP32SGDOptimizer", "FP32LocalSGDOptimizer"]
        pattern["localsgd"] = ["FP16LocalSGDOptimizer", "FP32LocalSGDOptimizer"]
        pattern["h_allreduce"] = [
61 62 63 64
            "FP32SGDOptimizer",
            "FP32LocalSGDOptimizer",
            "FP16SGDOptimizer",
            "FP16LocalSGDOptimizer",
65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
        ]
        self.pattern = pattern

    def create_by_strategy(self, optimizer, strategy):
        if strategy is None:
            strategy = DistributedStrategy()
        strategy.build()
        strategy_list = []
        for key in strategy.strategy_map:
            if strategy.strategy_map[key]:
                strategy_list.append(self.pattern[key])
        # the chosen optimizer class must satisfy every enabled strategy,
        # i.e. it lies in the intersection of the candidate lists above
        classname = list(set.intersection(*map(set, strategy_list)))[0]
        return globals()[classname](optimizer, strategy)
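
# Worked example (illustrative): with the default FP32 precision plus
# local_sgd enabled, the candidate lists are
#     pattern["fp32"]     -> {"FP32SGDOptimizer", "FP32LocalSGDOptimizer"}
#     pattern["localsgd"] -> {"FP16LocalSGDOptimizer", "FP32LocalSGDOptimizer"}
# and their intersection uniquely selects FP32LocalSGDOptimizer. When the
# intersection contains more than one class, the pick is arbitrary because
# set ordering is undefined.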


class Collective(Fleet):
    def __init__(self):
        super(Collective, self).__init__(Mode.COLLECTIVE)
        self._local_ip = 0

    def init_worker(self):
        logging.warning(
            "You should not call 'init_worker' method for collective mode.")

    def run_worker(self, main_programs=None, scopes=None):
        logging.warning(
            "You should not call 'run_worker' method for collective mode.")

    def init_server(self, model_dir=None):
        logging.warning(
            "You should not call 'init_server' method for collective mode.")

    def run_server(self):
        logging.warning(
            "You should not call 'run_server' method for collective mode.")

    def stop_worker(self):
        logging.warning(
            "You should not call 'stop_worker' method for collective mode.")

    def distributed_optimizer(self, optimizer, strategy=None):
        optimizer_factory = DistributedOptimizerFactory()

        self._optimizer = \
            optimizer_factory.create_by_strategy(optimizer, strategy)
        return self._optimizer

    def save_inference_model(self,
                             executor,
                             dirname,
                             feeded_var_names=None,
                             target_vars=None,
                             main_program=None,
                             export_for_deployment=True):
        # note: the fleet-held executor (self._executor) is used here rather
        # than the `executor` argument
        io.save_inference_model(dirname, feeded_var_names, target_vars,
                                self._executor, main_program, None, None,
                                export_for_deployment)

    def save_persistables(self, executor, dirname, main_program=None):
        io.save_persistables(self._executor, dirname, main_program, None)


fleet = Collective()
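
# A minimal end-to-end sketch for collective mode (hypothetical trainer
# script; a cluster launcher is assumed to provide the worker endpoints
# that the fleet instance relies on):
#
#     from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
#
#     optimizer = fluid.optimizer.SGD(learning_rate=0.01)
#     optimizer = fleet.distributed_optimizer(optimizer, DistributedStrategy())
#     optimizer.minimize(avg_cost)  # avg_cost: the loss Variable of the network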


class CollectiveOpBasedOptimizer(DistributedOptimizer):
    """
132 133
    Collective Operator Base Class For Distributed Optimizer
    The class is invisible to a user
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
    """

    def __init__(self, optimizer, strategy=None):
        super(CollectiveOpBasedOptimizer, self).__init__(optimizer, strategy)

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        return self._optimizer.backward(loss, startup_program, parameter_list,
                                        no_grad_set, callbacks)

    def apply_gradients(self, params_grads):
        return self._optimizer.apply_gradients(params_grads)


class FP16SGDOptimizer(CollectiveOpBasedOptimizer):
    """
    Do all-reduce within every minibatch.
    """

    def __init__(self, optimizer, strategy=None):
        super(FP16SGDOptimizer, self).__init__(optimizer, strategy)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        # FP16 all-reduce is not implemented yet; this method is a placeholder
        pass


class FP32LocalSGDOptimizer(CollectiveOpBasedOptimizer):
    def __init__(self, optimizer, strategy=None):
        super(FP32LocalSGDOptimizer, self).__init__(optimizer, strategy)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        opts, param_and_grads = self._optimizer.minimize(loss)
        # rewrite the single-node program for local SGD: each trainer updates
        # parameters locally and the transpiler inserts the cross-trainer
        # parameter synchronization
        config = fluid.DistributeTranspilerConfig()
        config.mode = 'collective'
        config.collective_mode = 'local_sgd'
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=fleet.worker_index(),
            trainers=fleet.worker_endpoints(),
            current_endpoint=fleet.worker_endpoints()[fleet.worker_index()],
            startup_program=startup_program,
            program=loss.block.program)
        return opts, param_and_grads


class FP32SGDOptimizer(CollectiveOpBasedOptimizer):
    def __init__(self, optimizer, strategy=None):
        super(FP32SGDOptimizer, self).__init__(optimizer, strategy)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        opts, param_and_grads = self._optimizer.minimize(loss)
        # rewrite the single-node program so that gradients are aggregated
        # with all-reduce across trainers before each parameter update
        config = fluid.DistributeTranspilerConfig()
        config.mode = 'collective'
        config.collective_mode = 'grad_allreduce'
        t = fluid.DistributeTranspiler(config=config)

        t.transpile(
            trainer_id=fleet.worker_index(),
            trainers=fleet.worker_endpoints(),
            current_endpoint=fleet.worker_endpoints()[fleet.worker_index()],
            startup_program=startup_program,
            program=loss.block.program)
        return opts, param_and_grads


class CollectiveOptimizer(DistributedOptimizer):
    """
    DistributedOptimizer is a wrapper for paddle.fluid.optimizer
    A user should pass a paddle.fluid.optimizer to DistributedOptimizer
    minimize() function is implemented.
    DistributedOptimizer is the starting point for a user who wants to
    run distributed training. The optimized information will be stored in
    Fleet() instance who holds the global information about current distributed
    training.
    """

    def __init__(self, optimizer, strategy=None):
        super(CollectiveOptimizer, self).__init__(optimizer, strategy)
        self.strategy = strategy

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        return self._optimizer.backward(loss, startup_program, parameter_list,
                                        no_grad_set, callbacks)

    def apply_gradients(self, params_grads):
        return self._optimizer.apply_gradients(params_grads)

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        minimize a program through loss
        Args:
            loss (Variable|Variable List): loss variable or loss variable list to run optimization.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables should be ignored.
        Returns:
            tuple: (optimize_ops, params_grads) which are, list of operators appended;
            and list of (param, grad) Variables pair for optimization.
        Note that in parameter server mode, a worker will not get anything about optimize_os
        Because optmizer algorithms run on pserver side. We will make this usable in pserver
        process, but currently the optimization part is written into Fleet(). A user does not
        need to care about how to startup a pserver node.
        """
        optimize_ops, param_grads = self._optimizer.minimize(
            loss, startup_program, parameter_list, no_grad_set)

        worker_endpoints = fleet.worker_endpoints()
        trainer_id = fleet.worker_index()
        current_endpoint = fleet.worker_endpoints()[trainer_id]

        startup_program = startup_program if startup_program else \
            fluid.framework.default_startup_program()

        # rewrite the program with the NCCL2 transpiler so that gradients
        # are communicated across trainers
        config = dist_transpiler.DistributeTranspilerConfig()
        config.mode = "nccl2"
        t = dist_transpiler.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id,
            trainers=','.join(worker_endpoints),
            startup_program=startup_program,
            current_endpoint=current_endpoint)

        return optimize_ops, param_grads
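
# Sketch of typical use (illustrative): wrap a base optimizer and call
# minimize(); the fleet instance must already know this trainer's index
# and the full endpoint list.
#
#     dist_optimizer = CollectiveOptimizer(fluid.optimizer.SGD(learning_rate=0.01))
#     optimize_ops, params_grads = dist_optimizer.minimize(avg_cost)
#     # then train with an executor bound to this trainer's device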