#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import paddle.fluid as fluid
import paddle.fluid.io as io
import paddle.fluid.transpiler.distribute_transpiler as dist_transpiler

from paddle.fluid.incubate.fleet.base.fleet_base import Fleet
from paddle.fluid.incubate.fleet.base.fleet_base import Mode
from paddle.fluid.incubate.fleet.base.fleet_base import DistributedOptimizer

from paddle.fluid import compiler

import os
import sys
import six


class LambConfig(object):
    def __init__(self):
        pass


class DistFCConfig(object):
    def __init__(self):
        pass


class Collective(Fleet):
    def __init__(self):
        super(Collective, self).__init__(Mode.COLLECTIVE)
        self._local_ip = 0

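        # These program handles are filled in by CollectiveOptimizer.minimize():
        # the startup program, the user's original main program, and the
        # (possibly compiled) main program that is actually run.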
        self.startup_program = None
        self._origin_program = None
        self.main_program = None

    def init_worker(self):
        logging.warn(
            "You should not call 'init_worker' method for collective mode.")

    def run_worker(self, main_programs=None, scopes=None):
        logging.warn(
            "You should not call 'run_worker' method for collective mode.")

    def init_server(self, model_dir=None):
        logging.warn(
            "You should not call 'init_server' method for collective mode.")

    def run_server(self):
        logging.warn(
            "You should not call 'run_server' method for collective mode.")

    def stop_worker(self):
        logging.warn(
            "You should not call 'stop_worker' method for collective mode.")

    def distributed_optimizer(self, optimizer, strategy=None):
        self._optimizer = \
            CollectiveOptimizer(optimizer, strategy)
        return self._optimizer

    def save_inference_model(self,
                             executor,
                             dirname,
                             feeded_var_names=None,
                             target_vars=None,
                             main_program=None,
                             export_for_deployment=True):
        io.save_inference_model(dirname, feeded_var_names, target_vars,
                                executor, main_program, None, None,
                                export_for_deployment)

    def save_persistables(self, executor, dirname, main_program=None):
        io.save_persistables(executor, dirname, main_program, None)


fleet = Collective()


class DistributedStrategy(fluid.BuildStrategy):
    """
    Distributed build strategy for collective training. It extends
    fluid.BuildStrategy with collective-specific options such as the
    communication mode and the number of NCCL communicators.
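
    A rough configuration sketch (``optimizer`` is assumed to be an ordinary
    fluid optimizer created by the caller):

        dist_strategy = DistributedStrategy()
        dist_strategy.use_local_sgd = True
        dist_strategy.nccl_comm_num = 2
        optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)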
    """

    def __init__(self):
        super(DistributedStrategy, self).__init__()
        self.use_local_sgd = False
        self.use_dist_fc = False

        self.dist_fc_config = None  # DistFCConfig
        self.mode = "nccl2"  # or collective
        self.collective_mode = None  # local_sgd or grad_allreduce
        self.nccl_comm_num = 1

        self.exec_strategy = fluid.ExecutionStrategy()

        # configurations below are used for unit test
        self._ut4grad_allreduce = False


class CollectiveOpBasedOptimizer(DistributedOptimizer):
    """
    Collective Operator Base Class For Distributed Optimizer
    The class is invisible to a user
    """

    def __init__(self, optimizer, strategy=None):
        assert isinstance(
            strategy,
            DistributedStrategy), "strategy must be DistributedStrategy"
        super(CollectiveOpBasedOptimizer, self).__init__(optimizer, strategy)

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        return self._optimizer.backward(loss, startup_program, parameter_list,
                                        no_grad_set, callbacks)

    def apply_gradients(self, params_grads):
        return self._optimizer.apply_gradients(params_grads)


class CollectiveOptimizer(DistributedOptimizer):
    """
    CollectiveOptimizer is a wrapper for a paddle.fluid optimizer: a user
    passes a paddle.fluid.optimizer instance, and only minimize() is
    implemented on top of it. It is the starting point for running collective
    distributed training. The optimization state is stored in the Fleet()
    instance, which holds the global information about the current distributed
    training job.
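
    A minimal usage sketch (the role maker and ``fleet.init`` come from
    fleet_base and are assumptions here, not defined in this module; ``loss``
    is assumed to be built by the caller):

        from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
        from paddle.fluid.incubate.fleet.base import role_maker

        fleet.init(role_maker.PaddleCloudRoleMaker(is_collective=True))
        optimizer = fluid.optimizer.SGD(learning_rate=0.01)
        optimizer = fleet.distributed_optimizer(
            optimizer, strategy=DistributedStrategy())
        optimizer.minimize(loss)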
    """

    def __init__(self, optimizer, strategy=DistributedStrategy()):
        super(CollectiveOptimizer, self).__init__(optimizer, strategy)
        self.print_config = False

    def backward(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None,
                 callbacks=None):
        return self._optimizer.backward(loss, startup_program, parameter_list,
                                        no_grad_set, callbacks)

    def apply_gradients(self, params_grads):
        return self._optimizer.apply_gradients(params_grads)

    def _check_condition(self, name, **kwargs):
        for k, v in six.iteritems(kwargs):
            if v is True:
                assert False, "you can't use %s and %s together" % (name, k)

    def _check_collective_mode(self, main_program, optimizer, strategy):
        """
        Check the conflicting conditions.
        """
        if strategy.use_local_sgd:
            strategy.mode = "collective"
            strategy.collective_mode = "local_sgd"
            self._check_condition(
                "use_local_sgd",
                use_dgc=main_program._enable_dgc,
                use_dist_fc=strategy.use_dist_fc,
                use_lamb=main_program._use_lamb)

        if strategy.use_dist_fc:
            self._check_condition(
                "use_dist_fc",
                use_dgc=main_program._enable_dgc,
                use_local_sgd=strategy.use_local_sgd,
                use_lamb=main_program._use_lamb)
            assert strategy.dist_fc_config is not None, "DistributedStrategy.dist_fc_config should be set"

        if strategy._ut4grad_allreduce:
            strategy.mode = "collective"
            strategy.collective_mode = "grad_allreduce"
            self._check_condition(
                "_ut4grad_allreduce",
                use_dgc=main_program._enable_dgc,
                use_lamb=main_program._use_lamb)

        if self._strategy.collective_mode == "local_sgd" \
                or self._strategy.collective_mode == "grad_allreduce":
            assert self._strategy.mode == "collective", \
                "local_sgd and grad_allreduce can only be used under collective mode"

    def _transpile(self, startup_program, main_program):
        """
        Transpile the programs to distributed programs. And add the variables.
        """
        worker_endpoints = fleet.worker_endpoints()
        trainer_id = fleet.worker_index()
        current_endpoint = fleet.worker_endpoints()[trainer_id]
        worker_endpoints_env = ','.join(worker_endpoints)
        trainers_num = fleet.worker_num()

        if self.print_config:
            print("worker_endpoints:{} trainers_num:{} current_endpoint:{} \
                  trainer_id:{}".format(worker_endpoints, trainers_num,
                                        current_endpoint, trainer_id))

        # call transpiler
        config = dist_transpiler.DistributeTranspilerConfig()
        config.mode = self._strategy.mode
        config.collective_mode = self._strategy.collective_mode

        config.nccl_comm_num = self._strategy.nccl_comm_num
        config.use_hierarchical_allreduce = self._strategy.use_hierarchical_allreduce
        config.hierarchical_allreduce_inter_nranks = self._strategy.hierarchical_allreduce_inter_nranks

        t = dist_transpiler.DistributeTranspiler(config=config)
        t.transpile(
            trainer_id=trainer_id,
            trainers=worker_endpoints_env,
            startup_program=startup_program,
            program=main_program,
            current_endpoint=current_endpoint)

    def _get_node_ips_from_endpoints(self, endpoints):
        seen = set()
        ips = []
        for ep in endpoints:
            ip = ep.split(":")[0].strip()
            if ip not in seen:
                seen.add(ip)
                ips.append(ip)

        return ips

    def _node_num(self):
        worker_endpoints = fleet.worker_endpoints()
        node_ips = self._get_node_ips_from_endpoints(worker_endpoints)

        return len(node_ips)

    def _try_to_compile(self, startup_program, main_program):
        node_num = self._node_num()
        assert node_num >= 1, "nccl2 node_num must >= 1, now:{}".format(node_num)

        self._strategy.fuse_all_reduce_ops = True
        exec_strategy = self._strategy.exec_strategy

        if node_num <= 1:
            if self._strategy.nccl_comm_num > 1:
                logging.warn("set nccl_comm_num=1 since you only have 1 node.")
            self._strategy.nccl_comm_num = 1

            if self._strategy.use_hierarchical_allreduce:
                logging.warn(
                    "set use_hierarchical_allreduce=False since you only have 1 node."
                )
            self._strategy.use_hierarchical_allreduce = False

        sync_allreduce = os.getenv("FLAGS_sync_nccl_allreduce")
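        # With synchronous NCCL allreduce (the default), reserve one executor
        # thread per NCCL communicator, twice as many when hierarchical
        # allreduce is enabled, plus one extra thread.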
        if sync_allreduce is None or sync_allreduce == "1":
            exec_strategy.num_threads = self._strategy.nccl_comm_num + 1
            if self._strategy.use_hierarchical_allreduce:
                exec_strategy.num_threads = 2 * self._strategy.nccl_comm_num + 1
            if exec_strategy.num_threads > 4:
                logging.warn(
                    "if you use use_hierarchical_allreduce or "
                    "multiple nccl comms, please export FLAGS_sync_nccl_allreduce=0"
                )

        if self.print_config:
            print("node_num:", node_num, "num_threads:",
                  exec_strategy.num_threads, "use_hierarchical_allreduce:",
                  self._strategy.use_hierarchical_allreduce, "nccl_comm_num:",
                  self._strategy.nccl_comm_num, "FLAGS_sync_nccl_allreduce:",
                  sync_allreduce)

        self._transpile(startup_program, main_program)

        if self._strategy.mode == "collective":
            return main_program

        self._strategy.num_trainers = fleet.worker_num()
        self._strategy.trainer_id = fleet.worker_index()
        self._strategy.trainers_endpoints = fleet.worker_endpoints()
        self._strategy.enable_backward_optimizer_op_deps = True

        self._compiled_program = compiler.CompiledProgram(main_program)

        self._compiled_program.with_data_parallel(
            loss_name=self._loss.name,
            build_strategy=self._strategy,
            exec_strategy=self._strategy.exec_strategy,
            share_vars_from=None)

        return self._compiled_program

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Minimize a program through the loss.
        Args:
            loss (Variable|Variable List): loss variable or loss variable list to run optimization.
            startup_program (Program): startup_program for initializing parameters
                in `parameter_list`.
            parameter_list (list): list of Variables to update.
            no_grad_set (set|None): set of Variables should be ignored.
        Returns:
            tuple: (optimize_ops, params_grads) which are, list of operators appended;
            and list of (param, grad) Variables pair for optimization.
        Note that in parameter server mode, a worker will not get anything about optimize_ops
        because optimizer algorithms run on the pserver side. We will make this usable in the
        pserver process, but currently the optimization part is written into Fleet(). A user
        does not need to care about how to start up a pserver node.
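
        Example (a rough sketch; ``loss`` is assumed to be built by the caller):

            optimizer = fleet.distributed_optimizer(
                fluid.optimizer.SGD(learning_rate=0.01))
            optimize_ops, params_grads = optimizer.minimize(loss)
            # After minimize(), fleet.startup_program and fleet.main_program
            # hold the programs to run with an Executor.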
        """
        main_program = loss.block.program
        if startup_program is None:
            startup_program = fluid.default_startup_program()
        fleet.startup_program = startup_program

        self._loss = loss

        self._check_collective_mode(main_program, self._optimizer,
                                    self._strategy)

        optimize_ops, param_grads = self._optimizer.minimize(
            loss, startup_program, parameter_list, no_grad_set)

        fleet._origin_program = main_program
        fleet.main_program = self._try_to_compile(startup_program, main_program)

        return optimize_ops, param_grads