pipeline_optimizer.py
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from __future__ import division

import paddle.fluid as fluid
from paddle.fluid import core, unique_name
from ..base.private_helper_function import wait_server_ready
from paddle.fluid.optimizer import PipelineOptimizer as PO
from .meta_optimizer_base import MetaOptimizerBase
from .common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY, CollectiveHelper, is_loss_grad_op, is_backward_op, is_optimizer_op


class PipelineOptimizer(MetaOptimizerBase):
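    """Meta optimizer that enables pipeline parallelism by wrapping
    paddle.fluid's PipelineOptimizer and setting up the communication
    rings (global, pipeline, and data-parallel) it requires.
    """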
    def __init__(self, optimizer):
        super(PipelineOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        self.meta_optimizers_white_list = [
            "RecomputeOptimizer",
            "AMPOptimizer",
        ]
        self.meta_optimizers_black_list = ["GraphExecutionOptimizer", ]
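        # Communication ring ids: one global ring over all trainers, one
        # ring for data-parallel allreduce, and point-to-point pipeline
        # rings numbered from start_pipeline_ring_id upwards.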
        self.global_ring_id = 1
        self.dp_ring_id = 2
        self.start_pipeline_ring_id = 20  # Just a magic number

    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
        super(PipelineOptimizer, self)._set_basic_info(
            loss, role_maker, user_defined_optimizer, user_defined_strategy)
        self.micro_batch_size = user_defined_strategy.pipeline_configs[
            'micro_batch_size']
        self.num_microbatches = user_defined_strategy.pipeline_configs[
            'accumulate_steps']
        self.schedule_mode = user_defined_strategy.pipeline_configs[
            'schedule_mode']

    def _can_apply(self):
        if not self.role_maker._is_collective:
            return False

        return self.user_defined_strategy.pipeline

    def _disable_strategy(self, dist_strategy):
        dist_strategy.pipeline = False
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _enable_strategy(self, dist_strategy, context):
        dist_strategy.pipeline = True
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _broadcast_params(self, ring_id):
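        """Broadcast every non-distributed parameter from rank 0 of the
        given ring so that all replicas start from identical values."""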
        block = self.startup_program.global_block()
        param = None
        for param in block.iter_parameters():
            if param.is_distributed:
                continue

            block.append_op(
                type='c_broadcast',
                inputs={'X': param},
                outputs={'Out': param},
                attrs={
                    'ring_id': ring_id,
                    'root': 0,
                    OP_ROLE_KEY: OpRole.Forward
                })

        if param is None: return  # no parameter on this device
        block.append_op(
            type='c_sync_comm_stream',
            inputs={'X': param},
            outputs={'Out': param},
            attrs={'ring_id': ring_id,
                   OP_ROLE_KEY: OpRole.Forward})

    def _get_process_group_info(self):
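        """Collect endpoint and rank info for the global ring and, when
        there is more than one pipeline, for the data-parallel ring."""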
        # global ring info
        self.global_endpoints = self.endpoints
        self.global_rank = self.rank
        self.global_nranks = self.nranks

        # data parallel ring info
        if self.pipeline_num > 1:
            self.dp_rank = self.rank // self.inner_parallelism
            self.dp_nranks = self.nranks // self.inner_parallelism
            start_index = self.rank % self.inner_parallelism
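            # Every inner_parallelism-th endpoint holds the same pipeline
            # stage; those ranks form one data-parallel group.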
            self.dp_endpoints = [
                self.endpoints[start_index + i * self.inner_parallelism]
                for i in range(self.pipeline_num)
            ]

    def _init_process_group(self, pipeline_pair, pipeline_ring_map):
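        """
        Build communicators: a global ring over all trainers, a two-rank
        ring for each communicating pipeline-stage pair, and, when there
        are multiple pipelines, a data-parallel ring.
        """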
        self._get_process_group_info()
        collective_helper = CollectiveHelper(self.role_maker, wait_port=False)
        # Create global ring for all gpus (ring_id = 0)
        collective_helper._init_communicator(
            self.startup_program, self.current_endpoint, self.global_endpoints,
            self.global_rank, self.global_ring_id, True, self.global_ring_id,
            True)
        # Create pipeline rings
        if self.inner_parallelism > 1:
            pipeline_id = self.rank // self.inner_parallelism
            start_index = pipeline_id * self.inner_parallelism
            for pair in pipeline_pair:
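                # Encode the (source, destination) stage pair as a single
                # key into the ring map returned by the wrapped optimizer.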
                pair_key = pair[0] * 1000 + pair[1]
                ring_id = pipeline_ring_map[pair_key]
                assert ring_id >= self.start_pipeline_ring_id
                first_node = pair[0] + start_index
                second_node = pair[1] + start_index
                if self.rank != first_node and self.rank != second_node:
                    continue
                pipeline_endpoints = [
                    self.endpoints[first_node], self.endpoints[second_node]
                ]
                pipeline_rank = 0 if self.rank == first_node else 1
                pipeline_nranks = 2
                collective_helper._init_communicator(
                    self.startup_program, self.current_endpoint,
                    pipeline_endpoints, pipeline_rank, ring_id, False,
                    self.global_ring_id, True)

        # Create dp rings
        if self.pipeline_num > 1:
            collective_helper._init_communicator(
                self.startup_program, self.current_endpoint, self.dp_endpoints,
                self.dp_rank, self.dp_ring_id, True, self.global_ring_id, True)
            self._broadcast_params(self.dp_ring_id)

    def minimize_impl(self,
                      loss,
                      startup_program=None,
                      parameter_list=None,
                      no_grad_set=None):
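        """
        Apply the wrapped PipelineOptimizer first, then initialize the
        communication rings; with more than one pipeline, also insert
        loss-grad scaling and gradient allreduce for data parallelism.
        """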
        self.endpoints = self.role_maker._get_trainer_endpoints()
        self.current_endpoint = self.endpoints[self.role_maker._worker_index()]
        self.rank = self.role_maker._worker_index()
        self.nranks = self.role_maker._worker_num()

        self.wrapped_opt = PO(self.inner_opt,
                              num_microbatches=self.num_microbatches)
        orig_startup_program = startup_program if startup_program else \
            fluid.default_startup_program()
        block = loss.block
        program = block.program

        program._pipeline_opt = dict()
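        # Hand pipeline settings to the wrapped optimizer through the program.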
        program._pipeline_opt['local_rank'] = self.rank
        program._pipeline_opt['global_ring_id'] = self.global_ring_id
        program._pipeline_opt['ring_id'] = self.start_pipeline_ring_id
        program._pipeline_opt['micro_batch_size'] = self.micro_batch_size
        program._pipeline_opt['schedule_mode'] = self.schedule_mode
        optimize_ops, params_grads, prog_list, pp_pair, ring_map = \
            self.wrapped_opt.minimize(loss, startup_program, parameter_list,
                                      no_grad_set)
        self.startup_program = orig_startup_program._pipeline_opt[
            'startup_program']
        self.inner_parallelism = program._pipeline_opt['inner_parallelism']
        assert self.nranks % self.inner_parallelism == 0
        assert prog_list
        self.pipeline_num = len(self.endpoints) // self.inner_parallelism

        self._init_process_group(pp_pair, ring_map)

        self.main_program_list = prog_list
        self.main_program = program
        if self.pipeline_num > 1:
            self._transpile_main_program(loss)
        return optimize_ops, params_grads

    def _transpile_main_program(self, loss):
        self._insert_loss_grad_ops(loss, self.pipeline_num)
        self._insert_allreduce_ops(self.dp_ring_id)

    def _insert_loss_grad_ops(self, loss, pipeline_num):
        """
        Scale the loss gradient by 1 / pipeline_num so that the effective
        learning rate stays the same no matter how many pipelines
        (data-parallel replicas) are used.
        """
        block = self.main_program_list[-1].global_block()
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_loss_grad_op(op):
                loss_grad_var = block.vars[op.output_arg_names[0]]
                block._insert_op(
                    idx + 1,
                    type='scale',
                    inputs={'X': loss_grad_var},
                    outputs={'Out': loss_grad_var},
                    attrs={
                        'scale': 1.0 / pipeline_num,
                        OP_ROLE_KEY: OpRole.Backward
                    })

    def _insert_allreduce_ops(self, ring_id):
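        """
        Insert allreduce ops on the given ring to sum the merged gradients
        of non-distributed parameters across data-parallel replicas.
        """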
        block = self.main_program._pipeline_opt[
            'section_program'].global_block()
        origin_block = self.main_program.global_block()
        grad = None
        processed_param_name = set()
        first_optimize_op_idx = None
        add_sync_calc_stream = False
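        # A single c_sync_calc_stream op is inserted before the first
        # allreduce so that gradient computation on the calc stream has
        # finished before communication starts.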
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_backward_op(op) and first_optimize_op_idx is None:
                first_optimize_op_idx = idx + 1
                # no optimize phase
                if first_optimize_op_idx == len(block.ops): return
            if is_backward_op(op) and OP_ROLE_VAR_KEY in op.attr_names:
                op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY]
                if len(op_role_var) == 0:
                    continue
                assert len(op_role_var) % 2 == 0
                offset = 0
                for i in range(0, len(op_role_var), 2):
                    param_name = op_role_var[i]
                    param = block.vars[op_role_var[i]]
                    if param_name in processed_param_name: continue
                    processed_param_name.add(param_name)
                    grad_name = op_role_var[i + 1]
                    # Gradients accumulated over micro-batches are stored in
                    # the corresponding '@MERGED' variables.
                    if 'MERGED' not in grad_name: grad_name += '@MERGED'
                    grad = block.vars[grad_name]
                    origin_param = origin_block.vars[op_role_var[i]]
                    if origin_param.is_distributed:
                        continue
                    if not add_sync_calc_stream:
                        add_sync_calc_stream = True
                        block._insert_op(
                            first_optimize_op_idx + offset,
                            type='c_sync_calc_stream',
                            inputs={'X': grad},
                            outputs={'Out': grad},
                            attrs={OP_ROLE_KEY: OpRole.Optimize})
                        offset += 1

                    block._insert_op(
                        first_optimize_op_idx + offset,
                        type='c_allreduce_sum',
                        inputs={'X': grad},
                        outputs={'Out': grad},
                        attrs={
                            'ring_id': ring_id,
                            'use_calc_stream': True,
                            OP_ROLE_KEY: OpRole.Optimize
                        })