#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
from paddle.fluid.optimizer import PipelineOptimizer as PO
from .meta_optimizer_base import MetaOptimizerBase
from .common import (CollectiveHelper, OP_ROLE_KEY, OP_ROLE_VAR_KEY, OpRole,
                     is_backward_op, is_loss_grad_op)

__all__ = []


class PipelineOptimizer(MetaOptimizerBase):

    def __init__(self, optimizer):
        super(PipelineOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        self.meta_optimizers_white_list = [
            "RecomputeOptimizer",
            "AMPOptimizer",
        ]
        self.meta_optimizers_black_list = [
            "GraphExecutionOptimizer",
        ]
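        # Communication ring ids used by this optimizer: ring 1 is the global
        # ring over all ranks, ring 2 is the data-parallel ring, and rings
        # starting from 20 are assigned to pipeline point-to-point pairs.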
        self.global_ring_id = 1
        self.dp_ring_id = 2
        self.start_pipeline_ring_id = 20  # Just a magic number

    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
        super(PipelineOptimizer,
              self)._set_basic_info(loss, role_maker, user_defined_optimizer,
                                    user_defined_strategy)
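        # Pull the pipeline settings out of the user-defined distributed strategy.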
        self.micro_batch_size = user_defined_strategy.pipeline_configs[
            'micro_batch_size']
        self.num_microbatches = user_defined_strategy.pipeline_configs[
            'accumulate_steps']
        self.schedule_mode = user_defined_strategy.pipeline_configs[
            'schedule_mode']
        self.use_sharding = user_defined_strategy.sharding

    def _can_apply(self):
        if not self.role_maker._is_collective:
            return False

        # FIXME revise for hybrid parallelism
        if self.use_sharding:
            return False

        if self.user_defined_strategy.pipeline == True:
            return True
        return False

    def _disable_strategy(self, dist_strategy):
        dist_strategy.pipeline = False
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _enable_strategy(self, dist_strategy, context):
        dist_strategy.pipeline = True
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _broadcast_params(self, ring_id):
        block = self.startup_program.global_block()
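        # Broadcast every non-distributed parameter from rank 0 of the given
        # ring so that all replicas start from the same values.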
        param = None
        for param in block.iter_parameters():
            if param.is_distributed:
                continue

            block.append_op(type='c_broadcast',
                            inputs={'X': param},
                            outputs={'Out': param},
                            attrs={
                                'ring_id': ring_id,
                                'root': 0,
                                OP_ROLE_KEY: OpRole.Forward
                            })

        if not param: return  # no parameter on this device
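        # Block until all broadcasts on the communication stream have finished.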
        block.append_op(type='c_sync_comm_stream',
                        inputs={'X': param},
                        outputs={'Out': param},
                        attrs={
                            'ring_id': ring_id,
                            OP_ROLE_KEY: OpRole.Forward
                        })

    def _get_process_group_info(self):
        # global ring info
        self.global_endpoints = self.endpoints
        self.global_rank = self.rank
        self.global_nranks = self.nranks

        # data parallel ring info
        if self.pipeline_num > 1:
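            # Ranks that hold the same pipeline stage (same rank %
            # inner_parallelism) across pipelines form one data-parallel group.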
            self.dp_rank = self.rank // self.inner_parallelism
            self.dp_nranks = self.nranks // self.inner_parallelism
            start_index = self.rank % self.inner_parallelism
            self.dp_endpoints = [
                self.endpoints[start_index + i * self.inner_parallelism]
                for i in range(self.pipeline_num)
            ]

    def _init_process_group(self, pipeline_pair, pipeline_ring_map):
        self._get_process_group_info()
        collective_helper = CollectiveHelper(self.role_maker, wait_port=False)
        # Create the global ring for all gpus (ring_id = self.global_ring_id)
        collective_helper._init_communicator(self.startup_program,
                                             self.current_endpoint,
                                             self.global_endpoints,
                                             self.global_rank,
                                             self.global_ring_id, True,
                                             self.global_ring_id, True)
        # Create pipeline rings
        if self.inner_parallelism > 1:
            pipeline_id = self.rank // self.inner_parallelism
            start_index = pipeline_id * self.inner_parallelism
            for pair in pipeline_pair:
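                # Each communicating stage pair gets its own two-rank ring; the
                # ring id is looked up by the key pair[0] * 1000 + pair[1].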
                pair_key = pair[0] * 1000 + pair[1]
                ring_id = pipeline_ring_map[pair_key]
                assert ring_id >= self.start_pipeline_ring_id
                first_node = pair[0] + start_index
                second_node = pair[1] + start_index
                if self.rank != first_node and self.rank != second_node:
                    collective_helper._init_communicator(
                        self.startup_program, None, None, None, None, False,
                        self.global_ring_id, True)
                    continue
                pipeline_endpoints = [
                    self.endpoints[first_node], self.endpoints[second_node]
                ]
                pipeline_rank = 0 if self.rank == first_node else 1
                pipeline_nranks = 2
                collective_helper._init_communicator(self.startup_program,
                                                     self.current_endpoint,
                                                     pipeline_endpoints,
                                                     pipeline_rank, ring_id,
                                                     False, self.global_ring_id,
                                                     True)

        # Create dp rings
        if self.pipeline_num > 1:
            collective_helper._init_communicator(
                self.startup_program, self.current_endpoint, self.dp_endpoints,
                self.dp_rank, self.dp_ring_id, True, self.global_ring_id, True)
            self._broadcast_params(self.dp_ring_id)

    def minimize_impl(self,
                      loss,
                      startup_program=None,
                      parameter_list=None,
                      no_grad_set=None):
        self.endpoints = self.role_maker._get_trainer_endpoints()
        self.current_endpoint = self.endpoints[self.role_maker._worker_index()]
        self.rank = self.role_maker._worker_index()
        self.nranks = self.role_maker._worker_num()

        self.wrapped_opt = PO(self.inner_opt,
                              num_microbatches=self.num_microbatches)
        orig_startup_program = startup_program if startup_program else fluid.default_startup_program(
        )
        block = loss.block
        program = block.program

        program._pipeline_opt = dict()
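        # Settings handed to the wrapped fluid PipelineOptimizer through the
        # program it is about to transform.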
        program._pipeline_opt['local_rank'] = self.rank
        program._pipeline_opt['global_ring_id'] = self.global_ring_id
        program._pipeline_opt['ring_id'] = self.start_pipeline_ring_id
        program._pipeline_opt['micro_batch_size'] = self.micro_batch_size
        program._pipeline_opt['schedule_mode'] = self.schedule_mode
        program._pipeline_opt['use_sharding'] = False
        program._pipeline_opt['mp_degree'] = 1
        program._pipeline_opt['mp_rank'] = 0
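        # The wrapped optimizer returns the per-stage section programs plus the
        # stage pairs that communicate and their assigned ring ids.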
        optimize_ops, params_grads, prog_list, pp_pair, ring_map = self.wrapped_opt.minimize(
            loss, startup_program, parameter_list, no_grad_set)
        self.startup_program = orig_startup_program._pipeline_opt[
            'startup_program']
        self.inner_parallelism = program._pipeline_opt['inner_parallelism']
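        # Ranks must split evenly into pipelines of size inner_parallelism;
        # the number of pipelines is the data-parallel degree.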
        assert self.nranks % self.inner_parallelism == 0
        assert prog_list
        self.pipeline_num = len(self.endpoints) // self.inner_parallelism

        self._init_process_group(pp_pair, ring_map)

        self.main_program_list = prog_list
        self.main_program = program
        if self.pipeline_num > 1:
            self._transpile_main_program(loss)
        return optimize_ops, params_grads

    def _transpile_main_program(self, loss):
        self._insert_loss_grad_ops(loss, self.pipeline_num)
        self._insert_allreduce_ops(self.dp_ring_id)

    def _insert_loss_grad_ops(self, loss, pipeline_num):
        """
        In order to keep the learning rate consistent in different numbers of
        training workers, we scale the loss grad by the number of workers
        """
        block = self.main_program_list[-1].global_block()
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_loss_grad_op(op):
                loss_grad_var = block.vars[op.output_arg_names[0]]
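                # Rescale the loss gradient in place, right after the op that
                # produces it.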
                block._insert_op(idx + 1,
                                 type='scale',
                                 inputs={'X': loss_grad_var},
                                 outputs={'Out': loss_grad_var},
                                 attrs={
                                     'scale': 1.0 / pipeline_num,
                                     OP_ROLE_KEY: OpRole.Backward
                                 })

    def _insert_allreduce_ops(self, ring_id):
        block = self.main_program._pipeline_opt['section_program'].global_block(
        )
        origin_block = self.main_program.global_block()
        grad = None
        processed_param_name = set()
        first_optimize_op_idx = None
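        # Walk the ops backwards to find where the optimize phase starts, then
        # insert one c_allreduce_sum per parameter gradient at that point.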
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_backward_op(op) and not first_optimize_op_idx:
                first_optimize_op_idx = idx + 1
                # no optimize phase
                if first_optimize_op_idx == len(block.ops): return
            if is_backward_op(op) and \
                    OP_ROLE_VAR_KEY in op.attr_names:
                op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY]
                if len(op_role_var) == 0:
                    continue
                assert len(op_role_var) % 2 == 0
                offset = 0
                for i in range(0, len(op_role_var), 2):
                    param_name = op_role_var[i]
                    param = block.vars[op_role_var[i]]
                    if param_name in processed_param_name: continue
                    processed_param_name.add(param_name)
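                    # The data-parallel allreduce runs on the gradient that was
                    # accumulated over micro-batches (the '@MERGED' variable).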
                    grad_name = op_role_var[i + 1]
                    if 'MERGED' not in grad_name: grad_name += '@MERGED'
                    grad = block.vars[grad_name]
                    origin_param = origin_block.vars[op_role_var[i]]
                    if origin_param.is_distributed:
                        continue

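                    # Sum the merged gradient over the data-parallel ring on
                    # the calculation stream, just before the optimize ops run.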
                    block._insert_op(first_optimize_op_idx + offset,
                                     type='c_allreduce_sum',
                                     inputs={'X': grad},
                                     outputs={'Out': grad},
                                     attrs={
                                         'ring_id': ring_id,
                                         'use_calc_stream': True,
                                         OP_ROLE_KEY: OpRole.Optimize
                                     })