#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from __future__ import division
import os

import paddle.fluid as fluid
from paddle.fluid import core, unique_name
from ..base.private_helper_function import wait_server_ready
from paddle.fluid.optimizer import PipelineOptimizer as PO
from .meta_optimizer_base import MetaOptimizerBase
from .common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY, CollectiveHelper, is_loss_grad_op, is_backward_op, is_optimizer_op

__all__ = []


class PipelineOptimizer(MetaOptimizerBase):

    def __init__(self, optimizer):
        super(PipelineOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        self.meta_optimizers_white_list = [
            "RecomputeOptimizer",
            "AMPOptimizer",
        ]
        self.meta_optimizers_black_list = [
            "GraphExecutionOptimizer",
        ]
        self.global_ring_id = 1
        self.dp_ring_id = 2
        self.start_pipeline_ring_id = 20  # a magic offset, kept clear of the global/dp ring ids

    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
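        """Cache the pipeline-related settings from the user-defined strategy."""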
        super(PipelineOptimizer,
              self)._set_basic_info(loss, role_maker, user_defined_optimizer,
                                    user_defined_strategy)
        self.micro_batch_size = user_defined_strategy.pipeline_configs[
            'micro_batch_size']
        self.num_microbatches = user_defined_strategy.pipeline_configs[
            'accumulate_steps']
        self.schedule_mode = user_defined_strategy.pipeline_configs[
            'schedule_mode']
        self.use_sharding = user_defined_strategy.sharding

    def _can_apply(self):
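        """Pipeline can be applied only in collective mode and, for now,
        not together with sharding (see the FIXME below)."""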
        if not self.role_maker._is_collective:
            return False

        # FIXME revise for hybrid parallelism
        if self.use_sharding:
            return False

        return self.user_defined_strategy.pipeline

    def _disable_strategy(self, dist_strategy):
        dist_strategy.pipeline = False
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _enable_strategy(self, dist_strategy, context):
        dist_strategy.pipeline = True
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _broadcast_params(self, ring_id):
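        """Broadcast every non-distributed parameter from rank 0 of the
        given ring so that all replicas start from identical weights."""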
        block = self.startup_program.global_block()
        param = None
        for param in block.iter_parameters():
            if param.is_distributed:
                continue

            block.append_op(type='c_broadcast',
                            inputs={'X': param},
                            outputs={'Out': param},
                            attrs={
                                'ring_id': ring_id,
                                'root': 0,
                                OP_ROLE_KEY: OpRole.Forward
                            })

        if not param:
            return  # no parameter on this device
        block.append_op(type='c_sync_comm_stream',
                        inputs={'X': param},
                        outputs={'Out': param},
                        attrs={
                            'ring_id': ring_id,
                            OP_ROLE_KEY: OpRole.Forward
                        })

    def _get_process_group_info(self):
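        """Collect endpoint/rank/nranks info for the global ring and, when
        there is more than one pipeline, for the data-parallel ring formed
        by the ranks that hold the same pipeline stage."""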
        # global ring info
        self.global_endpoints = self.endpoints
        self.global_rank = self.rank
        self.global_nranks = self.nranks

        # data parallel ring info
        if self.pipeline_num > 1:
            self.dp_rank = self.rank // self.inner_parallelism
            self.dp_nranks = self.nranks // self.inner_parallelism
            start_index = self.rank % self.inner_parallelism
            self.dp_endpoints = [
                self.endpoints[start_index + i * self.inner_parallelism]
                for i in range(self.pipeline_num)
            ]

    def _init_process_group(self, pipeline_pair, pipeline_ring_map):
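        """Create the communication rings: the global ring over all ranks,
        one two-rank ring per communicating stage pair inside a pipeline,
        and the data-parallel ring used for gradient allreduce."""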
        self._get_process_group_info()
        collective_helper = CollectiveHelper(self.role_maker, wait_port=False)
        # Create the global ring over all gpus (ring_id = self.global_ring_id)
        collective_helper._init_communicator(self.startup_program,
                                             self.current_endpoint,
                                             self.global_endpoints,
                                             self.global_rank,
                                             self.global_ring_id, True,
                                             self.global_ring_id, True)
        # Create pipeline rings
        if self.inner_parallelism > 1:
            pipeline_id = self.rank // self.inner_parallelism
            start_index = pipeline_id * self.inner_parallelism
            for pair in pipeline_pair:
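                # ring ids were assigned by the inner optimizer, keyed by
                # pair[0] * 1000 + pair[1]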
                pair_key = pair[0] * 1000 + pair[1]
                ring_id = pipeline_ring_map[pair_key]
                assert ring_id >= self.start_pipeline_ring_id
                first_node = pair[0] + start_index
                second_node = pair[1] + start_index
                if self.rank != first_node and self.rank != second_node:
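                    # Ranks outside this pair still make the call (with
                    # empty arguments), presumably so that communicator
                    # construction stays synchronized across all ranks.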
                    collective_helper._init_communicator(
                        self.startup_program, None, None, None, None, False,
                        self.global_ring_id, True)
                    continue
                pipeline_endpoints = [
                    self.endpoints[first_node], self.endpoints[second_node]
                ]
                pipeline_rank = 0 if self.rank == first_node else 1
                pipeline_nranks = 2
                collective_helper._init_communicator(self.startup_program,
                                                     self.current_endpoint,
                                                     pipeline_endpoints,
                                                     pipeline_rank, ring_id,
                                                     False, self.global_ring_id,
                                                     True)

        # Create dp rings
        if self.pipeline_num > 1:
            collective_helper._init_communicator(
                self.startup_program, self.current_endpoint, self.dp_endpoints,
                self.dp_rank, self.dp_ring_id, True, self.global_ring_id, True)
            self._broadcast_params(self.dp_ring_id)

    def minimize_impl(self,
                      loss,
                      startup_program=None,
                      parameter_list=None,
                      no_grad_set=None):
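        """Run the inner fluid PipelineOptimizer, build the process groups,
        and, when several pipelines run data-parallel, insert the loss
        scaling and gradient allreduce ops."""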
        self.endpoints = self.role_maker._get_trainer_endpoints()
        self.current_endpoint = self.endpoints[self.role_maker._worker_index()]
        self.rank = self.role_maker._worker_index()
        self.nranks = self.role_maker._worker_num()

        self.wrapped_opt = PO(self.inner_opt,
                              num_microbatches=self.num_microbatches)
        orig_startup_program = startup_program if startup_program \
            else fluid.default_startup_program()
        block = loss.block
        program = block.program

        program._pipeline_opt = dict()
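        # The inner PipelineOptimizer reads these settings off the program.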
        program._pipeline_opt['local_rank'] = self.rank
        program._pipeline_opt['global_ring_id'] = self.global_ring_id
        program._pipeline_opt['ring_id'] = self.start_pipeline_ring_id
        program._pipeline_opt['micro_batch_size'] = self.micro_batch_size
        program._pipeline_opt['schedule_mode'] = self.schedule_mode
        program._pipeline_opt['use_sharding'] = False
        program._pipeline_opt['mp_degree'] = 1
        program._pipeline_opt['mp_rank'] = 0
        optimize_ops, params_grads, prog_list, pp_pair, ring_map = self.wrapped_opt.minimize(
            loss, startup_program, parameter_list, no_grad_set)
        self.startup_program = orig_startup_program._pipeline_opt[
            'startup_program']
        self.inner_parallelism = program._pipeline_opt['inner_parallelism']
        assert self.nranks % self.inner_parallelism == 0
        assert prog_list
        self.pipeline_num = len(self.endpoints) // self.inner_parallelism

        self._init_process_group(pp_pair, ring_map)

        self.main_program_list = prog_list
        self.main_program = program
        if self.pipeline_num > 1:
            self._transpile_main_program(loss)
        return optimize_ops, params_grads

    def _transpile_main_program(self, loss):
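        """Add the data-parallel logic: scale the loss gradient and
        allreduce gradients over the dp ring."""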
        self._insert_loss_grad_ops(loss, self.pipeline_num)
        self._insert_allreduce_ops(self.dp_ring_id)

    def _insert_loss_grad_ops(self, loss, pipeline_num):
        """
        In order to keep the learning rate consistent in different numbers of
        training workers, we scale the loss grad by the number of workers
        """
        block = self.main_program_list[-1].global_block()
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_loss_grad_op(op):
                loss_grad_var = block.vars[op.output_arg_names[0]]
                block._insert_op(idx + 1,
                                 type='scale',
                                 inputs={'X': loss_grad_var},
                                 outputs={'Out': loss_grad_var},
                                 attrs={
                                     'scale': 1.0 / pipeline_num,
                                     OP_ROLE_KEY: OpRole.Backward
                                 })

    def _insert_allreduce_ops(self, ring_id):
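        """Insert c_allreduce_sum ops for the merged gradients of all
        non-distributed parameters, just before the optimize phase, to keep
        the data-parallel replicas in sync."""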
        block = self.main_program._pipeline_opt[
            'section_program'].global_block()
        origin_block = self.main_program.global_block()
        grad = None
        processed_param_name = set()
        first_optimize_op_idx = None
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_backward_op(op) and first_optimize_op_idx is None:
                first_optimize_op_idx = idx + 1
                if first_optimize_op_idx == len(block.ops):
                    # no optimize phase
                    return
            if is_backward_op(op) and \
                    OP_ROLE_VAR_KEY in op.attr_names:
                op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY]
                if len(op_role_var) == 0:
                    continue
                assert len(op_role_var) % 2 == 0
                offset = 0
                for i in range(0, len(op_role_var), 2):
                    param_name = op_role_var[i]
                    param = block.vars[op_role_var[i]]
                    if param_name in processed_param_name:
                        continue
                    processed_param_name.add(param_name)
                    grad_name = op_role_var[i + 1]
                    if 'MERGED' not in grad_name:
                        grad_name += '@MERGED'
                    grad = block.vars[grad_name]
                    origin_param = origin_block.vars[op_role_var[i]]
                    if origin_param.is_distributed:
                        continue

                    block._insert_op(first_optimize_op_idx + offset,
                                     type='c_allreduce_sum',
                                     inputs={'X': grad},
                                     outputs={'Out': grad},
                                     attrs={
                                         'ring_id': ring_id,
                                         'use_calc_stream': True,
                                         OP_ROLE_KEY: OpRole.Optimize
                                     })