#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from __future__ import division
import os

import paddle.fluid as fluid
from paddle.fluid import core, unique_name
from ..base.private_helper_function import wait_server_ready
from paddle.fluid.optimizer import PipelineOptimizer as PO
from .meta_optimizer_base import MetaOptimizerBase
from .common import (OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY, CollectiveHelper,
                     is_loss_grad_op, is_backward_op, is_optimizer_op)
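
# A minimal usage sketch (assumed public fleet API, not part of this file):
# this meta optimizer is selected when a DistributedStrategy with
# pipeline=True is handed to fleet.distributed_optimizer, e.g.
#
#     import paddle.distributed.fleet as fleet
#     strategy = fleet.DistributedStrategy()
#     strategy.pipeline = True
#     strategy.pipeline_configs = {
#         "micro_batch_size": 2,
#         "accumulate_steps": 4,
#         "schedule_mode": "1F1B",
#     }
#     optimizer = fleet.distributed_optimizer(inner_optimizer, strategy)
#
# where inner_optimizer is any regular fluid optimizer.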


class PipelineOptimizer(MetaOptimizerBase):
    def __init__(self, optimizer):
        super(PipelineOptimizer, self).__init__(optimizer)
        self.inner_opt = optimizer
        self.meta_optimizers_white_list = [
            "RecomputeOptimizer",
            "AMPOptimizer",
        ]
        self.meta_optimizers_black_list = ["GraphExecutionOptimizer", ]
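        # Ring ids used below: global_ring_id spans every trainer, dp_ring_id
        # links the data-parallel replicas, and each adjacent pair of pipeline
        # stages gets its own two-rank ring numbered upward from
        # start_pipeline_ring_id.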
        self.global_ring_id = 1
        self.dp_ring_id = 2
        self.start_pipeline_ring_id = 20  # Just a magic number

    def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
                        user_defined_strategy):
        super(PipelineOptimizer, self)._set_basic_info(
            loss, role_maker, user_defined_optimizer, user_defined_strategy)
        self.micro_batch_size = user_defined_strategy.pipeline_configs[
            'micro_batch_size']
        self.num_microbatches = user_defined_strategy.pipeline_configs[
            'accumulate_steps']
        self.schedule_mode = user_defined_strategy.pipeline_configs[
            'schedule_mode']
        self.use_sharding = user_defined_strategy.sharding

    def _can_apply(self):
        if not self.role_maker._is_collective:
            return False

        # FIXME revise for hybrid parallelism
        if self.use_sharding:
            return False

        return self.user_defined_strategy.pipeline

    def _disable_strategy(self, dist_strategy):
        dist_strategy.pipeline = False
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _enable_strategy(self, dist_strategy, context):
        dist_strategy.pipeline = True
        dist_strategy.pipeline_configs = {
            "micro_batch_size": 1,
            "accumulate_steps": 1,
            "schedule_mode": "1F1B",
        }

    def _broadcast_params(self, ring_id):
        block = self.startup_program.global_block()
        param = None
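        # Broadcast every non-distributed parameter from rank 0 of the ring
        # so that all replicas start from identical weights.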
        for param in block.iter_parameters():
            if param.is_distributed:
                continue

            block.append_op(
                type='c_broadcast',
                inputs={'X': param},
                outputs={'Out': param},
                attrs={
                    'ring_id': ring_id,
                    'root': 0,
                    OP_ROLE_KEY: OpRole.Forward
                })

        if not param: return  # no parameter on this device
        block.append_op(
            type='c_sync_comm_stream',
            inputs={'X': param},
            outputs={'Out': param},
            attrs={'ring_id': ring_id,
                   OP_ROLE_KEY: OpRole.Forward})

    def _get_process_group_info(self):
        # global ring info
        self.global_endpoints = self.endpoints
        self.global_rank = self.rank
        self.global_nranks = self.nranks

        # data parallel ring info
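        # Ranks are laid out pipeline by pipeline, so replicas of the same
        # stage sit inner_parallelism ranks apart.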
        if self.pipeline_num > 1:
            self.dp_rank = self.rank // self.inner_parallelism
            self.dp_nranks = self.nranks // self.inner_parallelism
            start_index = self.rank % self.inner_parallelism
            self.dp_endpoints = [
                self.endpoints[start_index + i * self.inner_parallelism]
                for i in range(self.pipeline_num)
            ]

    def _init_process_group(self, pipeline_pair, pipeline_ring_map):
        self._get_process_group_info()
        collective_helper = CollectiveHelper(self.role_maker, wait_port=False)
        # Create the global ring that spans all trainers
        collective_helper._init_communicator(
            self.startup_program, self.current_endpoint, self.global_endpoints,
            self.global_rank, self.global_ring_id, True, self.global_ring_id,
            True)
        # Create pipeline rings
        if self.inner_parallelism > 1:
            pipeline_id = self.rank // self.inner_parallelism
            start_index = pipeline_id * self.inner_parallelism
            for pair in pipeline_pair:
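                # Each (prev_stage, next_stage) send/recv pair is keyed as
                # prev * 1000 + next in the ring map produced by the inner
                # PipelineOptimizer.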
                pair_key = pair[0] * 1000 + pair[1]
                ring_id = pipeline_ring_map[pair_key]
                assert ring_id >= self.start_pipeline_ring_id
                first_node = pair[0] + start_index
                second_node = pair[1] + start_index
                if self.rank != first_node and self.rank != second_node:
                    continue
                pipeline_endpoints = [
                    self.endpoints[first_node], self.endpoints[second_node]
                ]
                pipeline_rank = 0 if self.rank == first_node else 1
                pipeline_nranks = 2
                collective_helper._init_communicator(
                    self.startup_program, self.current_endpoint,
                    pipeline_endpoints, pipeline_rank, ring_id, False,
                    self.global_ring_id, True)

        # Create dp rings
        if self.pipeline_num > 1:
            collective_helper._init_communicator(
                self.startup_program, self.current_endpoint, self.dp_endpoints,
                self.dp_rank, self.dp_ring_id, True, self.global_ring_id, True)
            self._broadcast_params(self.dp_ring_id)

    def minimize_impl(self,
                      loss,
                      startup_program=None,
                      parameter_list=None,
                      no_grad_set=None):
        self.endpoints = self.role_maker._get_trainer_endpoints()
        self.current_endpoint = self.endpoints[self.role_maker._worker_index()]
        self.rank = self.role_maker._worker_index()
        self.nranks = self.role_maker._worker_num()

        self.wrapped_opt = PO(self.inner_opt,
                              num_microbatches=self.num_microbatches)
        orig_startup_program = (startup_program if startup_program else
                                fluid.default_startup_program())
        block = loss.block
        program = block.program

        # Hand the pipeline settings to the inner fluid PipelineOptimizer
        # through the program's _pipeline_opt dict, which it reads during
        # minimize().
        program._pipeline_opt = dict()
        program._pipeline_opt['local_rank'] = self.rank
        program._pipeline_opt['global_ring_id'] = self.global_ring_id
        program._pipeline_opt['ring_id'] = self.start_pipeline_ring_id
        program._pipeline_opt['micro_batch_size'] = self.micro_batch_size
        program._pipeline_opt['schedule_mode'] = self.schedule_mode
        program._pipeline_opt['use_sharding'] = False
        optimize_ops, params_grads, prog_list, pp_pair, ring_map = self.wrapped_opt.minimize(
            loss, startup_program, parameter_list, no_grad_set)
        self.startup_program = orig_startup_program._pipeline_opt[
            'startup_program']
        self.inner_parallelism = program._pipeline_opt['inner_parallelism']
        assert self.nranks % self.inner_parallelism == 0
        assert prog_list
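        # Number of data-parallel pipeline replicas: all trainers divided by
        # the number of stages in one pipeline.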
        self.pipeline_num = len(self.endpoints) // self.inner_parallelism

        self._init_process_group(pp_pair, ring_map)

        self.main_program_list = prog_list
        self.main_program = program
        if self.pipeline_num > 1:
            self._transpile_main_program(loss)
        return optimize_ops, params_grads

    def _transpile_main_program(self, loss):
        self._insert_loss_grad_ops(loss, self.pipeline_num)
        self._insert_allreduce_ops(self.dp_ring_id)

    def _insert_loss_grad_ops(self, loss, pipeline_num):
        """
        In order to keep the learning rate consistent in different numbers of
        training workers, we scale the loss grad by the number of workers
        """
        block = self.main_program_list[-1].global_block()
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_loss_grad_op(op):
                loss_grad_var = block.vars[op.output_arg_names[0]]
                block._insert_op(
                    idx + 1,
                    type='scale',
                    inputs={'X': loss_grad_var},
                    outputs={'Out': loss_grad_var},
                    attrs={
                        'scale': 1.0 / pipeline_num,
                        OP_ROLE_KEY: OpRole.Backward
                    })

    def _insert_allreduce_ops(self, ring_id):
        section_program = self.main_program._pipeline_opt['section_program']
        block = section_program.global_block()
        origin_block = self.main_program.global_block()
        grad = None
        processed_param_name = set()
        first_optimize_op_idx = None
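        # Walk the ops in reverse: the first backward op found marks the
        # boundary where the optimize phase begins, and the allreduce ops
        # for merged gradients are inserted there.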
        for idx, op in reversed(list(enumerate(block.ops))):
            if is_backward_op(op) and first_optimize_op_idx is None:
                first_optimize_op_idx = idx + 1
                # no optimize phase
                if first_optimize_op_idx == len(block.ops): return
            if is_backward_op(op) and \
                    OP_ROLE_VAR_KEY in op.attr_names:
                op_role_var = op.all_attrs()[OP_ROLE_VAR_KEY]
                if len(op_role_var) == 0:
                    continue
                assert len(op_role_var) % 2 == 0
                offset = 0
                for i in range(0, len(op_role_var), 2):
                    param_name = op_role_var[i]
                    param = block.vars[op_role_var[i]]
                    if param_name in processed_param_name: continue
                    processed_param_name.add(param_name)
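                    # With gradient accumulation the inner optimizer merges
                    # micro-batch grads into '<name>@MERGED' variables; those
                    # merged grads are what must be allreduced across replicas.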
                    grad_name = op_role_var[i + 1]
                    if 'MERGED' not in grad_name: grad_name += '@MERGED'
                    grad = block.vars[grad_name]
                    origin_param = origin_block.vars[op_role_var[i]]
                    if origin_param.is_distributed:
                        continue

                    block._insert_op(
                        first_optimize_op_idx + offset,
                        type='c_allreduce_sum',
                        inputs={'X': grad},
                        outputs={'Out': grad},
                        attrs={
                            'ring_id': ring_id,
                            'use_calc_stream': True,
                            OP_ROLE_KEY: OpRole.Optimize
                        })