#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
from .meta_parallel_base import MetaParallelBase
from .pp_utils.utils import is_float_tensor, _initialize_recompute_hcg
from .parallel_layers.pp_layers import PipelineLayer

from ..utils.hybrid_parallel_util import broadcast_mp_parameters
from ..utils.hybrid_parallel_util import broadcast_dp_parameters
from ..utils.hybrid_parallel_util import broadcast_sharding_parameters
from ..utils.log_util import logger
from ..meta_optimizers.dygraph_optimizer import HybridParallelOptimizer, HybridParallelGradScaler
from .pp_utils import p2p_communication as p2p

__all__ = []
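
# Typical driver code (a minimal sketch for orientation only; ``net`` must be
# a ``PipelineLayer`` and ``opt`` a Paddle optimizer, and the degrees below
# are placeholders that depend on the launch configuration):
#
#     import paddle.distributed.fleet as fleet
#
#     strategy = fleet.DistributedStrategy()
#     strategy.hybrid_configs = {
#         "dp_degree": 1, "mp_degree": 1, "pp_degree": 2}
#     strategy.pipeline_configs = {
#         "micro_batch_size": 2, "accumulate_steps": 4}
#     fleet.init(is_collective=True, strategy=strategy)
#
#     model = fleet.distributed_model(net)          # -> PipelineParallel
#     optimizer = fleet.distributed_optimizer(opt)  # -> HybridParallelOptimizer
#     loss = model.train_batch([images, labels], optimizer)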


class PipelineParallel(MetaParallelBase):
    def __init__(self, layers, hcg, strategy):
        if not isinstance(layers, PipelineLayer):
            raise TypeError(
                "The Layer should be a derived class of PipelineLayer.")
        super(PipelineParallel, self).__init__(layers, hcg, strategy)
        self.use_data_parallel = self._hcg.get_data_parallel_world_size() > 1
        self.use_model_parallel = self._hcg.get_model_parallel_world_size() > 1
        self.use_sharding_parallel = (
            self._hcg.get_sharding_parallel_world_size() > 1)

        self.total_loss = None

        self.micro_batch_size = self._strategy.pipeline_configs[
            'micro_batch_size']
        self.accumulate_steps = self._strategy.pipeline_configs[
            'accumulate_steps']

        self._using_cache = self._strategy.pipeline_configs['p2p_cache_shape']

        self.num_stages = self._hcg.get_pipe_parallel_world_size()
        self.stage_id = self._hcg.get_stage_id()
        self.pp_group = self._hcg.get_pipe_parallel_group()

        p2p.initialize_p2p_groups(hcg, self._using_cache)

        _initialize_recompute_hcg(hcg)

        self.is_first_stage = self.stage_id == 0
        self.is_last_stage = (self.stage_id == (self.num_stages - 1))
        self.global_rank = self._hcg.get_global_rank()
        self.micro_batch_id = 0

        self._compute_loss = True

        logger.info("Pipeline Info -- num_stages: {}, stage_id: {}".format(
            self.num_stages, self.stage_id))

        if self.use_model_parallel:
            logger.info("start broadcast mp parameters")
            broadcast_mp_parameters(self._layers, self._hcg)

        if self.use_sharding_parallel:
            logger.info("start broadcast sharding parameters")
            broadcast_sharding_parameters(self._layers, self._hcg)

        if self.use_data_parallel:
            logger.info("start broadcast dp parameters")
            broadcast_dp_parameters(self._layers, self._hcg)

    def train_batch(self, data, optimizer, lr_scheduler=None, scaler=None):
        assert isinstance(optimizer, HybridParallelOptimizer), (
            'optimizer should be HybridParallelOptimizer subclass.')
        if scaler is not None:
            assert isinstance(scaler, HybridParallelGradScaler), (
                'scaler should be HybridParallelGradScaler subclass or None.')
        assert fluid.framework._dygraph_tracer()._has_grad, (
            'Please enable the generation of gradients.')

        if self.is_first_stage or self.is_last_stage:
            assert data is not None, (
                "For the first and the last stage, the data must be set.")
        else:
            data = None

        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.scaler = scaler
        self.data = data
        self._compute_loss = True

        self._layers.train()

        # store total loss of entire batch
        self.total_loss = None

        # store data id for micro_batch
        self.micro_batch_id = 0

        # Next, use the 1F1B scheduling strategy.
        # This strategy is inspired by:
        # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/schedules.py
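        #
        # The schedule has three phases:
        #   1. warm-up: each stage runs (num_stages - stage_id - 1)
        #      forward-only micro-steps, so later stages can start early;
        #   2. steady 1F1B: one forward is interleaved with one backward,
        #      which bounds how many activations stay buffered per stage;
        #   3. cooldown: the remaining backward steps drain the buffers.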

        startup_steps = (self.num_stages - self.stage_id - 1)
        startup_steps = min(startup_steps, self.accumulate_steps)
        steady_steps = self.accumulate_steps - startup_steps

        input_buffers = []
        output_buffers = []

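        # Warm-up phase: forward-only micro-steps; buffer each input/output
        # pair for its backward step later.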
        for step_id in range(startup_steps):
            input_tensor = p2p.recv_forward()

            output_tensor = self._forward_step(input_tensor)
            p2p.send_forward(output_tensor)

            input_buffers.append(input_tensor)
            output_buffers.append(output_tensor)

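        # Steady 1F1B phase: interleave one forward micro-step with one
        # backward micro-step.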
        if steady_steps > 0:
            input_tensor = p2p.recv_forward()

        for i in range(steady_steps):
            last_iter = (i == (steady_steps - 1))

            output_tensor = self._forward_step(input_tensor)

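            # Fused p2p call: send this forward output downstream and
            # receive the gradient of the oldest outstanding output.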
            output_tensor_grad = p2p.send_forward_recv_backward(output_tensor)

            input_buffers.append(input_tensor)
            output_buffers.append(output_tensor)

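            # Run backward for the oldest buffered micro-batch (1F1B order).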
            input_tensor = input_buffers.pop(0)
            output_tensor = output_buffers.pop(0)

            input_tensor_grad = self._backward_step(input_tensor, output_tensor,
                                                    output_tensor_grad)

            if last_iter:
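                # Final steady step: no further forward input is needed;
                # only the input gradient has to be sent upstream.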
                input_tensor = None
                p2p.send_backward(input_tensor_grad)
            else:
                input_tensor = p2p.send_backward_recv_forward(input_tensor_grad)

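        # Cooldown phase: backward-only micro-steps for the activations
        # still buffered from warm-up.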
        for i in range(startup_steps):
            input_tensor = input_buffers.pop(0)
            output_tensor = output_buffers.pop(0)

            output_tensor_grad = p2p.recv_backward()

            input_tensor_grad = self._backward_step(input_tensor, output_tensor,
                                                    output_tensor_grad)
            p2p.send_backward(input_tensor_grad)

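        # Sum gradients of parameters shared across stages (for example,
        # tied input/output embeddings).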
        self._layers.allreduce_shared_weight_gradients()

        self.train_loss = self._broadcast_final_loss()

        # optimizer
        self._optimizer_step()
        return self.train_loss

    def eval_batch(self, data, compute_loss=False):
        self._layers.eval()
        self._compute_loss = compute_loss

        # save data for eval
        self.data = data
        # store data id for micro_batch
        self.micro_batch_id = 0

        # store total loss of entire batch
        self.total_loss = None

        startup_steps = (self.num_stages - self.stage_id - 1)
        startup_steps = min(startup_steps, self.accumulate_steps)
        steady_steps = self.accumulate_steps - startup_steps
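        # Evaluation reuses the warm-up/steady split of train_batch but is
        # forward-only; outputs (or the loss) are collected instead.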

        input_buffers = []
        output_buffers = []

        for step_id in range(startup_steps):
            input_tensor = p2p.recv_forward()

            output_tensor = self._forward_step(input_tensor)
            p2p.send_forward(output_tensor)

            input_buffers.append(input_tensor)
            output_buffers.append(output_tensor)

        if steady_steps > 0:
            input_tensor = p2p.recv_forward()

        for i in range(steady_steps):
            last_iter = (i == (steady_steps - 1))

            output_tensor = self._forward_step(input_tensor)
            p2p.send_forward(output_tensor)

            input_buffers.append(input_tensor)
            output_buffers.append(output_tensor)

            if not last_iter:
                input_tensor = p2p.recv_forward()

        return self.total_loss if self._compute_loss else output_buffers

    def _forward_step(self, input_tensor):
        if self.is_first_stage:
            input_tensor = self._load_micro_batch(self.micro_batch_id)

        output_tensor = self._layers.forward(input_tensor)

        if self.is_last_stage:
            # calculate loss on the last stage during training
            if self._compute_loss:
                assert self._layers._loss_fn is not None, "loss function should exist to compute loss"
                labels = self._load_micro_batch(self.micro_batch_id)
                output_tensor = self._layers._loss_fn(output_tensor, labels)
                assert isinstance(
                    output_tensor, paddle.Tensor
                ), "Currently, loss_fn should return a paddle.Tensor"

                if self.accumulate_steps > 1:
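                    # Scale each micro-batch loss so the accumulated sum
                    # equals the mean loss over the full batch.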
                    output_tensor = output_tensor / self.accumulate_steps

                if self.total_loss is None:
                    self.total_loss = paddle.zeros_like(output_tensor)
                self.total_loss += output_tensor.detach()

        self.micro_batch_id += 1
        return output_tensor

    def _backward_step(self, input_tensor, output_tensor, output_tensor_grad):
        if self.is_last_stage:
            assert output_tensor_grad is None
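            # The last stage seeds autograd from the loss itself; with AMP,
            # backpropagate the scaled loss.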
            if self.scaler:
                paddle.autograd.backward(self.scaler.scale(output_tensor))
            else:
                paddle.autograd.backward(output_tensor)
        else:
            if isinstance(output_tensor, tuple):
                outputs = [t for t in output_tensor if not t.stop_gradient]
                assert len(outputs) == len(output_tensor_grad)
                paddle.autograd.backward(
                    tensors=outputs,
                    grad_tensors=[t for t in output_tensor_grad])
            else:
                paddle.autograd.backward(
                    tensors=[output_tensor], grad_tensors=[output_tensor_grad])

        input_tensor_grad = None
        if input_tensor is not None:
            if isinstance(input_tensor, tuple):
                input_tensor_grad = tuple(
                    [t.grad for t in input_tensor if not t.stop_gradient])
            else:
                input_tensor_grad = input_tensor.grad
        return input_tensor_grad

    def _load_micro_batch(self, cache_id):
        inputs = self.data
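        # Each micro-batch is a contiguous [begin, end) slice of the full
        # batch along axis 0.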
        begin = cache_id * self.micro_batch_size
        end = begin + self.micro_batch_size

        if self.is_first_stage:
            assert len(inputs) == 2, "length of input should be 2"
            if isinstance(inputs[0], tuple):
                assert len(
                    inputs[0]
                ) > 1, "If you use tuple for input data, it should have at least two inputs."
                batch_size = inputs[0][0].shape[0]
                assert self.micro_batch_size * self.accumulate_steps == batch_size, (
                    "batch_size must equal micro_batch_size * accumulate_steps. "
                    "Currently, batch_size = %d, micro_batch_size = %d, "
                    "accumulate_steps = %d." %
                    (batch_size, self.micro_batch_size, self.accumulate_steps))
                data = [input[begin:end, :].detach() for input in inputs[0]]
                return tuple(data)
            else:
                batch_size = inputs[0].shape[0]
                assert self.micro_batch_size * self.accumulate_steps == batch_size
                return inputs[0][begin:end, :].detach()
        elif self.is_last_stage:
            assert len(inputs) == 2, "length of input should be 2"
            if isinstance(inputs[1], tuple):
                batch_size = inputs[1][0].shape[0]
                assert self.micro_batch_size * self.accumulate_steps == batch_size
                data = [input[begin:end, :].detach() for input in inputs[1]]
                return tuple(data)
            else:
                batch_size = inputs[1].shape[0]
                assert self.micro_batch_size * self.accumulate_steps == batch_size
                return inputs[1][begin:end, :].detach()
        else:
            # Intermediate stages take no data input; they receive
            # activations from the previous stage over p2p instead.
            return None

    def _broadcast_final_loss(self):
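        # The loss only exists on the last stage; broadcast it within the
        # pipeline group so every rank returns the same value.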
        if self.is_last_stage:
            assert self.total_loss is not None, "train_batch() in last stage should obtain valid loss"
            loss = self.total_loss.detach()
            paddle.distributed.broadcast(
                loss,
                src=self.global_rank,
                use_calc_stream=True,
                group=self.pp_group)
        else:
            loss = paddle.zeros(shape=[1], dtype="float32")
            paddle.distributed.broadcast(
                loss,
                src=self._hcg.get_rank_from_stage(self.num_stages - 1),
                use_calc_stream=True,
                group=self.pp_group)
        return loss

    def _optimizer_step(self):
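        # With AMP, scaler.minimize() unscales the gradients and skips the
        # parameter update if inf/nan gradients are detected.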
        if self.scaler:
            self.scaler.minimize(self.optimizer, self.train_loss)
        else:
            self.optimizer.step()

        self.optimizer.clear_grad()
        if self.lr_scheduler:
            self.lr_scheduler.step()