# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import paddle
from paddle.fluid import framework, core, layers, unique_name
from paddle.fluid.framework import Variable
from paddle.fluid.clip import ClipGradByGlobalNorm
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.optimizer import Optimizer
from paddle.fluid.executor import global_scope
from paddle.fluid.framework import name_scope
from paddle.fluid import core, unique_name


def init_communicator(block, rank, ranks, ring_id):
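    """Create an NCCL communicator on ring ``ring_id`` for the given ranks.

    Endpoints are read from the ``PADDLE_TRAINER_ENDPOINTS`` environment
    variable; ``rank`` is the global rank of the current process and
    ``ranks`` lists the global ranks that participate in this ring.
    """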
    eps = os.environ['PADDLE_TRAINER_ENDPOINTS']
    eps = [ep.strip() for ep in eps.split(",") if ep.strip()]
    cur_ep = eps[rank]
    other_eps = [eps[r] for r in ranks if r != rank]

    local_rank = ranks.index(rank)
    comm_var_name = unique_name.generate('comm_id')
    comm_id_var = block.create_var(
        name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW
    )
    block.append_op(
        type='c_gen_nccl_id',
        inputs={},
        outputs={'Out': comm_id_var},
        attrs={
            'rank': local_rank,
            'endpoint': cur_ep,
            'other_endpoints': other_eps,
            'ring_id': ring_id,
        },
    )
    block.append_op(
        type='c_comm_init',
        inputs={'X': comm_id_var},
        outputs={},
        attrs={'nranks': len(ranks), 'rank': local_rank, 'ring_id': ring_id},
    )
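    # Run a dummy all-reduce on a throwaway variable so that the freshly
    # created communicator is fully initialized before any real collective
    # op uses this ring.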
    tmp_var = block.create_var(name=unique_name.generate('tmp'))
    block.append_op(
        type='fill_constant', outputs={'Out': tmp_var}, attrs={'value': 1}
    )
    block.append_op(
        type='c_allreduce_sum',
        inputs={'X': tmp_var},
        outputs={'Out': tmp_var},
        attrs={'ring_id': ring_id, 'use_calc_stream': True},
    )
    block.append_op(
        type='c_sync_calc_stream',
        inputs={'X': tmp_var},
        outputs={'Out': tmp_var},
    )
    return ring_id


def broadcast_parameters(block, parameters, ring_id):
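    """Broadcast every parameter over ring ``ring_id`` so that all ranks
    start from identical parameter values."""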
    for p in parameters:
        block.append_op(
            type='c_broadcast',
            inputs={'X': p},
            outputs={'Out': p},
            attrs={'ring_id': ring_id, 'use_calc_stream': True},
        )


class DistributedFusedLamb(Optimizer):
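    """Distributed fused LAMB optimizer for static-graph training.

    Parameters, gradients and optimizer states are packed into a few large
    fused buffers that are partitioned across ranks, and the whole LAMB
    update is performed by a single ``distributed_fused_lamb`` op. Dygraph
    mode is not supported, and only ``ClipGradByGlobalNorm`` may be passed
    as ``grad_clip``.

    A minimal usage sketch (illustrative only; it assumes a static-graph
    program with a scalar ``loss`` and a properly launched multi-GPU
    distributed environment):

        opt = DistributedFusedLamb(
            learning_rate=1e-3,
            lamb_weight_decay=0.01,
            clip_after_allreduce=True,
        )
        params_grads = paddle.static.append_backward(loss)
        opt.apply_gradients(params_grads)
    """
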
    def __init__(
        self,
        learning_rate=0.001,
        lamb_weight_decay=0.01,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-6,
        parameters=None,
        grad_clip=None,
        exclude_from_weight_decay_fn=None,
        clip_after_allreduce=True,
        is_grad_scaled_by_nranks=True,
        alignment=128,
        use_master_param_norm=True,
        gradient_accumulation_steps=1,
        use_master_acc_grad=True,
        nproc_per_node=None,
        use_hierarchical_allreduce=False,
        name=None,
    ):
        assert (
            not framework._non_static_mode()
        ), "DistributedFusedLamb does not support dygraph mode"
        super().__init__(learning_rate=learning_rate, grad_clip=None, name=name)

        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._weight_decay = (
            lamb_weight_decay if lamb_weight_decay is not None else 0.0
        )
        if grad_clip is not None:
            assert isinstance(
                grad_clip, ClipGradByGlobalNorm
            ), "Only ClipGradByGlobalNorm is supported in DistributedFusedLamb"
            max_global_grad_norm = grad_clip.clip_norm
        else:
            max_global_grad_norm = -1.0
        self._max_global_grad_norm = max_global_grad_norm
        self._alignment = alignment if alignment is not None else -1
        self._clip_after_allreduce = clip_after_allreduce
        self._is_grad_scaled_by_nranks = is_grad_scaled_by_nranks
        self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn
        self._scale = None
        self._use_master_param_norm = use_master_param_norm
        self._gradient_accumulation_steps = gradient_accumulation_steps
        self._use_master_acc_grad = use_master_acc_grad
        self._nproc_per_node = nproc_per_node
        self._use_hierarchical_allreduce = use_hierarchical_allreduce
        assert self._gradient_accumulation_steps >= 1

        self.helper = LayerHelper('distributed_fused_lamb')
        self._supports_check_nan_inf = True  # very important flag for AMP

        main_block = self.helper.main_program.global_block()
        self._found_inf = main_block.create_var(
            name=unique_name.generate('found_inf'),
            shape=[1],
            dtype=core.VarDesc.VarType.BOOL,
        )
        self._step = None

        if self._gradient_accumulation_steps > 1:
            self._stop_update = main_block.create_var(
                name=unique_name.generate('stop_update'),
                shape=[1],
                dtype=core.VarDesc.VarType.BOOL,
            )
        else:
            self._stop_update = None

        self._param_to_master_param = {}

    def _get_stop_update_var(self):
        return self._stop_update if self._stop_update is not None else False

    def _set_step(self, step):
        self._step = step

    def _get_or_create_step(self):
        if self._step is None:
            self._step = self._create_persistable_var('step', dtype='int64')
        return self._step

    def _set_scale(self, scale):
        assert scale is not None
        if not isinstance(scale, Variable):
            scale = self._create_scale_from_constant(scale)
        self._scale = scale

    def _create_scale_from_constant(self, value):
        name = unique_name.generate('global_scale')
        return layers.create_global_var(
            name=name,
            shape=[1],
            dtype='float32',
            value=float(value),
            persistable=True,
        )

    def _get_or_create_scale(self):
        if self._scale is None:
            self._scale = self._create_scale_from_constant(1.0)
        return self._scale

    def _create_persistable_var(self, name=None, shape=[-1], dtype='float32'):
        startup_block = self.helper.startup_program.global_block()
        if name is not None:
            name = unique_name.generate(name)
        startup_var = startup_block.create_var(
            name=name,
            shape=shape,
            dtype=dtype,
            persistable=True,
            stop_gradient=True,
        )
        main_block = self.helper.main_program.global_block()
        main_var = main_block.create_var(
            name=startup_var.name,
            shape=startup_var.shape,
            dtype=startup_var.dtype,
            persistable=True,
            stop_gradient=True,
        )
        return main_var

    def _get_parameter(self, name, scope=None):
        if scope is None:
            scope = global_scope()

        master_param = self._param_to_master_param.get(name)
        assert master_param is not None

        master_param_t = scope.find_var(master_param).get_tensor()
        assert master_param_t._dtype() == core.VarDesc.VarType.FP32

        param_t = scope.find_var(name).get_tensor()
        if param_t._dtype() == core.VarDesc.VarType.FP32:
            assert param_t._ptr() == master_param_t._ptr()
            return param_t, None
        else:
            assert param_t._dtype() == core.VarDesc.VarType.FP16
            assert param_t.shape() == master_param_t.shape()
            return param_t, master_param_t

    def apply_optimize(self, params_grads):
        self.apply_gradients(params_grads)

    def apply_gradients(self, params_grads):
        flattened = []
        for p, g in params_grads:
            flattened.extend([p, g])
        with flattened[0].block.program._optimized_guard(flattened), name_scope(
            "optimizer"
        ):
            self._apply_gradients_impl(params_grads)

    def _apply_gradients_impl(self, params_grads):
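        """Build the fused LAMB graph for the given ``(param, grad)`` pairs.

        The startup program gets a ``distributed_fused_lamb_init`` op that
        packs all parameters and gradients into the fused buffers and creates
        the sharding metadata; the main program then gets a single
        ``distributed_fused_lamb`` op that performs the complete update.
        """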
        for p, g in params_grads:
            assert (
                g.type == core.VarDesc.VarType.LOD_TENSOR
            ), "Only support dense gradient"
            g.persistable = True  # the gradient must be persistable for fusion

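        # Fused buffers that will hold all FP32/FP16 parameters and gradients
        # once the `distributed_fused_lamb_init` op packs them together.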
        fp32_fused_param = self._create_persistable_var('fp32_fused_param')
        fp32_fused_grad = self._create_persistable_var('fp32_fused_grad')
        fp16_fused_param = self._create_persistable_var(
            'fp16_fused_param', dtype='float16'
        )
        fp16_fused_grad = self._create_persistable_var(
            'fp16_fused_grad', dtype='float16'
        )

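        # One master (FP32) weight per parameter, plus the fused LAMB moment
        # and beta-power accumulators.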
        master_params = []
        for p, g in params_grads:
            master_p = self._create_persistable_var('master_weight')
            self._param_to_master_param[p.name] = master_p.name
            master_params.append(master_p)

        moment1 = self._create_persistable_var('moment1')
        moment1.is_distributed = True
        moment2 = self._create_persistable_var('moment2')
        moment2.is_distributed = True
        beta1pow = self._create_persistable_var('beta1pow')
        beta2pow = self._create_persistable_var('beta2pow')

        param_info = self._create_persistable_var('param_info', dtype='int32')
        param_info.is_distributed = True

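        # Offsets of each parameter inside the fused buffers; the "partial"
        # offsets below describe only the shard owned by the current rank.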
        fused_offsets = self._create_persistable_var(
            'fused_offsets', dtype='int32'
        )

        fp32_partial_fused_offsets = self._create_persistable_var(
            'fp32_partial_fused_offsets', dtype='int32'
        )
        fp32_partial_fused_offsets.is_distributed = True

        fp16_partial_fused_offsets = self._create_persistable_var(
            'fp16_partial_fused_offsets', dtype='int32'
        )
        fp16_partial_fused_offsets.is_distributed = True

        param_order = self._create_persistable_var('param_order', dtype='int32')
        param_order.is_distributed = True

        if self._gradient_accumulation_steps > 1:
            fp32_acc_fused_grad = [
                self._create_persistable_var('fp32_acc_fused_grad')
            ]
            fp16_acc_fused_grad = [
                self._create_persistable_var(
                    'fp16_acc_fused_grad', dtype='float16'
                )
            ]
            acc_step = [self._create_persistable_var('acc_step', dtype='int64')]
        else:
            fp32_acc_fused_grad = []
            fp16_acc_fused_grad = []
            acc_step = []

        step = self._get_or_create_step()

        rank = paddle.distributed.get_rank()
        nranks = paddle.distributed.get_world_size()
        if self._nproc_per_node is None:
            nproc_per_node = nranks
        else:
            nproc_per_node = self._nproc_per_node
        assert (
            nranks % nproc_per_node == 0
        ), "nranks should be exactly divided by nproc_per_node"

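        # With more than one node, optimizer states are sharded within each
        # node (across nproc_per_node ranks) rather than across all ranks.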
        shard_inside_node = nranks > nproc_per_node
        local_rank = rank % nproc_per_node
        node_id = int(rank / nproc_per_node)
        node_num = int(nranks / nproc_per_node)
        ring_ids = []
        startup_block = self.helper.startup_program.global_block()
        if nranks > 1:
            ring_id = init_communicator(
                startup_block, rank, list(range(nranks)), 0
            )
            ring_ids.append(ring_id)

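        # For multi-node sharding, build an extra intra-node ring and, when
        # hierarchical all-reduce is enabled, an inter-node ring as well.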
        use_hierarchical_allreduce = False
        if node_num > 1 and len(ring_ids) <= 1 and shard_inside_node:
            local_group_ranks = list(
                range(node_id * nproc_per_node, (node_id + 1) * nproc_per_node)
            )
            ring_id = init_communicator(
                startup_block, rank, local_group_ranks, 1
            )
            ring_ids.append(ring_id)

            if self._use_hierarchical_allreduce and nranks > nproc_per_node:
                use_hierarchical_allreduce = True
                outer_group_ranks = list(
                    range(rank % nproc_per_node, nranks, nproc_per_node)
                )
                ring_id = init_communicator(
                    startup_block, rank, outer_group_ranks, ring_ids[-1] + 1
                )
                ring_ids.append(ring_id)

        scale = self._get_or_create_scale()

        params = [p for p, _ in params_grads]
        grads = [g for _, g in params_grads]
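        # Per-parameter weight decay mask: 1 applies decay, 0 marks parameters
        # excluded by exclude_from_weight_decay_fn.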
        apply_weight_decay = [1] * len(params)
        if self._exclude_from_weight_decay_fn is not None:
            for i, p in enumerate(params):
                if self._exclude_from_weight_decay_fn(p):
                    apply_weight_decay[i] = 0

        for g in grads:
            startup_block.create_var(
                name=g.name,
                type=g.type,
                dtype=g.dtype,
                persistable=g.persistable,
                shape=g.shape,
            )

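        # Broadcast the initial parameters so that every rank starts from the
        # same values, then let `distributed_fused_lamb_init` pack params and
        # grads into the fused buffers and build the sharding metadata.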
        if nranks > 1:
            broadcast_parameters(startup_block, params, ring_ids[0])

        startup_block.append_op(
            type='distributed_fused_lamb_init',
            inputs={
                'Param': params,
                'Grad': grads,
            },
            outputs={
                'FP32FusedParam': [fp32_fused_param],
                'FP32FusedGrad': [fp32_fused_grad],
                'FP16FusedParam': [fp16_fused_param],
                'FP16FusedGrad': [fp16_fused_grad],
                'Moment1': [moment1],
                'Moment2': [moment2],
                'Beta1Pow': [beta1pow],
                'Beta2Pow': [beta2pow],
                'GlobalScale': [scale],
                'ParamInfo': [param_info],
                'ParamOut': params,
                'MasterParamOut': master_params,
                'GradOut': grads,
                'FP32ShardFusedParamOffsets': [fp32_partial_fused_offsets],
                'FP16ShardFusedParamOffsets': [fp16_partial_fused_offsets],
                'FusedParamOffsets': [fused_offsets],
                'ParamOrder': [param_order],
                'Step': [step],
            },
            attrs={
                'alignment': self._alignment,
                'rank': local_rank if shard_inside_node else rank,
                'nranks': nproc_per_node if shard_inside_node else nranks,
                'apply_weight_decay': apply_weight_decay,
                'moment1': 0.0,
                'moment2': 0.0,
                'beta1': self._beta1,
                'beta2': self._beta2,
            },
        )

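        # In the main program: resolve the single learning rate shared by all
        # parameters and append the fused LAMB op, which performs the gradient
        # all-reduce, global-norm clipping and parameter update in one kernel.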
        main_block = self.helper.main_program.global_block()
        self._create_global_learning_rate()
        lr = None
        for p_g in params_grads:
            if lr is None:
                lr = self._create_param_lr(p_g)
            else:
                new_lr = self._create_param_lr(p_g)
                assert id(lr) == id(
                    new_lr
                ), "The learning rate for each parameter should be the same"
        assert lr is not None

        lamb_op = main_block.append_op(
            type='distributed_fused_lamb',
            inputs={
                'FP32FusedParam': [fp32_fused_param],
                'FP32FusedGrad': [fp32_fused_grad],
                'FP16FusedParam': [fp16_fused_param],
                'FP16FusedGrad': [fp16_fused_grad],
                'LearningRate': [lr],
                'Moment1': [moment1],
                'Moment2': [moment2],
                'Beta1Pow': [beta1pow],
                'Beta2Pow': [beta2pow],
                'GlobalScale': [scale],
                'ParamInfo': [param_info],
                'Param': params,
                'Grad': grads,
                'FusedParamOffsets': [fused_offsets],
                'FP32ShardFusedParamOffsets': [fp32_partial_fused_offsets],
                'FP16ShardFusedParamOffsets': [fp16_partial_fused_offsets],
                'ParamOrder': [param_order],
            },
            outputs={
                'FP32FusedParamOut': [fp32_fused_param],
                'FP16FusedParamOut': [fp16_fused_param],
                'Moment1Out': [moment1],
                'Moment2Out': [moment2],
                'Beta1PowOut': [beta1pow],
                'Beta2PowOut': [beta2pow],
                'ParamOut': params,
                'GradOut': grads,
                'FoundInf': [self._found_inf],
                'FP32AccFusedGrad': fp32_acc_fused_grad,
                'FP16AccFusedGrad': fp16_acc_fused_grad,
                'AccStep': acc_step,
                'StopUpdate': self._stop_update
                if self._stop_update is not None
                else [],
                'Step': [step],
            },
            attrs={
                'weight_decay': self._weight_decay,
                'beta1': self._beta1,
                'beta2': self._beta2,
                'epsilon': self._epsilon,
                'max_global_grad_norm': self._max_global_grad_norm,
                'clip_after_allreduce': self._clip_after_allreduce,
                'rank': rank,
                'nranks': nranks,
                'ring_id': ring_ids,
                'use_master_param_norm': self._use_master_param_norm,
                'is_grad_scaled_by_nranks': self._is_grad_scaled_by_nranks,
                'acc_steps': self._gradient_accumulation_steps,
                'use_master_acc_grad': self._use_master_acc_grad,
                'use_hierarchical_allreduce': use_hierarchical_allreduce,
            },
        )
        return [lamb_op]