'''
Copyright 2019 The Microsoft DeepSpeed Team
'''

import torch
from torch.distributed.distributed_c10d import _get_global_rank
import torch.distributed as dist
from torch._six import inf
from packaging import version as pkg_version

from deepspeed.runtime.fp16.loss_scaler import LossScaler, DynamicLossScaler
from deepspeed.runtime.utils import bwc_tensor_model_parallel_rank, get_global_norm, see_memory_usage, is_model_parallel_parameter
from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION_GRADIENTS
from deepspeed.runtime.zero.offload_constants import OFFLOAD_CPU_DEVICE, OFFLOAD_OPTIMIZER
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.utils import logger
from deepspeed.moe.utils import is_moe_param
from deepspeed.git_version_info import version

from deepspeed.checkpoint.constants import (DS_VERSION,
                                            PARTITION_COUNT,
                                            SINGLE_PARTITION_OF_FP32_GROUPS,
                                            BASE_OPTIMIZER_STATE,
                                            ZERO_STAGE)

# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False


def input(msg):
    return


def split_half_float_double(tensors):
    dtypes = [
        "torch.cuda.HalfTensor",
        "torch.cuda.FloatTensor",
        "torch.cuda.DoubleTensor",
        "torch.cuda.BFloat16Tensor"
    ]
    buckets = []
    for i, dtype in enumerate(dtypes):
        bucket = [t for t in tensors if t.type() == dtype]
        if bucket:
            buckets.append(bucket)
    return buckets
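
# A minimal sketch of the bucketing above (hypothetical tensors; it needs a
# CUDA device because the grouping keys on the torch.cuda tensor type names):
#
#   >>> a = torch.ones(2, device='cuda').half()
#   >>> b = torch.ones(2, device='cuda').float()
#   >>> [len(bucket) for bucket in split_half_float_double([a, b, a])]
#   [2, 1]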


def isclose(a, b, rtol=1e-09, atol=0.0):
    return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)


def lcm(x, y):
    from math import gcd  # fractions.gcd was removed in Python 3.9
    return x * y // gcd(x, y)


def get_alignment_padding(tensor_list, alignment):
    num_elements = sum([tensor.numel() for tensor in tensor_list])
    remainder = num_elements % alignment
    return (alignment - remainder) if remainder else remainder
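
# A small worked example (hypothetical sizes): tensors of 6 and 4 elements
# total 10; with alignment 4 the remainder is 2, so alignment - remainder = 2
# padding elements are needed (and 0 when the total is already aligned).
#
#   >>> get_alignment_padding([torch.empty(6), torch.empty(4)], 4)
#   2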


def move_to_cpu(tensor_list):
    for tensor in tensor_list:
        tensor.data = tensor.data.cpu()


def print_rank_msg(msg):
    print(f"rank {dist.get_rank()} - {msg}")


def _get_padded_tensor(src_tensor, size):
    if src_tensor.numel() >= size:
        return src_tensor
    padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
    slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
    slice_tensor.data.copy_(src_tensor.data)
    return padded_tensor
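
# Sketch of the padding behavior (hypothetical values): a 3-element tensor
# padded to size 5 keeps its data in the first 3 slots with zeros after it,
# while a tensor already at or above `size` is returned unchanged.
#
#   >>> _get_padded_tensor(torch.tensor([1., 2., 3.]), 5)
#   tensor([1., 2., 3., 0., 0.])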


class DeepSpeedZeroOptimizer(object):
    """
    DeepSpeedZeroOptimizer designed to reduce the memory footprint
    required for training large deep learning models.

    For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
    https://arxiv.org/abs/1910.02054

    For usage examples, refer to TODO: DeepSpeed Tutorial

    """
    def __init__(self,
                 init_optimizer,
                 timers,
                 static_loss_scale=1.0,
                 dynamic_loss_scale=False,
                 dynamic_loss_args=None,
                 verbose=True,
                 contiguous_gradients=True,
                 reduce_bucket_size=500000000,
                 allgather_bucket_size=5000000000,
                 dp_process_group=None,
                 expert_parallel_group=None,
                 expert_data_parallel_group=None,
                 reduce_scatter=True,
                 overlap_comm=False,
                 cpu_offload=False,
                 mpu=None,
                 clip_grad=0.0,
                 communication_data_type=torch.float16,
                 postscale_gradients=True,
                 gradient_predivide_factor=1.0,
                 gradient_accumulation_steps=1,
                 ignore_unused_parameters=True,
                 partition_grads=True,
                 round_robin_gradients=False,
                 has_moe_layers=False,
                 fp16_master_weights_and_gradients=False,
                 elastic_checkpoint=False):

        if dist.get_rank() == 0:
            logger.info(f"Reduce bucket size {reduce_bucket_size}")
            logger.info(f"Allgather bucket size {allgather_bucket_size}")
            logger.info(f"CPU Offload: {cpu_offload}")
            logger.info(f'Round robin gradient partitioning: {round_robin_gradients}')
        # The fused optimizer does all the work. We need this layer for two reasons:
        # 1. maintain same user API from apex.fp16_utils
        # 2. keep common stuff here in case we need to add new fused optimizer later

        self.elastic_checkpoint = elastic_checkpoint

        # differences from apex.fp16_utils:
        # - assume all model params in fp16
        # - assume all params require grad
        # - flat by groups, not keeping state. TODO: remove state explicitly?
        # - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
        if not torch.cuda.is_available():
            raise SystemError("Cannot use fp16 without CUDA.")
        self.optimizer = init_optimizer

        # Load pre-built or JIT compile (un)flatten ops
        util_ops = UtilsBuilder().load()
        self.flatten = util_ops.flatten
        self.unflatten = util_ops.unflatten

        # ZeRO stage 1 (False) or 2 (True)
        self.partition_gradients = partition_grads

        self.timers = timers

        self.reduce_scatter = reduce_scatter

        self.overlap_comm = overlap_comm

        self.cpu_offload = cpu_offload

        self.deepspeed_adam_offload = cpu_offload

        self.device = torch.cuda.current_device() if not self.cpu_offload else 'cpu'

        self.dp_process_group = dp_process_group

        #expert parallel group
        self.ep_process_group = expert_parallel_group

        #data parallel group for experts
        self.expert_dp_process_group = expert_data_parallel_group

        #data parallel size for non-experts
        dp_size = dist.get_world_size(group=self.dp_process_group)

        # For MoE models this may be different for different param groups;
        # it will be modified during MoE setup later in the init
        self.real_dp_process_group = [
            dp_process_group for i in range(len(self.optimizer.param_groups))
        ]
        self.partition_count = [dp_size for i in range(len(self.optimizer.param_groups))]

        self.is_gradient_accumulation_boundary = True

        # CPU-Offload requires contiguous gradients
        self.contiguous_gradients = contiguous_gradients or cpu_offload

        self.has_moe_layers = has_moe_layers
        if self.has_moe_layers:
            self._configure_moe_settings()
        self._global_grad_norm = 0.

        if mpu is None:
            self.model_parallel_group = None
            self.model_parallel_rank = 0
        else:
            self.model_parallel_group = mpu.get_model_parallel_group()
            self.model_parallel_rank = bwc_tensor_model_parallel_rank(mpu)

        self.overflow = False
        self.clip_grad = clip_grad
        self.communication_data_type = communication_data_type
        self.gradient_predivide_factor = gradient_predivide_factor
        self.postscale_gradients = postscale_gradients
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.micro_step_id = 0
        self.ignore_unused_parameters = ignore_unused_parameters
        self.round_robin_gradients = round_robin_gradients

        self.extra_large_param_to_reduce = None
        self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients

        if self.fp16_master_weights_and_gradients:
            assert self.cpu_offload and type(self.optimizer) in [DeepSpeedCPUAdam], f"fp16_master_weights_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32. Currently only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}. Either disable fp16_master_weights_and_gradients or enable ZeRO-2 Offload with DeepSpeedCPUAdam"

        if self.reduce_scatter:
            assert self.communication_data_type in (torch.float16, torch.bfloat16), f"ZeRO-2 supports only float16 or bfloat16 communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'"
            assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-2 with reduce scatter enabled"
            assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-2 with reduce scatter enabled"

        # param flattened by groups
        self.bit16_groups = []
        self.bit16_groups_flat = []

        # param partitioned by data parallel degree
        # this will contain a list of equal sized tensors
        # each of which will be updated by a different process
        self.parallel_partitioned_bit16_groups = []

        # a single 32-bit partition of the parallel partitioned parameters
        # that this process will update
        self.single_partition_of_fp32_groups = []

        # param partition info

        # These are the parameters in each group that will not be updated by this process directly
        self.params_not_in_partition = []

        # These are the parameters that will be updated by this process directly
        self.params_in_partition = []

        # Offset from the first parameter in the self.params_in_partition;
        # the parameter boundaries may not align with partition boundaries
        # so we need to keep track of the offset
        self.first_offset = []

        # number of elements per partition in each group
        self.partition_size = []

        # align nccl all-gather send buffers to 4-byte boundary
        self.nccl_start_alignment_factor = 2  # 4-byte alignment / sizeof(fp16) = 2

        assert (allgather_bucket_size % self.nccl_start_alignment_factor == 0), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} "

        self.all_reduce_print = False
        self.dtype = self.optimizer.param_groups[0]['params'][0].dtype

        self.round_robin_bit16_groups = []
        self.round_robin_bit16_indices = []

        # Use different parallel groups to do all_to_all_reduce related things
        # padding on each partition for alignment purposes
        self.groups_padding = []
        # loop to deal with groups
        for i, param_group in enumerate(self.optimizer.param_groups):
            partition_id = dist.get_rank(group=self.real_dp_process_group[i])

            # push this group to list before modifying it
            # TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group
            self.bit16_groups.append(param_group['params'])

            # Record padding required to align group to world size
            if partition_id == dist.get_world_size(
                    group=self.real_dp_process_group[i]) - 1:
                padding = get_alignment_padding(self.bit16_groups[i],
                                                self.partition_count[i])
            else:
                padding = 0
            self.groups_padding.append(padding)

            # not sure why apex was cloning the weights before flattening
            # removing cloning here

            see_memory_usage(f"Before moving param group {i} to CPU")
            # move all the parameters to cpu to free up GPU space for creating flat buffer
            move_to_cpu(self.bit16_groups[i])
            see_memory_usage(f"After moving param group {i} to CPU", force=False)

            # Reorder group parameters for load balancing of gradient partitioning during backward among ranks.
            # This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks.
            # For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging
            # to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m).
            if self.round_robin_gradients:
                round_robin_tensors, round_robin_indices = self._round_robin_reorder(
                    self.bit16_groups[i],
                    dist.get_world_size(group=self.real_dp_process_group[i])
                )
            else:
                round_robin_tensors = self.bit16_groups[i]
                round_robin_indices = list(range(len(self.bit16_groups[i])))

            self.round_robin_bit16_groups.append(round_robin_tensors)
            self.round_robin_bit16_indices.append(round_robin_indices)

            # create flat buffer in CPU and move to GPU
            self.bit16_groups_flat.append(
                self.flatten_dense_tensors_aligned(
                    self.round_robin_bit16_groups[i],
                    self.nccl_start_alignment_factor *
                    dist.get_world_size(group=self.real_dp_process_group[i])).cuda(
                        torch.cuda.current_device()))
            see_memory_usage(f"After flattening and moving param group {i} to GPU",
                             force=False)

            if dist.get_rank(group=self.real_dp_process_group[i]) == 0:
                see_memory_usage(
                    f"After Flattening and after emptying param group {i} cache",
                    force=False)

            # set model bit16 weight to slices of flattened buffer
            self._update_model_bit16_weights(i)

            # divide the flat weights into near-equal partitions, as many as the data parallel degree;
            # each process will compute on a different part of the partition
            data_parallel_partitions = self.get_data_parallel_partitions(
                self.bit16_groups_flat[i],
                i)
            self.parallel_partitioned_bit16_groups.append(data_parallel_partitions)

            # verify that data partition start locations are 4-byte aligned
            for partitioned_data in data_parallel_partitions:
                assert (partitioned_data.data_ptr() %
                        (2 * self.nccl_start_alignment_factor) == 0)

            # a partition of the fp32 master weights that will be updated by this process
            if not fp16_master_weights_and_gradients:
                self.single_partition_of_fp32_groups.append(
                    self.parallel_partitioned_bit16_groups[i][partition_id].to(
                        self.device).clone().float().detach())
            else:
                self.single_partition_of_fp32_groups.append(
                    self.parallel_partitioned_bit16_groups[i][partition_id].to(
                        self.device).clone().half().detach())

            # modify optimizer to have flat master weight
            self.single_partition_of_fp32_groups[
                i].requires_grad = True  # keep this in case internal optimizer uses it
            param_group['params'] = [self.single_partition_of_fp32_groups[i]]

            partition_size = len(self.bit16_groups_flat[i]) / dist.get_world_size(
                group=self.real_dp_process_group[i])
            params_in_partition, params_not_in_partition, first_offset = self.get_partition_info(
                self.round_robin_bit16_groups[i],
                partition_size,
                partition_id)

            self.partition_size.append(partition_size)
            self.params_in_partition.append(params_in_partition)
            self.params_not_in_partition.append(params_not_in_partition)
            self.first_offset.append(first_offset)

        for rank in range(dist.get_world_size()):
            if dist.get_rank() == rank:
                print(
                    f"Rank: {rank} partition count {self.partition_count} and sizes {[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i, p in enumerate(self.single_partition_of_fp32_groups)]} "
                )
            # barrier must be reached by every rank each iteration, so it
            # lives outside the rank check to avoid a deadlock
            dist.barrier()
        #exit(0)
        self.reduce_bucket_size = int(reduce_bucket_size)
        self.allgather_bucket_size = int(allgather_bucket_size)

        self.reduction_event = torch.cuda.Event(enable_timing=False, blocking=False)
        self.reduction_stream = torch.cuda.Stream()
        self.cpu_computation_stream = torch.cuda.Stream()
        self.copy_grad_stream = torch.cuda.Stream()
        self.callback_queued = False

        self.param_dict = {}

        # map between param_id and bool to specify if a param is in this partition
        self.is_param_in_current_partition = {}

        self.grads_in_ipg_bucket = []
        self.params_in_ipg_bucket = []
        self.elements_in_ipg_bucket = 0
        self.params_already_reduced = []
        self._release_ipg_buffers()
        self.previous_reduced_grads = None
        self.ipg_bucket_has_moe_params = False

        # simplified param id
        self.param_id = {}

        #interesting code: unique ids being assigned to individual parameters
        largest_param_numel = 0
        count = 0
        for i, params_group in enumerate(self.bit16_groups):
            for param in params_group:
                unique_id = id(param)
                self.param_id[unique_id] = count
                self.param_dict[count] = param
                self.params_already_reduced.append(False)
                if param.numel() > largest_param_numel:
                    largest_param_numel = param.numel()
                count = count + 1

        for param_group in self.params_in_partition:
            for param in param_group:
                self.is_param_in_current_partition[self.get_param_id(param)] = True

        for param_group in self.params_not_in_partition:
            for param in param_group:
                self.is_param_in_current_partition[self.get_param_id(param)] = False

        if self.cpu_offload:
            self.accumulated_grads_in_cpu = {}
            self.norm_for_param_grads = {}
            self.local_overflow = False
            self.grad_position = {}
            self.temp_grad_buffer_for_cpu_offload = torch.zeros(
                largest_param_numel,
                device=self.device,
                dtype=self.dtype).pin_memory()
            self.temp_grad_buffer_for_gpu_offload = torch.zeros(
                largest_param_numel,
                device=torch.cuda.current_device(),
                dtype=self.dtype)
            for i, params_group in enumerate(self.bit16_groups):
                self.get_grad_position(i,
                                       self.params_in_partition[i],
                                       self.first_offset[i],
                                       self.partition_size[i])

        # mapping from parameter to partition that it belongs to
        self.param_to_partition_ids = {}

        # stores if a partition has been reduced in this step
        self.is_partition_reduced = {}

        # number of grads in partition that still need to be computed
        self.remaining_grads_in_partition = {}

        # total number of grads in partition
        self.total_grads_in_partition = {}

        # stores if a grad in a partition has been computed or not
        self.is_grad_computed = {}

        # stores the offset at which a parameter gradient needs to be inserted in a partition
        self.grad_partition_insertion_offset = {}

        # the offset in the gradient at which it must be inserted at the beginning of the partition
        self.grad_start_offset = {}

        # will store the averaged gradients required by this partition
        self.averaged_gradients = {}

        # store index of first parameter in each partition
        self.first_param_index_in_partition = {}

        # initializes all data structures for implementing gradient partitioning
        self.initialize_gradient_partitioning_data_structures()

        # resets the data structure value for the next backward propagation
        self.reset_partition_gradient_structures()

        # creates backward hooks for gradient partitioning
        if self.partition_gradients or self.overlap_comm:
            self.create_reduce_and_remove_grad_hooks()

        # we may have a way of fusing dynamic scale. Do not support for now
        if self.dtype == torch.float or self.dtype == torch.bfloat16 or not dynamic_loss_scale:
            loss_scale_value = 1.0 if (
                (self.dtype == torch.float) or
                (self.dtype == torch.bfloat16)) else static_loss_scale

            self.dynamic_loss_scale = False
            self.loss_scaler = LossScaler(scale=loss_scale_value)
            cur_iter = 0
        else:
            if dynamic_loss_args is None:
                self.loss_scaler = DynamicLossScaler()
            else:
                self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)

            self.dynamic_loss_scale = True

        see_memory_usage("Before initializing optimizer states", force=True)
        self.initialize_optimizer_states()
        see_memory_usage("After initializing optimizer states", force=True)

        if dist.get_rank() == 0:
            logger.info(f"optimizer state initialized")

        if dist.get_rank(group=self.dp_process_group) == 0:
            see_memory_usage(f"After initializing ZeRO optimizer", force=True)

    def is_moe_group(self, group):
        return 'moe' in group and group['moe']

    def _configure_moe_settings(self):
        assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
        assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"

        assert any([self.is_moe_group(group) for group in self.optimizer.param_groups]), "The model has moe layers, but none of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer"
        self.is_moe_param_group = []
        for i, group in enumerate(self.optimizer.param_groups):
            if self.is_moe_group(group):
                assert all([is_moe_param(param) for param in group['params']]), "All params in MoE group must be MoE params"
                self.real_dp_process_group[i] = self.expert_dp_process_group[
                    group['name']]
                self.partition_count[i] = dist.get_world_size(
                    group=self.expert_dp_process_group[group['name']])
                self.is_moe_param_group.append(True)
            else:
                self.is_moe_param_group.append(False)

        assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE"
        assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE"

    def _update_model_bit16_weights(self, group_index):
        updated_params = self.unflatten(self.bit16_groups_flat[group_index],
                                        self.round_robin_bit16_groups[group_index])
        for p, q in zip(self.round_robin_bit16_groups[group_index], updated_params):
            p.data = q.data

        # set model fp16 weight to slices of reordered flattened buffer
        for param_index, param in enumerate(self.bit16_groups[group_index]):
            new_index = self.round_robin_bit16_indices[group_index][param_index]
            param.data = self.round_robin_bit16_groups[group_index][new_index].data

    def _round_robin_reorder(self, tensor_list, num_partitions):

        # disable round robin if you need to debug something
        # return tensor_list, list(range(len(tensor_list)))

        partition_tensors = {}

        for i, tensor in enumerate(tensor_list):
            j = i % num_partitions
            if j not in partition_tensors:
                partition_tensors[j] = []
            partition_tensors[j].append((i, tensor))

        reordered_tensors = []
        reordered_indices = {}

        for partition_index in partition_tensors.keys():
            for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]):
                reordered_indices[original_index] = len(reordered_tensors)
                reordered_tensors.append(tensor)

        return reordered_tensors, reordered_indices
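
    # A worked example of the reordering above (hypothetical tensors): with
    # six tensors t0..t5 and num_partitions=2, partition 0 collects
    # (t0, t2, t4) and partition 1 collects (t1, t3, t5), so the method
    # returns [t0, t2, t4, t1, t3, t5] with reordered_indices mapping each
    # original index to its new position: {0: 0, 2: 1, 4: 2, 1: 3, 3: 4, 5: 5}.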

    def _release_ipg_buffers(self):
        if self.contiguous_gradients:
            self.ipg_buffer = None
            self.grads_in_partition = None
            self.grads_in_partition_offset = 0

    def initialize_optimizer_states(self):

        for i, group in enumerate(self.bit16_groups):
            single_grad_partition = torch.zeros(
                int(self.partition_size[i]),
                dtype=self.single_partition_of_fp32_groups[i].dtype,
                device=self.device)
            self.single_partition_of_fp32_groups[
                i].grad = single_grad_partition.pin_memory(
                ) if self.cpu_offload else single_grad_partition
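
        # Assumed intent, based on the surrounding code: step() is called with
        # the all-zero gradients set up above so that the wrapped optimizer
        # allocates its internal state (e.g. Adam moments) for each partition
        # up front rather than lazily on the first real step.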

        self.optimizer.step()

        if not self.cpu_offload:
            for group in self.single_partition_of_fp32_groups:
                group.grad = None  #class init

        return

    #########################################################################
    #################### ZeRO Stage 1 - reduce gradients ####################
    #########################################################################
    def reduce_gradients(self, pipeline_parallel=False):
        world_size = dist.get_world_size(self.dp_process_group)
        my_rank = dist.get_rank(self.dp_process_group)

        # with PP we must create ipg buffer, since backward is handled outside zero
        if pipeline_parallel and self.contiguous_gradients:
            self.ipg_buffer = []
            buf_0 = torch.empty(int(self.reduce_bucket_size),
                                dtype=self.dtype,
                                device=torch.cuda.current_device())
            self.ipg_buffer.append(buf_0)
            self.ipg_index = 0

        if not self.overlap_comm:
            for i, group in enumerate(self.bit16_groups):
                for param in group:
                    if param.grad is not None:
                        self.reduce_ready_partitions_and_remove_grads(param, i)
        # reduce any pending grads in either hook/non-hook case
        self.overlapping_partition_gradients_reduce_epilogue()

    #########################################################################
    #########################ZeRO Partition Gradients########################
    #########################################################################

    def get_first_param_index(self, group_id, param_group, partition_id):
        for index, param in enumerate(param_group):
            param_id = self.get_param_id(param)
            if partition_id in self.param_to_partition_ids[group_id][param_id]:
                return index
        return None

    def initialize_gradient_partitioning_data_structures(self):

        for i, param_group in enumerate(self.round_robin_bit16_groups):
            total_partitions = dist.get_world_size(group=self.real_dp_process_group[i])

            self.param_to_partition_ids[i] = {}
            self.is_partition_reduced[i] = {}
            self.total_grads_in_partition[i] = {}
            self.remaining_grads_in_partition[i] = {}
            self.is_grad_computed[i] = {}
            self.grad_partition_insertion_offset[i] = {}
            self.grad_start_offset[i] = {}
            self.first_param_index_in_partition[i] = {}

            for partition_id in range(total_partitions):
                self.is_grad_computed[i][partition_id] = {}
                self.grad_partition_insertion_offset[i][partition_id] = {}
                self.grad_start_offset[i][partition_id] = {}
                self.total_grads_in_partition[i][partition_id] = 0
                self.initialize_gradient_partition(i, param_group, partition_id)
                self.is_partition_reduced[i][partition_id] = False
                self.first_param_index_in_partition[i][
                    partition_id] = self.get_first_param_index(
                        i,
                        param_group,
                        partition_id)

    def independent_gradient_partition_epilogue(self):
        self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0)
        self.reduce_ipg_grads()
        self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0)

        # if dist.get_rank() == 0:
        #    logger.info("Params already reduced %s", self.params_already_reduced)
        for i in range(len(self.params_already_reduced)):
            self.params_already_reduced[i] = False

        if self.overlap_comm:
            torch.cuda.synchronize()
            # It is safe to clear previously reduced grads of other partitions
            self._clear_previous_reduced_grads()

        if self.cpu_offload is False:
            for i, _ in enumerate(self.bit16_groups):

                if not i in self.averaged_gradients or self.averaged_gradients[i] is None:
                    self.averaged_gradients[i] = self.get_flat_partition(
                        self.params_in_partition[i],
                        self.first_offset[i],
                        self.partition_size[i],
                        dtype=self.dtype,
                        device=torch.cuda.current_device(),
                        return_tensor_list=True)
                else:
                    avg_new = self.get_flat_partition(self.params_in_partition[i],
                                                      self.first_offset[i],
                                                      self.partition_size[i],
                                                      dtype=self.dtype,
                                                      device=torch.cuda.current_device(),
                                                      return_tensor_list=True)

                    for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new):
                        accumulated_grad.add_(new_avg_grad)

        self._release_ipg_buffers()

        # No need to keep the gradients anymore.
        # All gradients required by the step
        # are in self.averaged_gradients
        self.zero_grad()
        see_memory_usage(f"End ipg_epilogue")

    # resets all partitions to not reduced
    # sets remaining grads to the total number of grads in each partition
    # set is grad computed to false for all grads in partition
    def reset_partition_gradient_structures(self):
        for i, _ in enumerate(self.bit16_groups):
            total_partitions = dist.get_world_size(group=self.real_dp_process_group[i])
            for partition_id in range(total_partitions):
                self.is_partition_reduced[i][partition_id] = False
                self.remaining_grads_in_partition[i][
                    partition_id] = self.total_grads_in_partition[i][partition_id]

                for param_id in self.is_grad_computed[i][partition_id]:
                    self.is_grad_computed[i][partition_id][param_id] = False

    def initialize_gradient_partition(self, i, param_group, partition_id):
        def set_key_value_list(dictionary, key, value):
            if key in dictionary:
                dictionary[key].append(value)
            else:
                dictionary[key] = [value]

        def increment_value(dictionary, key):
            if key in dictionary:
                dictionary[key] += 1
            else:
                dictionary[key] = 1

        partition_size = self.partition_size[i]

        start_index = partition_size * partition_id
        end_index = partition_size * (partition_id + 1)

        current_index = 0
        first_offset = 0

        for param in param_group:

            param_size = param.numel()
            param_id = self.get_param_id(param)

            if (current_index >= start_index and current_index < end_index):
                set_key_value_list(self.param_to_partition_ids[i],
                                   param_id,
                                   partition_id)
                increment_value(self.total_grads_in_partition[i], partition_id)

                self.is_grad_computed[i][partition_id][param_id] = False

                self.grad_partition_insertion_offset[i][partition_id][
                    param_id] = current_index - start_index
                self.grad_start_offset[i][partition_id][param_id] = 0

            elif start_index > current_index and start_index < (current_index +
                                                                param_size):
                assert (first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition"
                first_offset = start_index - current_index

                set_key_value_list(self.param_to_partition_ids[i],
                                   param_id,
                                   partition_id)
                increment_value(self.total_grads_in_partition[i], partition_id)

                self.is_grad_computed[i][partition_id][param_id] = False

                self.grad_partition_insertion_offset[i][partition_id][param_id] = 0
                self.grad_start_offset[i][partition_id][param_id] = first_offset

            current_index = current_index + param_size
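
    # A worked example of the boundary bookkeeping above (hypothetical sizes):
    # with partition_size=4 and three params of 3, 3 and 2 elements, partition
    # 0 owns elements [0, 4) and partition 1 owns [4, 8). Param 1 (elements
    # 3..5) straddles the boundary: partition 0 records insertion offset 3 and
    # grad start offset 0, while partition 1 records insertion offset 0 and
    # grad start offset first_offset = start_index - current_index = 4 - 3 = 1.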

    def overlapping_partition_gradients_reduce_epilogue(self):
        self.independent_gradient_partition_epilogue()

    def create_reduce_and_remove_grad_hooks(self):
        self.grad_accs = []
        for i, param_group in enumerate(self.bit16_groups):
            for param in param_group:
                if param.requires_grad:

                    def wrapper(param, i):
                        param_tmp = param.expand_as(param)
                        grad_acc = param_tmp.grad_fn.next_functions[0][0]

                        def reduce_partition_and_remove_grads(*notneeded):
                            self.reduce_ready_partitions_and_remove_grads(param, i)

                        grad_acc.register_hook(reduce_partition_and_remove_grads)
                        self.grad_accs.append(grad_acc)

                    wrapper(param, i)
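
    # Note: param.expand_as(param) creates an autograd graph node whose
    # grad_fn.next_functions[0][0] is the AccumulateGrad function for the
    # parameter; hooking that node fires only after the gradient has been
    # fully accumulated, the standard PyTorch trick for post-accumulation
    # hooks.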

    def get_param_id(self, param):
        unique_id = id(param)
        return self.param_id[unique_id]

    def report_ipg_memory_usage(self, tag, param_elems):
        elem_count = self.elements_in_ipg_bucket + param_elems
        percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size
        see_memory_usage(
            f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}"
        )

    # create a flat tensor aligned at the alignment boundary
    def flatten_dense_tensors_aligned(self, tensor_list, alignment):
        num_elements = sum(t.numel() for t in tensor_list)
        remaining = num_elements % alignment

        if remaining:
            elements_to_add = alignment - remaining
            pad_tensor = torch.zeros(elements_to_add,
                                     device=tensor_list[0].device,
                                     dtype=tensor_list[0].dtype)
            padded_tensor_list = tensor_list + [pad_tensor]
        else:
            padded_tensor_list = tensor_list

        return self.flatten(padded_tensor_list)
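
    # A short sketch (hypothetical sizes): flattening tensors of 3 and 2
    # elements with alignment=4 gives 5 elements and remainder 1, so a
    # 3-element zero pad is appended and the flat buffer holds 8 elements.
    #
    #   >>> flat = self.flatten_dense_tensors_aligned(
    #   ...     [torch.ones(3), torch.ones(2)], 4)
    #   >>> flat.numel()
    #   8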

    ############### Independent Partition Gradient ########################
    def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
        if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size:
            self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads",
                                         param.numel())
            self.reduce_ipg_grads()
            if self.contiguous_gradients and self.overlap_comm:
                # Swap ipg_index between 0 and 1
                self.ipg_index = 1 - self.ipg_index
            self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads",
                                         param.numel())

        param_id = self.get_param_id(param)
        assert self.params_already_reduced[param_id] == False, \
            f"The parameter {param_id} has already been reduced. \
            Gradient computed twice for this partition. \
            Multiple gradient reduction is currently not supported"

        if param.numel() > self.reduce_bucket_size:
            self.extra_large_param_to_reduce = param

        elif self.contiguous_gradients:
            # keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening
            new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(
                0,
                self.elements_in_ipg_bucket,
                param.numel())
            new_grad_tensor.copy_(param.grad.view(-1))
            param.grad.data = new_grad_tensor.data.view_as(param.grad)

        self.elements_in_ipg_bucket += param.numel()

        assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient"

        self.grads_in_ipg_bucket.append(param.grad)
        self.params_in_ipg_bucket.append((i, param, param_id))

        #make sure the average tensor function knows how to average the gradients
        if is_moe_param(param):
            self.ipg_bucket_has_moe_params = True

        self.report_ipg_memory_usage("End ipg_remove_grads", 0)

    def print_rank_0(self, message):
        if dist.get_rank() == 0:
            logger.info(message)

    def gradient_reduction_w_predivide(self, tensor):

        dp_world_size = dist.get_world_size(group=self.dp_process_group)

        tensor_to_allreduce = tensor

        if self.communication_data_type != tensor.dtype:
            tensor_to_allreduce = tensor.to(self.communication_data_type)

        if self.postscale_gradients:
            if self.gradient_predivide_factor != 1.0:
                tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor)

            dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)

            if self.gradient_predivide_factor != dp_world_size:
                tensor_to_allreduce.mul_(self.gradient_predivide_factor / dp_world_size)
        else:
            tensor_to_allreduce.div_(dp_world_size)
            dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)

        if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
            tensor.copy_(tensor_to_allreduce)

        return tensor
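
    # Scaling arithmetic sketch (hypothetical values): with dp_world_size=4
    # and gradient_predivide_factor=2, gradients are multiplied by 1/2 before
    # the all-reduce and by 2/4 afterwards, a combined 1/4 = 1/dp_world_size,
    # balancing fp16 overflow and underflow risk around the all-reduce.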

    def average_tensor(self, tensor):
        if self.overlap_comm:
            torch.cuda.synchronize()
            stream = self.reduction_stream
        else:
            stream = torch.cuda.current_stream()

        with torch.cuda.stream(stream):
            if not self.reduce_scatter:
                self.gradient_reduction_w_predivide(tensor)
                return

            # Accumulate destination ranks and bucket offsets for each gradient slice.
            # Note: potential future optimization, record access pattern of parameters
            # in backward pass and partition gradients w.r.t. access pattern so that our
            # bucket is guaranteed to be contiguous w.r.t. ranks
            rank_and_offsets = []
            real_dp_process_group = []
            curr_size = 0
            prev_id = -1

            process_group = self.dp_process_group
            # count = 0
            for i, param, param_id in self.params_in_ipg_bucket:

                process_group = self.dp_process_group
                #Averages gradients at parameter level if ipg has a moe param
                #Otherwise averaging is done at the entire buffer level at the end of the loop
                # MoE params have different groups
                if self.ipg_bucket_has_moe_params:
                    process_group = self.expert_dp_process_group[
                        param.group_name] if is_moe_param(
                            param) else self.dp_process_group
                    param.grad.data.div_(dist.get_world_size(group=process_group))

                partition_ids = self.param_to_partition_ids[i][param_id]
                assert all([p_id < dist.get_world_size(group=process_group) for p_id in partition_ids]), f"world size {dist.get_world_size(group=process_group)} and p_ids: {partition_ids}"
                partition_size = self.partition_size[i]
                # Get all partition ids + their offsets
                partition_ids_w_offsets = []
                for partition_id in partition_ids:
                    offset = self.grad_start_offset[i][partition_id][param_id]
                    partition_ids_w_offsets.append((partition_id, offset))
                partition_ids_w_offsets.sort(key=lambda t: t[1])

                # Calculate rank and offsets for grad slices
                for idx in range(len(partition_ids_w_offsets)):
                    partition_id, offset = partition_ids_w_offsets[idx]

                    # if dist.get_rank() == 0 and count < 100:
                    #     print(f"Rank {dist.get_rank()} rank offset id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}")
                    # count += 1

                    # Calculate numel for grad slice depending on partition location
                    if idx == len(partition_ids_w_offsets) - 1:
                        # Last partition_id uses its own offset
                        numel = param.numel() - offset
                    else:
                        # Set numel to next partition's offset
                        numel = partition_ids_w_offsets[idx + 1][1] - offset

                    # Merge bucket ranges if they belong to the same rank
                    if partition_id == prev_id:
                        prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
                        rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + numel)
                    else:
                        rank_and_offsets.append((partition_id, curr_size, numel))
                        real_dp_process_group.append(process_group)
                    curr_size += numel
                    prev_id = partition_id

            if not self.ipg_bucket_has_moe_params:
                tensor.div_(dist.get_world_size(group=self.dp_process_group))

            async_handles = []
            for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets):
                grad_slice = tensor.narrow(0, int(bucket_offset), int(numel))
                # if dist.get_rank() == 0:
                #     print(f"Rank {dist.get_rank()} rank offset id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}")
                # dist.barrier()
                #dist.barrier()
                dst_rank = _get_global_rank(real_dp_process_group[i], dst)
                async_handle = dist.reduce(grad_slice,
                                           dst=dst_rank,
                                           group=real_dp_process_group[i],
                                           async_op=True)
                async_handles.append(async_handle)

            for handle in async_handles:
                handle.wait()
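
    # Slicing sketch (hypothetical layout): if the bucket holds two gradients
    # of 4 and 6 elements, and rank 0 owns the first 7 flattened elements while
    # rank 1 owns the last 3, the merged rank_and_offsets list comes out as
    # [(0, 0, 7), (1, 7, 3)]; each slice is then reduced asynchronously to the
    # global rank owning that partition.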

    ##############################################################################
    ############################# CPU Offload Methods#############################
    ##############################################################################
    def get_grad_position(self, group_id, tensor_list, first_offset, partition_size):
        current_offset = 0

        for i, tensor in enumerate(tensor_list):
            param_id = self.get_param_id(tensor)
            param_start_offset = 0

            num_elements = tensor.numel()
            tensor_offset = 0

            # we need to offset to get to the right element
            if i == 0 and first_offset > 0:
                tensor_offset = first_offset
                num_elements = num_elements - tensor_offset
                param_start_offset = first_offset

            # we don't need all elements of the tensor
            if num_elements > (partition_size - current_offset):
                num_elements = partition_size - current_offset

            self.grad_position[param_id] = [
                int(group_id),
                int(param_start_offset),
                int(current_offset),
                int(num_elements)
            ]
            current_offset += num_elements
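
    # Layout sketch (hypothetical sizes): for a partition of 10 elements whose
    # first tensor starts 2 elements in (first_offset=2), a 5-element first
    # tensor yields grad_position [group, 2, 0, 3] and a following 7-element
    # tensor yields [group, 0, 3, 7]: (group id, offset into the param's
    # gradient, offset into the partition, number of elements).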

    def update_overflow_tracker_for_param_grad(self, param):
        if param.grad is not None and self._has_inf_or_nan(param.grad.data):
            self.local_overflow = True

    def async_accumulate_grad_in_cpu_via_gpu(self, param):
        param_id = self.get_param_id(param)

        [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]

        # copy to a preexisting buffer to avoid memory allocation penalty
        dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(
            0,
            0,
            param.numel())

        #buffer for storing gradients for this parameter in CPU
        def buffer_to_accumulate_to_in_cpu():
            if not self.fp16_master_weights_and_gradients:
                return torch.zeros(param.numel(),
                                   dtype=param.dtype,
                                   device=self.device).pin_memory()
            else:
                return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(
                    0,
                    dest_offset,
                    num_elements)

        # accumulate gradients into param.grad or parts of it that belong to this partition
        def accumulate_gradients():
            if not self.fp16_master_weights_and_gradients:
                dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1),
                                  non_blocking=True)
                param.grad.data.view(-1).add_(dest_buffer)
            else:
                dest_buffer.narrow(0,
                                   source_offset,
                                   num_elements).copy_(
                                       self.accumulated_grads_in_cpu[param_id].view(-1),
                                       non_blocking=True)
                param.grad.data.view(-1).narrow(
                    0,
                    source_offset,
                    num_elements).add_(dest_buffer.narrow(0,
                                                          source_offset,
                                                          num_elements))

        #move accumulated gradients back to CPU
        def copy_gradients_to_cpu():
            if not self.fp16_master_weights_and_gradients:
                self.accumulated_grads_in_cpu[param_id].data.copy_(
                    param.grad.data.view(-1),
                    non_blocking=True)
            else:
                self.accumulated_grads_in_cpu[param_id].data.copy_(
                    param.grad.data.view(-1).narrow(0,
                                                    source_offset,
                                                    num_elements),
                    non_blocking=True)

        if param_id not in self.accumulated_grads_in_cpu:
            self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu()

        if self.micro_step_id > 0:
            accumulate_gradients()

        # at the boundary we will send 32bit directly
        if not self.is_gradient_accumulation_boundary:
            copy_gradients_to_cpu()

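    # Note: the two helpers below cache each parameter's gradient 2-norm so that,
    # under CPU offload, the global grad norm can later be assembled in
    # complete_grad_norm_calculation_for_cpu_offload without keeping full gradients around.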
    def set_norm_for_param_grad(self, param):
        param_id = self.get_param_id(param)
        accumulated_grad = self.accumulated_grads_in_cpu[
            param_id] if self.gradient_accumulation_steps > 1 else param.grad

        [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]

        start = source_offset
        accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements)

        self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2)

    def set_norm_for_param_grad_in_gpu(self, param):
        param_id = self.get_param_id(param)
        accumulated_grad = param.grad

        [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]

        start = source_offset
        accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements)

        self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2)

    def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param):
        param_id = self.get_param_id(param)

        [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]

        dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(
            0,
            dest_offset,
            num_elements)

        src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements)
        if not self.fp16_master_weights_and_gradients:
            src_tensor = src_tensor.float()

        dest_tensor.copy_(src_tensor, non_blocking=True)
        param.grad = None  #offload only

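    # Combines the per-parameter norms cached above into a single gradient norm,
    # summing squared norms across the data parallel (and model parallel) groups.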
    def complete_grad_norm_calculation_for_cpu_offload(self, params):
        total_norm = 0.0
        norm_type = 2.0
        for p in params:
            # Pipeline parallelism may replicate parameters. Avoid multi-counting.
            if hasattr(p, 'ds_pipe_replicated') and p.ds_pipe_replicated:
                continue

            if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
                param_id = self.get_param_id(p)
                # as some models have trainable parameters that are skipped in training,
                # their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run,
                # so they have no norm_for_param_grads
                if param_id in self.norm_for_param_grads:
                    param_norm = self.norm_for_param_grads[param_id]
                    total_norm += param_norm.item()**2
                else:
                    # As unused parameters in modules may not be expected sometimes,
                    # add an explicit error msg when it occurs and an option to
                    # avoid the error
                    assert self.ignore_unused_parameters, """
                        This assert indicates that your module has parameters that
                        were not used in producing loss.
                        You can avoid this assert by
                        (1) enabling the ignore_unused_parameters option in the zero_optimization config;
                        (2) making sure all trainable parameters and `forward` function
                            outputs participate in calculating loss.
                    """

        # Sum across all model parallel GPUs.
        total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
        torch.distributed.all_reduce(total_norm_cuda,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=self.dp_process_group)

        self._model_parallel_all_reduce(tensor=total_norm_cuda,
                                        op=torch.distributed.ReduceOp.SUM)

        total_norm = total_norm_cuda[0].item()**(1. / norm_type)

        if total_norm == float(
                'inf') or total_norm == -float('inf') or total_norm != total_norm:
            total_norm = -1

        return total_norm

    ############################################################################################
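    # Under CPU offload this accumulates the gradient, caches its norm, and copies it
    # into the fp32 partition buffer; otherwise gradients are packed into a contiguous
    # GPU-side partition buffer that is allocated lazily below.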
    def copy_grads_in_partition(self, param):
        if self.cpu_offload:

            if self.gradient_accumulation_steps > 1:
                self.async_accumulate_grad_in_cpu_via_gpu(param)

            if self.is_gradient_accumulation_boundary:
                self.set_norm_for_param_grad_in_gpu(param)

                self.update_overflow_tracker_for_param_grad(param)

                self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param)

            return
        #print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}")
        if self.grads_in_partition is None:
            self.grads_in_partition_offset = 0
            total_size = 0
            for group in self.params_in_partition:
                for param_in_partition in group:
                    total_size += param_in_partition.numel()

            see_memory_usage(f"before copying {total_size} gradients into partition")
            self.grads_in_partition = torch.empty(int(total_size),
                                                  dtype=self.dtype,
                                                  device=torch.cuda.current_device())
            see_memory_usage(f"after copying {total_size} gradients into partition")

        # The allreduce buffer will be rewritten. Copy the gradients in partition to a new buffer
        new_grad_tensor = self.grads_in_partition.view(-1).narrow(
            0,
            self.grads_in_partition_offset,
            param.numel())
        new_grad_tensor.copy_(param.grad.view(-1))
        param.grad.data = new_grad_tensor.data.view_as(param.grad)
        #print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}")
        self.grads_in_partition_offset += param.numel()

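    # Reduces everything staged in the current IPG bucket, then resets the bucket.
    # With contiguous_gradients the whole bucket (or one extra-large param) is averaged
    # in a single collective; otherwise grads fall back to bucketed allreduce by dtype.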
    def reduce_ipg_grads(self):
        if self.contiguous_gradients:
            if self.extra_large_param_to_reduce is not None:
                assert len(self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen"
                _, _, param_id = self.params_in_ipg_bucket[0]
                assert self.get_param_id(
                    self.extra_large_param_to_reduce) == param_id, "param in ipg bucket does not match extra-large param"
                self.average_tensor(self.extra_large_param_to_reduce.grad.view(-1))
                self.extra_large_param_to_reduce = None
            else:
                self.average_tensor(self.ipg_buffer[self.ipg_index])
        else:
            self.buffered_reduce_fallback(
                None,
                self.grads_in_ipg_bucket,
                elements_per_buffer=self.elements_in_ipg_bucket)

        if self.overlap_comm:
            stream = self.reduction_stream
        elif self.cpu_offload:
            # TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed.
            #            torch.cuda.synchronize()
            #            stream = self.copy_grad_stream
            stream = torch.cuda.current_stream()
        else:
            stream = torch.cuda.current_stream()

        with torch.cuda.stream(stream):
            for _, param, param_id in self.params_in_ipg_bucket:

                assert self.params_already_reduced[param_id] == False, \
                    f"The parameter {param_id} has already been reduced. \
                    Gradient computed twice for this partition. \
                    Multiple gradient reduction is currently not supported"

                self.params_already_reduced[param_id] = True

                if self.partition_gradients:
                    if not self.is_param_in_current_partition[param_id]:
                        if self.overlap_comm and self.contiguous_gradients is False:
                            # Clear grads of other partitions during the next reduction
                            # to avoid clearing them before the reduction is complete.
                            if self.previous_reduced_grads is None:
                                self.previous_reduced_grads = []
                            self.previous_reduced_grads.append(param)
                        else:
                            param.grad = None  #only if self.partition_gradients
                    elif self.contiguous_gradients:
                        self.copy_grads_in_partition(param)
                else:  # zero stage 1 - partition only optimizer state
                    if self.contiguous_gradients:
                        self.copy_grads_in_partition(param)

        self.grads_in_ipg_bucket = []
        self.params_in_ipg_bucket = []
        self.ipg_bucket_has_moe_params = False
        self.elements_in_ipg_bucket = 0
        #####################################################################

    def reduce_ready_partitions_and_remove_grads(self, param, i):
        if self.partition_gradients or self.is_gradient_accumulation_boundary:
            self.reduce_independent_p_g_buckets_and_remove_grads(param, i)

    def zero_reduced_gradients(self, partition_id, i):
        def are_all_related_partitions_reduced(params_id):
            for partition_id in self.param_to_partition_ids[i][params_id]:
                if not self.is_partition_reduced[i][partition_id]:
                    return False
            return True

        for params_id in self.is_grad_computed[i][partition_id]:
            if are_all_related_partitions_reduced(params_id):
                self.param_dict[params_id].grad = None  # dead code

    def flatten_and_print(self, message, tensors, start=0, n=5):
        flatten_tensor = self.flatten(tensors)

        def print_func():
            logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))

        self.sequential_execution(print_func, message)

    def get_grads_to_reduce(self, i, partition_id):
        def get_reducible_portion(key):
            grad = self.param_dict[key].grad
            total_elements = grad.numel()
            start = self.grad_start_offset[i][partition_id][key]
            num_elements = min(
                total_elements - start,
                self.partition_size[i] -
                self.grad_partition_insertion_offset[i][partition_id][key])
            if not pg_correctness_test:
                if num_elements == total_elements:
                    return grad
                else:
                    return grad.contiguous().view(-1).narrow(0,
                                                             int(start),
                                                             int(num_elements))
            else:
                if num_elements == total_elements:
                    return grad.clone()
                else:
                    return grad.clone().contiguous().view(-1).narrow(
                        0,
                        int(start),
                        int(num_elements))

        grads_to_reduce = []
        for key in self.is_grad_computed[i][partition_id]:
            grad = get_reducible_portion(key)
            grads_to_reduce.append(grad)
        return grads_to_reduce

    def sequential_execution(self, function, message, group=None):
        if group is None:
            group = self.dp_process_group
        if dist.get_rank(group=group) == 0:
            logger.info(message)
        for id in range(dist.get_world_size(group=group)):
            if id == dist.get_rank(group=group):
                function()
            dist.barrier(group=group)

    def set_none_gradients_to_zero(self, i, partition_id):
        for param_id in self.is_grad_computed[i][partition_id]:
            param = self.param_dict[param_id]
            if param.grad is None:
                param.grad = torch.zeros_like(param)

    ######################Reduction Related Methods##############################
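    # Note: the bucket is flattened and divided by the DP world size before the
    # collective, so the result is an average; it is optionally cast to
    # communication_data_type for the transfer and cast back on copy-out.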
    def allreduce_bucket(self,
                         bucket,
                         communication_data_type=torch.float16,
                         rank=None,
                         log=None):
        rank = None
        tensor = self.flatten(bucket)

        tensor_to_allreduce = tensor

        if pg_correctness_test:
            communication_data_type = torch.float32

        if communication_data_type != tensor.dtype:
            tensor_to_allreduce = tensor.to(communication_data_type)

        tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))

        if rank is None:
            #    "All Reducing"
            dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
        else:
            global_rank = _get_global_rank(self.dp_process_group, rank)
            dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)

        if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce:
            if rank is None or rank == dist.get_rank(group=self.dp_process_group):
                tensor.copy_(tensor_to_allreduce)

        return tensor

    def _clear_previous_reduced_grads(self):
        if self.previous_reduced_grads is not None:
            for param in self.previous_reduced_grads:
                param.grad = None  # overlap enabled
            self.previous_reduced_grads = None

    # if rank is specified do a reduction instead of an allreduce
    def allreduce_and_copy(self, small_bucket, rank=None, log=None):
        if self.overlap_comm:
            torch.cuda.synchronize()
            # It is safe to clear the previously reduced grads of other partitions
            self._clear_previous_reduced_grads()
            stream = self.reduction_stream
        else:
            stream = torch.cuda.current_stream()

        with torch.cuda.stream(stream):
            allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log)
            if rank is None or rank == dist.get_rank(group=self.dp_process_group):
                for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
                    buf.copy_(synced)

    def allreduce_no_retain(self,
                            bucket,
                            numel_per_bucket=500000000,
                            rank=None,
                            log=None):
        small_bucket = []
        numel = 0
        for tensor in bucket:
            small_bucket.append(tensor)
            numel = numel + tensor.numel()
            if numel > numel_per_bucket:
                self.allreduce_and_copy(small_bucket, rank=rank, log=None)
                small_bucket = []

        if len(small_bucket) > 0:
            self.allreduce_and_copy(small_bucket, rank=rank, log=log)

    # allows using reduction of gradients instead of using all_reduce

    def buffered_reduce_fallback(self,
                                 rank,
                                 grads,
                                 elements_per_buffer=500000000,
                                 log=None):
        split_buckets = split_half_float_double(grads)

        for i, bucket in enumerate(split_buckets):
            self.allreduce_no_retain(bucket,
                                     numel_per_bucket=elements_per_buffer,
                                     rank=rank,
                                     log=log)

    #############################################################################
    #############################################################################
    #############################################################################

    # views the tensor as multiple partitions and returns
    # those partitions
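    # e.g. a 10-element tensor with dp=4 yields partition sizes [3, 3, 2, 2],
    # since the remainder of numel % dp is spread over the first ranks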
    def get_data_parallel_partitions(self, tensor, group_id):
        partitions = []

        dp = dist.get_world_size(group=self.real_dp_process_group[group_id])
        dp_id = dist.get_rank(group=self.real_dp_process_group[group_id])

        total_num_elements = tensor.numel()

        base_size = total_num_elements // dp
        remaining = total_num_elements % dp

        start = 0
        for id in range(dp):
            partition_size = base_size
            if id < remaining:
                partition_size = partition_size + 1
            partitions.append(tensor.narrow(0, start, partition_size))
            start = start + partition_size
        return partitions

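    # Returns the tensors that fall (fully or partially) inside this rank's window
    # [partition_size * partition_id, partition_size * (partition_id + 1)), plus the
    # element offset into the first such tensor.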
    def get_partition_info(self, tensor_list, partition_size, partition_id):
        params_in_partition = []
        params_not_in_partition = []

        start_index = partition_size * partition_id
        end_index = partition_size * (partition_id + 1)

        current_index = 0
        first_offset = 0

        for tensor in tensor_list:

            tensor_size = tensor.numel()

            if (current_index >= start_index and current_index < end_index):
                params_in_partition.append(tensor)

            elif start_index > current_index and start_index < (current_index +
                                                                tensor_size):
                params_in_partition.append(tensor)

                assert (first_offset == 0), "This can happen at most once, as this must be the first tensor in the partition"
                first_offset = start_index - current_index

            else:
                params_not_in_partition.append(tensor)

            current_index = current_index + tensor_size

        return params_in_partition, params_not_in_partition, first_offset

    def zero_grad(self, set_grads_to_None=True):
        """
        Zero FP16 parameter grads.
        """
        # FP32 grad should never exist.
        # For speed, set model fp16 grad to None by default
        for group in self.bit16_groups:
            for p in group:
                if set_grads_to_None:
                    p.grad = None  # epilogue and in step
                else:
                    if p.grad is not None:
                        p.grad.detach_()
                        p.grad.zero_()

    def _model_parallel_all_reduce(self, tensor, op):
        """ Perform all reduce within model parallel group, if any.
        """
        if self.model_parallel_group is None:
            pass
        else:
            torch.distributed.all_reduce(tensor=tensor,
                                         op=op,
                                         group=self.model_parallel_group)

    def get_grad_norm_direct(self, gradients, params, norm_type=2):
        """Clips gradient norm of an iterable of parameters.

        This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
        added functionality to handle model parallel parameters. Note that
        the gradients are modified in place.

        Arguments:
            parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
                single Tensor that will have gradients normalized
            max_norm (float or int): max norm of the gradients
            norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
                infinity norm.

        Returns:
            Total norm of the parameters (viewed as a single vector).
        """
        norm_type = float(norm_type)
        if norm_type == inf:
            total_norm = max(g.data.abs().max() for g in gradients)
            total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
            torch.distributed.all_reduce(total_norm_cuda,
                                         op=torch.distributed.ReduceOp.MAX,
                                         group=self.dp_process_group)

            # Take max across all GPUs.
            self._model_parallel_all_reduce(tensor=total_norm_cuda,
                                            op=torch.distributed.ReduceOp.MAX)
            total_norm = total_norm_cuda[0].item()
        else:
            total_norm = 0.0
            # if dist.get_rank() == 0:
            #    logger.info(f"Total Norm beginning {total_norm}")
            for g, p in zip(gradients, params):
                # Pipeline parallelism may replicate parameters. Avoid multi-counting.
                if hasattr(p, 'ds_pipe_replicated') and p.ds_pipe_replicated:
                    continue
                if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
                    param_norm = g.data.double().norm(2)
                    total_norm += param_norm.item()**2
            # Sum across all model parallel GPUs.
            total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
            torch.distributed.all_reduce(total_norm_cuda,
                                         op=torch.distributed.ReduceOp.SUM,
                                         group=self.dp_process_group)

            self._model_parallel_all_reduce(tensor=total_norm_cuda,
                                            op=torch.distributed.ReduceOp.SUM)

            total_norm = total_norm_cuda[0].item()**(1. / norm_type)

        if total_norm == float(
                'inf') or total_norm == -float('inf') or total_norm != total_norm:
            total_norm = -1

        return total_norm

    # creates a flat fused tensor from the tensor list starting at the first_offset
    # in the first tensor of the list. If there are not enough elements in the tensor
    # list then the flat tensor will be padded with zeros
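    # e.g. first_offset=2 with grads of sizes [5, 4] and partition_size=8 yields a flat
    # tensor holding elements 2..4 of the first grad, all of the second, and 1 zero of padding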
    def get_flat_partition(self,
                           tensor_list,
                           first_offset,
                           partition_size,
                           dtype,
                           device,
                           return_tensor_list=False):
        flat_tensor_list = []
        current_size = 0
        for i, tensor in enumerate(tensor_list):
            if tensor.grad is None:
                tensor.grad = torch.zeros_like(tensor)

            tensor = tensor.grad
            num_elements = tensor.numel()
            tensor_offset = 0

            # we need to offset to get to the right element
            if i == 0 and first_offset > 0:
                tensor_offset = first_offset
                num_elements = num_elements - tensor_offset

            # we don't need all elements of the tensor
            if num_elements > (partition_size - current_size):
                num_elements = partition_size - current_size

            # we need a narrow view of the tensor based on the tensor offset and number of elements that
            # we need from this tensor
            if tensor_offset > 0 or num_elements < tensor.numel():
                flat_tensor_list.append(tensor.contiguous().view(-1).narrow(
                    0,
                    int(tensor_offset),
                    int(num_elements)))
            else:
                flat_tensor_list.append(tensor)

            current_size = current_size + num_elements

        # this means it's the last partition and does not align with the dp boundary. We need to pad before flattening
        if current_size < partition_size:
            flat_tensor_list.append(
                torch.zeros(int(partition_size - current_size),
                            dtype=dtype,
                            device=device))

        if return_tensor_list:
            return flat_tensor_list

        return self.flatten(flat_tensor_list)

    def free_grad_in_param_list(self, param_list):
        for p in param_list:
            p.grad = None  # in step

    def reset_cpu_buffers(self):
        self.norm_for_param_grads = {}
        self.local_overflow = False

    def log_timers(self, timer_names):
        if self.timers is None:
            return

        self.timers.log(names=list(timer_names))

    def start_timers(self, timer_names):
        if self.timers is None:
            return

        for name in timer_names:
            self.timers(name).start()

    def stop_timers(self, timer_names):
        if self.timers is None:
            return

        for name in timer_names:
            self.timers(name).stop()

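    # step() in outline: check overflow -> compute gradient norms -> unscale and clip ->
    # base optimizer step on the local fp32 partition -> copy results back into the
    # bit16 partitions -> all-gather the updated partitions across DP ranks.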
    def step(self, closure=None):
        """
        Not supporting closure.
        """
        self.micro_step_id = -1

        see_memory_usage("In step before checking overflow")

        # First compute norm for all group so we know if there is overflow
        self.check_overflow()
        OPTIMIZER_ALLGATHER = 'optimizer_allgather'
        OPTIMIZER_GRADIENTS = 'optimizer_gradients'
        OPTIMIZER_STEP = 'optimizer_step'
        timer_names = [OPTIMIZER_ALLGATHER, OPTIMIZER_GRADIENTS, OPTIMIZER_STEP]

        prev_scale = self.loss_scale
        self._update_scale(self.overflow)
        if self.overflow:

            if dist.get_rank() == 0:
                logger.info(
                    "[deepscale] OVERFLOW! Rank {} Skipping step. Attempted loss scale: {}, "
                    "reducing to {}".format(dist.get_rank(),
                                            prev_scale,
                                            self.loss_scale))

            see_memory_usage('After overflow before clearing gradients')
            self.zero_grad()
            if self.cpu_offload:
                self.reset_cpu_buffers()
            else:
                self.averaged_gradients = {}

            see_memory_usage('After overflow after clearing gradients')

            self.start_timers(timer_names)
            self.stop_timers(timer_names)
            return

        self.start_timers([OPTIMIZER_GRADIENTS])
        norm_groups = []
        single_partition_grad_groups = []
        skip = False
        for i, group in enumerate(self.bit16_groups):
            partition_id = dist.get_rank(group=self.real_dp_process_group[i])
            if self.cpu_offload:
                norm_groups.append(
                    self.complete_grad_norm_calculation_for_cpu_offload(
                        self.params_in_partition[i]))
                single_grad_partition = self.single_partition_of_fp32_groups[i].grad
            else:
                norm_groups.append(
                    self.get_grad_norm_direct(self.averaged_gradients[i],
                                              self.params_in_partition[i]))

                # free gradients for all the parameters that are not updated by this process
                self.free_grad_in_param_list(self.params_not_in_partition[i])

                # create flat gradients for parameters updated by this process
                # If we are the last partition, ensure our grads match the partition size; if not, pad with zero tensors
                if partition_id == dist.get_world_size(
                        group=self.real_dp_process_group[i]) - 1:
                    single_grad_partition = self.flatten_dense_tensors_aligned(
                        self.averaged_gradients[i],
                        int(self.partition_size[i])).to(
                            self.single_partition_of_fp32_groups[i].dtype)
                else:
                    single_grad_partition = self.flatten(self.averaged_gradients[i]).to(
                        self.single_partition_of_fp32_groups[i].dtype)
                assert single_grad_partition.numel() == self.partition_size[i], \
                    "averaged gradients have a different number of elements than partition size {} {} {} {}".format(
                        single_grad_partition.numel(), self.partition_size[i], i, partition_id)

                self.single_partition_of_fp32_groups[i].grad = single_grad_partition
                # release all the gradients since we have already created the necessary copy in dp_grad_partition
                self.free_grad_in_param_list(self.params_in_partition[i])

                self.averaged_gradients[i] = None

            single_partition_grad_groups.append(single_grad_partition)

        if self.has_moe_layers:
            self._average_expert_grad_norms(norm_groups)

        self._global_grad_norm = get_global_norm(norm_list=norm_groups)
        self.unscale_and_clip_grads(single_partition_grad_groups, self._global_grad_norm)
        self.stop_timers([OPTIMIZER_GRADIENTS])

        self.start_timers([OPTIMIZER_STEP])
        if self.deepspeed_adam_offload:
            from deepspeed.ops.adam import DeepSpeedCPUAdam
            if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half:
                bit16_param_groups = [[
                    bit16_partitions[partition_id]
                ] for bit16_partitions in self.parallel_partitioned_bit16_groups]
                self.optimizer.step(fp16_param_groups=bit16_param_groups)
            else:
                self.optimizer.step()
                for bit16_partitions, fp32_partition in zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups):
                    bit16_partitions[partition_id].data.copy_(fp32_partition.data)
        else:
            self.optimizer.step()

            # get rid of the fp32 gradients. Not needed anymore
            if not self.cpu_offload:
                for group in self.single_partition_of_fp32_groups:
                    group.grad = None  # in step

            for bit16_partitions, fp32_partition in zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups):
                bit16_partitions[partition_id].data.copy_(fp32_partition.data)

        self.stop_timers([OPTIMIZER_STEP])

        if self.cpu_offload:
            self.reset_cpu_buffers()

        self.start_timers([OPTIMIZER_ALLGATHER])
        # gather the updated weights from everyone
        for group_id, partitioned_params in enumerate(self.parallel_partitioned_bit16_groups):

            # Sequential AllGather: best of both worlds
            dp_world_size = dist.get_world_size(
                group=self.real_dp_process_group[group_id])
J
Jeff Rasley 已提交
1755 1756 1757 1758 1759 1760
            num_shards = max(
                1,
                partitioned_params[partition_id].numel() * dp_world_size //
                self.allgather_bucket_size)

            shard_size = partitioned_params[partition_id].numel() // num_shards

            # Enforce nccl/rccl alignment of start location of each shard
            shard_size = shard_size - (shard_size % self.nccl_start_alignment_factor)
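            # e.g. 1M elements per partition with dp_world_size=4 and a 2M-element
            # allgather bucket give num_shards=2 and shard_size=500K (before alignment)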

            num_elements = shard_size

            assert shard_size * num_shards <= partitioned_params[partition_id].numel()

            for shard_id in range(num_shards):

                if shard_id == (num_shards - 1):
                    num_elements = partitioned_params[partition_id].numel(
                    ) - shard_id * shard_size

                shard_list = []
                for dp_id in range(dp_world_size):
                    curr_shard = partitioned_params[dp_id].narrow(
                        0,
                        shard_id * shard_size,
                        num_elements).detach()
                    shard_list.append(curr_shard)
                dist.all_gather(shard_list,
                                shard_list[partition_id],
                                group=self.real_dp_process_group[group_id])
        self.stop_timers([OPTIMIZER_ALLGATHER])

        # TODO: we probably don't need this? just to be safe
        for i in range(len(norm_groups)):
            self._update_model_bit16_weights(i)

        self.log_timers(timer_names)
        see_memory_usage('After zero_optimizer step')

        return

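    # MoE: an expert's gradients live only on its expert data parallel group, so the
    # group's norm is averaged over that group (all-reduce of norm / group world size).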
    def _average_expert_grad_norms(self, norm_groups):
        for i, norm in enumerate(norm_groups):
            if self.is_moe_param_group[i]:
                scaled_norm = norm * 1.0 / float(
                    dist.get_world_size(group=self.real_dp_process_group[i]))
                scaled_norm_tensor = torch.tensor(scaled_norm,
                                                  device='cuda',
                                                  dtype=torch.float)
                dist.all_reduce(scaled_norm_tensor, group=self.real_dp_process_group[i])
                norm_groups[i] = scaled_norm_tensor.item()

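    # The grads still carry the loss scale, so total_norm is effectively norm * loss_scale.
    # When clipping triggers, combined_scale = clip * loss_scale; e.g. with loss_scale=1024,
    # total_norm / loss_scale = 2.0 and clip_grad=1.0, grads are multiplied by 1/2048.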
    def unscale_and_clip_grads(self, grad_groups_flat, total_norm):
        # compute combined scale factor for this group
        combined_scale = self.loss_scale
        if self.clip_grad > 0.:
            # norm is in fact norm*scale
            clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
            if clip > 1:
                combined_scale = clip * self.loss_scale

        for grad in grad_groups_flat:
            if isinstance(grad, list):
                sub_partitions = grad
                for g in sub_partitions:
                    g.data.mul_(1. / combined_scale)
            else:
                grad.data.mul_(1. / combined_scale)

    def _check_overflow(self, partition_gradients=True):
        self.overflow = self.has_overflow(partition_gradients)

    # `params` is a list / generator of torch.Variable
    def has_overflow_serial(self, params, is_grad_list=False):
        for p in params:
            if p.grad is not None and self._has_inf_or_nan(p.grad.data):
                return True

        return False

    def has_overflow_partitioned_grads_serial(self):
        for i in range(len(self.bit16_groups)):
            for j, grad in enumerate(self.averaged_gradients[i]):
                if grad is not None and self._has_inf_or_nan(grad.data, j):
                    return True
        return False

    def has_overflow(self, partition_gradients=True):
        if partition_gradients:
            overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial(
            )
            overflow_gpu = torch.cuda.ByteTensor([overflow])
            '''This will capture overflow across all data parallel and expert parallel processes
            since expert parallel processes are a subset of data parallel processes'''
            torch.distributed.all_reduce(overflow_gpu,
                                         op=torch.distributed.ReduceOp.MAX,
                                         group=self.dp_process_group)

        else:
            params = []
            for group in self.bit16_groups:
                for param in group:
                    params.append(param)

            overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients)
            overflow_gpu = torch.cuda.ByteTensor([overflow])

        # Since each model parallel GPU carries only part of the model,
        # make sure overflow flag is synced across all the model parallel GPUs
        self._model_parallel_all_reduce(tensor=overflow_gpu,
                                        op=torch.distributed.ReduceOp.MAX)

        overflow = overflow_gpu[0].item()
        return bool(overflow)

    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x, j=None):
        try:
            # if x is half, the .float() incurs an additional deep copy, but it's necessary if
            # Pytorch's .sum() creates a one-element tensor of the same type as x
            # (which is true for some recent versions of pytorch).
            cpu_sum = float(x.float().sum())
            # More efficient version that can be used if .sum() returns a Python scalar
            # cpu_sum = float(x.sum())
        except RuntimeError as instance:
            # We want to check if inst is actually an overflow exception.
            # RuntimeError could come from a different error.
            # If so, we still want the exception to propagate.
            if "value cannot be converted" not in instance.args[0]:
                raise
            return True
        else:
            if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
                return True
            return False

    def backward(self, loss, retain_graph=False):
        """
        :attr:`backward` performs the following steps:

        1. fp32_loss = loss.float()
        2. scaled_loss = fp32_loss*loss_scale
        3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
        """
        self.micro_step_id += 1

        if self.contiguous_gradients:
            self.ipg_buffer = []
            buf_0 = torch.empty(int(self.reduce_bucket_size),
                                dtype=self.dtype,
                                device=torch.cuda.current_device())
            self.ipg_buffer.append(buf_0)

            # Use double buffers to avoid data access conflict when overlap_comm is enabled.
            if self.overlap_comm:
                buf_1 = torch.empty(int(self.reduce_bucket_size),
                                    dtype=self.dtype,
                                    device=torch.cuda.current_device())
                self.ipg_buffer.append(buf_1)
            self.ipg_index = 0

        self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)

    def check_overflow(self, partition_gradients=True):
        self._check_overflow(partition_gradients)

    def _update_scale(self, has_overflow=False):
        self.loss_scaler.update_scale(has_overflow)

    # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
    def _get_state(self):
        return self.optimizer.state

    def _set_state(self, value):
        self.optimizer.state = value

    state = property(_get_state, _set_state)

    # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
    # (for example, to adjust the learning rate)
    def _get_param_groups(self):
        return self.optimizer.param_groups

    def _set_param_groups(self, value):
        self.optimizer.param_groups = value

    param_groups = property(_get_param_groups, _set_param_groups)

    # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
    def _get_loss_scale(self):
        return self.loss_scaler.loss_scale

    def _set_loss_scale(self, value):
        self.loss_scaler.cur_scale = value

    loss_scale = property(_get_loss_scale, _set_loss_scale)
    cur_scale = property(_get_loss_scale, _set_loss_scale)

    # Return group tensor after removing paddings that are added for alignment to DP world size.
    # This method works on the assumption that each group contains a single flattened tensor.
    def _get_groups_without_padding(self, groups_with_padding):
        groups_without_padding = []
        for i, group in enumerate(groups_with_padding):
            lean_length = group.numel() - self.groups_padding[i]
            groups_without_padding.append(group[:lean_length])

        return groups_without_padding

    # Return optimizer state after removing paddings that are added for alignment.
    def _get_state_without_padding(self, state_with_padding, padding):
        lean_state = {}
        for key, value in state_with_padding.items():
            if torch.is_tensor(value):
                lean_length = value.numel() - padding
                lean_state[key] = value[:lean_length]
            else:
                lean_state[key] = value

        return lean_state

    # Return base optimizer states.
    # This method assumes that each param group contains a single flattened tensor.
    def _get_base_optimizer_state(self):
        optimizer_groups_state = []
        for i, group in enumerate(self.optimizer.param_groups):
            p = group['params'][0]
            lean_optimizer_state = self._get_state_without_padding(
                self.optimizer.state[p],
                self.groups_padding[i])
            optimizer_groups_state.append(lean_optimizer_state)

        return optimizer_groups_state

    def state_dict(self):
        """
        Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
        This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
        of the contained Pytorch optimizer.
        Example::
            checkpoint = {}
            checkpoint['model'] = model.state_dict()
            checkpoint['optimizer'] = optimizer.state_dict()
            torch.save(checkpoint, "saved.pth")
        """
        state_dict = {}
        state_dict['loss_scaler'] = self.loss_scaler
        state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
        state_dict['overflow'] = self.overflow

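        # Elastic checkpoints store lean (padding-stripped) per-group states so that a
        # different DP degree can repartition them on load; rigid checkpoints store the
        # base optimizer state_dict as-is.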
        if self.elastic_checkpoint:
            state_dict[BASE_OPTIMIZER_STATE] = self._get_base_optimizer_state()
        else:
            state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()

        # Remove paddings for DP alignment to enable loading for other alignment values
        fp32_groups_without_padding = self._get_groups_without_padding(
            self.single_partition_of_fp32_groups)
        state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = fp32_groups_without_padding

        state_dict[ZERO_STAGE] = ZERO_OPTIMIZATION_GRADIENTS
        state_dict[PARTITION_COUNT] = self.partition_count

        state_dict[DS_VERSION] = version

        return state_dict

    # Restore base optimizer fp32 weights from elastic checkpoint by:
    # 1) Merging fp32 weights from checkpoints of all partitions
    # 2) Extracting fp32 weights for current partition from merged weights
    # 3) Using extracted weights to update base optimizer weights directly.
    def _restore_from_elastic_fp32_weights(self, all_state_dict):
        merged_single_partition_of_fp32_groups = []

        for i in range(len(self.single_partition_of_fp32_groups)):
            partition_id = dist.get_rank(group=self.real_dp_process_group[i])
            merged_partitions = [
                sd[SINGLE_PARTITION_OF_FP32_GROUPS][i] for sd in all_state_dict
            ]
            if self.is_moe_group(self.optimizer.param_groups[i]):
                ranks = self.get_ep_ranks(
                    group_name=self.optimizer.param_groups[i]['name'])
                merged_partitions = [merged_partitions[i] for i in ranks]
            flat_merged_partitions = self.flatten_dense_tensors_aligned(
                merged_partitions,
                self.nccl_start_alignment_factor *
                dist.get_world_size(group=self.real_dp_process_group[i]))
            dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, i)
            merged_single_partition_of_fp32_groups.append(dp_partitions[partition_id])

        for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups):
            current.data.copy_(saved.data)

    # Restore base optimizer fp32 weights from ZeRO fp16 or bfloat16 weights
    def _restore_from_bit16_weights(self):
        for group_id, (bit16_partitions, fp32_partition) in enumerate(zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)):
            partition_id = dist.get_rank(group=self.real_dp_process_group[group_id])
            fp32_partition.data.copy_(bit16_partitions[partition_id].data)

    # Refresh the fp32 master params from the fp16 or bfloat16 copies.
J
Jeff Rasley 已提交
2055
    def refresh_fp32_params(self):
        self._restore_from_bit16_weights()

    # Extract optimizer state for current partition from merged states of all partitions
    def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id):
        partition_id = dist.get_rank(group=self.real_dp_process_group[group_id])
        alignment = dist.get_world_size(group=self.real_dp_process_group[group_id])
        if torch.is_tensor(all_partition_states[0]):
            flat_merged_partitions = self.flatten_dense_tensors_aligned(
                all_partition_states,
                alignment)
            dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions,
                                                              group_id)
            return dp_partitions[partition_id]
        else:
            # Assume non-tensor states are not partitioned and equal across ranks, so return first one
            return all_partition_states[0]

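    # Rehydrates the base optimizer's per-group state, re-adding any alignment padding
    # (via _get_padded_tensor) that was stripped when the checkpoint was saved.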
    def _restore_base_optimizer_state(self, base_optimizer_group_states):
        if type(base_optimizer_group_states) == dict:
            base_optimizer_group_states = base_optimizer_group_states['state']
        for i, group in enumerate(self.optimizer.param_groups):
            p = group['params'][0]
            for key, saved in base_optimizer_group_states[i].items():
                if torch.is_tensor(self.optimizer.state[p][key]):
                    dst_tensor = self.optimizer.state[p][key]
                    src_tensor = _get_padded_tensor(saved, dst_tensor.numel())
                    self.optimizer.state[p][key].data.copy_(src_tensor.data)
                else:
                    self.optimizer.state[p][key] = saved

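    # Expert parallel ranks stride through the data parallel group; e.g. with
    # world_size=8 and expert_parallel_size=2, expert parallel rank 1 maps to ranks [1, 3, 5, 7].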
    def get_ep_ranks(self, rank=0, group_name=None):
        from deepspeed.utils import groups
        expert_parallel_size_ = groups._get_expert_parallel_world_size(group_name)
        world_size = groups._get_data_parallel_world_size()
        rank = groups._get_expert_parallel_rank(group_name)
        ranks = range(rank, world_size, expert_parallel_size_)
        return list(ranks)

    # Restore base optimizer state from elastic checkpoint by
    # 1) Merging optimizer state from checkpoints of all partitions
    # 2) Extracting optimizer state for current partition from the merged state
    # 3) Using the extracted value to directly update the base optimizer.
    def _restore_elastic_base_optimizer_state(self, all_state_dict):
        base_optimizer_group_states = []
        for i in range(len(self.optimizer.param_groups)):
            partition_states = {}
            all_partition_group_states = [
                sd[BASE_OPTIMIZER_STATE][i] for sd in all_state_dict
            ]

            if self.is_moe_group(self.optimizer.param_groups[i]):
                ranks = self.get_ep_ranks(
                    group_name=self.optimizer.param_groups[i]['name'])
                all_partition_group_states = [
                    all_partition_group_states[i] for i in ranks
                ]

            for key in all_partition_group_states[0].keys():
                all_partition_states = [
                    all_states[key] for all_states in all_partition_group_states
                ]
                partition_states[key] = self._partition_base_optimizer_state(
                    key,
                    all_partition_states,
                    i)
            base_optimizer_group_states.append(partition_states)

        self._restore_base_optimizer_state(base_optimizer_group_states)

    def load_state_dict(self,
                        state_dict_list,
                        load_optimizer_states=True,
                        load_from_fp32_weights=False):
        r"""Loading ZeRO checkpoint

        Arguments:
            state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
                Note that the number of saved partitions may differ from number of loading partitions to support
                changing GPU count, specifically DP world size, between saving and loading checkpoints.
            load_optimizer_states: Boolean indicating whether or not to load base optimizer states
            load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
            copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
        """
        """
        Loads a state_dict created by an earlier call to state_dict().
        If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
        whose parameters in turn came from ``model``, it is expected that the user
        will call ``model.load_state_dict()`` before
        ``fp16_optimizer_instance.load_state_dict()`` is called.
        Example::
            model = torch.nn.Linear(D_in, D_out).cuda().half()
            optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
            optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
            ...
            checkpoint = torch.load("saved.pth")
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        """

        # I think it should actually be ok to reload the optimizer before the model.
        dp_rank = dist.get_rank(group=self.dp_process_group)
        current_rank_sd = state_dict_list[dp_rank]
        self.loss_scaler = current_rank_sd['loss_scaler']
        self.dynamic_loss_scale = current_rank_sd['dynamic_loss_scale']
        self.overflow = current_rank_sd['overflow']

        ckpt_version = current_rank_sd.get(DS_VERSION, False)
        assert ckpt_version, "Empty ds_version in checkpoint, not clear how to proceed"
        ckpt_version = pkg_version.parse(ckpt_version)

        # zero stage 1 mode
        if not self.partition_gradients:
            required_version = pkg_version.parse("0.3.17")
            error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \
                "with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \
                "please use an older version of DeepSpeed (<= 0.5.8) and set 'legacy_stage1': true in your zero config json."
            assert required_version <= ckpt_version, f"Old version: {ckpt_version} {error_str}"

        ckpt_is_rigid = isinstance(current_rank_sd[BASE_OPTIMIZER_STATE], dict)

        if load_optimizer_states:
            if ckpt_is_rigid:
                # loading rigid ckpt into either rigid or elastic exec
                self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])
            else:
                if self.elastic_checkpoint:
                    # loading elastic into elastic exec
                    self._restore_elastic_base_optimizer_state(state_dict_list)
                else:
                    # loading an elastic checkpoint into rigid exec
                    self._restore_base_optimizer_state(
                        current_rank_sd[BASE_OPTIMIZER_STATE])

        # At this point, the optimizer's references to the model's fp32 parameters are up to date.
        # The optimizer's hyperparameters and internal buffers are also up to date.
        # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
        # out of date.  There are two options.
        # 1:  Refresh the master params from the model's fp16 params.
        # This requires less storage but incurs precision loss.
        # 2:  Save and restore the fp32 master copies separately.
        # We choose option 1 if changing DP degree and option 2 otherwise.
        #
        # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
        # of their associated parameters, because it's possible those buffers might not exist yet in
        # the current optimizer instance.  In our case, as long as the current FP16_Optimizer has been
        # constructed in the same way as the one whose state_dict we are loading, the same master params
        # are guaranteed to exist, so we can just copy_() from the saved master params.
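        #
        # Illustrative example of the precision tradeoff (values approximate): an fp32
        # master weight of 0.1234567 round-trips through fp16 as ~0.123474, so option 1
        # perturbs the master copy by ~1e-5, whereas option 2 restores it bit-exactly.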

        if load_from_fp32_weights:
            # option 2 from above
            if self.elastic_checkpoint and not ckpt_is_rigid:
                self._restore_from_elastic_fp32_weights(state_dict_list)
            else:
                # For non-elastic checkpoint, simply copying from saved weights of current rank is sufficient.
                for current, saved in zip(self.single_partition_of_fp32_groups,
                                          current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
                    src_tensor = _get_padded_tensor(saved, current.numel())
                    current.data.copy_(src_tensor.data)
        else:
            # option 1 from above
            self._restore_from_bit16_weights()
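
    # A minimal usage sketch for the method above (the checkpoint file name and
    # dictionary keys are hypothetical placeholders, not a fixed DeepSpeed format):
    #
    #   checkpoint = torch.load("saved.pth")
    #   model.load_state_dict(checkpoint['model'])      # restore bit16 model weights first
    #   zero_optimizer.load_state_dict(
    #       state_dict_list=[checkpoint['optimizer']],  # one entry per saved DP partition
    #       load_optimizer_states=True,
    #       load_from_fp32_weights=True)                # bit-exact fp32 master restore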


def _handle_overflow(cpu_sum, x, i):
    import math
    rank = dist.get_rank()
    if rank == 0:
        t_i = -1
        for v_i, v in enumerate(x.data.contiguous().view(-1)):
            if not math.isfinite(float(v)):
                t_i = v_i
                break
        logger.info(
            f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}"
        )
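
# Example of the diagnostic emitted above (illustrative values): for a 1024-element
# tensor whose element 17 is inf, rank 0 would log something like
#   "rank 0 detected overflow inf in tensor 3:17 shape torch.Size([1024])"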


def estimate_zero2_model_states_mem_needs(total_params,
                                          num_gpus_per_node=1,
                                          num_nodes=1,
                                          cpu_offload=True,
                                          additional_buffer_factor=1.5):

    total_gpus = num_nodes * num_gpus_per_node

    if cpu_offload:
        gpu_mem = 2 * total_params
        cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor
    else:
        gpu_mem = 4 * total_params + int(16 * total_params / total_gpus)
        cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor

    return int(cpu_mem), int(gpu_mem)
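
# Worked example of the formulas above (illustrative, default buffer factor 1.5):
# for total_params=1e9 on 2 nodes x 8 GPUs (total_gpus=16):
#   cpu_offload=True : gpu_mem = 2 * 1e9                     -> ~1.86GB (fp16 params only)
#                      cpu_mem = 1e9 * max(4 * 16, 16) * 1.5 -> ~89.41GB
#   cpu_offload=False: gpu_mem = 4 * 1e9 + 16 * 1e9 / 16     -> ~4.66GB
#                      cpu_mem = 1e9 * 4 * 8 * 1.5           -> ~44.70GB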


def model_to_params(model):
    # shared params calculated only once
    total_params = sum(
        dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
    return total_params
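
# A minimal sketch of why the data_ptr() dedup matters (hypothetical tied-weight
# model): two distinct Parameters viewing the same storage are counted only once.
#
#   emb = torch.nn.Embedding(10, 4)                   # 40 elements
#   lin = torch.nn.Linear(4, 10, bias=False)
#   lin.weight = torch.nn.Parameter(emb.weight.data)  # shares storage with emb.weight
#   model = torch.nn.Sequential(emb, lin)
#   model_to_params(model)                            # -> 40, not 80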


def estimate_zero2_model_states_mem_needs_all_live(model,
                                                   num_gpus_per_node=1,
                                                   num_nodes=1,
                                                   additional_buffer_factor=1.5):
    """
    Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
    for a given ``model`` and hardware setup.

    If you have an actual model object, use this function and everything will be derived
    automatically.

    If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass
    the ``total_params`` explicitly.

    Args:
        - ``model``: ``nn.Module`` object
        - ``num_gpus_per_node``: how many gpus per node (defaults to 1)
        - ``num_nodes``: how many nodes (defaults to 1)
        - ``additional_buffer_factor``: estimation factor (defaults to 1.5)

    """

    total_params = model_to_params(model)

    estimate_zero2_model_states_mem_needs_all_cold(
        total_params=total_params,
        num_gpus_per_node=num_gpus_per_node,
        num_nodes=num_nodes,
        additional_buffer_factor=additional_buffer_factor)
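
# Example invocation (a sketch; pulling the model from transformers is an assumption):
#
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("t5-3b")
#   estimate_zero2_model_states_mem_needs_all_live(model,
#                                                  num_gpus_per_node=8,
#                                                  num_nodes=1)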


def estimate_zero2_model_states_mem_needs_all_cold(total_params,
                                                   num_gpus_per_node=1,
                                                   num_nodes=1,
                                                   additional_buffer_factor=1.5):
    """
    Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
    for a given ``model`` and hardware setup.

    If it's a hypothetical model, use this function, which requires you to pass
    the ``total_params`` explicitly.

    If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything
    will be derived automatically.

    Args:
        - ``total_params``: total model params
        - ``num_gpus_per_node``: how many gpus per node (defaults to 1)
        - ``num_nodes``: how many nodes (defaults to 1)
        - ``additional_buffer_factor``: estimation factor (defaults to 1.5)

    """
    def format_options(cpu_offload):
        enabled = []
        device = f'{OFFLOAD_CPU_DEVICE:4}' if cpu_offload else "none"
        enabled.append(f"{OFFLOAD_OPTIMIZER}={device}")
        return ", ".join(enabled)

    nodes_str = "nodes" if num_nodes > 1 else "node"
    gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
    print(
        "Estimated memory needed for params, optim states and gradients for a:\n"
        f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
        f"SW: Model with {int(total_params/1e6)}M total params.")
    print("  per CPU  |  per GPU |   Options")
    for cpu_offload in [True, False]:
        cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(
            total_params=total_params,
            num_gpus_per_node=num_gpus_per_node,
            num_nodes=num_nodes,
            cpu_offload=cpu_offload,
            additional_buffer_factor=additional_buffer_factor
        )

        options_str = format_options(cpu_offload=cpu_offload)
        print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}")