"""
"Copyright 2020 The Microsoft DeepSpeed Team.
Licensed under the MIT license.
"""

import os
import time
import types
from enum import Enum
import functools
import itertools

import torch
from torch.distributed.distributed_c10d import _get_global_rank, group
import torch.distributed as dist

from .linear import LinearModuleForZeroStage3, LinearFunctionForZeroStage3
from .offload_constants import *

from ..utils import see_memory_usage
from deepspeed.utils import log_dist, init_distributed, logger
from deepspeed.utils.debug import debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name, debug_param2name, debug_param2name_id_shape_status, printflock, log_rank_file

from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus
from ..config import DeepSpeedConfig

param_count = 0
partitioned_param_data_shape = [1]


def print_rank_0(message, debug=False, force=False):
    rank = torch.distributed.get_rank()
    if rank == 0 and (debug or force):
        print(message)
    # other variations
    # - print for all ranks w/o interleaving
    # printflock(f"[{rank}] {message}")
    # - print to log file per rank
    # log_rank_file(rank, message)


def is_zero_param(parameter):
    if not torch.is_tensor(parameter):
        return False
    return hasattr(parameter, 'ds_id')


def _init_external_params(module):
    if not hasattr(module, '_external_params'):
        module._external_params = {}

        def external_parameters(self):
            return self._external_params.items()

        def all_parameters(self):
            # note: ``self.named_parameters`` is already bound, so the module
            # must not be passed again (it would be taken as the ``prefix``)
            return itertools.chain(self.named_parameters(recurse=False),
                                   external_parameters(self))

        module.ds_external_parameters = types.MethodType(external_parameters, module)
        module.all_parameters = types.MethodType(all_parameters, module)
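
        # The two injected methods let ZeRO-3 iterate a module's own
        # (non-recursive) parameters together with any externally registered
        # ones, e.g. ``list(module.all_parameters())`` yields both kinds.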


def register_external_parameter(module, parameter):
    """Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in
    the forward and backward passes of ``module``.

    This is used when a parameter is accessed outside of its owning module's
    ``forward()``. DeepSpeed must know to collect it from its partitioned
    state and when to release the memory.

    .. note::
        This is only applicable to training with ZeRO stage 3.

    Args:
        module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass.
        parameter (``torch.nn.Parameter``): The parameter to register.

    Raises:
        RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.


    Examples
    ========

    #. Register a weight that is used in another module's forward pass (line 6).
       Parameter ``layer1.weight`` is used by ``layer2`` (line 11).

        .. code-block:: python
            :linenos:
            :emphasize-lines: 6,11

            class ModuleZ3(torch.nn.Module):
                def __init__(self, *args):
                    super().__init__(self, *args)
                    self.layer1 = SomeLayer()
                    self.layer2 = OtherLayer()
                    deepspeed.zero.register_external_parameter(self, self.layer1.weight)

                def forward(self, input):
                    x = self.layer1(input)
                    # self.layer1.weight is required by self.layer2.forward
                    y = self.layer2(x, self.layer1.weight)
                    return y
    """
    if not isinstance(parameter, torch.nn.Parameter):
        raise RuntimeError('Parameter is not a torch.nn.Parameter')

    if not hasattr(module, '_external_params'):
        _init_external_params(module)

    key = id(parameter)
    module._external_params[key] = parameter


def unregister_external_parameter(module, parameter):
    """Reverses the effects of :meth:`register_external_parameter`.

    Args:
        module (``torch.nn.Module``): The module to affect.
        parameter (``torch.nn.Parameter``): The parameter to unregister.

    Raises:
        RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
        RuntimeError: If ``parameter`` is not a registered external parameter of ``module``.
    """
    if not isinstance(parameter, torch.nn.Parameter):
        raise RuntimeError('Parameter is not a torch.nn.Parameter')

    if not hasattr(module,
                   '_external_params') or id(parameter) not in module._external_params:
        raise RuntimeError('Parameter is not a registered external parameter of module.')

    key = id(parameter)
    del module._external_params[key]
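

# Usage sketch (illustrative; ``model`` and its submodules are hypothetical):
#
#   deepspeed.zero.register_external_parameter(model.decoder, model.embed.weight)
#   ...  # forward/backward passes in which the decoder reads the embedding weight
#   deepspeed.zero.unregister_external_parameter(model.decoder, model.embed.weight)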


class ZeroParamType(Enum):

    # same as regular pytorch parameters
    NORMAL = 1

    # parameters are partitioned across data-parallel processes
    PARTITIONED = 2

    # the parameter is held by a single process rank
    # and is not available on any other process
    REMOTE = 3


class ZeroParamStatus(Enum):
    # parameters are fully present and ready for use on all processes
    AVAILABLE = 1

    # parameters are either partitioned or remote on some or all processes
    NOT_AVAILABLE = 2

    # parameters are being gathered.
    INFLIGHT = 3


_orig_torch_empty = torch.empty


def empty_cuda_tensor_half(*size, **kwargs):
    if 'device' not in kwargs:
        kwargs['device'] = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
    tensor = _orig_torch_empty(*size, **kwargs)
    if tensor.is_floating_point():
        return tensor.half()
    else:
        return tensor


def new_cuda_tensor_half(cls, *args):
    device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
    tensor = torch.ones((1, 1), device=device).new_empty(*args)
    # only convert floating-point tensors to half; integer tensors keep
    # their original dtype
    if tensor.is_floating_point():
        return tensor.half()
    else:
        return tensor


def empty_cuda_tensor(*size, **kwargs):
    if 'device' not in kwargs:
        kwargs['device'] = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
    tensor = _orig_torch_empty(*size, **kwargs)
    return tensor


def new_cuda_tensor(cls, *args):
    device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
    tensor = torch.ones((1, 1), device=device).new_empty(*args)
    return tensor
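

# Illustrative effect of the overrides above (a sketch, not executed here):
# while an enabled Init context is active, plain allocations are redirected to
# the local GPU, e.g. ``torch.empty(4, 4)`` returns a tensor on
# ``cuda:<LOCAL_RANK>``, converted to ``torch.half`` when the context dtype is
# half.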


# https://stackoverflow.com/a/63851681/9201239
def get_all_subclasses(cls):
    subclass_list = []

    def recurse(cl):
        for subclass in cl.__subclasses__():
            subclass_list.append(subclass)
            recurse(subclass)

    recurse(cls)

    return set(subclass_list)
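

# For example, ``get_all_subclasses(torch.nn.modules.module.Module)`` returns
# every Module subclass defined so far (nn.Linear, nn.Conv2d, user-defined
# classes, ...) - exactly the set whose ``__init__`` methods are patched below.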


reuse_buffers = False
temp_contiguous_tensor = None
empty_buffers = {}


# Inserts _post_init_method at the end of the __init__ method
# for all subclasses of torch.nn.Module
class InsertPostInitMethodToModuleSubClasses(object):
    def __init__(self,
                 enabled=True,
                 mem_efficient_linear=True,
                 ds_config=None,
                 dtype=None):
        self.mem_efficient_linear = mem_efficient_linear
        self.enabled = enabled
        self._set_dtype(ds_config, dtype)
        assert self.dtype in [torch.half, torch.float], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.float]"

    def __enter__(self):
        if not self.enabled:
            return

        def partition_after(f):
            @functools.wraps(f)
            def wrapper(module, *args, **kwargs):

                # Important logic: run post_init only after the child's
                # __init__ has completed, and do nothing after the __init__ of
                # any of its parents and grandparents in the inheritance
                # ancestry. This way partitioning happens only once, when the
                # whole object is ready to be partitioned, and not before.
                # The child module often needs to tweak the weights - for
                # example by running a custom weight-init function - so if a
                # parent created the weight param, the child won't need to
                # gather it in order to tweak it.

                print_rank_0(f'Before initializing {module.__class__.__name__}',
                             force=False)

                is_child_module = False
                if not hasattr(module, "_ds_child_entered"):
                    # child's __init__ was called, since parents all see the same object they can now skip post_init
                    is_child_module = True
                    setattr(module, "_ds_child_entered", True)

                f(module, *args, **kwargs)

                if is_child_module:
                    # child's __init__ is done, now we can run a single post_init on the child object
                    delattr(module, "_ds_child_entered")

                    print_rank_0(f'Running post_init for {module.__class__.__name__}',
                                 force=False)
                    self._post_init_method(module)

                print_rank_0(
                    f'After initializing followed by post init for {module.__class__.__name__}',
                    force=False)

            return wrapper

        def _enable_class(cls):
            cls._old_init = cls.__init__
            cls.__init__ = partition_after(cls.__init__)

        def _init_subclass(cls, **kwargs):
            cls.__init__ = partition_after(cls.__init__)

        # Replace .__init__() for all existing subclasses of torch.nn.Module recursively
        for subclass in get_all_subclasses(torch.nn.modules.module.Module):
            # print(f"subclass={subclass.__module__}.{subclass.__qualname__}")
            _enable_class(subclass)

        # holding on to the current __init__subclass__ for exit
        torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__
        torch.Tensor.__old_new__ = torch.Tensor.__new__

        # Replace .__init__() for future subclasses of torch.nn.Module
        torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass)
        if self.dtype == torch.half:
            torch.Tensor.__new__ = new_cuda_tensor_half
            torch.empty = empty_cuda_tensor_half
        else:
            torch.Tensor.__new__ = new_cuda_tensor
            torch.empty = empty_cuda_tensor

        if self.mem_efficient_linear:
            print_rank_0(
                "nn.functional.linear has been overridden with a more memory efficient version. This will persist unless manually reset.",
                force=False)
            self.linear_bk = torch.nn.functional.linear
            torch.nn.functional.linear = LinearFunctionForZeroStage3.apply

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.enabled:
            return

        def _disable_class(cls):
            cls.__init__ = cls._old_init

        # Replace .__init__() for all existing subclasses of torch.nn.Module
        for subclass in get_all_subclasses(torch.nn.modules.module.Module):
            _disable_class(subclass)

        # Replace .__init__() for future subclasses of torch.nn.Module
        torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass

        torch.Tensor.__new__ = torch.Tensor.__old_new__
        torch.empty = _orig_torch_empty

        # undoing it here would also undo it during training
        # if self.mem_efficient_linear:
        #    torch.nn.functional.linear = self.linear_bk

        # Now that we cleaned up the metaclass injection, raise the exception.
        if exc_type is not None:
            return False

    # To be implemented by inheriting classes
    def _post_init_method(self, module):
        pass

    def _set_dtype(self, ds_config, dtype):
        if ds_config is not None and dtype is None:
            self.dtype = torch.half if ds_config.fp16_enabled else torch.float
        elif dtype is None:
            self.dtype = torch.half
        else:
            self.dtype = dtype
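
        # Resolution order: an explicit ``dtype`` argument wins; otherwise a
        # provided ds_config decides (``fp16_enabled`` selects ``torch.half``);
        # with neither, ``torch.half`` is assumed.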


# Replaces all parameters in module with Scattered Parameters
class Init(InsertPostInitMethodToModuleSubClasses):
    param_id = 0

    def __init__(self,
                 module=None,
                 data_parallel_group=None,
                 mem_efficient_linear=True,
                 remote_device=None,
                 pin_memory=False,
                 config_dict_or_path=None,
                 config=None,
                 enabled=True,
                 dtype=None,
                 mpu=None):
        """A context to enable massive model construction for training with
        ZeRO-3. Models are automatically partitioned (or, sharded) across the
        system and converted to half precision.

        Args:
            module (``torch.nn.Module``, optional): If provided, partition the model as
                if it was constructed in the context.
            data_parallel_group (``torch.distributed`` process group, optional):
                The group of processes to partition among. Defaults to all processes.
            mem_efficient_linear (bool, optional): Replace
                torch.nn.functional.linear with an implementation that allows
                DeepSpeed to partition parameters. Defaults to ``True``.
            remote_device (string, optional): The initial device to store model
                weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
                memory. The model may still be moved to GPU based on the
                offload settings for training. Defaults to param offload device if a config is
                defined, otherwise GPU.
            pin_memory (bool, optional): Potentially increase performance by
                using pinned memory for model weights. ``remote_device`` must be
                ``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
            config_dict_or_path (dict or ``json file``, optional): If provided, supplies the configuration
                for swapping fp16 params to NVMe.
            config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
            enabled (bool, optional): If ``False``, this context has no
                effect. Defaults to ``True``.
            dtype (``dtype``, optional): Can be used to change the data type of the parameters.
                Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None``.
            mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.

        This context accelerates model initialization and enables models that
        are too large to allocate in their entirety in CPU memory. It has the
        following effects:

        #. allocates tensors to either GPU or CPU memory or NVMe
        #. converts floating point tensors to half precision
        #. immediately partitions tensors among the group of data-parallel devices
        #. (*optional*) replaces ``torch.nn.functional.linear`` with a more
           memory-efficient implementation

        These modifications allow for models that exceed the size of local CPU/GPU
        memory/NVMe, but fit within the total system capacity (*i.e.*, aggregate CPU
        or GPU memory or NVMe) across all nodes. Consider initializing a model with one
        trillion parameters, whose weights occupy two terabytes (TB) in half
        precision. The initial CPU allocation in full precision requires 4TB of
        memory *per process*, and so a system with 8 GPUs per node would need 32TB of
        CPU memory due to data-parallel redundancies. Instead, by immediately
        partitioning tensors we remove the redundancies. The result is that
        regardless of the number of GPUs, we still only require the original 4TB. This
        allows for a linear increase in model size with the aggregate system memory.
        For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion
        parameter model with 4 nodes and 32 GPUs.

        Important: If the fp16 weights of the model cannot fit in the memory of a
        single GPU, this feature must be used.

        .. note::
            Initializes ``torch.distributed`` if it has not already been initialized.
            See :meth:`deepspeed.init_distributed` for more information.

        .. note::
            Can also be used as a decorator:

            .. code-block:: python

                @deepspeed.zero.Init()
                def get_model():
                    return MyLargeModel()

        .. note::
            Only applicable to training with ZeRO-3.

        Examples
        --------

        #. Allocate a model and partition it among all processes:

            .. code-block:: python

                with deepspeed.zero.Init():
                    model = MyLargeModel()


        #. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:

            .. code-block:: python

                with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
                                         remote_device="cpu",
                                         pin_memory=True):
                    model = MyLargeModel()


        #. Partition an already-allocated model in CPU memory:

            .. code-block:: python

                model = deepspeed.zero.Init(module=model)
        """
        if config is not None:
            config_dict_or_path = config
            logger.warning(
                'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.'
            )

        _ds_config = DeepSpeedConfig(config_dict_or_path,
                                     mpu) if config_dict_or_path is not None else None
        super().__init__(enabled=enabled,
                         mem_efficient_linear=mem_efficient_linear,
                         ds_config=_ds_config,
                         dtype=dtype)
        if not torch.distributed.is_initialized():
            init_distributed()
            assert torch.distributed.is_initialized(), "Parameters cannot be scattered without initializing torch.distributed"
        if data_parallel_group is None:
            self.ds_process_group = torch.distributed.group.WORLD
        else:
            self.ds_process_group = data_parallel_group

        self.rank = torch.distributed.get_rank(group=self.ds_process_group)
        self.world_size = torch.distributed.get_world_size(group=self.ds_process_group)

        # Local device is the device where the parameters are consumed
        # It is the device where parameters are fully instantiated using allgather
        self.local_device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))

        if _ds_config is not None and _ds_config.zero_config.offload_param is not None:
            remote_device = _ds_config.zero_config.offload_param[OFFLOAD_PARAM_DEVICE]
            pin_memory = _ds_config.zero_config.offload_param[OFFLOAD_PARAM_PIN_MEMORY]

        self._validate_remote_device(remote_device, _ds_config)

        # Remote device is the device where parameter partitions are stored
        # It can be the same as local_device, or it could be CPU or NVMe.
        self.remote_device = self.local_device if remote_device is None else remote_device
        self.pin_memory = pin_memory if (self.remote_device
                                         == OFFLOAD_CPU_DEVICE) else False

        # Enable fp16 param swapping to NVMe
        if self.remote_device == OFFLOAD_NVME_DEVICE:
            self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config)
        else:
            self.param_swapper = None

        # If we are provided an already-allocated module to prepare.
        if module is not None:
            assert isinstance(module, torch.nn.Module)
            self._convert_to_zero_parameters(module.parameters(recurse=True))

        self.use_all_gather_base = False
        try:
            from torch.distributed.distributed_c10d import _all_gather_base as all_gather
            self.use_all_gather_base = True
        except ImportError:
            logger.info(
                f"_all_gather_base API is not available in torch {torch.__version__}")

    def _convert_to_zero_parameters(self, param_list):
        for param in param_list:
            if is_zero_param(param):
                continue
            self._convert_to_deepspeed_param(param)
            param.partition()
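
        # Sketch of what conversion provides (illustrative): each converted
        # parameter gains ZeRO-3 attributes and collectives, e.g.
        #
        #   p = next(model.parameters())
        #   p.all_gather()    # materialize the full tensor on this rank
        #   print(p.ds_id, p.ds_numel, tuple(p.ds_shape))
        #   p.partition()     # release back to the partitioned state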

    def _validate_remote_device(self, remote_device, ds_config):
        if ds_config is not None:
            if remote_device in [None, OFFLOAD_CPU_DEVICE]:
                if ds_config.zero_config.offload_param is not None:
                    offload_param_device = ds_config.zero_config.offload_param[
                        OFFLOAD_PARAM_DEVICE]
                    assert offload_param_device != OFFLOAD_NVME_DEVICE, \
                        f"{OFFLOAD_PARAM_DEVICE} in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}."

            if remote_device == OFFLOAD_NVME_DEVICE:
                assert ds_config.zero_config.offload_param is not None, \
                f'{OFFLOAD_PARAM} must be defined in DeepSpeed Config if remote device is {OFFLOAD_NVME_DEVICE}.'

                assert ds_config.zero_config.offload_param[OFFLOAD_PARAM_NVME_PATH] is not None, \
                f'{OFFLOAD_PARAM_NVME_PATH} in DeepSpeed Config cannot be None if remote device is {OFFLOAD_NVME_DEVICE}'

    def _post_init_method(self, module):
        #see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False)
        print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
        see_memory_usage(
            f"Before converting and partitioning parmas in {module.__class__.__name__}",
            force=False)

        global param_count
        for param in module.parameters(recurse=False):
            param_count += param.numel()
            if not is_zero_param(param):
                self._convert_to_deepspeed_param(param)
                print_rank_0(
                    f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}"
                )
                param.partition()
        see_memory_usage(
            f"Param count {param_count}. After converting and partitioning parmas in {module.__class__.__name__}",
            force=False)

    def _convert_to_deepspeed_param(self, param):

        # Partitioned, Normal, Remote
        param.ds_param_type = ZeroParamType.PARTITIONED

        # Replicated vs Partitioned vs Inflight
        param.ds_status = ZeroParamStatus.AVAILABLE

        # Stores the shape of the original tensor
        param.ds_shape = param.shape

        # Stores the number of elements in the original parameter without padding
        param.ds_numel = param.numel()

        # Stores the partitioned copy of the tensor
        param.ds_tensor = None

        # Keeps track of how many active sub-modules need this param at any given point in time
        param.ds_active_sub_modules = 0

        # If this flag is true, then the parameters are replicated throughout training
        # and only partitioned before the step
        param.ds_persist = False

        # The group that the parameter is scattered across.
        param.ds_process_group = self.ds_process_group

        # This is set to the Async Param swapper if remote device is nvme
        # else this is set to None
        param.nvme_swapper = self.param_swapper

        # DeepSpeed Param ID
        param.ds_id = Init.param_id
        Init.param_id += 1

        def all_gather(param_list=None, async_op=False, hierarchy=0):
            cls = param
            if param_list is None:
                param_list = [cls]
            return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy)

        def partition(param_list=None, hierarchy=0, has_been_updated=False):
            cls = param
            print_rank_0(
                f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}"
            )
            if param_list is None:
                param_list = [cls]
            self._partition(param_list, has_been_updated=has_been_updated)

        def reduce_gradients_at_owner(param_list=None, hierarchy=0):
            cls = param
            if param_list is None:
                param_list = [cls]
            print_rank_0(
                f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner"
            )
            self._reduce_scatter_gradients(param_list)

        def partition_gradients(param_list=None,
                                partition_buffers=None,
                                hierarchy=0,
                                accumulate=False):
            cls = param
            print_rank_0(
                f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}"
            )
            if param_list is None:
                param_list = [cls]
                if isinstance(partition_buffers, torch.Tensor):
                    partition_buffers = [partition_buffers]

            self._partition_gradients(param_list,
                                      partition_buffers=partition_buffers,
                                      accumulate=accumulate)

        def aligned_size():
            return self._aligned_size(param)

        def padding_size():
            return self._padding_size(param)

        def partitioned_size():
            return self._partitioned_size(param)

        def convert_to_zero_parameters(param_list):
            self._convert_to_zero_parameters(param_list)

        # Collectives for gathering and partitioning parameters
        param.all_gather = all_gather
        param.partition = partition

        # Collective for averaging gradients
        param.reduce_gradients_at_owner = reduce_gradients_at_owner
        param.partition_gradients = partition_gradients

        # Partitioning size utilities
        param.aligned_size = aligned_size
        param.padding_size = padding_size
        param.partitioned_size = partitioned_size

        param.convert_to_zero_parameters = convert_to_zero_parameters

    def _aligned_size(self, param):
        return param.ds_numel + self._padding_size(param)

    def _padding_size(self, param):
        remainder = param.ds_numel % self.world_size
        return (self.world_size - remainder) if remainder else 0
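
    # Worked example: with world_size=4 and a parameter of ds_numel=10,
    # remainder=2, so _padding_size returns 2 and _aligned_size returns 12;
    # each rank then stores a partition of 12 // 4 = 3 elements.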

    def _partitioned_size(self, param):
        return param.ds_tensor.ds_numel

    def _ensure_availability_of_partitioned_params(self, params):
        swap_in_list = []
        swap_in_flight = []
        for param in params:
            if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
                assert param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
                swap_in_list.append(param)
            if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT:
                assert param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
                swap_in_flight.append(param)
        if len(swap_in_list) > 0:
            swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False)
        elif len(swap_in_flight) > 0:
            swap_in_flight[0].nvme_swapper.synchronize_reads()
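
        # Behavior note: partitions marked NOT_AVAILABLE are swapped in from
        # NVMe with a blocking read, while partitions already INFLIGHT only
        # need synchronize_reads() so their pending reads complete before use.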

    def _all_gather(self, param_list, async_op=False, hierarchy=None):

        # fetches from nvme if the partition is not available and in nvme
        self._ensure_availability_of_partitioned_params(param_list)

        handles = []
        all_gather_list = []
        for param in param_list:
            if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
                if async_op:
                    handle = self._allgather_param(param,
                                                   async_op=async_op,
                                                   hierarchy=hierarchy)
                    param.ds_status = ZeroParamStatus.INFLIGHT  # if async_op else ZeroParamStatus.AVAILABLE
                    handles.append(handle)
                else:
                    all_gather_list.append(param)

        if not async_op:
            # ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy)
            ret_value = self._allgather_params_coalesced(all_gather_list, hierarchy)

            for param in all_gather_list:
                param.ds_status = ZeroParamStatus.AVAILABLE
            return ret_value

        return handles

    def _partition(self, param_list, force=False, has_been_updated=False):
        for param in param_list:
            #print_rank_0(f"Before Partitioning Param {param.ds_id}")
            # self._param_status(param)
            self._partition_param(param, has_been_updated=has_been_updated)
            param.ds_status = ZeroParamStatus.NOT_AVAILABLE
            # if param.ds_tensor is not None:
            #    assert id(param.data) == id(param.ds_tensor.data), \
            #    "After the parameters are initially partitioned, make sure we are not recreating the partition."
            #print_rank_0(f"After Partitioning Param {param.ds_id}")
            # self._param_status(param)

    def _partition_param(self, param, buffer=None, has_been_updated=False):
        assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"

        global reuse_buffers
        #print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}")
        if param.ds_status is ZeroParamStatus.AVAILABLE:
            print_rank_0(
                f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}",
                force=False)
            # if reuse_buffers and False:
            #     numel = buffer.numel()
            #     buffer = param.data.view(-1)
            #     print_rank_0(
            #         "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers",
            #         force=False)
            #     if numel in empty_buffers:
            #         empty_buffers[numel].append(buffer)

            # if torch.distributed.get_rank():
            #    print(f"Releasing {param.data.numel()}")
            if param.ds_tensor is not None and not has_been_updated:

                #param.data = param.ds_tensor.data

                see_memory_usage(
                    f'Before partitioning param {param.ds_id} {param.shape}',
                    force=False)
                # param.data does not store anything meaningful in partitioned state
                param.data = torch.empty(1, dtype=self.dtype, device=param.device)

                see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}',
                                 force=False)

                if param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE:
                    print_rank_0(
                        f"Param {param.ds_id} partition released since it exists in nvme",
                        force=False)
                    param.nvme_swapper.remove_partition_and_release_buffers([param])

                return

            tensor_size = self._aligned_size(param)
            partition_size = tensor_size // self.world_size

            if param.ds_tensor is None:
                final_location = None
                if self.remote_device == OFFLOAD_NVME_DEVICE and self.param_swapper.swappable_tensor(
                        numel=partition_size):
                    final_location = OFFLOAD_NVME_DEVICE
                    buffer = self.param_swapper.get_buffer(param, partition_size)
                    partitioned_tensor = torch.empty(1,
                                                     dtype=param.dtype,
                                                     device=buffer.device)
                    partitioned_tensor.data = buffer.data
                    print_rank_0(
                        f"ID {param.ds_id} Initializing partition for the first time for nvme offload."
                    )

                else:
                    partitioned_tensor = torch.empty(
                        partition_size,
                        dtype=param.dtype,
                        device=OFFLOAD_CPU_DEVICE if self.remote_device
                        == OFFLOAD_NVME_DEVICE else self.remote_device)
                    if self.pin_memory:
                        partitioned_tensor = partitioned_tensor.pin_memory()

                partitioned_tensor.requires_grad = False
                param.ds_tensor = partitioned_tensor
                param.ds_tensor.ds_numel = partition_size
                param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
                param.ds_tensor.final_location = final_location

            start = partition_size * self.rank
            end = start + partition_size

            one_dim_param = param.contiguous().view(-1)

            if start < param.ds_numel and end <= param.ds_numel:
                src_tensor = one_dim_param.narrow(0, start, partition_size)

                param.ds_tensor.copy_(src_tensor)
                #partitioned_tensor = src_tensor.clone().detach().to(self.remote_device)

            else:
                # partitioned_tensor = torch.zeros(partition_size,
                #                                  dtype=param.dtype,
                #                                  device=self.remote_device )

                if start < param.ds_numel:
                    elements_to_copy = param.ds_numel - start
                    param.ds_tensor.narrow(0,
                                           0,
                                           elements_to_copy).copy_(
                                               one_dim_param.narrow(
                                                   0,
                                                   start,
                                                   elements_to_copy))

            #print(f"Remote device {self.remote_device}")

            #param.ds_tensor = partitioned_tensor

            #param.data = param.ds_tensor.data

            # param.data does not store anything meaningful in partitioned state

            see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}',
                             force=False)
            param.data = torch.ones(1, dtype=self.dtype).to(param.device)
            see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}',
                             force=False)

            if param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE:
                self.param_swapper.swap_out_and_release([param])
                print_rank_0(
                    f"ID {param.ds_id} Offloaded to nvme offload and buffers released.")
                see_memory_usage(
                    f"ID {param.ds_id} Offloaded to nvme offload and buffers released.",
                    force=False)

            print_rank_0(
                f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}"
            )

    def _param_status(self, param):
        if param.ds_tensor is not None:
            print_rank_0(
                f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}"
            )
        else:
            print_rank_0(
                f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}"
            )

    def _allgather_param(self, param, async_op=False, hierarchy=0):

        partition_size = param.ds_tensor.ds_numel

        tensor_size = partition_size * self.world_size
        aligned_param_size = self._aligned_size(param)
        assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}'

        print_rank_0(
            f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}"
        )

        see_memory_usage(
            f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ',
            force=False)
        flat_tensor = torch.zeros(aligned_param_size,
                                  dtype=param.dtype,
                                  device=param.device).view(-1)
        see_memory_usage(
            f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ',
            force=False)

        torch.cuda.synchronize()

        print_rank_0(
            f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}"
        )
        #        if not flat_tensor.numel() > 100000:
        #            replicated_tensor = flat_tensor.narrow(0,
        #                                                   0,
        #                                                   param.ds_numel).view(param.ds_shape)
        #            param.data = replicated_tensor.data
        #            return None
        if self.use_all_gather_base:
            # try the _all_gather_base on PyTorch master branch
            handle = dist._all_gather_base(flat_tensor,
                                           param.ds_tensor.cuda(),
                                           group=self.ds_process_group,
                                           async_op=async_op)
        else:
            partitions = []
            for i in range(self.world_size):
                partitions.append(
                    flat_tensor.narrow(0,
                                       partition_size * i,
                                       partition_size))

                if i == dist.get_rank(group=self.ds_process_group):
                    partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True)

            handle = dist.all_gather(partitions,
                                     partitions[self.rank],
                                     group=self.ds_process_group,
                                     async_op=async_op)

        replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape)
        param.data = replicated_tensor.data
        return handle
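
    # Worked example (illustrative): with world_size=4 and ds_numel=10, the
    # padded partition size is 3, so flat_tensor holds 12 elements after the
    # all-gather; the first 10 are viewed back into ds_shape and the trailing
    # 2 alignment-padding elements are dropped by the narrow().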

    def _allgather_params_coalesced(self, param_list, hierarchy=0):
        """ blocking call
        avoid explicit memory copy in _allgather_params
        """
        if len(param_list) == 0:
            return
        # collect local tensors and partition sizes
        partition_sizes = []
        local_tensors = []
        for param in param_list:
            partition_sizes.append(param.ds_tensor.ds_numel)
            local_tensors.append(param.ds_tensor.cuda())

        # allocate memory for allgather params
        allgather_params = []
        for psize in partition_sizes:
            tensor_size = psize * self.world_size
            flat_tensor = torch.empty(tensor_size,
                                      dtype=param_list[0].dtype,
                                      device=self.local_device).view(-1)
            flat_tensor.requires_grad = False
            allgather_params.append(flat_tensor)

        # launch
        launch_handles = []
        # backend = get_backend(self.ds_process_group)
        # with _batch_p2p_manager(backend):
        for param_idx, param in enumerate(param_list):
            input_tensor = local_tensors[param_idx].view(-1)

            if self.use_all_gather_base:
                # try the _all_gather_base from Pytorch master
                h = dist._all_gather_base(allgather_params[param_idx],
                                          input_tensor,
                                          group=self.ds_process_group,
                                          async_op=True)
            else:
                output_list = []
                for i in range(self.world_size):
                    psize = partition_sizes[param_idx]
                    partition = allgather_params[param_idx].narrow(0, i * psize, psize)
                    output_list.append(partition)
                    if not partition.is_cuda:
                        logger.warning(
                            f'param {param_idx}, partition {i} is not on CUDA, partition shape {partition.size()}'
                        )

                # back to old all_gather function signature
                h = dist.all_gather(output_list,
                                    input_tensor,
                                    group=self.ds_process_group,
                                    async_op=True)
            launch_handles.append(h)

        # Wait ensures the operation is enqueued, but not necessarily complete.
        launch_handles[-1].wait()

        # assign to param.data (not copy)
        for i, param in enumerate(param_list):
            gathered_tensor = allgather_params[i]
            param.data = gathered_tensor.narrow(0,
                                                0,
                                                param.ds_numel).view(param.ds_shape).data

        # guarantee the communication to be completed
        torch.cuda.synchronize()

        return None

    def _allgather_params(self, param_list, hierarchy=0):
        if len(param_list) == 0:
            return

        partition_size = sum([param.ds_tensor.ds_numel for param in param_list])

        tensor_size = partition_size * self.world_size
        flat_tensor = torch.empty(tensor_size,
                                  dtype=param_list[0].dtype,
                                  device=self.local_device)
        flat_tensor.requires_grad = False
        partitions = []
        for i in range(self.world_size):
            start = partition_size * i

            partitions.append(flat_tensor.narrow(0, start, partition_size))

            if i == self.rank:
                offset = 0
                for param in param_list:
                    param_numel = param.ds_tensor.ds_numel

                    partitions[i].narrow(0,
                                         offset,
                                         param_numel).copy_(param.ds_tensor.data)

                    offset += param_numel

        torch.distributed.all_gather(partitions,
                                     partitions[self.rank],
                                     group=self.ds_process_group,
                                     async_op=False)
        param_offset = 0

        for param in param_list:
            param_partition_size = param.ds_tensor.ds_numel
            param_size = param.ds_numel
            replicated_tensor = torch.empty(param.ds_shape,
                                            dtype=param.dtype,
                                            device=self.local_device)

            for i in range(self.world_size):

                start = i * partition_size

                param_start = i * param_partition_size

                if param_start < param_size:
                    numel_to_copy = min(param_size - param_start, param_partition_size)

                    part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy)

                    replicated_tensor.view(-1).narrow(0,
                                                      param_start,
                                                      numel_to_copy).copy_(part_to_copy)
            #param_offset += param.data.numel()
            param_offset += param.ds_tensor.ds_numel

            param.data = replicated_tensor.data

        return None

    def _reduce_scatter_gradients(self, param_list):
        #print_rank_0([param.grad for param in param_list])
        #assert any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered"

        handles_and_reduced_partitions = []
        for param in param_list:
            assert param.grad.numel(
            ) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter gradients whose size is not the same as the params"

            handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param))

        for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions):
            if handle is not None:
                handle.wait()

            # some ranks may have partitions that are padded to go beyond the grad size.
            # For these ranks the output of reduce scatter is a separate buffer and needs
            # to be copied in
            partition_size = param.ds_tensor.ds_numel
            start = self.rank * partition_size
            end = start + partition_size
            #print_rank_0("REduce scatter was executed for praam {param.ds_id}")
            if start < param.ds_numel and end > param.ds_numel:
                elements = param.ds_numel - start
                param.grad.view(-1).narrow(0,
                                           start,
                                           elements).copy_(
                                               reduced_partition.narrow(0,
                                                                        0,
                                                                        elements))

    def _reduce_scatter_gradient(self, param):

        partition_size = param.ds_tensor.ds_numel
        #output = torch.empty(partition_size, dtype=param.dtype, device=param.device)

        total_size = partition_size * self.world_size
        input_list = []

        for i in range(self.world_size):

            start = i * partition_size
            end = start + partition_size

            #print("before reduce scatter gradients")
            if start < param.ds_numel and end <= param.ds_numel:
                input = param.grad.view(-1).narrow(0, start, partition_size)
            else:
                input = torch.zeros(partition_size,
                                    dtype=param.dtype,
                                    device=param.device)

                if start < param.ds_numel:
                    elements = param.ds_numel - start
                    input.narrow(0,
                                 0,
                                 elements).copy_(
                                     param.grad.view(-1).narrow(0,
                                                                start,
                                                                elements))
            #print("after reduce scatter gradients")
            input_list.append(input)

        rank = torch.distributed.get_rank(group=self.ds_process_group)
        handle = torch.distributed.reduce_scatter(input_list[rank],
                                                  input_list,
                                                  group=self.ds_process_group,
                                                  async_op=True)

        return handle, input_list[rank]
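
    # Semantics sketch: every rank contributes world_size padded gradient
    # slices; after reduce_scatter, rank r holds the element-wise sum over all
    # ranks of slice r, i.e. its own partition of the fully reduced gradient.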

    def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False):
        if partition_buffers is None:
            partition_buffers = [None] * len(param_list)

        for param, partition_buffer in zip(param_list, partition_buffers):
            self._partition_gradient(param,
                                     partition_buffer=partition_buffer,
                                     accumulate=accumulate)

    def _partition_gradient(self, param, partition_buffer=None, accumulate=False):
        print_rank_0(
            f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}"
        )
        see_memory_usage("Before partitioning gradients", force=False)
        partition_size = param.ds_tensor.ds_numel

        if partition_buffer is None:
            assert not accumulate, "No buffer to accumulate to"
            partition_buffer = torch.zeros(partition_size,
                                           dtype=param.dtype,
                                           device=param.device)
        else:
            assert partition_buffer.numel(
            ) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should match the size of param.ds_tensor {partition_size}"

        rank = torch.distributed.get_rank(group=self.ds_process_group)
        start = partition_size * rank
        end = start + partition_size
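        # this rank owns elements [start, end) of the flattened, padded gradient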

        dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size)

        #print("before partition gradients")
        if start < param.ds_numel:
            elements = min(param.ds_numel - start, partition_size)

            dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements)
            src_tensor = param.grad.view(-1).narrow(0, start, elements)

            # just copy the grad partition to the buffer
            if not accumulate:
                dest_tensor.copy_(src_tensor)

            # if source and destination are on the same device,
            # add to the provided buffer
            elif src_tensor.device == dest_tensor.device:
                dest_tensor.add_(src_tensor)

            # if source and destination are on different devices, first copy
            # dest into a staging buffer on the source device, add there, and
            # copy the result back. This seems to run faster when src is on the
            # GPU and dest is on the CPU; adding directly into CPU memory is
            # very slow.
            else:
                acc_tensor = torch.empty(src_tensor.numel(),
                                         dtype=param.dtype,
                                         device=param.device)

                acc_tensor.copy_(dest_tensor)
                acc_tensor.add_(src_tensor)
                dest_tensor.copy_(acc_tensor)
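
                # The same bounce-buffer pattern in isolation (a sketch with
                # hypothetical tensors, not part of this module):
                #
                #   gpu_grad = torch.randn(1024, device="cuda")  # src on GPU
                #   cpu_part = torch.zeros(1024)                 # dest on CPU
                #   staging = torch.empty_like(gpu_grad)         # GPU buffer
                #   staging.copy_(cpu_part)    # CPU -> GPU
                #   staging.add_(gpu_grad)     # fast on-device add
                #   cpu_part.copy_(staging)    # GPU -> CPU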

        # repoint the gradient at the partitioned buffer; the full-size
        # gradient storage is no longer referenced and can be freed
        param.grad.data = dest_tensor_full_buffer.data
        see_memory_usage("After partitioning gradients", force=False)


class GatheredParameters:
    def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True):
        """A context that collects parameters that were partitioned via a
        :class:`deepspeed.zero.Init` context. The parameters are partitioned
        again upon exit.

        Args:
            params (``torch.nn.Parameter``): A single parameter or a list of parameters to collect.
                It's assumed that all parameters are zero params.
            modifier_rank (int, optional): If specified, this rank's parameter will be
                broadcast on exit from the context. This argument is required if ``params`` are
                modified, so that all processes have a consistent view of the data. Defaults
                to ``None``.
            fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be
                registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`.
            enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``.

        .. important:: Make sure to pass a ``modifier_rank`` that is not
            ``None`` (e.g. ``modifier_rank=0``) if you need the GPU memory
            allocated by the gather to be released upon exit from the context
            manager; without one, the parameters are not re-partitioned on exit.

        Examples
        ========

        #. Allocate a partitioned module, initialize its weight on rank 0, and update all
           processes.

            .. code-block:: python

                with deepspeed.zero.Init():
                    linear = torch.nn.Linear(1000,1000)

                with deepspeed.zero.GatheredParameters(linear.weight,
                                                       modifier_rank=0):
                    if torch.distributed.get_rank() == 0:
                        linear.weight.zero_()


        #. Collect a partitioned weight to pass to another module during
           training. The parameter will be registered as an external parameter
           and made available during the backward pass.

            .. code-block:: python
                :emphasize-lines: 6

                def forward(self, input):
                    x = self.layer1(input)

                    # self.layer1.weight is required by self.layer2.forward
                    with deepspeed.zero.GatheredParameters(self.layer1.weight,
                                                           fwd_module=self):
                        y = self.layer2(x, self.layer1.weight)
                    return y


        #. Pretrained model loading

            .. code-block:: python

                with deepspeed.zero.Init():
                    model = MyModel()

                state_dict = torch.load(model_path, map_location="cpu")

                def load(module: nn.Module, prefix=""):
                    # because zero3 puts placeholders in model params, this context
                    # manager gathers (unpartitions) the params of the current layer,
                    # loads them from the state dict, and then re-partitions them
                    with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                        if torch.distributed.get_rank() == 0:
                            # _load_from_state_dict is a private PyTorch API; it also
                            # expects metadata, strictness and error-tracking arguments
                            module._load_from_state_dict(state_dict, prefix, {}, True, [], [], [])

                    for name, child in module._modules.items():
                        if child is not None:
                            load(child, prefix + name + ".")

                load(model, prefix="")
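
        With ``map_location="cpu"`` and ``modifier_rank=0``, only rank 0 reads
        the full ``state_dict`` into host memory; every other rank receives the
        loaded values through the broadcast performed when each
        ``GatheredParameters`` context exits.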

        If this approach is not used, the full model is first copied to each GPU.
        For models bigger than the memory of a single GPU, this approach is required.
        """

        self.enabled = enabled
        if not enabled:
            return

        if not isinstance(params, list):
            params = [params]

        # enable if at least one is zero-param, otherwise a noop
        if not any(is_zero_param(p) for p in params):
            self.enabled = False
            return

        self.params = [p for p in params if hasattr(p, "ds_id")]
        self.src_rank = None
        if modifier_rank is not None:
            if self.params[0].ds_process_group == torch.distributed.group.WORLD:
                self.src_rank = modifier_rank
            else:
                # A group was specified; convert DP rank to global rank
                self.src_rank = _get_global_rank(self.params[0].ds_process_group,
                                                 modifier_rank)
        self.fwd_module = fwd_module
        if self.fwd_module is not None:
            # is a no-op if already registered
            for p in self.params:
                register_external_parameter(self.fwd_module, p)

    def __enter__(self):
        if not self.enabled:
            return
        # one batched all-gather materializes every parameter in the list
        self.params[0].all_gather(param_list=self.params)

    def __exit__(self, *exc):
        if not self.enabled:
            return
        # without a modifier_rank there is nothing to broadcast; note that the
        # parameters are not re-partitioned in this case
        if self.src_rank is None:
            return

        # broadcast the modifying rank's values so every process sees the
        # update, then re-partition the parameters
        handles = [
            torch.distributed.broadcast(p,
                                        self.src_rank,
                                        group=p.ds_process_group,
                                        async_op=True) for p in self.params
        ]
        for h in handles:
            h.wait()
        self.params[0].partition(param_list=self.params, has_been_updated=True)