partition_parameters.py
"""
"Copyright 2020 The Microsoft DeepSpeed Team.
Licensed under the MIT license.
"""

import math
import os
import time
import types
from typing import Callable, Iterable
from enum import Enum
import functools
import itertools
from typing import List

import torch
from torch import Tensor
import torch.distributed as dist
from torch.distributed.distributed_c10d import _get_global_rank, group
from torch.nn import Module
from torch.nn import Parameter

from .linear import LinearModuleForZeroStage3, LinearFunctionForZeroStage3
from .offload_constants import *

import deepspeed
from ..utils import get_only_unique_item, see_memory_usage
from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks
from deepspeed.utils import init_distributed, instrument_w_nvtx, logger
from deepspeed.utils.debug import debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name, debug_param2name, debug_param2name_id_shape_status, printflock, log_rank_file

from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus

param_count = 0
partitioned_param_data_shape = [0]

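# Prefer the fused ``_all_gather_base`` collective when this torch build exposes
# it, since it gathers directly into a single pre-allocated output tensor;
# otherwise fall back to ``all_gather`` over chunked views of the output.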
if hasattr(torch.distributed, "_all_gather_base"):

    def torch_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group):
        try:
            return instrument_w_nvtx(torch.distributed._all_gather_base)(
                output_tensor,
                input_tensor,
                group=group,
                async_op=True,
            )
        except RuntimeError as e:
            raise RuntimeError(
                f"output_tensor: {output_tensor.device}, input_tensor: {input_tensor.device}"
            ) from e
else:
    logger.warning(
        "unable to find torch.distributed._all_gather_base. will fall back to "
        "torch.distributed.all_gather which will result in suboptimal performance. "
        "please consider upgrading your pytorch installation.")

    def torch_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group):
        output_tensors = list(
            torch.chunk(output_tensor,
                        torch.distributed.get_world_size(group)))
        return instrument_w_nvtx(torch.distributed.all_gather)(
            output_tensors,
            input_tensor,
            group=group,
            async_op=True,
        )


def print_rank_0(message, debug=False, force=False):
    rank = torch.distributed.get_rank()
    if rank == 0 and (debug or force):
        print(message)
    # other variations
    # - print for all ranks w/o interleaving
    # printflock(f"[{rank}] {message}")
    # - print to log file per rank
    # log_rank_file(rank, message)


def debug_rank0(msg: str) -> None:
    if torch.distributed.get_rank() == 0:
        logger.debug(msg)


def is_zero_param(parameter):
    if not torch.is_tensor(parameter):
        return False
    return hasattr(parameter, 'ds_id')


def _init_external_params(module):
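    """Give ``module`` an ``_external_params`` registry, plus ``ds_external_parameters``
    and ``all_parameters`` accessors that expose externally registered parameters
    alongside the module's own (non-recursive) parameters."""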
    if not hasattr(module, '_external_params'):
        module._external_params = {}

        def external_parameters(self):
            return self._external_params.items()

        def all_parameters(self):
            return itertools.chain(self.named_parameters(self,
                                                         recurse=False),
                                   external_parameters(self))

        module.ds_external_parameters = types.MethodType(external_parameters, module)
        module.all_parameters = types.MethodType(all_parameters, module)


def register_external_parameter(module, parameter):
    """Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in
    the forward and backward passes of ``module``.

    This is used when a parameter is accessed outside of its owning module's
    ``forward()``. DeepSpeed must know to collect it from its partitioned
    state and when to release the memory.

    .. note::
        This is only applicable to training with ZeRO stage 3.

    Args:
        module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass.
        parameter (``torch.nn.Parameter``): The parameter to register.

    Raises:
        RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.


    Examples
    ========

    #. Register a weight that is used in another module's forward pass (line 6).
       Parameter ``layer1.weight`` is used by ``layer2`` (line 11).

        .. code-block:: python
            :linenos:
            :emphasize-lines: 6,11

            class ModuleZ3(torch.nn.Module):
                def __init__(self, *args):
                    super().__init__()
                    self.layer1 = SomeLayer()
                    self.layer2 = OtherLayer()
                    deepspeed.zero.register_external_parameter(self, self.layer1.weight)

                def forward(self, input):
                    x = self.layer1(input)
                    # self.layer1.weight is required by self.layer2.forward
                    y = self.layer2(x, self.layer1.weight)
                    return y
    """
    if not isinstance(parameter, torch.nn.Parameter):
        raise RuntimeError('Parameter is not a torch.nn.Parameter')

    if not hasattr(module, '_external_params'):
        _init_external_params(module)

    key = id(parameter)
    module._external_params[key] = parameter


def unregister_external_parameter(module, parameter):
    """Reverses the effects of :meth:`register_external_parameter`.

    Args:
        module (``torch.nn.Module``): The module to affect.
        parameter (``torch.nn.Parameter``): The parameter to unregister.

    Raises:
        RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``.
        RuntimeError: If ``parameter`` is not a registered external parameter of ``module``.
    """
    if not isinstance(parameter, torch.nn.Parameter):
        raise RuntimeError('Parameter is not a torch.nn.Parameter')

    if not hasattr(module,
                   '_external_params') or id(parameter) not in module._external_params:
        raise RuntimeError('Parameter is not a registered external parameter of module.')

    key = id(parameter)
    del module._external_params[key]


class ZeroParamType(Enum):

    # same as regular pytorch parameters
    NORMAL = 1

    # parameters are partitioned across the data parallel processes
    PARTITIONED = 2

    # the parameter is held by a single process rank
    # and is not available on all other processes
    REMOTE = 3


class ZeroParamStatus(Enum):
    # parameters are fully present and ready for use on all processes
    AVAILABLE = 1

    # parameters are either partitioned or remote in some or all processes
    NOT_AVAILABLE = 2

    # parameters are being gathered.
    INFLIGHT = 3


_orig_torch_empty = torch.empty
_orig_torch_zeros = torch.zeros
_orig_torch_ones = torch.ones
_orig_torch_full = torch.full


def zero_wrapper_for_fp_tensor_constructor(fn: Callable,
                                           target_fp_dtype: torch.dtype) -> Callable:
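    """Wrap a tensor constructor (e.g. ``torch.empty``) so that new tensors default
    to the local CUDA device and any floating-point result is cast to
    ``target_fp_dtype``; non-floating-point tensors are returned unchanged."""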
    def wrapped_fn(*args, **kwargs) -> Tensor:
        if kwargs.get("device", None) is None:
            kwargs['device'] = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
        tensor: Tensor = fn(*args, **kwargs)
        if tensor.is_floating_point():
            tensor = tensor.to(target_fp_dtype)

        return tensor

    return wrapped_fn


def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable:
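    """Build a replacement for ``torch.Tensor.__new__`` that allocates on the local
    CUDA device and casts floating-point tensors to ``dtype``."""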
    def new_tensor(cls, *args) -> Tensor:
        device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
        tensor = _orig_torch_empty(0, device=device).new_empty(*args)
        if tensor.is_floating_point():
            tensor = tensor.to(dtype)

        return tensor

    return new_tensor


# https://stackoverflow.com/a/63851681/9201239
def get_all_subclasses(cls):
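    """Recursively collect every direct and transitive subclass of ``cls``."""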
    subclass_list = []

    def recurse(cl):
        for subclass in cl.__subclasses__():
            subclass_list.append(subclass)
            recurse(subclass)

    recurse(cls)

    return set(subclass_list)


@instrument_w_nvtx
def free_param(param: Parameter) -> None:
    """Free underlying storage of a parameter."""
    assert not param.ds_active_sub_modules, param.ds_summary()
    if param.data.is_cuda:
        # need to make sure that we don't free the parameter while it is still
        # being used for computation
        param.data.record_stream(torch.cuda.current_stream())
    # param.data doesn't store anything meaningful in partitioned state
    param.data = torch.empty(0, dtype=param.dtype, device=param.device)
    param.ds_status = ZeroParamStatus.NOT_AVAILABLE


reuse_buffers = False
temp_contiguous_tensor = None
empty_buffers = {}


# Inserts _post_init_method at the end of init method
# for all sub classes of torch.nn.Module
class InsertPostInitMethodToModuleSubClasses(object):
    def __init__(self,
                 enabled=True,
                 mem_efficient_linear=True,
                 ds_config=None,
                 dtype=None):
        self.mem_efficient_linear = mem_efficient_linear
        self.enabled = enabled
        self._set_dtype(ds_config, dtype)
        assert self.dtype in [torch.half, torch.bfloat16, torch.float], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]"

    def __enter__(self):
        if not self.enabled:
            return

        def apply_with_gather(orig_module_apply_fn: Callable) -> Callable:
            """many models make use of child modules like Linear or Embedding which
            perform their own weight initialization in their __init__ methods,
            but will then have more weight initialization in a parent module's __init__
            method that modifies weights of child modules, which is typically done
            using the Module.apply method.

            since the Init context manager partitions child modules immediately after
            they are initialized, without modifying apply we would entirely skip
            any initialization done by parent modules.

            to get around this issue, we wrap the function passed to Module.apply
            so that the applied function is applied to child modules correctly.
            """
            def get_wrapped_fn_to_apply(fn_to_apply: Callable) -> Callable:
                if hasattr(fn_to_apply, "wrapped"):
                    return fn_to_apply

                @functools.wraps(fn_to_apply)
                def wrapped_fn_to_apply(module_to_apply_fn_to: Module) -> None:
                    """gathers parameters before calling apply function. afterwards
                    parameters are broadcasted to ensure consistency across all ranks
                    then re-partitioned.

                    takes the following steps:
                    1. allgathers parameters for the current module being worked on
                    2. calls the original function
                    3. broadcasts root rank's parameters to the other ranks
                    4. re-partitions the parameters
                    """
                    if not all(
                            is_zero_param(p)
                            for p in module_to_apply_fn_to.parameters(recurse=False)):
                        raise RuntimeError(
                            f"not all parameters for {module_to_apply_fn_to.__class__.__name__}, "
                            f"were zero params, is it possible that the parameters were "
                            f"overwritten after they were initialized? "
                            f"params: {[p for p in module_to_apply_fn_to.parameters(recurse=False)]} "
                        )

                    params_to_apply_fn_to: Iterable[Parameter] = list(
                        sorted(module_to_apply_fn_to.parameters(recurse=False),
                               key=lambda p: p.ds_id))

                    for param in params_to_apply_fn_to:
                        param.all_gather()

                    fn_to_apply(module_to_apply_fn_to)

                    for param in params_to_apply_fn_to:
                        torch.distributed.broadcast(param.data,
                                                    0,
                                                    group=param.ds_process_group)

                    for param in params_to_apply_fn_to:
                        param.partition(has_been_updated=True)

                wrapped_fn_to_apply.wrapped = True

                return wrapped_fn_to_apply

            @functools.wraps(orig_module_apply_fn)
            def wrapped_apply(module: Module, fn_to_apply: Callable) -> None:
                orig_module_apply_fn(module, get_wrapped_fn_to_apply(fn_to_apply))

            return wrapped_apply

        def partition_after(f):
            @functools.wraps(f)
            def wrapper(module, *args, **kwargs):

                # important logic: We want to run post_init only after child's __init__ is
                # completed, and do nothing after __init__ of any of its parents and grandparents in
                # the inheritance ancestry. This way the partitioning will need to happen only once
                # when the whole object is ready to be partitioned and not before. This is because
                # often the child module will need to tweak the weights - for example running a
                # custom weights init function. So if a parent created the weights param, the child
                # won't need to gather it in order to tweak it

                print_rank_0(f'Before initializing {module.__class__.__name__}',
                             force=False)

                is_child_module = False
                if not hasattr(module, "_ds_child_entered"):
                    # child's __init__ was called, since parents all see the same object they can now skip post_init
                    is_child_module = True
                    setattr(module, "_ds_child_entered", True)

                f(module, *args, **kwargs)

                if is_child_module:
                    # child's __init__ is done, now we can run a single post_init on the child object
                    delattr(module, "_ds_child_entered")

                    print_rank_0(f'Running post_init for {module.__class__.__name__}',
                                 force=False)
                    self._post_init_method(module)

                print_rank_0(
                    f'After initializing followed by post init for {module.__class__.__name__}',
                    force=False)

            return wrapper

        def _enable_class(cls):
            cls._old_init = cls.__init__
            cls.__init__ = partition_after(cls.__init__)

        def _init_subclass(cls, **kwargs):
            cls.__init__ = partition_after(cls.__init__)

        # Replace .__init__() for all existing subclasses of torch.nn.Module recursively
        for subclass in get_all_subclasses(torch.nn.modules.module.Module):
            # print(f"subclass={subclass.__module__}.{subclass.__qualname__}")
            _enable_class(subclass)

        # holding onto some methods so we can put them back the way they were in __exit__
        torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__
        torch.nn.modules.module.Module._old_apply = torch.nn.modules.module.Module.apply
        torch.Tensor.__old_new__ = torch.Tensor.__new__

        # Replace .__init__() for future subclasses of torch.nn.Module
        torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass)
        torch.nn.modules.module.Module.apply = apply_with_gather(
            torch.nn.modules.module.Module._old_apply)

        torch.Tensor.__new__ = get_new_tensor_fn_for_dtype(self.dtype)
        torch.empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty,
                                                             self.dtype)
        torch.zeros = zero_wrapper_for_fp_tensor_constructor(_orig_torch_zeros,
                                                             self.dtype)
        torch.ones = zero_wrapper_for_fp_tensor_constructor(_orig_torch_ones, self.dtype)
        torch.full = zero_wrapper_for_fp_tensor_constructor(_orig_torch_full, self.dtype)

        if self.mem_efficient_linear:
            print_rank_0(
                "nn.functional.linear has been overridden with a more memory efficient version. This will persist unless manually reset.",
                force=False)
            self.linear_bk = torch.nn.functional.linear
            torch.nn.functional.linear = LinearFunctionForZeroStage3.apply

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.enabled:
            return

        def _disable_class(cls):
            cls.__init__ = cls._old_init

        # Replace .__init__() for all existing subclasses of torch.nn.Module
        for subclass in get_all_subclasses(torch.nn.modules.module.Module):
            _disable_class(subclass)

        # putting methods back the way we found them
        torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass
        torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply

        torch.Tensor.__new__ = torch.Tensor.__old_new__
        torch.empty = _orig_torch_empty
        torch.zeros = _orig_torch_zeros
        torch.ones = _orig_torch_ones
        torch.full = _orig_torch_full

        # undoing it here would also undo it during training
        # if self.mem_efficient_linear:
        #    torch.nn.functional.linear = self.linear_bk

        if torch.distributed.get_rank() == 0:
            logger.info("finished initializing model with %.2fB parameters",
                        param_count / 1e9)

        # Now that we cleaned up the metaclass injection, raise the exception.
        if exc_type is not None:
            return False

    # To be implemented by inheriting classes
    def _post_init_method(self, module):
        pass

    def _set_dtype(self, ds_config, dtype):
        if ds_config is not None and dtype is None:
            if ds_config.bfloat16_enabled and ds_config.fp16_enabled:
                raise RuntimeError("bfloat16 and fp16 cannot be enabled at once")

            if ds_config.bfloat16_enabled:
                self.dtype = torch.bfloat16
            elif ds_config.fp16_enabled:
                self.dtype = torch.half
            else:
                self.dtype = torch.float
        else:
            self.dtype = dtype or torch.half


class AllGatherHandle:
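    """Handle for an in-flight allgather of a single parameter; ``wait()`` blocks
    until the collective completes and marks the param AVAILABLE."""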
    def __init__(self, handle, param: Parameter) -> None:
        if param.ds_status != ZeroParamStatus.INFLIGHT:
            raise RuntimeError(f"expected param {param.ds_summary()} to be available")

        self.__handle = handle
        self.__param = param

    def wait(self) -> None:
        instrument_w_nvtx(self.__handle.wait)()
        self.__param.ds_status = ZeroParamStatus.AVAILABLE


class AllGatherCoalescedHandle:
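    """Handle for an in-flight coalesced allgather over multiple parameters;
    ``wait()`` completes the collective and reassembles each param from the
    per-rank partitions of the flat output buffer."""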
    def __init__(
        self,
        allgather_handle,
        params: List[Parameter],
        partitions: List[Tensor],
        world_size: int,
    ) -> None:
        self.__allgather_handle = allgather_handle
        self.__params = params
        self.__partitions = partitions
        self.__world_size = world_size
        self.__complete = False

        for param in self.__params:
            if param.ds_status != ZeroParamStatus.INFLIGHT:
                raise RuntimeError(
                    f"expected param {param.ds_summary()} to not be available")

    @instrument_w_nvtx
    def wait(self) -> None:
        if self.__complete:
            return

        instrument_w_nvtx(self.__allgather_handle.wait)()

        # split the single tensor out into individual tensors
        param_offset = 0
        for param in self.__params:
            assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight"
            partitions: List[Tensor] = []
            for rank in range(self.__world_size):
                param_start = rank * param.ds_tensor.ds_numel
                if param_start < param.ds_numel:
                    part_to_copy = self.__partitions[rank].narrow(
                        0,
                        param_offset,
                        min(param.ds_numel - param_start,
                            param.ds_tensor.ds_numel))
                    partitions.append(part_to_copy)

            param.data = instrument_w_nvtx(torch.cat)(partitions).view(param.ds_shape)
            param.ds_status = ZeroParamStatus.AVAILABLE

            for part_to_copy in partitions:
                part_to_copy.record_stream(torch.cuda.current_stream())

            param_offset += param.ds_tensor.ds_numel

        self.__complete = True


# Replaces all parameters in module with Scattered Parameters
class Init(InsertPostInitMethodToModuleSubClasses):
    param_id = 0

    def __init__(self,
                 module=None,
                 data_parallel_group=None,
                 mem_efficient_linear=True,
                 remote_device=None,
                 pin_memory=False,
                 config_dict_or_path=None,
                 config=None,
                 enabled=True,
                 dtype=None,
                 mpu=None):
        """A context to enable massive model construction for training with
        ZeRO-3. Models are automatically partitioned (or sharded) across the
        system and converted to half precision.

        Args:
            module (``torch.nn.Module``, optional): If provided, partition the model as
                if it was constructed in the context.
            data_parallel_group (``torch.distributed`` process group, optional):
                The group of processes to partition among. Defaults to all processes.
            mem_efficient_linear (bool, optional): Replace
                torch.nn.functional.linear with an implementation that allows
                DeepSpeed to partition parameters. Defaults to ``True``.
            remote_device (string, optional): The initial device to store model
                weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU
                memory. The model may still be moved to GPU based on the
                offload settings for training. Defaults to param offload device if a config is
                defined, otherwise GPU.
            pin_memory (bool, optional): Potentially increase performance by
                using pinned memory for model weights. ``remote_device`` must be
                ``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``.
            config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration
                for swapping fp16 params to NVMe.
            config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead.
            enabled (bool, optional): If ``False``, this context has no
                effect. Defaults to ``True``.
            dtype (``dtype``, optional): Can be used to change the data type of the parameters.
                Supported options are ``torch.half``, ``torch.bfloat16``, and ``torch.float``. Defaults to ``None``.
            mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}.

        This context accelerates model initialization and enables models that
        are too large to allocate in their entirety in CPU memory. It has the
        following effects:

        #. allocates tensors to either GPU or CPU memory or NVMe
        #. converts floating point tensors to half precision
        #. immediately partitions tensors among the group of data-parallel devices
        #. (*optional*) replaces ``torch.nn.functional.linear`` with a more
           memory-efficient implementation

        These modifications allow for models that exceed the size of local CPU/GPU
        memory/NVMe, but fit within the total system capacity (*i.e.*, aggregate CPU
        or GPU memory or NVMe) across all nodes. Consider initializing a model with one
        trillion parameters, whose weights occupy two terabytes (TB) in half
        precision. The initial CPU allocation in full precision requires 4TB of
        memory *per process*, and so a system with 8 GPUs per node would need 32TB of
        CPU memory due to data-parallel redundancies. Instead, by immediately
        partitioning tensors we remove the redundancies. The result is that
        regardless of the number of GPUs, we still only require the original 4TB. This
        allows for a linear increase in model size with the aggregate system memory.
        For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion
        parameter model with 4 nodes and 32 GPUs.

        Important: If the fp16 weights of the model can't fit into the memory of a
        single GPU, this feature must be used.

        .. note::
            Initializes ``torch.distributed`` if it has not already been initialized.
            See :meth:`deepspeed.init_distributed` for more information.

        .. note::
            Can also be used as a decorator:

            .. code-block:: python

                @deepspeed.zero.Init()
                def get_model():
                    return MyLargeModel()

        .. note::
            Only applicable to training with ZeRO-3.

        Examples
        --------

        #. Allocate a model and partition it among all processes:

            .. code-block:: python

                with deepspeed.zero.Init():
                    model = MyLargeModel()


        #. Allocate a model in pinned CPU memory and partition it among a subgroup of processes:

            .. code-block:: python

                with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(),
                                         remote_device="cpu",
                                         pin_memory=True):
                    model = MyLargeModel()


        #. Partition an already-allocated model in CPU memory:

            .. code-block:: python

                model = deepspeed.zero.Init(module=model)
        """
        if config is not None:
            config_dict_or_path = config
            logger.warning(
                f'zero.Init: the `config` argument is deprecated. Please use `config_dict_or_path` instead.'
            )

        _ds_config = deepspeed.runtime.config.DeepSpeedConfig(
            config_dict_or_path,
            mpu) if config_dict_or_path is not None else None
        super().__init__(enabled=enabled,
                         mem_efficient_linear=mem_efficient_linear,
                         ds_config=_ds_config,
                         dtype=dtype)
        if not torch.distributed.is_initialized():
            init_distributed()
            assert torch.distributed.is_initialized(), "Parameters cannot be scattered without initializing torch.distributed"
        if data_parallel_group is None:
            self.ds_process_group = torch.distributed.group.WORLD
        else:
            self.ds_process_group = data_parallel_group

        self.rank = torch.distributed.get_rank(group=self.ds_process_group)
        self.world_size = torch.distributed.get_world_size(group=self.ds_process_group)

        # Local device is the device where the parameters are consumed, must be default device.
        # It is the device where parameters are fully instantiated using allgather
        self.local_device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"]))
        torch.cuda.set_device(self.local_device)

        if _ds_config is not None and _ds_config.zero_config.offload_param is not None:
            remote_device = _ds_config.zero_config.offload_param[OFFLOAD_PARAM_DEVICE]
            pin_memory = _ds_config.zero_config.offload_param[OFFLOAD_PARAM_PIN_MEMORY]

        self._validate_remote_device(remote_device, _ds_config)

        # Remote device is the device where parameter partitions are stored
        # It can be same as local_device or it could be CPU or NVMe.
        self.remote_device = self.local_device if remote_device is None else remote_device
        self.pin_memory = pin_memory if (self.remote_device
                                         == OFFLOAD_CPU_DEVICE) else False

        # Enable fp16 param swapping to NVMe
        if self.remote_device == OFFLOAD_NVME_DEVICE:
            self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config, self.dtype)
        else:
            self.param_swapper = None

        # If we are provided an already-allocated module to prepare.
        if module is not None:
            assert isinstance(module, torch.nn.Module)
            self._convert_to_zero_parameters(module.parameters(recurse=True))

        self.use_all_gather_base = False
        try:
            from torch.distributed.distributed_c10d import _all_gather_base as all_gather
            self.use_all_gather_base = True
        except ImportError:
            logger.info(
                f"_all_gather_base API is not available in torch {torch.__version__}")

    def _convert_to_zero_parameters(self, param_list):
        for param in param_list:
            if is_zero_param(param):
                continue
            self._convert_to_deepspeed_param(param)
            param.partition()

    def _validate_remote_device(self, remote_device, ds_config):
        if ds_config is not None:
            if remote_device in [None, OFFLOAD_CPU_DEVICE]:
                if ds_config.zero_config.offload_param is not None:
                    offload_param_device = ds_config.zero_config.offload_param[
                        OFFLOAD_PARAM_DEVICE]
                    assert offload_param_device != OFFLOAD_NVME_DEVICE, \
                        f"{OFFLOAD_PARAM_DEVICE} in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}."

            if remote_device == OFFLOAD_NVME_DEVICE:
                assert ds_config.zero_config.offload_param is not None, \
                f'{OFFLOAD_PARAM} must be defined in DeepSpeed Config if remote device is {OFFLOAD_NVME_DEVICE}.'

                assert ds_config.zero_config.offload_param[OFFLOAD_PARAM_NVME_PATH] is not None, \
                f'{OFFLOAD_PARAM_NVME_PATH} in DeepSpeed Config cannot be None if remote device is {OFFLOAD_NVME_DEVICE}'

    def _post_init_method(self, module):
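        """Convert the direct parameters of a freshly constructed ``module`` into
        ZeRO params, broadcast their initial values from rank 0, and partition
        them across the data-parallel group."""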
        #see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False)
        print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False)
        see_memory_usage(
            f"Before converting and partitioning parmas in {module.__class__.__name__}",
            force=False)

        global param_count
        for name, param in module.named_parameters(recurse=False):
            param_count += param.numel()
            if not is_zero_param(param):
                self._convert_to_deepspeed_param(param)
                print_rank_0(
                    f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}"
                )

                if param.is_cuda:
                    torch.distributed.broadcast(param, 0, self.ds_process_group)
                else:
                    if torch.distributed.get_rank() == 0:
                        logger.warn(f"param `{name}` in {module.__class__.__name__} "
764 765
                                    f"not on GPU so was not broadcasted from rank 0")

                param.partition()
        see_memory_usage(
            f"Param count {param_count}. After converting and partitioning parmas in {module.__class__.__name__}",
            force=False)

    def _convert_to_deepspeed_param(self, param):
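        """Attach ZeRO-3 metadata (``ds_id``, ``ds_shape``, ``ds_status``, ...) and
        collective methods (``all_gather``, ``partition``, gradient partitioning,
        size utilities) to ``param`` in place."""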

        # Partitioned, Normal, Remote
        param.ds_param_type = ZeroParamType.PARTITIONED

        # Replicated vs Partitioned vs Inflight
        param.ds_status = ZeroParamStatus.AVAILABLE

        # Stores the shape of the original tensor
        param.ds_shape = param.shape

        # Stores the number of elements in the original parameter without padding
        param.ds_numel = param.numel()

        # Stores the partitioned copy of the tensor
        param.ds_tensor = None

        # Keeps track of how many active sub-modules need this param at any given point in time
        param.ds_active_sub_modules = set()

        # If this flag is true, then the parameters are replicated throughout training
        # and only partitioned before the step
        param.ds_persist = False

        param.is_external_param = False

        # The group that the parameter is scattered across.
        param.ds_process_group = self.ds_process_group

        # This is set to the Async Param swapper if remote device is nvme
        # else this is set to None
        param.nvme_swapper = self.param_swapper

        # DeepSpeed Param ID
        param.ds_id = Init.param_id
        Init.param_id += 1

        def all_gather(param_list=None, async_op=False, hierarchy=0):
            cls = param
            if param_list is None:
                param_list = [cls]
            return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy)

        @instrument_w_nvtx
        def all_gather_coalesced(params: Iterable[Parameter],
                                 safe_mode: bool = False) -> AllGatherCoalescedHandle:

            # fetches from nvme if the partition is not available and in nvme
            self._ensure_availability_of_partitioned_params(params)

            for param in params:
                if param.ds_status != ZeroParamStatus.NOT_AVAILABLE:
                    raise RuntimeError(param.ds_summary())
                param.ds_status = ZeroParamStatus.INFLIGHT

            # ensure that each rank has params in same order. the allgather
            # is done by flattening the parameter list into a single tensor that
            # can be allgathered in a single call - this means that if each rank
            # gives a list of the same parameters in a different order we will
            # silently get incorrect parameter values, and have very difficult
            # to debug correctness issues.
            params = sorted(params, key=lambda p: p.ds_id)

            debug_rank0(f"-allgather_coalesced: {[p.ds_id for p in params]}")

            if safe_mode:
                # ensure that same list (with same ordering) of parameters are
                # being allgathered across all ranks, otherwise could mix
                # data between tensors.
                assert_ints_same_as_other_ranks([p.ds_id for p in params])
                # ensure that tensors from each rank agree on the same ds_numel
                # otherwise could mix data between tensors.
                assert_ints_same_as_other_ranks([p.ds_tensor.ds_numel for p in params])

            if len(params) == 1:
                # have an opportunity to avoid some intermediate memory allocations
                param, = params
                param_buffer = torch.empty(
                    math.ceil(param.ds_numel / self.world_size) * self.world_size,
                    dtype=param.dtype,
                    device=torch.cuda.current_device(),
                    requires_grad=False,
                )
                handle = torch_allgather_fn(
                    param.ds_tensor.to(torch.cuda.current_device()),
                    param_buffer,
                    self.ds_process_group,
                )
                param.data = param_buffer.narrow(0,
                                                 0,
                                                 param.ds_numel).view(param.ds_shape).to(
                                                     param.device)
                return AllGatherHandle(handle, param)
            else:
                partition_sz = sum(p.ds_tensor.ds_numel for p in params)
                flat_tensor = torch.empty(partition_sz * self.world_size,
                                          dtype=get_only_unique_item(p.dtype
                                                                     for p in params),
                                          device=torch.cuda.current_device(),
                                          requires_grad=False)
                partitions: List[Parameter] = []
                for i in range(self.world_size):
                    partitions.append(
                        flat_tensor.narrow(0,
                                           partition_sz * i,
                                           partition_sz))

                instrument_w_nvtx(torch.cat)(
                    [p.ds_tensor.to(torch.cuda.current_device()) for p in params],
                    out=partitions[self.rank])
                handle = torch_allgather_fn(partitions[self.rank],
                                            flat_tensor,
                                            self.ds_process_group)

                return AllGatherCoalescedHandle(
                    allgather_handle=handle,
                    params=params,
                    partitions=partitions,
                    world_size=self.world_size,
                )

        def partition(param_list=None, hierarchy=0, has_been_updated=False):
            cls = param
            print_rank_0(
                f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}"
            )
            if param_list is None:
                param_list = [cls]
            self._partition(param_list, has_been_updated=has_been_updated)

        def reduce_gradients_at_owner(param_list=None, hierarchy=0):
            cls = param
            if param_list is None:
                param_list = [cls]
            print_rank_0(
                f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner"
            )
            self._reduce_scatter_gradients(param_list)

        def partition_gradients(param_list=None,
                                partition_buffers=None,
                                hierarchy=0,
                                accumulate=False):
            cls = param
            print_rank_0(
                f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}"
            )
            if param_list is None:
                param_list = [cls]
                if isinstance(partition_buffers, torch.Tensor):
                    partition_buffers = [partition_buffers]

            self._partition_gradients(param_list,
                                      partition_buffers=partition_buffers,
                                      accumulate=accumulate)

        def aligned_size():
            return self._aligned_size(param)

        def padding_size():
            return self._padding_size(param)

        def partitioned_size():
            return self._partitioned_size(param)

        def item_override():
            param.all_gather()
            return param._orig_item()

        def ds_summary(slf: torch.Tensor) -> dict:
            return {
                "id": slf.ds_id,
                "status": slf.ds_status.name,
                "numel": slf.numel(),
                "ds_numel": slf.ds_numel,
                "shape": tuple(slf.shape),
                "ds_shape": tuple(slf.ds_shape),
                "requires_grad": slf.requires_grad,
                "grad_shape": tuple(slf.grad.shape) if slf.grad is not None else None,
                "persist": slf.ds_persist,
                "active_sub_modules": slf.ds_active_sub_modules,
            }

        def convert_to_zero_parameters(param_list):
            self._convert_to_zero_parameters(param_list)

        def allgather_before(func: Callable) -> Callable:
            def wrapped(*args, **kwargs):
                param.all_gather()
                return func(*args, **kwargs)

            return wrapped

        # Collectives for gathering and partitioning parameters
        param.all_gather = all_gather
        param.all_gather_coalesced = all_gather_coalesced
        param.partition = partition

        # Collective for averaging gradients
        param.reduce_gradients_at_owner = reduce_gradients_at_owner
        param.partition_gradients = partition_gradients

        # Partitioning size utilities
        param.aligned_size = aligned_size
        param.padding_size = padding_size
        param.partitioned_size = partitioned_size
        param.ds_summary = types.MethodType(ds_summary, param)

        param.item = allgather_before(param.item)

        param.convert_to_zero_parameters = convert_to_zero_parameters

    def _aligned_size(self, param):
        return param.ds_numel + self._padding_size(param)

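    # Padding rounds ds_numel up to a multiple of world_size so that every rank
    # owns an equally sized partition.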
    def _padding_size(self, param):
        remainder = param.ds_numel % self.world_size
        return (self.world_size - remainder) if remainder else 0

    def _partitioned_size(self, param):
        return param.ds_tensor.ds_numel

    def _ensure_availability_of_partitioned_params(self, params):
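        """Synchronously swap in any NVMe-offloaded partitions (and wait on swaps
        already in flight) so that each param's ds_tensor is resident before use."""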
        swap_in_list = []
        swap_in_flight = []
        for param in params:
            if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE:
                assert param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
                swap_in_list.append(param)
            if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT:
                assert param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE and param.ds_status == ZeroParamStatus.NOT_AVAILABLE
                swap_in_flight.append(param)
        if len(swap_in_list) > 0:
            swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False)
        elif len(swap_in_flight) > 0:
            swap_in_flight[0].nvme_swapper.synchronize_reads()

    @instrument_w_nvtx
    def _all_gather(self, param_list, async_op=False, hierarchy=None):

        # fetches from nvme if the partition is not available and in nvme
        self._ensure_availability_of_partitioned_params(param_list)

        handles = []
        all_gather_list = []
        for param in param_list:
            if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
                if async_op:
                    handle = self._allgather_param(param,
                                                   async_op=async_op,
                                                   hierarchy=hierarchy)
                    param.ds_status = ZeroParamStatus.INFLIGHT  # if async_op else ZeroParamStatus.AVAILABLE
                    handles.append(handle)
                else:
                    all_gather_list.append(param)

        if not async_op:
            if len(param_list) == 1:
                ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy)
            else:
                ret_value = self._allgather_params_coalesced(all_gather_list, hierarchy)

            for param in all_gather_list:
                param.ds_status = ZeroParamStatus.AVAILABLE
            return ret_value

        return handles

    def _partition(self, param_list, force=False, has_been_updated=False):
        for param in param_list:
            #print_rank_0(f"Before Partitioning Param {param.ds_id}")
            # self._param_status(param)
            self._partition_param(param, has_been_updated=has_been_updated)
            param.ds_status = ZeroParamStatus.NOT_AVAILABLE
            # if param.ds_tensor is not None:
            #    assert id(param.data) == id(param.ds_tensor.data), \
            #    "After the parameters are initially partitioned, make sure we are not recreating the partition."
            #print_rank_0(f"After Partitioning Param {param.ds_id}")
            # self._param_status(param)

    @instrument_w_nvtx
    def _partition_param(self, param, buffer=None, has_been_updated=False):
        assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight"

        global reuse_buffers
        #print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}")
        if param.ds_status is ZeroParamStatus.AVAILABLE:
            print_rank_0(
                f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}",
                force=False)
            # if reuse_buffers and False:
            #     numel = buffer.numel()
            #     buffer = param.data.view(-1)
            #     print_rank_0(
            #         "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers",
            #         force=False)
            #     if numel in empty_buffers:
            #         empty_buffers[numel].append(buffer)

            # if torch.distributed.get_rank():
            #    print(f"Releasing {param.data.numel()}")
            if param.ds_tensor is not None and not has_been_updated:

                #param.data = param.ds_tensor.data

                see_memory_usage(
                    f'Before partitioning param {param.ds_id} {param.shape}',
                    force=False)
                # param.data does not store anything meaningful in partitioned state
                free_param(param)
                see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}',
                                 force=False)

                if param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE:
                    print_rank_0(
                        f"Param {param.ds_id} partition released since it exists in nvme",
                        force=False)
                    param.nvme_swapper.remove_partition_and_release_buffers([param])

                return

            tensor_size = self._aligned_size(param)
            partition_size = tensor_size // self.world_size

            if param.ds_tensor is None:
                final_location = None
                if self.remote_device == OFFLOAD_NVME_DEVICE and self.param_swapper.swappable_tensor(
                        numel=partition_size):
                    final_location = OFFLOAD_NVME_DEVICE
                    buffer = self.param_swapper.get_buffer(param, partition_size)
                    partitioned_tensor = torch.empty(0,
                                                     dtype=param.dtype,
                                                     device=buffer.device)
                    partitioned_tensor.data = buffer.data
                    print_rank_0(
                        f"ID {param.ds_id} Initializing partition for the first time for nvme offload."
                    )

                else:
                    partitioned_tensor = torch.empty(
                        partition_size,
                        dtype=param.dtype,
                        device=OFFLOAD_CPU_DEVICE if self.remote_device
                        == OFFLOAD_NVME_DEVICE else self.remote_device)
                    if self.pin_memory:
                        partitioned_tensor = partitioned_tensor.pin_memory()

                partitioned_tensor.requires_grad = False
                param.ds_tensor = partitioned_tensor
                param.ds_tensor.ds_numel = partition_size
                param.ds_tensor.status = PartitionedParamStatus.AVAILABLE
                param.ds_tensor.final_location = final_location

            start = partition_size * self.rank
            end = start + partition_size

            one_dim_param = param.contiguous().view(-1)

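            # Copy this rank's slice of the flattened parameter into its partition.
            # The last partition may extend past ds_numel because of alignment
            # padding, in which case only the valid prefix is copied.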
            if start < param.ds_numel and end <= param.ds_numel:
                src_tensor = one_dim_param.narrow(0, start, partition_size)

                param.ds_tensor.copy_(src_tensor)
                #partitioned_tensor = src_tensor.clone().detach().to(self.remote_device)

            else:
                # partitioned_tensor = torch.zeros(partition_size,
                #                                  dtype=param.dtype,
                #                                  device=self.remote_device )

                if start < param.ds_numel:
                    elements_to_copy = param.ds_numel - start
                    param.ds_tensor.narrow(0,
                                           0,
                                           elements_to_copy).copy_(
                                               one_dim_param.narrow(
                                                   0,
                                                   start,
                                                   elements_to_copy))

            #print(f"Remote device {self.remote_device}")

            #param.ds_tensor = partitioned_tensor

            #param.data = param.ds_tensor.data

            # param.data does not store anything meaningful in partitioned state

            see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}',
                             force=False)
            free_param(param)
J
Jeff Rasley 已提交
1161 1162 1163 1164 1165 1166 1167 1168 1169 1170
            see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}',
                             force=False)

            if param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE:
                self.param_swapper.swap_out_and_release([param])
                print_rank_0(
                    f"ID {param.ds_id} Offloaded to nvme and buffers released.")
                see_memory_usage(
                    f"ID {param.ds_id} Offloaded to nvme and buffers released.",
                    force=False)

            print_rank_0(
                f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}"
            )

    def _param_status(self, param):
        if param.ds_tensor is not None:
            print_rank_0(
                f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}"
            )
        else:
            print_rank_0(
                f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}"
            )

    def _allgather_param(self, param, async_op=False, hierarchy=0):

        partition_size = param.ds_tensor.ds_numel

        tensor_size = partition_size * self.world_size
        aligned_param_size = self._aligned_size(param)
        assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}'

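        # Layout note: after the gather, rank r's shard occupies
        # flat_tensor[r * partition_size : (r + 1) * partition_size]; the
        # first ds_numel elements are then viewed as the original ds_shape.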
        print_rank_0(
            f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}"
        )

        see_memory_usage(
            f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ',
            force=False)
        flat_tensor = torch.zeros(aligned_param_size,
                                  dtype=param.dtype,
                                  device=param.device).view(-1)
        see_memory_usage(
            f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ',
            force=False)

        torch.cuda.synchronize()

        print_rank_0(
            f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}"
        )
        #        if not flat_tensor.numel() > 100000:
        #            replicated_tensor = flat_tensor.narrow(0,
        #                                                   0,
        #                                                   param.ds_numel).view(param.ds_shape)
        #            param.data = replicated_tensor.data
        #            return None
        if self.use_all_gather_base:
            # try the _all_gather_base on PyTorch master branch
            handle = dist._all_gather_base(flat_tensor,
                                           param.ds_tensor.cuda(),
                                           group=self.ds_process_group,
                                           async_op=async_op)
        else:
            partitions = []
            for i in range(self.world_size):
                partitions.append(
                    flat_tensor.narrow(0,
                                       partition_size * i,
                                       partition_size))

                if i == dist.get_rank(group=self.ds_process_group):
                    partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True)

            handle = dist.all_gather(partitions,
                                     partitions[self.rank],
                                     group=self.ds_process_group,
                                     async_op=async_op)

        replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape)
        param.data = replicated_tensor.data
        return handle

    def _allgather_params_coalesced(self, param_list, hierarchy=0):
        """Blocking call that gathers a list of params while avoiding the
        explicit memory copies done in ``_allgather_params``.
        """
        if len(param_list) == 0:
            return
        # collect local tensors and partition sizes
        partition_sizes = []
        local_tensors = []
        for param in param_list:
            partition_sizes.append(param.ds_tensor.ds_numel)
            local_tensors.append(param.ds_tensor.cuda())

        # allocate memory for allgather params
        allgather_params = []
        for psize in partition_sizes:
            tensor_size = psize * self.world_size
            flat_tensor = torch.empty(tensor_size,
                                      dtype=param_list[0].dtype,
                                      device=self.local_device).view(-1)
            flat_tensor.requires_grad = False
            allgather_params.append(flat_tensor)

        # launch
        launch_handles = []
        # backend = get_backend(self.ds_process_group)
        # with _batch_p2p_manager(backend):
        for param_idx, param in enumerate(param_list):
            input_tensor = local_tensors[param_idx].view(-1)

            if self.use_all_gather_base:
                # try the _all_gather_base from PyTorch master
                h = dist._all_gather_base(allgather_params[param_idx],
                                          input_tensor,
                                          group=self.ds_process_group,
                                          async_op=True)
            else:
                output_list = []
                for i in range(self.world_size):
                    psize = partition_sizes[param_idx]
                    partition = allgather_params[param_idx].narrow(0, i * psize, psize)
                    output_list.append(partition)
                    if not partition.is_cuda:
                        logger.warning(
                            f'param {param_idx}, partition {i} is not on CUDA, partition shape {partition.size()}'
                        )

                # back to old all_gather function signature
                h = dist.all_gather(output_list,
                                    input_tensor,
                                    group=self.ds_process_group,
                                    async_op=True)
            launch_handles.append(h)

        # Wait ensures the operation is enqueued, but not necessarily complete.
        launch_handles[-1].wait()

        # assign to param.data (not copy)
        for i, param in enumerate(param_list):
            gathered_tensor = allgather_params[i]
            param.data = gathered_tensor.narrow(0,
                                                0,
                                                param.ds_numel).view(param.ds_shape).data

        # ensure the communication has completed
        torch.cuda.synchronize()

        return None

    def _allgather_params(self, param_list, hierarchy=0):
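        """Gather every param in ``param_list`` by packing all local shards
        into one flat buffer, issuing a single blocking all_gather, and then
        unpacking each rank's contribution into per-param replicated tensors.
        """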
        if len(param_list) == 0:
            return

        partition_size = sum([param.ds_tensor.ds_numel for param in param_list])

        tensor_size = partition_size * self.world_size
        flat_tensor = torch.empty(tensor_size,
                                  dtype=param_list[0].dtype,
                                  device=self.local_device)
        flat_tensor.requires_grad = False
        partitions = []
        for i in range(self.world_size):
            start = partition_size * i

            partitions.append(flat_tensor.narrow(0, start, partition_size))

            if i == self.rank:
                offset = 0
                for param in param_list:
                    param_numel = param.ds_tensor.ds_numel

                    partitions[i].narrow(0,
                                         offset,
                                         param_numel).copy_(param.ds_tensor.data)

                    offset += param_numel

        torch.distributed.all_gather(partitions,
                                     partitions[self.rank],
                                     group=self.ds_process_group,
                                     async_op=False)
        param_offset = 0

        for param in param_list:
            param_partition_size = param.ds_tensor.ds_numel
            param_size = param.ds_numel
            replicated_tensor = torch.empty(param.ds_shape,
                                            dtype=param.dtype,
                                            device=self.local_device)

            for i in range(self.world_size):

                start = i * partition_size

                param_start = i * param_partition_size

                if param_start < param_size:
                    numel_to_copy = min(param_size - param_start, param_partition_size)

                    part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy)

                    replicated_tensor.view(-1).narrow(0,
                                                      param_start,
                                                      numel_to_copy).copy_(part_to_copy)
            #param_offset += param.data.numel()
            param_offset += param.ds_tensor.ds_numel

            param.data = replicated_tensor.data

        return None

    def _reduce_scatter_gradients(self, param_list):
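        """Reduce-scatter the gradients of ``param_list`` so that this rank's
        partition of each ``param.grad`` holds the reduced values; ranks whose
        partition is padded past the end of the gradient reduce into a separate
        buffer that is copied back below.
        """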
        #print_rank_0([param.grad for param in param_list])
        #assert not any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered"

        handles_and_reduced_partitions = []
        for param in param_list:
            assert param.grad.numel(
            ) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter gradients whose size is not the same as the params"

            handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param))

        for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions):
            if handle is not None:
                handle.wait()

            # some ranks may have partitions that are padded to go beyond the grad size.
            # For these ranks the output of reduce scatter is a separate buffer and needs
            # to be copied in
            partition_size = param.ds_tensor.ds_numel
            start = self.rank * partition_size
            end = start + partition_size
            #print_rank_0(f"Reduce scatter was executed for param {param.ds_id}")
            if start < param.ds_numel and end > param.ds_numel:
                elements = param.ds_numel - start
                param.grad.view(-1).narrow(0,
                                           start,
                                           elements).copy_(
                                               reduced_partition.narrow(0,
                                                                        0,
                                                                        elements))

    def _reduce_scatter_gradient(self, param):

        partition_size = param.ds_tensor.ds_numel
        #output = torch.empty(partition_size, dtype=param.dtype, device=param.device)

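        # Padding sketch (illustration only): with ds_numel=10, world_size=4,
        # and partition_size=3, rank 3's input slice is [grad[9], 0, 0],
        # zero-padded so every reduce-scatter input has partition_size elements.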
        total_size = partition_size * self.world_size
        input_list = []

        for i in range(self.world_size):

            start = i * partition_size
            end = start + partition_size

            #print("before reduce scatter gradients")
            if start < param.ds_numel and end <= param.ds_numel:
                input = param.grad.view(-1).narrow(0, start, partition_size)
            else:
                input = torch.zeros(partition_size,
                                    dtype=param.dtype,
                                    device=param.device)

                if start < param.ds_numel:
                    elements = param.ds_numel - start
                    input.narrow(0,
                                 0,
                                 elements).copy_(
                                     param.grad.view(-1).narrow(0,
                                                                start,
                                                                elements))
            #print("after reduce scatter gradients")
            input_list.append(input)

        rank = torch.distributed.get_rank(group=self.ds_process_group)
        handle = torch.distributed.reduce_scatter(input_list[rank],
                                                  input_list,
                                                  group=self.ds_process_group,
                                                  async_op=True)

        return handle, input_list[rank]

    def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False):
        if partition_buffers is None:
            partition_buffers = [None] * len(param_list)

        for param, partition_buffer in zip(param_list, partition_buffers):
            self._partition_gradient(param,
                                     partition_buffer=partition_buffer,
                                     accumulate=accumulate)

    def _partition_gradient(self, param, partition_buffer=None, accumulate=False):
        print_rank_0(
            f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}"
        )
        see_memory_usage("Before partitioning gradients", force=False)
        partition_size = param.ds_tensor.ds_numel

        if partition_buffer is None:
            assert not accumulate, "No buffer to accumulate to"
            partition_buffer = torch.zeros(partition_size,
                                           dtype=param.dtype,
                                           device=param.device)
        else:
            assert partition_buffer.numel(
            ) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should be at least the partition size {partition_size}"

        rank = torch.distributed.get_rank(group=self.ds_process_group)
        start = partition_size * rank
        end = start + partition_size

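        # Example (illustration only): with partition_size=4 and rank=2, this
        # rank owns grad elements [8:12); if ds_numel is 10, `elements` below
        # becomes 2 and only grad[8:10) is copied or accumulated.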
        dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size)

        #print("before partition gradients")
        if start < param.ds_numel:
            elements = min(param.ds_numel - start, partition_size)

            dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements)
            src_tensor = param.grad.view(-1).narrow(0, start, elements)

            # just copy the grad partition to the buffer
            if not accumulate:
                dest_tensor.copy_(src_tensor)

            # if source and destination are on the same device,
            # add to the provided buffer
            elif src_tensor.device == dest_tensor.device:
                dest_tensor.add_(src_tensor)

            # if source and destination are on different devices, copy the
            # destination to the source's device first, add there, then move
            # the result back. This seems to run faster when src is gpu and
            # dest is cpu; adding directly on cpu is very slow
            else:
                acc_tensor = torch.empty(src_tensor.numel(),
                                         dtype=param.dtype,
                                         device=param.device)

                acc_tensor.copy_(dest_tensor)
                acc_tensor.add_(src_tensor)
                dest_tensor.copy_(acc_tensor)

            # partition_buffer.view(-1).narrow(
            #     0,
            #     0,
            #     elements).copy_(param.grad.view(-1).narrow(0,
            #                                             start,
            #                                             elements))

        #print("after partition gradients")
        param.grad.data = dest_tensor_full_buffer.data
        see_memory_usage("After partitioning gradients", force=False)


class GatheredParameters:
    def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True):
        """A context that collects parameters that were partitioned via a
        :class:`deepspeed.zero.Init` context. The parameters are partitioned
        again upon exit.

        Args:
            params (``torch.nn.Parameter``): A single parameter or a list of parameters to collect.
                It's assumed that all parameters are zero params.
            modifier_rank (int, optional): If specified, this rank's parameter will be
                broadcasted on exit from the context. This argument is required if ``params`` are
                modified, so that all processes have a consistent view of the data. Defaults
                to ``None``.
            fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be
                registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`.
            enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``.

        Important: Make sure to use a ``modifier_rank`` that is not ``None`` (e.g. ``modifier_rank=0``)
        if you need the GPU memory allocated by the gather to be released upon exit from the context manager.

        Examples
        ========

        #. Allocate a partitioned module, initialize its weight on rank 0, and update all
           processes.

            .. code-block:: python

                with deepspeed.zero.Init():
                    linear = torch.nn.Linear(1000,1000)

                with deepspeed.zero.GatheredParameters(linear.weight,
                                                       modifier_rank=0):
                    if torch.distributed.get_rank() == 0:
                        linear.weight.zero_()


        #. Collect a partitioned weight to pass to another module during
           training. The parameter will be registered as an external parameter
           and made available during the backward pass.

            .. code-block:: python
                :emphasize-lines: 6

                def forward(self, input):
                    x = self.layer1(input)

                    # self.layer1.weight is required by self.layer2.forward
                    with deepspeed.zero.GatheredParameters(self.layer1.weight,
                                                           fwd_module=self):
                        y = self.layer2(x, self.layer1.weight)
                    return y


        #. Pretrained model loading

            .. code-block:: python

                with deepspeed.zero.Init():
                    model = MyModel()

                state_dict = torch.load(model_path, map_location="cpu")

                def load(module: nn.Module, prefix=""):
                    # because zero3 puts placeholders in model params, this context
                    # manager gathers (unpartitions) the params of the current layer, then loads from
                    # the state dict and then re-partitions them again
                    with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                        if torch.distributed.get_rank() == 0:
                            module._load_from_state_dict(state_dict, prefix)

                    for name, child in module._modules.items():
                        if child is not None:
                            load(child, prefix + name + ".")

                load(model, prefix="")

        If this approach is not used, then the full model will first be copied to each GPU. For models
        bigger than the memory of a single GPU, this method is required.
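
        #. Gather a partitioned weight for read-only access (a minimal sketch;
           note that with the default ``modifier_rank=None`` the parameter is
           not re-partitioned on exit, per the note above):

            .. code-block:: python

                with deepspeed.zero.GatheredParameters(linear.weight):
                    if torch.distributed.get_rank() == 0:
                        print(linear.weight.norm())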
        """

        self.enabled = enabled
        if not enabled:
            return

        if not isinstance(params, list):
            params = [params]

        # enable if at least one is zero-param, otherwise a noop
        if not any(is_zero_param(p) for p in params):
            self.enabled = False
            return

        self.params = [p for p in params if hasattr(p, "ds_id")]
        self.src_rank = None
        if modifier_rank is not None:
            if self.params[0].ds_process_group == torch.distributed.group.WORLD:
                self.src_rank = modifier_rank
            else:
                # A group was specified; convert DP rank to global rank
                self.src_rank = _get_global_rank(self.params[0].ds_process_group,
                                                 modifier_rank)
        self.fwd_module = fwd_module
        if self.fwd_module is not None:
            # is a no-op if already registered
            for p in self.params:
                register_external_parameter(self.fwd_module, p)

    def __enter__(self):
        if not self.enabled:
            return
        self.params[0].all_gather(param_list=self.params)

    def __exit__(self, *exc):
        if not self.enabled:
            return
        if self.src_rank is None:
            return

        handles = [
            torch.distributed.broadcast(p,
                                        self.src_rank,
                                        group=p.ds_process_group,
                                        async_op=True) for p in self.params
        ]
        for h in handles:
            h.wait()
        self.params[0].partition(param_list=self.params, has_been_updated=True)