#!/usr/bin/env python

# This script extracts fp32 consolidated weights from ZeRO stage 2 and 3 DeepSpeed checkpoints. It
# gets copied into the top-level checkpoint dir, so the user can easily do the conversion at any
# point in the future. Once extracted, the weights don't require DeepSpeed and can be used in any
# application.
#
# example: python zero_to_fp32.py . pytorch_model.bin

import argparse
import torch
import glob
import math
import os
from collections import OrderedDict

# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
# DeepSpeed data structures it has to be available in the current python environment.
import deepspeed
from deepspeed.utils import logger

debug = 0

# load to cpu
device = torch.device('cpu')


def get_model_state_file(checkpoint_dir, zero_stage):
    if not os.path.isdir(checkpoint_dir):
        raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")

    # there should be only one file
    if zero_stage == 2:
        file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
    elif zero_stage == 3:
        file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
    else:
        # guard against `file` being unbound for unsupported stages
        raise ValueError(f"unknown zero stage {zero_stage}")

    if not os.path.exists(file):
        raise FileNotFoundError(f"can't find model states file at '{file}'")

    return file


def get_optim_files(checkpoint_dir):
    # XXX: need to test that this simple glob rule works for multi-node setup too
    optim_files = sorted(glob.glob(os.path.join(checkpoint_dir, "*_optim_states.pt")))

    if len(optim_files) == 0:
        raise FileNotFoundError(
            f"can't find '*_optim_states.pt' files in directory '{checkpoint_dir}'")

    return optim_files


def parse_model_state(file):
    # module-level `device` already maps everything to cpu
    state_dict = torch.load(file, map_location=device)

    if "buffer_names" not in state_dict:
        raise ValueError(f"{file} is not a model state checkpoint")
    buffer_names = state_dict["buffer_names"]
    if debug:
        print("Found buffers:", buffer_names)

    # recover just the buffers while restoring them to fp32 if they were saved in fp16
    buffers = {
        k: v.float()
        for k, v in state_dict["module"].items() if k in buffer_names
    }
    return buffers


def parse_optim_states(files, ds_checkpoint_dir):

    total_files = len(files)
    state_dicts = [torch.load(f, map_location=device) for f in files]

    if not "zero_stage" in state_dicts[0]['optimizer_state_dict']:
84
        raise ValueError(f"{files[0]} is not a zero checkpoint")
85
    zero_stage = state_dicts[0]['optimizer_state_dict']["zero_stage"]
86 87
    world_size = state_dicts[0]['optimizer_state_dict']["partition_count"]
    param_shapes = state_dicts[0]["param_shapes"]
    '''For ZeRO-2, each param group can have a different partition_count, since the data
    parallelism for expert parameters can differ from the data parallelism for non-expert
    parameters. So we can just use the max of the partition_count to get the dp world_size.
    '''
    if isinstance(world_size, list):
        world_size = max(world_size)
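    # e.g. (hypothetical MoE checkpoint): partition_count=[8, 4] - dense param groups at dp=8,
    # expert param groups at dp=4 - yields world_size = max([8, 4]) = 8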

    if world_size != total_files:
        raise ValueError(
            f"Expected {world_size} '*_optim_states.pt' files under '{ds_checkpoint_dir}' but found {total_files}. "
            "Possibly due to overwriting an old checkpoint, or because a checkpoint didn't get saved by one or more processes."
        )

    # the groups are named differently in each stage
    if zero_stage == 2:
        fp32_groups_key = "single_partition_of_fp32_groups"
    elif zero_stage == 3:
        fp32_groups_key = "fp32_flat_groups"
    else:
        raise ValueError(f"unknown zero stage {zero_stage}")

    # if there is more than one param group, there will be multiple flattened tensors - one
    # flattened tensor per group - for simplicity merge them into a single tensor
    #
    # XXX: could make the script more memory efficient for when there are multiple groups - it
    # will require matching the sub-lists of param_shapes for each param group flattened tensor
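    #
    # e.g. (hypothetical): two param groups whose flat partitions hold 100 and 60 elements on a
    # given rank are merged below into a single 160-element flat tensor for that rank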
    fp32_flat_groups = [
        torch.cat(state_dicts[i]['optimizer_state_dict'][fp32_groups_key], 0)
        for i in range(len(state_dicts))
    ]

    return zero_stage, world_size, param_shapes, fp32_flat_groups


def zero3_partitioned_param_info(unpartitioned_numel, world_size):
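    # illustrative example (hypothetical numbers): unpartitioned_numel=21, world_size=4
    #   partitioned_numel = math.ceil(21 / 4) = 6
    #   padding_numel     = 4 - (21 % 4)      = 3
    #   so world_size * partitioned_numel == unpartitioned_numel + padding_numel (24 == 21 + 3)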
    remainder = unpartitioned_numel % world_size
    padding_numel = (world_size - remainder) if remainder else 0
    partitioned_numel = math.ceil(unpartitioned_numel / world_size)
    return partitioned_numel, padding_numel


def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
    """
    Returns fp32 state_dict reconstructed from ds checkpoint

    Args:
        - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)

    """
    print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")

    optim_files = get_optim_files(ds_checkpoint_dir)
    zero_stage, world_size, param_shapes, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
    print(
        f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")

    model_file = get_model_state_file(ds_checkpoint_dir, zero_stage)
    buffers = parse_model_state(model_file)

    # Reconstruction protocol:
    #
    # - for zero2 we just need to concat the partitions back to back into one huge flat buffer and
    # reconsolidate from it - no need to deal with padding since, if there is any, it will only be
    # in the tail of the last partition and will simply be left out
    #
    # - for zero3 we need to zip the partitions together at the boundary of each param,
    # re-consolidating each param while dealing with padding, if any
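    #
    # A tiny zero3 illustration (hypothetical values): with world_size=2 and two params a (numel 3)
    # and b (numel 2), rank0's flat group holds [a0, a1, b0] and rank1's holds [a2, <pad>, b1];
    # zipping at offset 0 with partitioned_numel=2 yields [a0, a1, a2, <pad>], trimmed to 3
    # elements to recover a, and advancing the shared offset by 2 recovers b the same way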

    if debug:
        for i in range(world_size):
            print(f"fp32_flat_groups[i].shape={fp32_flat_groups[i].shape}")

    if zero_stage == 2:
        # XXX: memory usage doubles here (zero2)
        full_single_fp32_vector = torch.cat(fp32_flat_groups, 0)
        avail_numel = full_single_fp32_vector.numel()
    elif zero_stage == 3:
        avail_numel = fp32_flat_groups[0].numel() * world_size

    if debug:
        wanted_params = len(param_shapes)
        wanted_numel = sum(shape.numel() for shape in param_shapes.values())
        # not asserting if there is a mismatch due to possible padding
        print(f"Have {avail_numel} numels to process.")
        print(f"Need {wanted_numel} numels in {wanted_params} params.")

    state_dict = OrderedDict()

    # buffers
    state_dict.update(buffers)
    if debug:
        print(f"added {len(buffers)} buffers")

    # params
    # XXX: for huge models that can't fit into the host's RAM we will have to recode this to
    # support an out-of-core computing solution
    offset = 0
    total_numel = 0
    total_params = 0
    for name, shape in param_shapes.items():
        unpartitioned_numel = shape.numel()
        total_numel += unpartitioned_numel
        total_params += 1

        if zero_stage == 2:
            if debug:
                print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel}")
            state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
            offset += unpartitioned_numel

        elif zero_stage == 3:
            partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)

            if debug:
                print(
                    f"{total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
                )

            # XXX: memory usage doubles here (zero3)
            state_dict[name] = torch.cat(
                tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel)
                      for i in range(world_size)),
                0).narrow(0, 0, unpartitioned_numel).view(shape)
            offset += partitioned_numel

    if zero_stage == 2:
        # ZeRO-2 started to align partitions to 2*world_size to improve nccl performance.
        # Therefore both offset and avail_numel can differ by anywhere between 0..2*world_size.
        # Due to two unrelated complex paddings performed in the code it's almost impossible to
        # predict the exact numbers w/o the live optimizer object, so we just check that the
        # numbers are within the right range
        align_to = 2 * world_size

        def zero2_align(x):
            return align_to * math.ceil(x / align_to)
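
        # e.g. (hypothetical numbers): with world_size=2, align_to == 4 and
        # zero2_align(10) == 4 * math.ceil(10 / 4) == 12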

        if debug:
            print(f"original offset={offset}, avail_numel={avail_numel}")

        offset = zero2_align(offset)
        avail_numel = zero2_align(avail_numel)

        if debug:
            print(f"aligned  offset={offset}, avail_numel={avail_numel}")

    elif zero_stage == 3:
        offset *= world_size

    # Sanity check
    if offset != avail_numel:
        raise ValueError(
            f"consumed {offset} numels out of {avail_numel} - something is wrong")

    print(
        f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements"
    )

    return state_dict


def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
    """
    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded
    with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for
    example via a model hub.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - pytorch ``state_dict``

    Note: this approach may not work if your application doesn't have sufficient free CPU memory and
    you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
    the checkpoint.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
        # do the training and checkpoint saving
        state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
        model = model.cpu() # move to cpu
        model.load_state_dict(state_dict)
        # submit to model hub or save the model to share with others

    In this example the ``model`` will no longer be usable in the deepspeed context of the same
    application, i.e. you will need to re-initialize the deepspeed engine, since
    ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.

    """
    if tag is None:
        latest_path = os.path.join(checkpoint_dir, 'latest')
        if os.path.isfile(latest_path):
            with open(latest_path, 'r') as fd:
                tag = fd.read().strip()
        else:
            raise ValueError(f"Unable to find 'latest' file at {latest_path}")

    ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)

    if not os.path.isdir(ds_checkpoint_dir):
        raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")

    return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)


def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
    """
    Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
    loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.

    Args:
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
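
    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
        # hypothetical paths - point these at your actual checkpoint folder and output file
        convert_zero_checkpoint_to_fp32_state_dict('./checkpoints', 'pytorch_model.bin')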
    """

    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
    print(f"Saving fp32 state dict to {output_file}")
    torch.save(state_dict, output_file)


def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
    """
    1. Put the provided model on cpu
    2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
    3. Load it into the provided model

    Args:
        - ``model``: the model object to update
        - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
        - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``

    Returns:
        - ``model``: modified model

    Make sure you have plenty of CPU memory available before you call this function. If you don't
    have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
    conveniently placed for you in the checkpoint folder.

    A typical usage might be ::

        from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
        model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
        # submit to model hub or save the model to share with others

    Note that once this has been run, the ``model`` will no longer be usable in the deepspeed
    context of the same application, i.e. you will need to re-initialize the deepspeed engine,
    since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.

    """
    logger.info(f"Extracting fp32 weights")
    state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)

    logger.info(f"Overwriting model with fp32 weights")
    model = model.cpu()
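    # strict=False: don't error if the model has keys that the reconstructed state_dict
    # doesn't cover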
    model.load_state_dict(state_dict, strict=False)

    return model


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "checkpoint_dir",
        type=str,
        help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
    parser.add_argument(
        "output_file",
        type=str,
        help=
        "path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)"
    )
    parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
    args = parser.parse_args()

    debug = args.debug

    convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)