#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import copy
import inspect

import paddle
from paddle.framework import Block
from paddle.static import Parameter, Variable

from .dist_attribute import TensorDistAttr
from .utils import __no_shape_var_type__, _linear_idx2coordinate


class DistributedTensor:
    """
    DistributedTensor represents the distribution of a tensor over a process mesh,
    and the corresponding local tensors can be created from it.
    Only even sharding is supported now; uneven sharding will be supported in the future.
    Local tensor information can be obtained from a DistributedTensor instance
    or through the static methods provided by DistributedTensor,
    including the shard (i.e. the index range within the serial tensor), offsets, and sizes.
    """

    @staticmethod
    def _validate_sizes_and_dist_attr(
        sizes, dims_mapping, topology, processes, rank=None, shard_sizes=None
    ):
        if not (
            isinstance(sizes, (list, tuple))
            and all(isinstance(x, int) and x >= 0 for x in sizes)
        ):
            raise ValueError(
                "The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format(
                    sizes
                )
            )
        if not (
            isinstance(dims_mapping, (list, tuple))
            and all(isinstance(x, int) and x >= -1 for x in dims_mapping)
        ):
            raise ValueError(
                "The dims_mapping must be list or tuple and item in dims_mapping must be >= -1, but got {}".format(
                    dims_mapping
                )
            )
        if not (
            isinstance(processes, (list, tuple))
            and all(isinstance(x, int) and x >= 0 for x in processes)
        ):
            raise ValueError(
                "The processes must be list or tuple and item in processes must be non-negative integer, but got {}".format(
                    processes
                )
            )
        if not (
            isinstance(topology, (list, tuple))
            and all(isinstance(x, int) and x > 0 for x in topology)
        ):
            raise ValueError(
                "The topology must be list or tuple and item in topology must be positive integer, but got {}".format(
                    topology
                )
            )
        if rank is not None and not (isinstance(rank, int) and rank >= 0):
            raise ValueError(f"The rank must be >= 0, but got {rank}")

        # # NOTE: Only support even sharding now
        # if shard_sizes is not None:
        #     raise ValueError("Only support even sharding now.")

    @staticmethod
    def get_local_sizes(
        global_sizes,
        dims_mapping,
        topology,
        processes,
        rank=None,
        shard_sizes=None,
    ):
        DistributedTensor._validate_sizes_and_dist_attr(
            global_sizes, dims_mapping, topology, processes, rank, shard_sizes
        )

        local_sizes = []
        # For even sharding, the local sizes of every rank are equal.

        for idx, item in enumerate(global_sizes):
            # Treat a missing entry as -1 (replicated) so an empty dims_mapping
            # is handled safely.
            val = dims_mapping[idx] if idx < len(dims_mapping) else -1
            if val == -1:
                local_sizes.append(item)
            else:
                local_sizes.append(item // topology[dims_mapping[idx]])

        return local_sizes
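
    # Illustrative example (a sketch with hypothetical shapes and mesh): on a
    # 2x2 process mesh, a global shape [8, 6] whose dim 0 is mapped to mesh
    # axis 0 and whose dim 1 is replicated yields a local shape of [8 // 2, 6]
    # on every rank:
    #
    #     DistributedTensor.get_local_sizes(
    #         global_sizes=[8, 6],
    #         dims_mapping=[0, -1],
    #         topology=[2, 2],
    #         processes=[0, 1, 2, 3],
    #     )  # -> [4, 6]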

    @staticmethod
    def get_local_offsets(
        global_sizes, dims_mapping, topology, processes, rank, shard_sizes=None
    ):
        local_sizes = DistributedTensor.get_local_sizes(
            global_sizes, dims_mapping, topology, processes, rank, shard_sizes
        )
        local_offsets = []
        rank_relative = processes.index(rank)
        coordinate = _linear_idx2coordinate(topology, rank_relative)

        for i in range(len(global_sizes)):
            if dims_mapping[i] == -1:
                local_offsets.append(0)
            else:
                local_offsets.append(
                    coordinate[dims_mapping[i]] * local_sizes[i]
                )
        return local_offsets
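
    # Illustrative example (a sketch, same hypothetical 2x2 mesh as above):
    # rank 3 is the last process, at mesh coordinate [1, 1], so its offset along
    # the sharded dim 0 is 1 * (8 // 2) = 4 and 0 along the replicated dim 1:
    #
    #     DistributedTensor.get_local_offsets(
    #         global_sizes=[8, 6],
    #         dims_mapping=[0, -1],
    #         topology=[2, 2],
    #         processes=[0, 1, 2, 3],
    #         rank=3,
    #     )  # -> [4, 0]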

    @staticmethod
    def get_global_sizes(
        local_sizes,
        dims_mapping,
        topology,
        processes,
        rank=None,
        shard_sizes=None,
    ):
        DistributedTensor._validate_sizes_and_dist_attr(
            local_sizes, dims_mapping, topology, processes, rank, shard_sizes
        )
        global_sizes = []
        for idx, item in enumerate(local_sizes):
            if dims_mapping[idx] == -1:
                global_sizes.append(item)
            else:
                global_sizes.append(item * topology[dims_mapping[idx]])
        return global_sizes
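
    # Illustrative example (a sketch): the inverse of get_local_sizes,
    # recovering the global shape from an evenly sharded local shape:
    #
    #     DistributedTensor.get_global_sizes(
    #         local_sizes=[4, 6],
    #         dims_mapping=[0, -1],
    #         topology=[2, 2],
    #         processes=[0, 1, 2, 3],
    #     )  # -> [8, 6]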

    @staticmethod
    def get_local_shard(
        global_sizes, dims_mapping, topology, processes, rank, shard_sizes=None
    ):
        local_offsets = DistributedTensor.get_local_offsets(
            global_sizes, dims_mapping, topology, processes, rank, shard_sizes
        )
        local_sizes = DistributedTensor.get_local_sizes(
            global_sizes, dims_mapping, topology, processes, rank, shard_sizes
        )
        assert len(local_sizes) == len(
            local_offsets
        ), "The length of local_sizes must be equal to that of local_offsets, but got {} and {}.".format(
            len(local_sizes), len(local_offsets)
        )

        local_end_offsets = [
            x[0] + x[1] for x in zip(local_offsets, local_sizes)
        ]
        local_shard = list(zip(local_offsets, local_end_offsets))
        return local_shard
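
    # Illustrative example (a sketch, same hypothetical mesh): the shard is the
    # half-open index range [offset, offset + local_size) per dimension, so
    # rank 3 owns rows 4..8 and all 6 columns of the [8, 6] tensor:
    #
    #     DistributedTensor.get_local_shard(
    #         global_sizes=[8, 6],
    #         dims_mapping=[0, -1],
    #         topology=[2, 2],
    #         processes=[0, 1, 2, 3],
    #         rank=3,
    #     )  # -> [(4, 8), (0, 6)]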

    def __init__(self, serial_tensor, dist_attr=None, dist_context=None):
        self._serial_tensor = serial_tensor
        if dist_attr is not None and isinstance(dist_attr, TensorDistAttr):
            # TODO: remove this deepcopy after we fix the issue
            self._dist_attr = copy.deepcopy(dist_attr)
            # self._dist_attr = dist_attr
            # TODO: Do we really need to write dist_attr back to serial_tensor?
            self._serial_tensor.dist_attr = dist_attr
        else:
            assert dist_attr is None, f"{dist_attr}"
            # Use the dist attr of serial_tensor to do the initialization
            self._dist_attr = self._serial_tensor.dist_attr

        self._batch_dim = 0
        self._local_offsets_map = {}
        self._local_shard_map = {}
        self._local_tensor_map = {}

        from .dist_context import get_default_distributed_context

        self._dist_context = (
            dist_context
            if dist_context is not None
            else get_default_distributed_context()
        )
        # TODO: Automatically add this tensor to the dist_context after
        # initialization; this will be adapted in the future.
        # self._dist_context.add_dist_tensor_for_program(self)

    @property
    def serial_tensor(self):
        return self._serial_tensor

    @property
    def dist_attr(self):
        return self._dist_attr

    @dist_attr.setter
    def dist_attr(self, dist_attr):
        self._dist_attr = dist_attr
        # TODO: Do we really need to write back dist_attr to serial_tensor?
        self._serial_tensor.dist_attr = dist_attr

    @property
    def dist_context(self):
        return self._dist_context

    # def _init_default_dist_attr(self):
    #     if self._dist_attr.dims_mapping is None:
    #         if self.serial_tensor.type in __no_shape_var_type__:
    #             tensor_shape = []
    #         else:
    #             tensor_shape = self._serial_tensor.shape
    #         tensor_dims_mapping = [-1 for _ in range(len(tensor_shape))]
    #         self._dist_attr.dims_mapping = tensor_dims_mapping

    def validate_dist_attr(self):
        if self.serial_tensor.type in __no_shape_var_type__:
            return True
        tensor_shape = self.serial_tensor.shape
        if len(tensor_shape) != len(self.dist_attr.dims_mapping):
            return False
        for i in range(len(self.dist_attr.dims_mapping)):
            if self.dist_attr.dims_mapping[
                i
            ] < -1 or self.dist_attr.dims_mapping[i] >= len(
                self.dist_attr.process_mesh.shape
            ):
                return False
        for i in range(len(self.dist_attr.process_mesh.shape)):
            if self.dist_attr.dims_mapping.count(i) > 1:
                return False
        return True
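
    # For example (a sketch): for a 2-D serial tensor on a process mesh of
    # shape [2, 4], dims_mapping [1, -1] is valid, while [0, 0] (mesh dim 0
    # reused) and [2, -1] (mesh dim index out of range) are not.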

    def local_sizes(self, rank=None):
        """Get local sizes of the given rank."""
        rank = paddle.distributed.get_rank() if rank is None else rank
        global_sizes = self.serial_tensor.shape
        dims_mapping = self.dist_attr.dims_mapping
        # shard_sizes = self.dist_attr.shard_sizes
        processes = self.dist_attr.process_mesh.process_ids
        topology = self.dist_attr.process_mesh.shape
        local_sizes = DistributedTensor.get_local_sizes(
            global_sizes, dims_mapping, topology, processes, rank
        )

        return local_sizes

    def local_offsets(self, rank=None):
        rank = paddle.distributed.get_rank() if rank is None else rank
        local_offsets = None
        if rank in self._local_offsets_map.keys():
            local_offsets = self._local_offsets_map[rank]
        else:
            global_sizes = self.serial_tensor.shape
            dims_mapping = self.dist_attr.dims_mapping
            # shard_sizes = self.dist_attr.shard_sizes
            processes = self.dist_attr.process_mesh.process_ids
            topology = self.dist_attr.process_mesh.shape
            local_offsets = DistributedTensor.get_local_offsets(
                global_sizes, dims_mapping, topology, processes, rank
            )
            self._local_offsets_map[rank] = local_offsets

        return local_offsets

    def global_sizes(self):
        return self.serial_tensor.shape

    def local_shard(self, rank=None):
        rank = paddle.distributed.get_rank() if rank is None else rank
        local_shard = None
        if rank in self._local_shard_map.keys():
            local_shard = self._local_shard_map[rank]
        else:
            global_sizes = self.serial_tensor.shape
            dims_mapping = self.dist_attr.dims_mapping
            # shard_sizes = self.dist_attr.shard_sizes
            processes = self.dist_attr.process_mesh.process_ids
            topology = self.dist_attr.process_mesh.shape
            local_shard = DistributedTensor.get_local_shard(
                global_sizes, dims_mapping, topology, processes, rank
            )
            self._local_shard_map[rank] = local_shard

        return local_shard

    def new_local_tensor(self, block=None, rank=None, name=None):
        """
        Create a new local tensor of serial tensor corresponding to rank.
        Args:
            block (Block): The block contains the new tensor. Default value is recommend and it will be created in the block of dist main program corresponding to the serial tensor block id. Default: None.
            rank (int): The rank id. Default value is recommend and it will be the current rank. Default: None.
        """

        def _copy_kwargs(serial_tensor):
            kwargs = {}
            no_need_copy_args = ["self", "block", "shape", "name"]
            arg_spec = inspect.getfullargspec(Variable.__init__)

            for key in arg_spec.args:
                # TODO: Check the copied attribute from serial tensor whether valid
                if key in no_need_copy_args:
                    continue
                elif key not in kwargs:
                    if key == "type":
                        kwargs[key] = serial_tensor.desc.type()
                    elif key == "dtype":
                        kwargs[key] = serial_tensor.desc.dtype()
                    elif key == "lod_level":
                        kwargs[key] = serial_tensor.desc.lod_level()
                    elif key == "persistable":
                        kwargs[key] = serial_tensor.desc.persistable()
                    elif key == "stop_gradient":
                        kwargs[key] = serial_tensor.desc.stop_gradient()
                    elif key == "need_check_feed":
                        kwargs[key] = serial_tensor.desc.need_check_feed()
                    # TODO: Get capacity by framework
                    elif key == "capacity":
                        continue
                    else:
                        kwargs[key] = self.serial_tensor.__dict__[key]

            if isinstance(serial_tensor, Parameter):
                kwargs["trainable"] = serial_tensor.trainable
                kwargs["optimize_attr"] = serial_tensor.trainable
                kwargs["regularizer"] = serial_tensor.regularizer
                kwargs["do_model_average"] = serial_tensor.do_model_average
                kwargs["need_clip"] = serial_tensor.need_clip
                kwargs["is_distributed"] = serial_tensor.is_distributed
                kwargs["is_parameter"] = serial_tensor.is_parameter

            return kwargs

        if rank is not None and not (isinstance(rank, int) and rank >= 0):
            raise ValueError(f"The rank must be >= 0, but got {rank}")
        if block is not None and not isinstance(block, Block):
            raise TypeError(f"The block must be a Block, but got {type(block)}.")
        rank = paddle.distributed.get_rank() if rank is None else rank

        if block is None:
            block_id = self.serial_tensor.block.idx
            block = self.dist_context.dist_main_programs[rank].block(block_id)

        # copy serial tensor attribute
        kwargs = _copy_kwargs(self.serial_tensor)
        kwargs["name"] = name
        kwargs["shape"] = self.local_sizes(rank)

        if isinstance(self.serial_tensor, Parameter):
            kwargs.pop("persistable")
            local_tensor = Parameter(block=block, **kwargs)
        else:
            local_tensor = block.create_var(**kwargs)

        # TODO: Set the original id once setting original_id is approved.
        local_tensor.desc.set_original_id(self.serial_tensor.desc.id())
        self._local_tensor_map[rank] = local_tensor
        return local_tensor
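
    # Usage sketch (hypothetical names; assumes the dist main program of the
    # given rank has already been built in the dist_context):
    #
    #     dist_tensor = DistributedTensor(serial_tensor, dist_attr)
    #     local_var = dist_tensor.new_local_tensor(
    #         rank=0, name="hypothetical_local_name"
    #     )
    #     # local_var has shape dist_tensor.local_sizes(0) and can be fetched
    #     # again later with dist_tensor.local_tensor(0).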

    def local_tensor(self, rank=None):
        rank = paddle.distributed.get_rank() if rank is None else rank
        assert (
            rank in self._local_tensor_map
        ), f"The local tensor of rank {rank} has not been created."
        return self._local_tensor_map[rank]

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # Keep references to the serial tensor and the created local tensors
            # instead of deep-copying them.
            if k == "_serial_tensor" or k == "_local_tensor_map":
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        return result

    def __str__(self):
        str = "{{tensor name: {}, tensor id: {}, tensor original_id {}".format(
            self.serial_tensor.desc.name(),
            self.serial_tensor.desc.id(),
            self.serial_tensor.desc.original_id(),
        )

        # str += ", {}".format(self.dist_attr)
        # return str

        if self.dist_attr.is_annotated("process_mesh"):
            annotated_str = "annotated"
        else:
            annotated_str = "non-annotated"
        str += ", process_mesh ({}): {}".format(
            annotated_str, self.dist_attr.process_mesh
        )

        str += f", is_parameter: {self.serial_tensor.is_parameter}"

        if self.dist_attr.is_annotated("dims_mapping"):
            annotated_str = "annotated"
        else:
            annotated_str = "non-annotated"
        str += ", dims_mapping ({}): {} }}".format(
            annotated_str, self.dist_attr.dims_mapping
        )

        # if self.dist_attr.is_annotated("shard_mask"):
        #     annotated_str = "annotated"
        # else:
        #     annotated_str = "non-annotated"
        # str += ", shard_mask ({}): {}".format(annotated_str, None)

        # if self.dist_attr.is_annotated("offload_device"):
        #     annotated_str = "annotated"
        # else:
        #     annotated_str = "non-annotated"
        # str += ", offload_device ({}): {} }}".format(annotated_str, None)
        return str