#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict

import paddle
from paddle.fluid import core
from .process_mesh import ProcessMesh
from .process_mesh import get_current_process_mesh
from .process_mesh import set_current_process_mesh
from .process_mesh import reset_current_process_mesh
from .dist_context import get_default_distributed_context
from .dist_tensor import DistributedTensor
from .dist_op import DistributedOperatorHelper
from .utils import verify_shard_spec, convert_to_dims_mapping


def shard_tensor(x, process_mesh=None, shard_spec=None):
    """
    Shard a tensor on a process mesh according to the shard specification.

    Args:
        x (Tensor): the tensor to be sharded.
        process_mesh (ProcessMesh, optional): An instance of ProcessMesh that describes the mesh
            topology of the logical processes across which the tensor is sharded. If it is None,
            the current process mesh in the context will be used, and an error will be raised if
            no current process mesh can be found. Default: None.
        shard_spec (list, optional): a list describing the sharding mapping between `x` and `process_mesh`,
            which means dimension `i` of `x` is split across dimension `shard_spec[i]` of `process_mesh`,
            where `None` means that the tensor dimension is not split. For example, given a tensor with
            the shape [6, 12] and a process mesh with the shape [2, 3] and the dimension names ["x", "y"]:
                If `shard_spec=["x", "y"]`, each shard of the tensor will have a shape [3, 4];
                If `shard_spec=["y", "x"]`, each shard of the tensor will have a shape [2, 6];
                If `shard_spec=["x", None]`, each shard of the tensor will have a shape [3, 12];
                If `shard_spec=[None, "x"]`, each shard of the tensor will have a shape [6, 6];
                If `shard_spec=["y", None]`, each shard of the tensor will have a shape [2, 12];
                If `shard_spec=[None, "y"]`, each shard of the tensor will have a shape [6, 4];
                If `shard_spec=[None, None]`, each shard of the tensor will have a shape [6, 12].
            If `shard_spec` is None, the tensor will be replicated across all the processes of `process_mesh`;
            in the above example, `shard_spec=None` is the same as `shard_spec=[None, None]`. Default: None.

    Returns:
        Tensor: the tensor `x` annotated with sharding information.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.distributed.fleet import auto

            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
            x = paddle.ones([4, 6])
            shard_spec = ["x", "y"]
            auto.shard_tensor(x, mesh, shard_spec)

    """

    if process_mesh is not None:
        assert isinstance(
            process_mesh, ProcessMesh
        ), "Argument process_mesh {} is not an instance of ProcessMesh".format(
            process_mesh
        )
    else:
        process_mesh = get_current_process_mesh()
        assert (
            process_mesh is not None
        ), "Specify the process mesh argument or use ProcessMesh context manager first."
    if shard_spec is not None:
        assert isinstance(
            shard_spec, list
        ), "Argument shard_spec {} is not an instance of list".format(shard_spec)
    if isinstance(x, str):
        x = paddle.fluid.default_main_program().global_block()._var_recursive(x)
    dist_tensor = DistributedTensor(x)
    serial_tensor = dist_tensor.serial_tensor
    dist_tensor.dist_attr.process_mesh = process_mesh
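    # Reader/tensor-array/scope variables have no static shape that can be split,
    # so verify the shard_spec against an empty shape instead.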
    if (
        serial_tensor.type == core.VarDesc.VarType.READER
        or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES
    ):
        tensor_shape = []
    else:
        tensor_shape = serial_tensor.shape
    if shard_spec is not None:
        assert verify_shard_spec(
            shard_spec, tensor_shape, process_mesh
        ), "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format(
            serial_tensor.name, shard_spec, tensor_shape, process_mesh
        )
        dist_tensor.dist_attr.dims_mapping = convert_to_dims_mapping(
            shard_spec, process_mesh
        )
    if process_mesh is not None:
        dist_tensor.dist_attr.mark_annotated("process_mesh")
    if shard_spec is not None:
        dist_tensor.dist_attr.mark_annotated("dims_mapping")
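    # Register the annotated tensor and its process mesh with the default
    # distributed context so that later passes can retrieve them.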
    default_dist_ctx = get_default_distributed_context()
    default_dist_ctx.add_dist_tensor_for_program(dist_tensor)
    dist_tensor = default_dist_ctx.get_dist_tensor_for_program(x)
    default_dist_ctx.add_process_mesh(process_mesh)
    return x


def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
    """
    Shard an operation on a process mesh according to its input and output shard specifications.

    Args:
        op (Callable): a callable operator or module to be sharded.
        process_mesh (ProcessMesh, optional): An instance of ProcessMesh that describes the mesh
            topology of the logical processes across which the op is sharded. All of its inputs and
            outputs are sharded by this process mesh. If it is None, the current process mesh in the
            context will be used, and an error will be raised if no current process mesh can be found.
            Default: None.
        in_shard_specs (list of list, optional): a list of lists describing the sharding specifications
            for the inputs. Each item of `in_shard_specs` is a `shard_spec` between the corresponding input
            and `process_mesh`. If one item is None, the corresponding input is replicated across all processes.
            If `in_shard_specs` itself is None, all inputs are replicated across all processes. Note that the
            length of `in_shard_specs` should be equal to the actual number of inputs when calling this
            operation. Default: None.
        out_shard_specs (list of list, optional): a list of lists describing the sharding specifications
            for the outputs. Each item of `out_shard_specs` is a `shard_spec` between the corresponding output
            and `process_mesh`. If one item is None, the corresponding output is replicated across all processes.
            If `out_shard_specs` itself is None, all outputs are replicated across all processes. Note that the
            length of `out_shard_specs` should be equal to the actual number of outputs when calling this
            operation. Default: None.

    Returns:
        Outputs of `op`, each of which is annotated with sharding information.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.distributed.fleet import auto

            x = paddle.ones([4, 6])
            y = paddle.zeros([4, 6])
            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
            dist_add = auto.shard_op(paddle.add,
                                     process_mesh=mesh,
                                     in_shard_specs=[["x", "y"], ["y", None]],
                                     out_shard_specs=[[None, "x"]])
            dist_add(x, y)

    """

    if process_mesh is not None:
        assert isinstance(
            process_mesh, ProcessMesh
        ), "Argument process_mesh {} is not an instance of ProcessMesh".format(
            process_mesh
        )
    else:
        process_mesh = get_current_process_mesh()
        assert (
            process_mesh is not None
        ), "Specify the process mesh argument or use ProcessMesh context manager first."
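    # Convert each per-input shard_spec into the dims_mapping form used by the
    # distributed attributes; a None spec keeps the corresponding input replicated.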
    in_dims_mappings = []
    if in_shard_specs is not None:
        assert all(
            (isinstance(shard_spec, list) or shard_spec is None)
            for shard_spec in in_shard_specs
        ), "in_shard_spec {} is not a list of list or None".format(
            in_shard_specs
        )
        for shard_spec in in_shard_specs:
            if shard_spec is not None:
                in_dims_mappings.append(
                    convert_to_dims_mapping(shard_spec, process_mesh)
                )
            else:
                in_dims_mappings.append(None)
    out_dims_mappings = []
    if out_shard_specs is not None:
        assert all(
            (isinstance(shard_spec, list) or shard_spec is None)
            for shard_spec in out_shard_specs
        ), "out_shard_spec {} is not a list of list or None".format(
            out_shard_specs
        )
        for shard_spec in out_shard_specs:
            if shard_spec is not None:
                out_dims_mappings.append(
                    convert_to_dims_mapping(shard_spec, process_mesh)
                )
            else:
                out_dims_mappings.append(None)
    op = DistributedOperatorHelper(
        op, process_mesh, in_dims_mappings, out_dims_mappings
    )
    return op


def recompute(op):
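    """
    Mark every operator produced by the wrapped callable `op` for recomputation by
    setting its `is_recompute@auto_parallel` attribute, so that the auto-parallel
    passes can recompute these ops in the backward pass instead of storing their
    activations.

    Examples:
        A minimal usage sketch, assuming `recompute` is exposed through
        `paddle.distributed.fleet.auto` like the other APIs in this file and the
        wrapped callable is invoked while building a static program.

        .. code-block:: python

            import paddle
            from paddle.distributed.fleet import auto

            paddle.enable_static()
            x = paddle.static.data(name="x", shape=[4, 6], dtype="float32")
            y = paddle.static.data(name="y", shape=[6, 4], dtype="float32")
            # every op added by this matmul call is tagged for recomputation
            out = auto.recompute(paddle.matmul)(x, y)
    """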
    class RecomputeOperator:
        def __init__(self, op):
            self._op = op

        def __call__(self, *args, **kwargs):
            default_prog = paddle.fluid.default_main_program()
            cur_block = default_prog.current_block()
            op_size = len(cur_block.ops)
            output = self._op(*args, **kwargs)
            new_op_size = len(cur_block.ops)

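            # The ops appended to the current block by the wrapped call, i.e. those
            # at indices [op_size, new_op_size), are the ones to mark for recompute.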
            for idx in range(op_size, new_op_size):
                op = cur_block.ops[idx]
                op._set_attr("is_recompute@auto_parallel", True)

            return output

    return RecomputeOperator(op)


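# Module-level registry of named collections; each collection maps its name to a
# list of (name, value) pairs collected via `add_to_collection`.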
_g_collections = {}


class CollectionNames(object):
    FETCHES = "fetches"
    LOGGING = "logging"


def get_collection(name):
    collection = _g_collections.get(name, None)
    if collection is None:
        collection = []
        _g_collections[name] = collection
    return _g_collections[name]


def add_to_collection(collection_name, value, name=None):
    if collection_name not in _g_collections:
        _g_collections[collection_name] = []
    # Adding a value that is already in the collection is a no-op.
    for _, v in _g_collections[collection_name]:
        if v == value:
            return
    _g_collections[collection_name].append((name, value))


def fetch(tensor, name=None, logging=False):
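    """
    Register a tensor (given as a `Variable` or its name) in the fetch collection
    so that its value can be fetched when the program runs, and optionally in the
    logging collection as well.

    Examples:
        A minimal usage sketch, assuming `fetch` is exposed through
        `paddle.distributed.fleet.auto` like the other APIs in this file and is
        called while building a static program.

        .. code-block:: python

            import paddle
            from paddle.distributed.fleet import auto

            paddle.enable_static()
            x = paddle.static.data(name="x", shape=[4, 6], dtype="float32")
            loss = paddle.mean(x)
            auto.fetch(loss, name="loss", logging=True)
    """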
    if isinstance(tensor, paddle.fluid.framework.Variable):
        tensor = tensor.name
    elif not isinstance(tensor, str):
        raise TypeError(
            "Only support fetching a `Variable` or a `str` (the `Variable`'s name), but got `{}`".format(
                type(tensor)
            )
        )
    add_to_collection(CollectionNames.FETCHES, tensor, name)
    if logging:
        add_to_collection(CollectionNames.LOGGING, tensor, name)