#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.fluid import core
from .process_mesh import ProcessMesh
from .process_mesh import get_current_process_mesh
from .dist_context import get_default_distributed_context
from .dist_tensor import DistributedTensor
from .dist_op import DistributedOperatorHelper
from .utils import verify_shard_spec, convert_to_dims_mapping


def shard_tensor(x, process_mesh=None, shard_spec=None):
    """
    Shard a tensor on a process mesh according to the shard specification.

    Args:
        x (Tensor): the tensor to be sharded.
        process_mesh (ProcessMesh, optional): An instance of ProcessMesh that describes the mesh
            topology of the logical processes across which the tensor is sharded. If it is None,
            the current process mesh in context will be used, and an error will be raised if no
            current process mesh can be found. Default: None.
        shard_spec (list, optional): a list that describes the sharding mapping between `x` and `process_mesh`,
            which means dimension `i` of `x` is split across dimension `shard_spec[i]` of `process_mesh`,
            where `None` means the corresponding tensor dimension is not split. For example, given a tensor with
            the shape [6, 12] and a process mesh with the shape [2, 3] and the dimension names ["x", "y"]:
                If `shard_spec=["x", "y"]`, each shard of the tensor will have a shape [3, 4];
                If `shard_spec=["y", "x"]`, each shard of the tensor will have a shape [2, 6];
                If `shard_spec=["x", None]`, each shard of the tensor will have a shape [3, 12];
                If `shard_spec=[None, "x"]`, each shard of the tensor will have a shape [6, 6];
                If `shard_spec=["y", None]`, each shard of the tensor will have a shape [2, 12];
                If `shard_spec=[None, "y"]`, each shard of the tensor will have a shape [6, 4];
                If `shard_spec=[None, None]`, each shard of the tensor will have a shape [6, 12].
            If `shard_spec` is None, the tensor will be replicated across all the processes of `process_mesh`;
            in the above example, `shard_spec=None` is the same as `shard_spec=[None, None]`. Default: None.

    Returns:
        Tensor: the tensor `x` annotated with sharding information.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.distributed.fleet import auto

            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
            x = paddle.ones([4, 6])
            shard_spec = ["x", "y"]
            auto.shard_tensor(x, mesh, shard_spec)

    """

    if process_mesh is not None:
        assert isinstance(
            process_mesh, ProcessMesh
        ), "Argument process_mesh {} is not an instance of ProcessMesh".format(
            process_mesh
        )
    else:
        process_mesh = get_current_process_mesh()
        assert (
            process_mesh is not None
        ), "Specify the process mesh argument or use ProcessMesh context manager first."
    if shard_spec is not None:
        assert isinstance(
            shard_spec, list
        ), "Argument shard_spec {} is not an instance of list".format(shard_spec)
    dist_tensor = DistributedTensor(x)
    serial_tensor = dist_tensor.serial_tensor
    dist_tensor.dist_attr.process_mesh = process_mesh
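    # Variables of these types carry no dense shape to split, so an empty shape
    # is used when verifying the shard_spec below.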
    if (
        serial_tensor.type == core.VarDesc.VarType.READER
        or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
        or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES
    ):
        tensor_shape = []
    else:
        tensor_shape = serial_tensor.shape
    if shard_spec is not None:
        assert verify_shard_spec(
            shard_spec, tensor_shape, process_mesh
        ), "For tensor {}, shard_spec {} is invalid with tensor_shape {} and process_mesh {}.".format(
            serial_tensor.name, shard_spec, tensor_shape, process_mesh
        )
        dist_tensor.dist_attr.dims_mapping = convert_to_dims_mapping(
            shard_spec, process_mesh
        )
    if process_mesh is not None:
        dist_tensor.dist_attr.mark_annotated("process_mesh")
    if shard_spec is not None:
        dist_tensor.dist_attr.mark_annotated("dims_mapping")
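    # Record the annotation in the default distributed context so that it is
    # associated with the serial program.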
    default_dist_ctx = get_default_distributed_context()
    default_dist_ctx.add_dist_tensor_for_program(dist_tensor)
    dist_tensor = default_dist_ctx.get_dist_tensor_for_program(x)
    return x


def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
    """
    Shard an operation on a process mesh according to its input and output shard specifications.

    Args:
        op (Callable): a callable operator or module to be sharded.
        process_mesh (ProcessMesh, optional): An instance of ProcessMesh that describes the mesh
            topology of the logical processes across which the op is sharded. All of its inputs and
            outputs are sharded by this process mesh. If it is None, the current process mesh in
            context will be used, and an error will be raised if no current process mesh can be found.
            Default: None.
        in_shard_specs (list of list, optional): a list of lists describing the sharding specifications
            for the inputs. Each item of `in_shard_specs` is a `shard_spec` between the corresponding input
            and `process_mesh`. If one item is None, the corresponding input is replicated across all processes.
            If `in_shard_specs` itself is None, all inputs are replicated across all processes. Note that the
            length of `in_shard_specs` should be equal to the actual number of inputs when calling this operation.
            Default: None.
        out_shard_specs (list of list, optional): a list of lists describing the sharding specifications
            for the outputs. Each item of `out_shard_specs` is a `shard_spec` between the corresponding output
            and `process_mesh`. If one item is None, the corresponding output is replicated across all processes.
            If `out_shard_specs` itself is None, all outputs are replicated across all processes. Note that the
            length of `out_shard_specs` should be equal to the actual number of outputs when calling this operation.
            Default: None.

    Returns:
        Outputs of `op`, each of which is annotated with sharding information.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.distributed.fleet import auto

            x = paddle.ones([4, 6])
            y = paddle.zeros([4, 6])
            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
            dist_add = auto.shard_op(paddle.add,
                                     mesh,
                                     in_shard_specs=[["x", "y"], ["y", None]],
                                     out_shard_specs=[[None, "x"]])
            dist_add(x, y)

    """

    if process_mesh is not None:
        assert isinstance(
            process_mesh, ProcessMesh
        ), "Argument process_mesh {} is not an instance of ProcessMesh".format(
            process_mesh
        )
    else:
        process_mesh = get_current_process_mesh()
        assert (
            process_mesh is not None
        ), "Specify the process mesh argument or use ProcessMesh context manager first."
    in_dims_mappings = []
    if in_shard_specs is not None:
        assert all(
            (isinstance(shard_spec, list) or shard_spec is None)
            for shard_spec in in_shard_specs
        ), "in_shard_spec {} is not a list of list or None".format(
            in_shard_specs
        )
        for shard_spec in in_shard_specs:
            if shard_spec is not None:
                in_dims_mappings.append(
                    convert_to_dims_mapping(shard_spec, process_mesh)
                )
            else:
                in_dims_mappings.append(None)
    out_dims_mappings = []
    if out_shard_specs is not None:
        assert all(
            (isinstance(shard_spec, list) or shard_spec is None)
            for shard_spec in out_shard_specs
        ), "out_shard_spec {} is not a list of list or None".format(
            out_shard_specs
        )
        for shard_spec in out_shard_specs:
            if shard_spec is not None:
                out_dims_mappings.append(
                    convert_to_dims_mapping(shard_spec, process_mesh)
                )
            else:
                out_dims_mappings.append(None)
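    # Wrap the callable so that the collected sharding annotations are attached
    # to the operators it creates when it is actually called.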
    op = DistributedOperatorHelper(
        op, process_mesh, in_dims_mappings, out_dims_mappings
    )
    return op


def recompute(op):
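    """
    Annotate the operators produced by a callable so that they are marked for
    recomputation by the auto parallel framework.

    Args:
        op (Callable): a callable operator or module. Every operator it adds to
            the current main program when called is annotated with the
            `is_recompute@auto_parallel` attribute.

    Returns:
        A wrapped callable that behaves like `op` and annotates the operators it creates.

    Examples:
        .. code-block:: python

            # An illustrative sketch; it assumes this interface is exposed as
            # `auto.recompute`, like the other interfaces in this file.
            import paddle
            from paddle.distributed.fleet import auto

            x = paddle.ones([4, 6])
            linear = paddle.nn.Linear(6, 6)
            y = auto.recompute(linear)(x)
    """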
    class RecomputeOperator:
        def __init__(self, op):
            self._op = op

        def __call__(self, *args, **kwargs):
            default_prog = paddle.fluid.default_main_program()
            cur_block = default_prog.current_block()
            op_size = len(cur_block.ops)
            output = self._op(*args, **kwargs)
            new_op_size = len(cur_block.ops)

            for idx in range(op_size, new_op_size):
                op = cur_block.ops[idx]
                op._set_attr("is_recompute@auto_parallel", True)

            return output

    return RecomputeOperator(op)


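# Global registry of named collections. Each collection stores (name, value)
# pairs added by `add_to_collection` and retrieved by `get_collection`.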
_g_collections = {}


class CollectionNames:
    FETCHES = "fetches"
    LOGGING = "logging"


def get_collection(name):
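    """Return the collection registered under `name`, creating an empty one if it does not exist."""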
    collection = _g_collections.get(name, None)
    if collection is None:
        collection = []
        _g_collections[name] = collection
    return _g_collections[name]


def add_to_collection(collection_name, value, name=None):
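    """Append a `(name, value)` pair to the named collection, skipping values that are already present."""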
    if collection_name not in _g_collections:
        _g_collections[collection_name] = []
    # Skip values that are already registered in this collection.
    for _, v in _g_collections[collection_name]:
        if v == value:
            return
    _g_collections[collection_name].append((name, value))


def fetch(tensor, name=None, logging=False):
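    """
    Add a tensor to the `CollectionNames.FETCHES` collection and, if `logging` is
    True, to the `CollectionNames.LOGGING` collection as well, so that it can be
    retrieved later through `get_collection`.

    Args:
        tensor (Tensor): the tensor to be fetched.
        name (str, optional): an optional name associated with the tensor. Default: None.
        logging (bool, optional): whether to also register the tensor for logging. Default: False.

    Examples:
        .. code-block:: python

            # An illustrative sketch; it assumes this interface is exposed as
            # `auto.fetch`, like the other interfaces in this file.
            import paddle
            from paddle.distributed.fleet import auto

            loss = paddle.ones([1])
            auto.fetch(loss, name="loss", logging=True)
    """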
    add_to_collection(CollectionNames.FETCHES, tensor, name)
    if logging:
        add_to_collection(CollectionNames.LOGGING, tensor, name)