# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import Program, Parameter

from .operators.common import get_distributed_operator_impl_container
from .operators.common import BACKWARD_ONLY_DIST_OPS
from .dist_context import DistributedContext, DistributedOperatorContext
from .dist_attribute import OperatorDistributedAttribute
from .process_group import new_process_group
from .utils import set_dist_op_desc_original_id
from .utils import print_program_with_dist_attr, is_forward_op, is_backward_op

__varname_not_in_block__ = ["lod_tensor_blocking_queue_0"]


class Partitioner(object):
    """
    warning:: Partitioner is experimental and subject to change.

    Partitioner convert a program into another program.
    Given a serial program which has been auto completed with shard annotation, the Partitioner 
    convert the serial program into a "distributed" program. The Partitioner will  modify the serial
    program in following two ways, which is also the major difference between serial and distributed program:
        1. partition op: replace a serial op into its corresponding dist op infered from the shard annotation
        2. partition var: if a var is sharded, modify the shape of var according to its shard annotation

    Partitioner is supposed to be call by the auto parallel framework, and not supposed to be directly called by user.
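
    Example (a minimal sketch, not the framework's actual entry point; the
    serial programs, their annotations, and ``params_grads`` are assumed to
    be prepared beforehand, and the variable names below are illustrative):

    .. code-block:: python

        from paddle.distributed.auto_parallel.dist_context import DistributedContext
        from paddle.distributed.auto_parallel.partitioner import Partitioner

        dist_context = DistributedContext()
        # ... build and annotate serial_main_prog / serial_startup_prog,
        # filling dist_context with tensor and op dist attributes ...
        partitioner = Partitioner(dist_context, rank_id=0)
        dist_main, dist_startup, dist_params_grads = partitioner.partition(
            serial_main_prog, serial_startup_prog, params_grads)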
    """

    def __init__(self, dist_context, rank_id=0):
        """
        Args:
            dist_context (DistributedContext): used to access the distributed_attr of vars & ops. Every Partitioner object may maintain its own DistributedContext member and partition the program based on that shard scenario.
            rank_id (int): global rank id to which the partitioned distributed program belongs.
        """
        if not isinstance(dist_context, DistributedContext):
            raise TypeError(
                "dist_context must be an instance of DistributedContext, but got %s"
                % type(dist_context))

        self._dist_context = dist_context
        self._rank_id = rank_id
        self._serial2dist_varname_mapping = {}
        self._dist_varname_suffix = ""

    def partition(self, serial_main_program, serial_startup_program,
                  params_grads):
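        """
        Partition the annotated serial programs for ``self._rank_id``.

        Returns a 3-tuple: (partitioned main program, partitioned startup
        program or None, partitioned (param, grad) pairs).
        """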
        if not isinstance(serial_main_program, Program):
            raise TypeError(
                "serial_main_program must be an instance of Program, but got %s"
                % type(serial_main_program))

        # check that the shard-annotated serial program is valid
        if not self._is_valid_annotated_program(serial_main_program):
            raise RuntimeError(
                "Not all vars or ops are annotated in the main program!")

        # init dist op helper
        dist_op_context = self._dist_context.dist_op_context
        dist_op_context.set_varname_mapping(self._serial2dist_varname_mapping)
        dist_op_context.set_rank_id(self._rank_id)

        # partition startup program
        if serial_startup_program is None:
            partitioned_startup_prog = None
        else:
            partitioned_startup_prog = self.partition_startup_program(
                serial_main_program, serial_startup_program)
        dist_op_context.set_dst_startup_program(partitioned_startup_prog)

        # partition main program
        partitioned_main_prog, partitioned_params_grads = self.partition_main_program(
            serial_main_program, params_grads)

        return partitioned_main_prog, partitioned_startup_prog, partitioned_params_grads

    def partition_startup_program(self, serial_main_program,
                                  serial_startup_program):
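        """
        Build the partitioned startup program: re-create every persistable
        var with its partitioned shape and copy its initializer op from the
        serial startup program, updating the op's shape attr and dist attr.
        """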
        if not isinstance(serial_startup_program, Program):
            raise TypeError(
                "serial_startup_program must be an instance of Program, but got %s"
                % type(serial_startup_program))

        partitioned_startup_prog = fluid.Program()
        ref_block = serial_main_program.global_block()
        target_block = partitioned_startup_prog.global_block()
        var2shape = {}
        temp_varname_map = {}

        # tensors
        for var in serial_startup_program.list_vars():
            assert var.persistable
            new_name = var.name + self._dist_varname_suffix
            temp_varname_map[var.name] = new_name
            target_shape = _partition_var(self._dist_context, ref_block,
                                          target_block, var.name, new_name)
            var2shape[new_name] = target_shape

        # ops
        for op in serial_startup_program.global_block().ops:
            # TODO: vars that do not belong to this rank should be filtered
            output_vars = op.desc.output_arg_names()
            assert len(
                output_vars
            ) == 1, "initializer should output only ONE variable, but got [{}]".format(
                str(op.desc))
            assert temp_varname_map[output_vars[
                0]] in var2shape, "try to initialize [{}] which is not a persistable var".format(
                    output_vars[0])
            new_op_desc = target_block.desc.append_op()
            new_op_desc.copy_from(op.desc)
            new_op_desc._rename_output(output_vars[0],
                                       temp_varname_map[output_vars[0]])
            new_op_desc._set_attr("shape",
                                  var2shape[temp_varname_map[output_vars[0]]])
            target_block._sync_with_cpp()

            # set distributed attribute
            new_op = target_block.ops[-1]
            assert new_op.type == new_op_desc.type()
            assert new_op.desc == new_op_desc
            output_var = target_block.var(output_vars[0])
            output_var_attr = self._dist_context.get_tensor_dist_attr_for_program(
                output_var)
            op_attr = OperatorDistributedAttribute()
            op_attr.process_mesh = output_var_attr.process_mesh
            op_attr.set_output_dims_mapping(output_var.name,
                                            output_var_attr.dims_mapping)
            op_attr.set_input_dims_mapping(output_var.name,
                                           output_var_attr.dims_mapping)
            self._dist_context.set_op_dist_attr_for_program(new_op, op_attr)

        return partitioned_startup_prog

    def partition_main_program(self, serial_main_program, params_and_grads):
        """
        1. partition variables
        2. replace each serial op with its corresponding dist op
        """

        dist_op_context = self._dist_context.dist_op_context
        partitioned_main_prog = fluid.Program()
        dist_op_context.set_dst_main_program(partitioned_main_prog)
        target_block = partitioned_main_prog.global_block()
        ref_block = serial_main_program.global_block()
        serial_ops = serial_main_program.global_block().ops

        # map each forward op's desc id to the op itself; used later to
        # resolve the dist impl for the corresponding backward ops
        forward_op_id2forward_op = {}
        for idx in range(len(serial_ops)):
            if is_forward_op(serial_ops[idx]):
                forward_op_id2forward_op[serial_ops[idx].desc.id(
                )] = serial_ops[idx]

        # partition
        for op in serial_ops:

            # partition input variables
            for serial_input_varname in op.desc.input_arg_names():
                if serial_input_varname not in self._serial2dist_varname_mapping:
                    new_varname = serial_input_varname + self._dist_varname_suffix
                    if ref_block.has_var(serial_input_varname):
                        _partition_var(self._dist_context, ref_block,
                                       target_block, serial_input_varname,
                                       new_varname)
                    else:
                        assert serial_input_varname in __varname_not_in_block__

                    self._serial2dist_varname_mapping[
                        serial_input_varname] = new_varname

            # partition output variables
            for serial_output_varname in op.desc.output_arg_names():
                if serial_output_varname not in self._serial2dist_varname_mapping:
                    new_varname = serial_output_varname + self._dist_varname_suffix
                    _partition_var(self._dist_context, ref_block, target_block,
                                   serial_output_varname, new_varname)
                    self._serial2dist_varname_mapping[
                        serial_output_varname] = new_varname

            # partition op
            op_dist_attr = self._dist_context.get_op_dist_attr_for_program(op)
            if is_forward_op(op) or op_dist_attr.is_recompute:
                kinputs, koutputs = dist_op_context.prepare_context(op)
                dist_op_forward_impl = _get_dist_op_forward_implement(
                    op, self._dist_context)
                dist_op_forward_impl.forward(self._dist_context, **kinputs,
                                             **koutputs)

            elif is_backward_op(op):
                kinputs, koutputs = dist_op_context.prepare_context(op)
                dist_op_backward_impl = _get_dist_op_backward_implement(
                    op, self._dist_context, forward_op_id2forward_op)
                dist_op_backward_impl.backward(self._dist_context, **kinputs,
                                               **koutputs)
            else:
                raise NotImplementedError(
                    "partitioner only supports forward and backward ops, but got {}".
                    format(str(op)))

        partitioned_params_and_grads = []
        for p, g in params_and_grads:
            assert p.name in self._serial2dist_varname_mapping
            dist_p_name = self._serial2dist_varname_mapping[p.name]
            assert target_block.has_var(dist_p_name)
            dist_p = target_block.var(dist_p_name)
            if g is None:
                dist_g = None
            else:
                assert g.name in self._serial2dist_varname_mapping
                dist_g_name = self._serial2dist_varname_mapping[g.name]
                assert target_block.has_var(dist_g_name)
                dist_g = target_block.var(dist_g_name)
            partitioned_params_and_grads.append((dist_p, dist_g))

        return partitioned_main_prog, partitioned_params_and_grads

    def _is_valid_annotated_program(self, program):

        # TODO (ZJ-LIANG) should check all blocks
        ops = program.global_block().ops
        vars_ = program.list_vars()
        op_dist_attrs = [
            self._dist_context.get_op_dist_attr_for_program(op) for op in ops
        ]
        var_dist_attrs = [
            self._dist_context.get_tensor_dist_attr_for_program(var)
            for var in vars_
        ]

        all_ops_annotated = all(dist_attr is not None
                                for dist_attr in op_dist_attrs)
        all_vars_annotated = all(dist_attr is not None
                                 for dist_attr in var_dist_attrs)

        return all_ops_annotated and all_vars_annotated


def _get_dist_shape(var, dist_attr):
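    """
    Compute the local (per-rank) shape of ``var`` from its dims_mapping and
    the process mesh topology. Worked example (hypothetical values): a var of
    shape [6, 4] with dims_mapping [0, -1] on a mesh of topology [2] yields
    the local shape [3, 4]; dims mapped to -1 are replicated, not split.
    """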

    var_shape = var.shape
    mapping = dist_attr.dims_mapping
    mesh = dist_attr.process_mesh.topology
    assert len(var_shape) == len(
        mapping
    ), "variable shape [{}] and dims_mapping [{}] do NOT match!".format(
        var_shape, mapping)
    new_shape = []
    for idx in range(len(var_shape)):
        if var_shape[idx] == -1 or mapping[idx] == -1:
            new_shape.append(var_shape[idx])
        else:
            assert var_shape[idx] % mesh[mapping[
                idx]] == 0, "uneven partition: var_shape[idx]=[{}], mesh[{}]".format(
                    var_shape[idx], mesh[mapping[idx]])
            new_shape.append(var_shape[idx] // mesh[mapping[idx]])

    return new_shape


def _partition_parameter(dist_context, src_var, dst_block, dst_varname,
                         dst_shape):
    # NOTE: hack to copy a Parameter; the new parameter is not initialized
    # here and is initialized by the partitioned startup program instead
    copied_kwargs = {}
    copied_kwargs['trainable'] = src_var.trainable
    copied_kwargs['optimize_attr'] = src_var.optimize_attr
    copied_kwargs['regularizer'] = src_var.regularizer
    copied_kwargs['do_model_average'] = src_var.do_model_average
    copied_kwargs['need_clip'] = src_var.need_clip

    param = Parameter(
        block=dst_block,
        type=src_var.type,
        name=dst_varname,
        shape=dst_shape,
        dtype=src_var.dtype,
        lod_level=src_var.lod_level,
        error_clip=src_var.error_clip,
        stop_gradient=src_var.stop_gradient,
        is_data=src_var.is_data,
        belong_to_optimizer=src_var.belong_to_optimizer,
        **copied_kwargs)

    # set dist attr uid
    # distributed_attr_uid = src_var.desc.get_distributed_attr_uid()
    # param.desc.set_distributed_attr_uid(distributed_attr_uid)
    dist_attr = copy.deepcopy(
        dist_context.get_tensor_dist_attr_for_program(src_var))
    assert dist_attr is not None
    dist_context.set_tensor_dist_attr_for_program(param, dist_attr)


def _partition_intermediate_var(dist_context, src_var, dst_block, dst_varname,
                                dst_shape):
    var = dst_block.create_var(
        type=src_var.type,
        name=dst_varname,
        shape=dst_shape,
        dtype=src_var.dtype,
        lod_level=src_var.lod_level,
        persistable=src_var.persistable,
        error_clip=src_var.error_clip,
        stop_gradient=src_var.stop_gradient,
        is_data=src_var.is_data,
        belong_to_optimizer=src_var.belong_to_optimizer)

    # set dist attr uid
    # distributed_attr_uid = src_var.desc.get_distributed_attr_uid()
    # var.desc.set_distributed_attr_uid(distributed_attr_uid)
    dist_attr = copy.deepcopy(
        dist_context.get_tensor_dist_attr_for_program(src_var))
    assert dist_attr is not None
    dist_context.set_tensor_dist_attr_for_program(var, dist_attr)


def _partition_var(dist_context, src_block, dst_block, src_varname,
                   dst_varname):
    """
    partition = split (sharded dims) + replicate (replicated dims)
    """
    src_var = src_block.var(src_varname)

    if src_var.type == core.VarDesc.VarType.READER:
        dst_block.create_var(
            type=src_var.type,
            name=dst_varname,
            persistable=True,
            stop_gradient=True)
        target_shape = None
    else:
        dist_attr = dist_context.get_tensor_dist_attr_for_program(src_var)
        target_shape = _get_dist_shape(src_var, dist_attr)

        if isinstance(src_var, Parameter):
            _partition_parameter(dist_context, src_var, dst_block, dst_varname,
                                 target_shape)
        else:
            _partition_intermediate_var(dist_context, src_var, dst_block,
                                        dst_varname, target_shape)
    return target_shape


def _get_dist_op_backward_implement(backward_op, dist_context,
                                    forward_op_id2forward_op):
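    """
    Resolve the dist op impl for a backward op: prefer the backward impl
    registered for its forward op, then BACKWARD_ONLY_DIST_OPS, and fall
    back to the default dist op impl.
    """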
    dist_op_context = dist_context.dist_op_context
    if backward_op.desc.id() in dist_op_context.grad_op_id_to_op_id:
        forward_op_id = dist_op_context.grad_op_id_to_op_id[backward_op.desc.id(
        )]
        forward_op = forward_op_id2forward_op[forward_op_id]
        forward_op_dist_attr = dist_context.get_op_dist_attr_for_program(
            forward_op)
        dist_op = get_distributed_operator_impl_container(forward_op.type)

        # TODO backward should have its own impl_idx
        if dist_op and forward_op_dist_attr.impl_idx >= 0 and dist_op.get_impl(
                forward_op_dist_attr.impl_idx)._backward_implemented:
            return dist_op.get_impl(forward_op_dist_attr.impl_idx)

    # NOTE trick for dist ops that only have a backward implementation
    if backward_op.type in BACKWARD_ONLY_DIST_OPS:
        op_dist_attr = dist_context.get_op_dist_attr_for_program(backward_op)
        dist_op = get_distributed_operator_impl_container(backward_op.type)
        if dist_op and op_dist_attr.impl_idx >= 0:
            return dist_op.get_impl(op_dist_attr.impl_idx)

    dist_op = get_distributed_operator_impl_container("default")
    return dist_op.get_impl(0)


def _get_dist_op_forward_implement(forward_op, dist_context):
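    """
    Resolve the dist op impl for a forward op from its dist attr's impl_idx,
    falling back to the default dist op impl when none is applicable.
    """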
    dist_attr = dist_context.get_op_dist_attr_for_program(forward_op)
    dist_op = get_distributed_operator_impl_container(forward_op.type)

    if dist_op and dist_attr.impl_idx >= 0 and dist_op.get_impl(
            dist_attr.impl_idx)._forward_implemented:
        return dist_op.get_impl(dist_attr.impl_idx)
    else:
        dist_op = get_distributed_operator_impl_container("default")
        return dist_op.get_impl(0)