# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.distributed.fleet.meta_optimizers.common import OpRole

from ..cost import (
    SoftmaxGradOpCost,
    SoftmaxOpCost,
    build_comp_costs_from_descs,
    build_comp_desc_from_dist_op,
    build_dp_costs,
)
from ..utils import compute_compatible_and_update_dim_mapping, is_dim_shard
from .common import (
    DistributedOperatorImpl,
    DistributedOperatorImplContainer,
    is_parameter_related,
    register_distributed_operator_impl,
    register_distributed_operator_impl_container,
)
from .dist_default import DistributedDefaultImpl0


class DistributedSoftmax(DistributedOperatorImplContainer):
    def __init__(self, op_type):
        super().__init__(op_type)


register_distributed_operator_impl_container(DistributedSoftmax("softmax"))


class DistributedSoftmaxImpl(DistributedOperatorImpl):
    def __init__(self, name):
        super().__init__(name)
        self._forward_implemented = False
        self._backward_implemented = False

    def calc_cost(self, op_role, dist_op, ctx, cluster):
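        # Pick the forward or backward cost model based on the op role.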
        cost = None
        if int(op_role) == int(OpRole.Backward):
            cost = self.calc_bwd_cost(dist_op, ctx, cluster)
        else:
            cost = self.calc_fwd_cost(dist_op, ctx, cluster)
        assert cost is not None
        return cost

    def calc_fwd_cost(self, dist_op, ctx, cluster):
        # calc comp op cost
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        processes = dist_op.dist_attr.process_mesh.process_ids
        cost_mapping = build_comp_costs_from_descs(
            SoftmaxOpCost, ctx, processes, desc_mapping, cluster
        )

        res_cost = [cost_mapping]
        return res_cost

    def calc_bwd_cost(self, dist_op, ctx, cluster):
        # calc comp op cost
        res = []
        desc_mapping = build_comp_desc_from_dist_op(
            dist_op=dist_op, dist_context=ctx
        )
        dist_attr = dist_op.dist_attr
        process_mesh = dist_attr.process_mesh
        processes = process_mesh.process_ids
        cost_mapping = build_comp_costs_from_descs(
            SoftmaxGradOpCost, ctx, processes, desc_mapping, cluster
        )
        res.append(cost_mapping)

        backward_op = dist_op.serial_op
        main_block = backward_op.block
        need_gradient_allreduce = False
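        # Walk the backward op's inputs; for parameter vars whose first
        # dimension is sharded over a mesh axis of size > 1, append the
        # data-parallel cost of synchronizing the corresponding @GRAD var.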
        for input_name in backward_op.desc.input_names():
            for varname in backward_op.desc.input(input_name):
                if "@GRAD" not in varname and is_parameter_related(
                    varname, main_block
                ):
                    # NOTE: query the dims_mapping of the backward op's own
                    # input var here, not that of the corresponding varname
                    # in the forward op.
                    var_dim_mapping = dist_attr.get_input_dims_mapping(varname)

                    mesh_shape = process_mesh.shape
                    batch_size_axis = var_dim_mapping[0]
                    if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
                        parallel_axis = batch_size_axis
                        attrs = {"use_calc_stream": True}
                        var_names = [varname + "@GRAD"]
                        build_dp_costs(
                            res,
                            dist_op,
                            ctx,
                            var_names,
                            attrs,
                            parallel_axis,
                            cluster,
                        )

        return res

    def is_input_compatible(self, dist_op):
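        # The input layout is compatible as long as the softmax axis itself
        # is not sharded.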
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        axis = op_desc.attr('axis')
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)

        # if axis != -1 and axis != len(x_dims_mapping) - 1:
        #     return False

        if is_dim_shard(x_dims_mapping[axis]):
            return False

        return True

    def is_output_compatible(self, dist_op):
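        # The output layout is compatible as long as the softmax axis itself
        # is not sharded.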
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        out_name = op_desc.output('Out')[0]
        axis = op_desc.attr('axis')
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        # if axis != -1 and axis != len(out_dims_mapping) - 1:
        #     return False

        if is_dim_shard(out_dims_mapping[axis]):
            return False

        return True

    def is_auto_compatible(self, dist_op):
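        # Both sides must be individually compatible and X/Out must share
        # the same dims_mapping.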
        if (not self.is_input_compatible(dist_op)) or (
            not self.is_output_compatible(dist_op)
        ):
            return False

        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        axis = op_desc.attr('axis')
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
        # if axis != -1 and axis != len(x_dims_mapping) - 1:
        #     return False

        if x_dims_mapping != out_dims_mapping:
            return False

        return True

    def update_dims_mapping(self, dist_op):
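        # Reconcile the dims_mapping of X and Out dimension by dimension and
        # report whether anything was updated.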
        changed = False
        op_desc = dist_op.serial_op.desc
        op_dist_attr = dist_op.dist_attr
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        for i in range(len(x_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [x_dims_mapping, out_dims_mapping], [i, i]
            )
            if dim_changed:
                changed = True

        if changed:
            op_dist_attr.set_input_dims_mapping(x_name, x_dims_mapping)
            op_dist_attr.set_output_dims_mapping(out_name, out_dims_mapping)

        return changed

    @staticmethod
    def forward(ctx, *args, **kwargs):
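        # No specialized distributed forward for softmax; reuse the default
        # implementation.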
        DistributedDefaultImpl0.forward(ctx, *args, **kwargs)

    @staticmethod
    def backward(ctx, *args, **kwargs):
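        # Likewise, the backward pass falls back to the default
        # implementation.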
        DistributedDefaultImpl0.backward(ctx, *args, **kwargs)


register_distributed_operator_impl(
    "softmax", DistributedSoftmaxImpl("replicate_last_axis")
)