# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
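"""Auto-parallel gradient merge pass.

Accumulates gradients over ``k_steps`` micro-batches and applies the
optimizer once every ``k_steps`` steps, emulating a ``k_steps``-times
larger batch size.
"""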

from typing import List, Tuple, Dict, Any

import paddle
from paddle.framework import core
from paddle.fluid import layers
from paddle.fluid.framework import device_guard
from .pass_base import PassBase, PassType, register_pass
from paddle.distributed.auto_parallel.utils import set_var_dist_attr, is_optimize_op, OpRole, OP_ROLE_KEY
from paddle.distributed.auto_parallel.utils import naive_set_dist_op_attr_for_program_by_mesh_and_mapping
from paddle.distributed.auto_parallel.process_group import get_world_process_group

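# dist attrs of the bookkeeping vars created by this pass are replicated
# over every rank in the world process group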
world_process_group = get_world_process_group()


def _remove_and_get_optimizer_op(main_program, dist_context):
    """Move all optimizer ops out of ``main_program``:

    1. create a temp block
    2. copy each optimizer op from the global block into the temp block
    3. delete the op from the global block and from ``dist_context``

    The copied op descs are returned so they can be re-appended inside the
    gradient-merge conditional block.
    """
    main_block = main_program.global_block()
    temp_block = main_program._create_block()
    removed_op_idx = []
    optimize_ops_desc = []
    for idx, op in enumerate(main_block.ops):
        if is_optimize_op(op):
            # append optimizer op to tmp block
            new_op_desc = temp_block.desc.append_op()
            new_op_desc.copy_from(op.desc)
            optimize_ops_desc.append(new_op_desc)
            removed_op_idx.append(idx)

            # del op from dist_context
            if dist_context:
                dist_context.del_dist_op_for_program(op)

    for idx in removed_op_idx[::-1]:
        main_block._remove_op(idx, sync=False)
    main_block._sync_with_cpp()

    return optimize_ops_desc


def _get_gm_cond_var(main_program, k_steps, dist_context):
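    """Create the counter and condition vars for gradient merge.

    ``gradient_merge_step`` is incremented every iteration and taken modulo
    ``k_steps``; ``gradient_merge_cond`` becomes True exactly once every
    ``k_steps`` steps and later gates the optimizer block.
    """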
    main_block = main_program.global_block()
    # Add const vars: gradient_merge_k and gradient_merge_zero
    k_step_var = layers.create_global_var(name="gradient_merge_k",
                                          shape=[1],
                                          value=int(k_steps),
                                          dtype='int32',
                                          persistable=True,
                                          force_cpu=True)
    set_var_dist_attr(dist_context, k_step_var, [-1], world_process_group.ranks)

    zero_var = layers.create_global_var(name="gradient_merge_zero",
                                        shape=[1],
                                        value=int(0),
                                        dtype='int32',
                                        persistable=True,
                                        force_cpu=True)
    set_var_dist_attr(dist_context, zero_var, [-1], world_process_group.ranks)

    # Add step var & cond var
    step_var = layers.create_global_var(name="gradient_merge_step",
                                        shape=[1],
                                        value=int(0),
                                        dtype='int32',
                                        persistable=True,
                                        force_cpu=True)
    set_var_dist_attr(dist_context, step_var, [-1], world_process_group.ranks)

    cond_var = main_block.create_var(name="gradient_merge_cond",
                                     shape=[1],
                                     dtype='bool')
    set_var_dist_attr(dist_context, cond_var, [-1], world_process_group.ranks)

    with device_guard("cpu"):
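        # the counter vars are created with force_cpu=True, so their update
        # ops are pinned to the CPU as well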
        # step_var += 1
        increment_op = main_block.append_op(type='increment',
                                            inputs={'X': [step_var]},
                                            outputs={'Out': [step_var]},
                                            attrs={
                                                'step': float(1.0),
                                                OP_ROLE_KEY: OpRole.Backward
                                            })
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            increment_op, world_process_group.ranks, [-1], dist_context)
        # step_var %= k_steps
        elementwise_mod_op = main_block.append_op(type='elementwise_mod',
                                                  inputs={
                                                      'X': step_var,
                                                      'Y': k_step_var
                                                  },
                                                  outputs={'Out': step_var},
                                                  attrs={
                                                      'axis': -1,
                                                      'use_mkldnn': False,
                                                      OP_ROLE_KEY:
                                                      OpRole.Backward
                                                  })
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            elementwise_mod_op, world_process_group.ranks, [-1], dist_context)
        # cond_var = (step_var == 0)
        equal_op = main_block.append_op(type='equal',
                                        inputs={
                                            'X': step_var,
                                            'Y': zero_var
                                        },
                                        outputs={'Out': cond_var},
                                        attrs={OP_ROLE_KEY: OpRole.Backward})
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            equal_op, world_process_group.ranks, [-1], dist_context)

    return cond_var


def _append_gradient_merge_backward_op(
        main_program, startup_program, params_grads: List[Tuple[Any, Any]],
        dist_context) -> Tuple[List[Tuple[Any, Any]], Dict[str, Any]]:
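    """For every (param, grad) pair, create a persistable
    ``<param>@GRAD@GradientMerge`` var, zero-initialize it in
    ``startup_program``, and accumulate the grad into it after each
    backward pass.

    Returns ``new_params_to_grads`` ([param, merged_grad] pairs) and
    ``grad_to_gradient_merge`` ({grad name: merged grad name}).
    """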
    main_block = main_program.global_block()
    startup_block = startup_program.global_block()

    # step1: check params_grads; SELECTED_ROWS grads are not supported
    for param, grad in params_grads:
        assert (
            param.type != core.VarDesc.VarType.SELECTED_ROWS
        ), "SELECTED_ROWS is not supported in GradientMergeOptimizer for now"

    # {grad.name: gradient_merge_var.name} to rename opt inputs
    grad_to_gradient_merge = {}
    # {param: gradient_merge_var} to insert scale op and fill_constant op
    new_params_to_grads = []
    # step2: create gradient_merge var and init with 0
    for param, grad in params_grads:
        param_name = param.name
        param_var = main_block.var(param_name)
        assert param_var is not None
        ref_dist_attr = dist_context.get_tensor_dist_attr_for_program(param_var)
        assert ref_dist_attr is not None
        gradient_merge_var = main_block.create_var(name=param_name +
                                                   "@GRAD@GradientMerge",
                                                   shape=param_var.shape,
                                                   dtype=param_var.dtype,
                                                   persistable=True)
        ref_process_mesh = ref_dist_attr.process_mesh
        ref_dims_mapping = ref_dist_attr.dims_mapping

        set_var_dist_attr(dist_context, gradient_merge_var, ref_dims_mapping,
                          ref_process_mesh)

        startup_gradient_merge_var = startup_block.create_var(
            name=param_name + "@GRAD@GradientMerge",
            shape=param_var.shape,
            dtype=param_var.dtype,
            persistable=True)
        startup_block.append_op(type="fill_constant",
                                outputs={"Out": startup_gradient_merge_var},
                                attrs={
                                    "shape": param_var.shape,
                                    "dtype": param_var.dtype,
                                    "value": float(0),
                                })

        # grad_merge += grad
        new_grad_op = main_block.append_op(type="elementwise_add",
                                           inputs={
                                               'X': grad,
                                               'Y': gradient_merge_var
                                           },
                                           outputs={'Out': gradient_merge_var},
                                           attrs={
                                               'axis': -1,
                                               'use_mkldnn': False,
                                               OP_ROLE_KEY: OpRole.Backward
                                           })
        new_params_to_grads.append([param, gradient_merge_var])
        grad_to_gradient_merge[grad.name] = gradient_merge_var.name
        naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
            new_grad_op, ref_process_mesh, ref_dims_mapping, dist_context)
    return new_params_to_grads, grad_to_gradient_merge


def _create_cond_block_and_update_optimizer(
        main_program, cond_var, new_params_to_grads: List[Tuple[Any, Any]],
        grad_to_gradient_merge: Dict[str, str], optimize_ops_desc: List[Any],
        k_steps, avg):
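    """Append a conditional block that fires when ``cond_var`` is True:
    optionally rescale the merged grads by ``1 / k_steps``, run the saved
    optimizer ops against the merged grads, then reset the merged grads to
    zero for the next accumulation round.
    """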

    def true_apply_gradient():
        cur_block_idx = main_program.current_block_idx
        cur_block = main_program.current_block()

        # cur_block's forward_block & backward_block is itself
        cur_block._set_forward_block_idx(cur_block_idx)
        op_maker = core.op_proto_and_checker_maker
        if avg:
            for param, new_grad in new_params_to_grads:
                # grad /= k_steps
                cur_block.append_op(type='scale',
                                    inputs={'X': new_grad},
                                    outputs={'Out': new_grad},
                                    attrs={
                                        'scale': 1.0 / k_steps,
                                        'bias': 0.0,
                                        'bias_after_scale': False
                                    })
                new_grad.op._set_attr(OP_ROLE_KEY, OpRole.Optimize)

        # append optimizer ops
        for op_desc in optimize_ops_desc:
            new_op_desc = cur_block.desc.append_op()
            new_op_desc.copy_from(op_desc)

            # update inputs/outputs to reference the gradient-merge vars
            for input_name in new_op_desc.input_arg_names():
                if input_name in grad_to_gradient_merge:
                    new_op_desc._rename_input(
                        input_name, grad_to_gradient_merge[input_name])

            for output_name in new_op_desc.output_arg_names():
                if output_name in grad_to_gradient_merge:
                    new_op_desc._rename_output(
                        output_name, grad_to_gradient_merge[output_name])

            # remove op_role_var
            if new_op_desc.has_attr(op_maker.kOpRoleVarAttrName()):
                new_op_desc.remove_attr(op_maker.kOpRoleVarAttrName())

            # redirect the optimizer's Grad input to the merged grad var
            if core.grad_var_suffix() in new_op_desc.input_arg_names():
                grad_value = new_op_desc.input("Grad")[0]
                # TODO FIXME(xym) support fp16
                grad_merge_value = grad_value + '@GradientMerge'
                new_op_desc.set_input("Grad", [grad_merge_value])

        main_program.global_block()._sync_with_cpp()
        cur_block._sync_with_cpp()

        # clear gradient_merge_vars for the next accumulation round
        for param, new_grad in new_params_to_grads:
            layers.fill_constant(shape=new_grad.shape,
                                 dtype=new_grad.dtype,
                                 value=0.0,
                                 out=new_grad)
            new_grad.op._set_attr(OP_ROLE_KEY, op_maker.OpRole.Optimize)

    layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None)
    cond_op = main_program.global_block().ops[-1]
    cond_op._set_attr(OP_ROLE_KEY, OpRole.Optimize)


def parse_program(main_program, startup_program, params_grads, k_steps, avg,
                  dist_context):
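    """Rewrite ``main_program`` in place to perform gradient merge."""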
    # 1 remove optimizer_op from main_program
    optimize_ops_desc = _remove_and_get_optimizer_op(main_program, dist_context)

    # back to block 0
    main_program._rollback()

    # 2 append gradient merge backward op to main_program
    new_params_to_grads, grad_to_gradient_merge = _append_gradient_merge_backward_op(
        main_program, startup_program, params_grads, dist_context)

    # 3 create gradient_merge_cond
    cond_var = _get_gm_cond_var(main_program, k_steps, dist_context)

    # 4 create ConditionalBlock and append gradient merge optimizer ops
    _create_cond_block_and_update_optimizer(main_program, cond_var,
                                            new_params_to_grads,
                                            grad_to_gradient_merge,
                                            optimize_ops_desc, k_steps, avg)


@register_pass("auto_parallel_gradient_merge_pass")
class GradientMergePass(PassBase):
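    """Accumulate gradients over ``k_steps`` micro-batches and apply the
    optimizer once every ``k_steps`` steps, optionally averaging the merged
    gradients first.

    A minimal usage sketch (assuming the usual ``new_pass``/``apply`` flow
    from ``paddle.distributed.passes``; the surrounding variables are
    illustrative)::

        gm_pass = new_pass("auto_parallel_gradient_merge_pass",
                           {"k_steps": 4, "avg": True,
                            "dist_context": dist_context,
                            "params_grads": params_grads})
        gm_pass.apply([main_program], [startup_program], pass_context)
    """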
    def __init__(self):
        super(GradientMergePass, self).__init__()
        self.set_attr("k_steps", -1)
        self.set_attr("avg", True)

    def _check_self(self):
        if self.get_attr("k_steps") < 1:
            return False
        return True

    def _check_conflict(self, other_pass):
        return True

    def _type(self):
        return PassType.COMM_OPT

    def _apply_single_impl(self, main_program, startup_program, context):
        k_steps = self.get_attr("k_steps", -1)
        avg = self.get_attr("avg", False)
        dist_context = self.get_attr("dist_context")
        params_grads = self.get_attr("params_grads")
        with paddle.static.program_guard(main_program, startup_program):
            parse_program(main_program, startup_program, params_grads, k_steps,
                          avg, dist_context)

        main_program._sync_with_cpp()