#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .proto import framework_pb2

from paddle.fluid import framework as framework
from paddle.fluid import program_guard
from . import core
import collections
import copy
import logging
from . import unique_name
from . import log_helper
import paddle.fluid
from .data_feeder import check_type
import warnings

from collections.abc import Sequence

__all__ = [
    'append_backward',
    'gradients',
]

_logger = log_helper.get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'
)


class ProgramStats:
    def __init__(self, block, ops):
        self.block = block
        self.ops = ops
        self.op_deps = {}  # op-> in_ops, out_ops
        self.var_op_deps = {}  # var as input op, var as output op

    def get_input_nodes(self):
        input_names = []
        for name in self.var_op_deps:
            if (
                len(self.var_op_deps[name]["var_as_output_ops"]) == 0
                and len(self.var_op_deps[name]["var_as_input_ops"]) > 0
            ):
                if self.block.var(name).persistable:
                    continue
                input_names.append(name)
        for op in self.ops:
            if op.desc.type() == "read":
                input_names.extend(op.desc.output_arg_names())
        return input_names
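
    # Descriptive note: input nodes are the non-persistable vars that only ever
    # appear as op inputs (they are never produced inside this block), plus the
    # outputs of "read" ops; the recompute pass later keeps these in memory
    # together with the user-defined checkpoints.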

    def get_reserved_vars(self):
        var_name = []
        for op in self.ops:
            if op.desc.type() == "seed":
                var_name.extend(op.desc.output_arg_names())
        return var_name

    def get_out_of_subgraph_vars(self, begin_op_idx, end_op_idx):
        var_name = []
        for i in range(begin_op_idx, end_op_idx, 1):
            for name in self.ops[i].desc.output_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_input_ops"]:
                        if idx >= end_op_idx:
                            var_name.append(name)
            for name in self.ops[i].desc.input_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_output_ops"]:
                        if idx < begin_op_idx:
                            var_name.append(name)
        return var_name

    def is_subgraph(self, var_group1, var_group2):
        # should traverse from var_group1 to var_group2
        # max op idx in var_group2
        # min op idx in var_group1
        min_op_idx = len(self.ops)
        max_op_idx = -1
        for name in var_group1:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group2:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group1:
            op_idx = self.var_op_deps[name]["var_as_input_ops"]
            for idx in op_idx:
                min_op_idx = min(min_op_idx, idx)
        for name in var_group2:
            op_idx = self.var_op_deps[name]["var_as_output_ops"]
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if min_op_idx >= max_op_idx:
            return False, min_op_idx, max_op_idx
        return True, min_op_idx, max_op_idx
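
    # Illustrative sketch (hypothetical var names): if "a" is first consumed by
    # op 2 and "b" is last produced by op 7, is_subgraph(["a"], ["b"]) returns
    # (True, 2, 7); it returns False when either var is unknown or when
    # min_op_idx >= max_op_idx, i.e. no forward op range leads from var_group1
    # to var_group2.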

    def _update_segment_start(self, min_idx, pre_segment_end_idx):
        """
        persist vars of amp-related cast should be included in recompute segment
        """

        def is_amp_cast(op):
            return (
                op.desc.type() == 'cast'
                and self.block.var(op.desc.input_arg_names()[0]).persistable
            )

        idx_ = min_idx - 1
        updated_min_idx = min_idx
        while idx_ > pre_segment_end_idx:
            if is_amp_cast(self.ops[idx_]):
                _logger.info(
                    "found amp-cast op: {}, : {}".format(
                        self.ops[idx_].desc.type(),
                        self.ops[idx_].desc.input_arg_names()[0],
                    )
                )
                updated_min_idx = idx_
                idx_ -= 1
            else:
                break

        return updated_min_idx

    def build_stats(self):
        for i, op in enumerate(self.ops):
            self.op_deps[i] = {"in_ops": [], "out_ops": []}
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.op_deps[i]["in_ops"].extend(
                        self.var_op_deps[name]["var_as_output_ops"]
                    )
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_input_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = [i]
                    self.var_op_deps[name]["var_as_output_ops"] = []

            for j, name in enumerate(op.desc.output_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_output_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = []
                    self.var_op_deps[name]["var_as_output_ops"] = [i]

            for op_idx in self.op_deps[i]["in_ops"]:
                self.op_deps[op_idx]["out_ops"].extend([i])

    def sort_checkpoints(self, checkpoints_name):
        sorted_checkpoints = []
        for name in checkpoints_name:
            if name not in self.var_op_deps:
                _logger.info(
                    "Recompute Optimizer: deleted %s from checkpoints, because it is not used in paddle program."
                    % name
                )
            elif self.var_op_deps[name]["var_as_output_ops"] == []:
                # input nodes
                sorted_checkpoints.append((name, -1))
            else:
                sorted_checkpoints.append(
                    (name, max(self.var_op_deps[name]["var_as_output_ops"]))
                )
        sorted_checkpoints = sorted(sorted_checkpoints, key=lambda x: x[1])
        return [x[0] for x in sorted_checkpoints]
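
    # Illustrative sketch (hypothetical names): if "ckpt_a" is last produced by
    # op 5, "ckpt_b" by op 2, and "inp" is a pure input (no producing op, so it
    # is keyed by -1), sort_checkpoints(["ckpt_a", "inp", "ckpt_b"]) returns
    # ["inp", "ckpt_b", "ckpt_a"], i.e. checkpoints ordered by producing op.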

    def modify_forward_desc_for_recompute(self):
        op_types = [op.desc.type() for op in self.ops]
        if "dropout" not in op_types:
            return

        op_idx = 0
        while op_idx < len(self.ops):
            op = self.ops[op_idx]
            if op.desc.type() != "dropout":
                op_idx += 1
                continue
            # a seed op has already been inserted before this dropout
            if op.input('Seed') is not None and len(op.input('Seed')) == 1:
                op_idx += 1
                continue
            # add a seed op so that the two dropout ops can generate the same output
            op_unique_name = unique_name.generate("seed")
            var_unique_name = unique_name.generate_with_ignorable_key(
                ".".join([op_unique_name, 'tmp'])
            )
            added_var = self.block.create_var(
                name=var_unique_name,
                dtype='int32',
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False,
            )
            seed = 0 if op.attr("fix_seed") is False else int(op.attr("seed"))
            op_device_attr_name = (
                core.op_proto_and_checker_maker.kOpDeviceAttrName()
            )
            op_device = ""
            if op.desc.has_attr(op_device_attr_name):
                op_device = op.desc.attr(op_device_attr_name)

            # Setting force_cpu of seed to true will place the output of seed op in CPU memory,
            # reducing the synchronous GPU-to-CPU copy in dropout and avoiding communication hangs.
            added_op = self.block._insert_op(
                index=op.idx,
                type='seed',
                inputs={},
                outputs={'Out': [added_var]},
                attrs={'seed': seed, 'op_device': op_device, 'force_cpu': True},
            )
            self.ops.insert(op_idx, added_op)
            # modify dropout op desc so that it accept a seed var as input
            op.desc.set_input("Seed", [var_unique_name])
            op.desc.remove_attr("fix_seed")
            op.desc.remove_attr("seed")
            self.block._sync_with_cpp()
            op_idx += 2
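
    # Note (illustrative, hypothetical var names): the rewrite above turns a
    # forward desc "dropout(X) -> Y" with attrs {fix_seed, seed} into
    #   seed() -> S;  dropout(X, Seed=S) -> Y
    # so the recomputed dropout reads the same seed var S and regenerates an
    # identical mask during the backward recomputation pass.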


def _pretty_op_desc_(op_desc, prefix):
    out_s = "%s\tname:[%s]\n%s    \tinputs:[%s]\n%s    \toutputs:[%s]" % (
        prefix + "_op",
        str(op_desc.type()),
        prefix + "_input",
        " ".join(op_desc.input_arg_names()),
        prefix + "_output",
        " ".join(op_desc.output_arg_names()),
    )
    return out_s


def _add_needed_descs_to_block(
    descs, block, main_block, in_memory_vars, grad_op_id_to_fwd_op=None
):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        origin_desc = desc
        origin_is_operator = False
        if isinstance(desc, framework.Operator):
            desc = desc.desc
            origin_is_operator = True
        if isinstance(desc, tuple):
            desc = desc[0]
        is_needed = False
        for name in desc.output_arg_names():
            if main_block.has_var(name) and main_block.var(name).persistable:
                continue
            if name not in in_memory_vars:
                is_needed = True
        if is_needed:
            if origin_is_operator and grad_op_id_to_fwd_op is not None:
                grad_op_id_to_fwd_op[desc.original_id()] = origin_desc
            new_op_desc = block.desc.append_op()
            new_op_desc.copy_from(desc)
            new_op_desc._set_attr(op_role_attr_name, backward)
            if desc.has_attr('op_device'):
                new_op_desc._set_attr('op_device', desc.attr('op_device'))
            result_descs.append(new_op_desc)
    return result_descs


def _add_descs_to_block(descs, block, grad_op_id_to_fwd_op=None):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        if isinstance(desc, framework.Operator):
            # for recompute, should record recompute ops
            if grad_op_id_to_fwd_op is not None:
                grad_op_id_to_fwd_op[desc.desc.original_id()] = desc
            desc = desc.desc
        if isinstance(desc, tuple):
            desc = desc[0]
        new_op_desc = block.desc.append_op()
        new_op_desc.copy_from(desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
        if desc.has_attr('op_device'):
            new_op_desc._set_attr('op_device', desc.attr('op_device'))
        result_descs.append(new_op_desc)
    return result_descs


def _find_loss_op_(loss):
    for op in reversed(loss.block.ops):
        assert isinstance(op, framework.Operator)
        if (
            len(op.output_arg_names) == 1
            and op.output_arg_names[0] == loss.name
        ):
            loss.op = op
            break
    if loss.op is None:
        raise ValueError("loss.op is None. Should not happen.")


def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
    Traverse all ops in op_descs[begin_idx : end_idx],
    if any op has inputs/outputs named "old_name", rename it as 'new_name'
    """
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
        end_idx = len(op_descs)
    if isinstance(op_descs, (list, tuple)):
        for i in range(begin_idx, end_idx):
            op_desc = op_descs[i]
            if isinstance(op_desc, tuple):
                op_desc = op_desc[0]
            op_desc._rename_input(old_name, new_name)
            op_desc._rename_output(old_name, new_name)
    if isinstance(op_descs, collections.OrderedDict):
        for key, value in op_descs.items():
            if isinstance(value, (list, tuple)):
                for op_desc in value:
                    op_desc._rename_input(old_name, new_name)
                    op_desc._rename_output(old_name, new_name)


def _create_op_desc_(op_type, inputs, outputs, attrs):
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
    """
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
    for para, args in inputs.items():
        op_desc.set_input(
            para,
            list(
                map(
                    lambda arg: arg.decode() if isinstance(arg, bytes) else arg,
                    args,
                )
            ),
        )
    for para, args in outputs.items():
        op_desc.set_output(
            para,
            list(
                map(
                    lambda arg: arg.decode() if isinstance(arg, bytes) else arg,
                    args,
                )
            ),
        )

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name
        ] = core.op_proto_and_checker_maker.OpRole.Backward
    if op_device_attr_name not in attrs:
        attrs[op_device_attr_name] = ""
    for name, val in attrs.items():
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
            op_desc._set_attr(name, val)
    return op_desc
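
# Minimal usage sketch of _create_op_desc_ (hypothetical var names): build a
# standalone sum op desc that accumulates two renamed gradients into x@GRAD;
# unspecified role/device attrs default to OpRole.Backward and "".
#
#   desc = _create_op_desc_(
#       "sum",
#       {"X": ["x@GRAD@RENAME@block0@0", "x@GRAD@RENAME@block0@1"]},
#       {"Out": ["x@GRAD"]},
#       {"use_mkldnn": False},
#   )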


def _create_loss_op_desc_(loss):
    create_shape = [] if len(loss.shape) == 0 else [1]
    op_desc = _create_op_desc_(
        "fill_constant",
        {},
        {"Out": [_append_grad_suffix_(loss.name)]},
        {
            "shape": create_shape,
            "value": 1.0,
            "dtype": loss.dtype,
            "force_cpu": False,
            core.op_proto_and_checker_maker.kOpRoleAttrName(): int(
                core.op_proto_and_checker_maker.OpRole.Backward
            )
            | int(core.op_proto_and_checker_maker.OpRole.Loss),
            core.op_proto_and_checker_maker.kOpDeviceAttrName(): loss.op.attr(
                core.op_proto_and_checker_maker.kOpDeviceAttrName()
            ),
        },
    )
    return op_desc


def _infer_var_data_type_shape_(grad_var_name, block):
    """
    Infer the data type and shape of given grad variable
    """
    grad_var = block.desc.find_var(grad_var_name.encode())
    fwd_name = _strip_grad_suffix_(grad_var_name)
    if block.desc.has_var_recursive(fwd_name.encode()):
        fwd_var = block.desc.find_var_recursive(fwd_name.encode())
        grad_var.set_dtype(fwd_var.dtype())
        grad_var.set_shape(fwd_var.shape())
    else:
        # TODO(jiabin): Maybe we should not do this, as it may cause unexpected dtype errors
        warnings.warn(
            "Set grad var: {} dtype to default FP32, since we can't find its related forward var".format(
                grad_var_name
            )
        )
        grad_var.set_dtype(core.VarDesc.VarType.FP32)
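
# Sketch of the fallback above: for a grad var "x@GRAD", dtype and shape are
# copied from the forward var "x" when it is visible from this block;
# otherwise the grad var falls back to FP32, as the warning above notes.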


def _all_in_set_(cands, s):
    """
    Test if all elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c not in s:
            return False
    return True


def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c in s:
            return True
    return False


def _strip_grad_suffix_(name):
    """
    Strip the grad suffix from the given variable name
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
    pos = name.find(core.grad_var_suffix())
    new_name = name[:pos] if pos != -1 else name
    new_pos = name.rfind('grad/')
    return new_name[new_pos + 5 :] if new_pos != -1 else new_name


def _append_grad_suffix_(name):
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
    return name + core.grad_var_suffix()


def _accumulate_gradients_by_sum_op_(
    var_name, renamed_vars, pending_sum_ops, op_idx, op_device=""
):
    """
    Use sum op to accumulate_gradients, the gradients are stored in renamed_vars.
    """
    if op_idx not in pending_sum_ops.keys():
        pending_sum_ops[op_idx] = []
    pending_sum_ops[op_idx].append(
        _create_op_desc_(
            "sum",
            {"X": renamed_vars[var_name]},
            {"Out": [var_name]},
            {"use_mkldnn": False, "op_device": op_device},
        )
    )
    renamed_vars[var_name] = [var_name]


def _accumulate_gradients_by_add_ops_(
    var_name, renamed_vars, pending_sum_ops, op_idx, op_device=""
):
    """
    Use several inplace add op to accumulate_gradients, the gradients are stored in renamed_vars.
    """
    if op_idx not in pending_sum_ops.keys():
        pending_sum_ops[op_idx] = []
    out_name = renamed_vars[var_name][0]
    for i in range(1, len(renamed_vars[var_name])):
        x_name = out_name
        y_name = renamed_vars[var_name][i]
        if i != len(renamed_vars[var_name]) - 1:
            out_name = var_name + '@ADD@' + str(i)
        else:
            out_name = var_name
        pending_sum_ops[op_idx].append(
            _create_op_desc_(
                "grad_add",
                {"X": [x_name], "Y": [y_name]},
                {"Out": [out_name]},
                {"use_mkldnn": False, "op_device": op_device},
            )
        )
    renamed_vars[var_name] = [var_name]
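
# Illustrative sketch (hypothetical names): with renamed_vars["x@GRAD"] ==
# ["x@GRAD@RENAME@block0@0", "x@GRAD@RENAME@block0@1", "x@GRAD@RENAME@block0@2"],
# the add-ops path above emits a chain of inplace adds:
#   grad_add(X=...@0,         Y=...@1) -> x@GRAD@ADD@1
#   grad_add(X=x@GRAD@ADD@1,  Y=...@2) -> x@GRAD
# while the sum-op path emits a single sum over all renamed vars; sum is used
# once the count exceeds FLAGS_max_inplace_grad_add.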


def _addup_repetitive_outputs_(
    op_descs, block_idx, grad_var_to_var=None, grad_op_id_to_fwd_op=None
):
    """
    In backward part, a variable may be the output of more than one op.
    And one op may yield its multiple outputs to the same variable.
    In these cases, the variable should be the accumulation of all the outputs.
    `sum_op`s are added to implement the accumulation.

    Args:
        grad_var_to_var(dict): used to build the mapping between grad var name and forward var name.
        Only for auto parallel.
    """

    _MAX_ADD_NUM_ = framework._global_flags()['FLAGS_max_inplace_grad_add']
    # pending_sum_ops = []
    pending_sum_ops = collections.OrderedDict()
    var_rename_count = collections.defaultdict(int)
    renamed_vars = collections.defaultdict(list)
    renamed_var_start_idx = collections.defaultdict(list)
    var_device = collections.defaultdict(str)
    for idx, op_desc in enumerate(op_descs):
        op_device_attr_name = (
            core.op_proto_and_checker_maker.kOpDeviceAttrName()
        )
        op_device = ""
        if op_desc.has_attr(op_device_attr_name):
            op_device = op_desc.attr(op_device_attr_name)
        for var_name in op_desc.input_arg_names():
            if "@GRAD" not in var_name:
                continue
            if len(renamed_vars[var_name]) > 1:
                if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
                    _accumulate_gradients_by_sum_op_(
                        var_name,
                        renamed_vars,
                        pending_sum_ops,
                        idx,
                        var_device[var_name],
                    )
                else:
                    _accumulate_gradients_by_add_ops_(
                        var_name,
                        renamed_vars,
                        pending_sum_ops,
                        idx,
                        var_device[var_name],
                    )

        for param_idx, param_name in enumerate(op_desc.output_names()):
            arg_names = op_desc.output(param_name)
            for arg_idx, var_name in enumerate(arg_names):
                if "@GRAD" not in var_name:
                    continue
                # if "@RENAME@" in var_name:
                #    continue
                if (
                    var_name == core.empty_var_name()
                    or var_name in op_desc.input_arg_names()
                ):
                    # empty variable or inplace op
                    continue
                if len(renamed_vars[var_name]) == 0:
                    # it's the first time we get the variable
                    renamed_vars[var_name] = [var_name]
                    renamed_var_start_idx[var_name] = idx
                else:
                    if len(renamed_vars[var_name]) == 1:
                        new_name = (
                            var_name
                            + "@RENAME@block"
                            + str(block_idx)
                            + "@"
                            + str(var_rename_count[var_name])
                        )
                        var_rename_count[var_name] += 1
                        # Build the mapping between the new_name and var_name (Only for auto parallel)
                        if grad_var_to_var is not None:
                            if var_name in grad_var_to_var:
                                grad_var_to_var[new_name] = grad_var_to_var[
                                    var_name
                                ]
                            else:
                                grad_var_to_var[new_name] = var_name
                        # rename original var_name
                        renamed_vars[var_name][0] = new_name
                        # before change: _rename_arg_(op_descs, var_name,
                        #                             new_name, 0, idx)
                        # rename arg from idx of the first appearance
                        # in backward, not always from 0
                        _rename_arg_(
                            op_descs,
                            var_name,
                            new_name,
                            renamed_var_start_idx[var_name],
                            idx,
                        )
                        _rename_arg_(pending_sum_ops, var_name, new_name)

                        for p in op_desc.output_names()[:param_idx]:
                            p_arg_names = op_desc.output(p)
                            if var_name in p_arg_names:
                                op_desc.set_output(
                                    p,
                                    [
                                        new_name if x == var_name else x
                                        for x in p_arg_names
                                    ],
                                )

                        arg_names = [
                            new_name if x == var_name else x
                            for x in arg_names[:arg_idx]
                        ] + arg_names[arg_idx:]

                    new_name = (
                        var_name
                        + "@RENAME@block"
                        + str(block_idx)
                        + "@"
                        + str(var_rename_count[var_name])
                    )
                    var_rename_count[var_name] += 1
                    # Build the mapping between the new_name and var_name (Only for auto parallel)
                    if grad_var_to_var is not None:
                        if var_name in grad_var_to_var:
                            grad_var_to_var[new_name] = grad_var_to_var[
                                var_name
                            ]
                        else:
                            grad_var_to_var[new_name] = var_name
                    arg_names[arg_idx] = new_name
                    op_desc.set_output(param_name, arg_names)
                    renamed_vars[var_name].append(new_name)
                    # record the latest device
                    var_device[var_name] = op_device

    for var_name, inputs in renamed_vars.items():
        if len(renamed_vars[var_name]) > 1:
            if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
                _accumulate_gradients_by_sum_op_(
                    var_name,
                    renamed_vars,
                    pending_sum_ops,
                    len(op_descs),
                    var_device[var_name],
                )
            else:
                _accumulate_gradients_by_add_ops_(
                    var_name,
                    renamed_vars,
                    pending_sum_ops,
                    len(op_descs),
                    var_device[var_name],
                )

    op_descs_len = len(op_descs)
    # sum_op descs are sorted according to their insert position
    for key, value in collections.OrderedDict(
        reversed(list(pending_sum_ops.items()))
    ).items():

        # NOTE(zhiqiu): Since reversed, the idx of op_descs to be inserted will remains correct.
        # For example, [0, 1, 2], and we want to insert 'a' at idx 1, 'b' at idx 2, and the expected result is [0, 1, 'a', 2, 'b'].
        # If reversed, we first insert 'b' at idx 2, it becomes [0, 1, 2, 'b'], and then insert 'a' at idx 1, it becomes [0, 1, 'a', 2, 'b'].
        # If not reverse, we first insert 'a' at idx 1, it becomes [0, 1, 'a', 2], and then insert 'b' at idx 2, it becomes [0, 1, 'a', 'b', 2].
        idx = key
        for i, op in enumerate(value):
            # update the mapping between fwd and bwd
            target_idx = idx - 1 if idx == op_descs_len else idx + i
            if (
                grad_op_id_to_fwd_op is not None
                and grad_op_id_to_fwd_op.get(
                    op_descs[target_idx].original_id(), None
                )
                is not None
            ):
                grad_op_id_to_fwd_op[op.original_id()] = grad_op_id_to_fwd_op[
                    op_descs[target_idx].original_id()
                ]
            op_descs.insert(idx + i, op)

    return op_descs
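
# Illustrative sketch of the @RENAME mechanism above (hypothetical names): if
# two grad ops in block 0 both write "x@GRAD", the writers are renamed to
# "x@GRAD@RENAME@block0@0" and "x@GRAD@RENAME@block0@1", and an accumulation
# (a single sum op, or a grad_add chain) is appended to combine them back into
# "x@GRAD".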


def _remove_no_grad_branch_(
    op_descs, no_grad_set, grad_op_id_to_fwd_op=None, target_vars=[]
):
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
        2. all grad inputs of the grad op are in 'no_grad_set'
    NOTE: grad names of target_vars are never added to 'no_grad_set'.
    """

    def _op_can_be_removed_(op_desc, no_grad_set):
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
            return True
        if _all_in_set_(
            [
                name
                for name in op_desc.input_arg_names()
724
                if name.find(core.grad_var_suffix()) != -1
725 726 727
            ],
            no_grad_set,
        ):
            no_grad_set.update(set(out_arg_names) - target_grad_var_names)
            return True
        return False

    # Remove ops whose outputs are all in no_grad_set
    target_grad_var_names = set(
        [var.name + core.grad_var_suffix() for var in target_vars]
    )
    op_descs = [
        op_desc
        for op_desc in op_descs
        if not _op_can_be_removed_(op_desc, no_grad_set)
    ]
    # Insert fill_any_like_op with value 0
    to_insert = []
    for idx, op_desc in enumerate(op_descs):
        for arg in op_desc.input_arg_names():
            # arg is a gradient var name and arg should not have gradient
            if core.grad_var_suffix() in arg and arg in no_grad_set:
                x_in = _strip_grad_suffix_(arg)
                # the reason should be: arg can be input of another grad op
                # and the op is a not-to-remove op
                new_op_desc = _create_op_desc_(
                    "fill_any_like",
                    {"X": [x_in]},
                    {"Out": [arg]},
                    {'value': 0, 'dtype': -1},
                )
                # update the mapping between fwd and bwd
                if (
                    grad_op_id_to_fwd_op is not None
                    and grad_op_id_to_fwd_op.get(op_desc.original_id(), None)
                    is not None
                ):
                    grad_op_id_to_fwd_op[
                        new_op_desc.original_id()
                    ] = grad_op_id_to_fwd_op[op_desc.original_id()]
                to_insert.append((new_op_desc, idx))

    for p in reversed(to_insert):
        op_descs.insert(p[1], p[0])

    return op_descs
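
# Illustrative sketch (hypothetical names): if a relu_grad desc only produces
# "x@GRAD" and "x@GRAD" is in no_grad_set, that desc is dropped; if a surviving
# grad op still consumes "x@GRAD", a fill_any_like op with value 0 is inserted
# so the consumer reads zeros instead of a missing var.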


def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
    """
    Pruning Program with Structural Analysis Method of Computational Graph.
    The nodes of the computational graph composed of backward OPS should be
    interconnected. If there are unconnected sub-graphs in the computational graph,
    these sub-graphs should be cut off.

    Args:
        grad_op_descs(list[core.OpDesc]): The candidate backward OpDescs.
        forward_ops(list[Operator]): The forward ops.
        input_grad_names_set(set): this set is used to store the gradients' name
            which is generated by backward ops, and input_grad_names_set can help
            to prune the unnecessary backward ops.

    Return:
        (set[core.OpDesc]): A set of OpDescs which should be pruned.
    """

    class Var:
        def __init__(self, var_name):
            self.var_name = var_name
            self.gen_op = None
            self.pending_ops = []

        def set_gen_op(self, gen_op):
            assert isinstance(gen_op, Op)
            assert self.gen_op is None
            self.gen_op = gen_op

        def add_pending_op(self, op):
            assert isinstance(op, Op)
            self.pending_ops.append(op)

805
    class Op:
C
chengduo 已提交
806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845
        def __init__(self, op_desc):
            self.op_desc = op_desc
            self.inputs = []
            self.outputs = []

        def insert_input(self, var):
            assert isinstance(var, Var)
            self.inputs.append(var)

        def insert_output(self, var):
            assert isinstance(var, Var)
            self.outputs.append(var)

    var_versions = dict()

    def _create_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        else:
            var_versions[name].append(Var(name))
        return var_versions[name][-1]

    def _create_or_get_last_version_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        return var_versions[name][-1]

    def _create_op_node(op_desc):
        op_node = Op(op_desc)
        for input in op_desc.input_arg_names():
            var = _create_or_get_last_version_node(name=input)
            var.add_pending_op(op_node)
            op_node.insert_input(var)
        for output in op_desc.output_arg_names():
            var = _create_node(name=output)
            var.set_gen_op(op_node)
            op_node.insert_output(var)
        return op_node

    # Record the forward vars
    forward_vars_set = (
        set() if input_grad_names_set is None else set(input_grad_names_set)
    )
    for op in forward_ops:
        forward_vars_set.update(op.desc.input_arg_names())
        forward_vars_set.update(op.desc.output_arg_names())

    # Record the vars which are created during backward and are not generated by any op.
    backward_vars_set = set()
    # special_op_nodes is the candidate sub-graph head node.
    special_op_nodes = set()
    for op_desc in grad_op_descs:
        input_set = set(op_desc.input_arg_names())
        # The new_vars are created during backward and are not generated by any op.
        new_vars = input_set - forward_vars_set - backward_vars_set
        backward_vars_set.update(op_desc.output_arg_names())

        op_node = _create_op_node(op_desc)
        if len(new_vars) == len(input_set):
            special_op_nodes.add(op_node)

    not_need_op_descs = []
    # Start traversing all candidate sub-graph headers to check whether
    # they are connected to backward computational graphs, and if they are
    # not, list them in not_need_op_descs
    for special_op_node in special_op_nodes:
        op_list = [special_op_node]
        ready_vars = set(special_op_node.inputs)
        remove_ops = True
        candidate_ops = [special_op_node]
        while len(candidate_ops) > 0:
            op_node = candidate_ops.pop(0)
            if _all_in_set_(op_node.inputs, ready_vars):
                for out_var in op_node.outputs:
                    candidate_ops.extend(out_var.pending_ops)
                    op_list.extend(out_var.pending_ops)
                ready_vars.update(op_node.outputs)
            else:
                remove_ops = False
                break
        if remove_ops:
            not_need_op_descs.extend([node.op_desc for node in op_list])
    not_need_op_descs_set = set(not_need_op_descs)
    grad_op_descs_set = set(grad_op_descs)
    # If a backward computational graph is simply one sub-graph header, the
    # not_need_op_descs will be the whole graph; this IF clause avoids it.
    if grad_op_descs_set == not_need_op_descs_set:
        return set()
    return not_need_op_descs_set


def serialize_op_decs(op_desc):
    protostr = op_desc.serialize_to_string()
    proto = framework_pb2.OpDesc.FromString(bytes(protostr))
    return proto.__str__()


def _append_backward_ops_with_checkpoints_(
    block,
    ops,
    target_vars,
    target_block,
    no_grad_dict,
    grad_to_var,
    checkpoints,
    grad_op_id_to_fwd_op=None,
):
    """
    Create grad ops with forward ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose forward recomputation backward ops need to be added
        target_vars(list[Tensor]): the target vars whose gradients should be calculated.
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int) block index
            val(str): corresponding forward variable name
        checkpoints: variables that a user defined as checkpoint for forward recomputation

    Algorithms:
        0) deal with forward recomputing program descs
        1) find ops between checkpoints, i.e. recompute_segments
        2) go through all forward ops and collect all variables that will be held in memory
            a. variables that are used across segments will be held in memory
            b. output of dropout op will be held in memory
            c. input variables will be held in memory
        3) go through each recompute_segments, add backward ops with forward recomputation
            a. add ops in current recompute_segment as forward recomputation ops
            b. rename all non-checkpoint variables in recomputation ops
            c. add backward ops of current recomputation ops
            d. add sum op for repetitive_outputs
        4) remove no grad branch as it is in _remove_no_grad_branch_
        5) Note1: all appended ops' OpRole are Backward
        6) Note2: all variables with new name should be returned so that _append_backward_vars_ can be called
        7) Note3: current forward recomputation backpropagation does not handle programs with subblock
    """

    checkpoints_name = [x.name for x in checkpoints]
    checkpoints_name = list(set(checkpoints_name))
    local_block = block.program._create_block()
    buffer_block = block.program._create_block()
    # 0) deal with forward recomputing program descs
    program_stat = ProgramStats(block, ops)
    program_stat.modify_forward_desc_for_recompute()
    program_stat.build_stats()

    # 1) find ops between checkpoints, i.e. recompute_segments
    checkpoints_name = program_stat.sort_checkpoints(checkpoints_name)
    segments = []

    if len(checkpoints_name) == 1:
        # only one checkpoint
        max_op_idx = -1
        var_group = [checkpoints_name[0]]
        for name in var_group:
            if name not in program_stat.var_op_deps:
                break
            op_idx = program_stat.var_op_deps[name]["var_as_output_ops"]
            # only count the last generating op
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if max_op_idx > 0:
            segments.append([0, max_op_idx + 1])
    else:
        start_idx = 0
        pre_segment_end_idx = -1
        while True:
            if start_idx >= len(checkpoints_name) - 1:
                break
            # min_idx: checkpoint_1's input op
            # max_idx: checkpoint_2's output op
            flag, min_idx, max_idx = program_stat.is_subgraph(
                [checkpoints_name[start_idx]], [checkpoints_name[start_idx + 1]]
            )
            if flag:
                # max_idx + 1 since the exact and used segment end idx is max_idx
                min_idx = program_stat._update_segment_start(
                    min_idx, pre_segment_end_idx
                )
                segments.append([min_idx, max_idx + 1])
            else:
                _logger.info(
                    "Could not recompute op range [{}] - [{}] ".format(
                        min_idx, max_idx + 1
                    )
                )

            start_idx += 1

    if segments != [] and segments[0][0] != 0:
        recompute_segments = [[0, segments[0][0]]] + segments
    else:
        recompute_segments = segments

    for i, (idx1, idx2) in enumerate(recompute_segments):
        _logger.info("recompute segment[{}]".format(i))
        _logger.info(
            "segment start op: [{}]: [{}]".format(
                ops[idx1].desc.type(), ops[idx1].desc.input_arg_names()
            )
        )
        _logger.info(
            "segment end op: [{}]: [{}]".format(
                ops[idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names()
            )
        )

    # 2) go through all forward ops and collect all variables that will be held in memory
    vars_should_be_hold = []
    # a. variables that are used across segments will be held in memory
    for segment in recompute_segments:
        vars_should_be_hold.extend(
            program_stat.get_out_of_subgraph_vars(segment[0], segment[1])
        )

    cross_vars = set(vars_should_be_hold) - set(checkpoints_name)
    _logger.info(
        "found [{}] vars which cross recompute segment: [{}], better checkpoints might be set to reduce those vars".format(
            len(cross_vars), cross_vars
        )
    )

    # b. output of seed op should be kept in memory
    vars_should_be_hold.extend(program_stat.get_reserved_vars())
    # c. input variables are checkpoints
    vars_should_be_hold.extend(program_stat.get_input_nodes())
    vars_should_be_hold = list(set(vars_should_be_hold))

    # 3) go through each recompute_segments, add backward ops with forward recomputation
    grad_op_descs = []
    var_name_dict = {}

    vars_in_memory = vars_should_be_hold + checkpoints_name

    max_calculated_op_position = len(ops)
    device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
    if recompute_segments == []:
        gap_ops = ops[0:max_calculated_op_position]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception(
                    "Recompute doesn't support ops with sub_block, "
                    "invoke op: %s"
                    % _pretty_op_desc_(op.desc, "with_sub_block")
                )
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, no_grad_dict[block.idx], []
            )

            # record the mapping between fwd and bwd
            if grad_op_id_to_fwd_op is not None:
                for op_desc in grad_op_desc:
                    grad_op_id_to_fwd_op[op_desc.original_id()] = op

            # Set device for grad_op according to forward Op
            if op.desc.has_attr(device_attr_name):
                op_device = op.desc.attr(device_attr_name)
                for op_desc in grad_op_desc:
                    op_desc._set_attr(device_attr_name, op_device)
            added_descs = _add_descs_to_block(
                grad_op_desc, local_block, grad_op_id_to_fwd_op
            )
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

    for i, segment in enumerate(recompute_segments[::-1]):
        gap_ops = ops[segment[1] : max_calculated_op_position]
        max_calculated_op_position = segment[0]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception(
                    "Recompute doesn't support ops with sub_block, "
                    "invoke op: %s"
                    % _pretty_op_desc_(op.desc, "with_sub_block")
                )
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, no_grad_dict[block.idx], []
            )

            # record the mapping between fwd and bwd
            if grad_op_id_to_fwd_op is not None:
                for op_desc in grad_op_desc:
                    grad_op_id_to_fwd_op[op_desc.original_id()] = op

            # Set device for grad_op according to forward Op
            if op.desc.has_attr(device_attr_name):
                op_device = op.desc.attr(device_attr_name)
                for op_desc in grad_op_desc:
                    op_desc._set_attr(device_attr_name, op_device)
            added_descs = _add_descs_to_block(
                grad_op_desc, local_block, grad_op_id_to_fwd_op
            )
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

        ff_ops = ops[segment[0] : segment[1]]
        var_suffix = ".subprog_%d" % i

        for op in ff_ops:
            if op.has_attr("sub_block"):
                raise Exception(
                    "Recompute doesn't support ops with sub_block, "
                    "invoke op: %s"
                    % _pretty_op_desc_(op.desc, "with_sub_block")
                )
            input_and_output_names = []
            input_and_output_names.extend(op.desc.input_arg_names())
            input_and_output_names.extend(op.desc.output_arg_names())
            for name in input_and_output_names:
                if block.var(name).persistable or name in checkpoints_name:
                    continue
                if name in vars_should_be_hold:
                    continue
                if name not in var_name_dict:
                    var_name_dict[name] = name + var_suffix

                    # we should create the rename var in subprog, otherwise its VarType will be BOOL
                    ref_var = block.program.global_block().var(name)
                    block.create_var(
                        name=var_name_dict[name],
                        shape=ref_var.shape,
                        dtype=ref_var.dtype,
                        type=ref_var.type,
                        persistable=ref_var.persistable,
                        stop_gradient=ref_var.stop_gradient,
                    )
        # 3.a. add ops in current recompute_segment as forward recomputation ops
        buffer_descs = _add_needed_descs_to_block(
            ff_ops, buffer_block, block, vars_in_memory, grad_op_id_to_fwd_op
        )
        added_descs = _add_descs_to_block(
            ff_ops, local_block, grad_op_id_to_fwd_op
        )
        # 3.b. rename all non-checkpoint variables in recomputation ops
        for key in var_name_dict:
            _rename_arg_(buffer_descs, key, var_name_dict[key])

        # added_descs should be in grad_op_descs because it is backward op desc
        grad_op_descs.extend(buffer_descs)

        # 3.c. add backward ops for all ops in current segment
        for op_desc in reversed(added_descs):
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op_desc, no_grad_dict[block.idx], []
            )
            # record the mapping between fwd and bwd
            if grad_op_id_to_fwd_op is not None:
                for g_op_desc in grad_op_desc:
                    grad_op_id_to_fwd_op[
                        g_op_desc.original_id()
                    ] = grad_op_id_to_fwd_op[op_desc.original_id()]
            # Set device for grad_op according to forward Op
            if op_desc.has_attr(device_attr_name):
                op_device = op_desc.attr(device_attr_name)
                for g_op_desc in grad_op_desc:
                    g_op_desc._set_attr(device_attr_name, op_device)

            for key in var_name_dict:
                _rename_arg_(grad_op_desc, key, var_name_dict[key])
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)

    # 3.d. add sum op for repetitive_outputs
    grad_op_descs = _addup_repetitive_outputs_(
        grad_op_descs, block.idx, grad_op_id_to_fwd_op=grad_op_id_to_fwd_op
    )
    # 4) remove no grad branch as it is in _remove_no_grad_branch_
    grad_op_descs = _remove_no_grad_branch_(
        grad_op_descs,
        no_grad_dict[block.idx],
        grad_op_id_to_fwd_op,
        target_vars,
    )
    added_descs = _add_descs_to_block(
        grad_op_descs, target_block, grad_op_id_to_fwd_op
    )
    return (
        program_stat,
        checkpoints_name,
        vars_should_be_hold,
        recompute_segments,
    )


def _get_sub_block_path(
    sub_block,
    sub_block_op_desc,
    no_grad_set,
    op_path_dict,
    sub_block_target_names=None,
):
    """
    Get output vars in subblock which will be assigned to parent block.
    It is used to find the grad path in subblock.

    Args:
        sub_block(Block): The sub-block in which to get op path.
        sub_block_op_desc: The op desc of the sub-block op such as 'while', 'conditional_block' and 'recurrent'.
        no_grad_set(set): The set of no grad var name. no_grad_set will be changed.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        sub_block_target_names(set): Target var names of sub-block.
    Return:
        The forward op path of sub-block corresponding to backward op.
    """

    assert sub_block_op_desc.has_attr(
        "sub_block"
    ) and sub_block.idx == sub_block_op_desc._block_attr_id("sub_block")
    assert isinstance(sub_block_target_names, (set, type(None)))

    if sub_block_target_names is None:
        sub_block_target_names = sub_block_op_desc.output_arg_names

    # TODO(huihuangzheng): add support for recurrent op.
    if sub_block_op_desc.type in ["conditional_block", "while"]:
        # Step1: get the output vars in sub-block
        sub_outputs = [
            sub_block._var_recursive(var) for var in sub_block_target_names
        ]
        for var in sub_block_target_names:
            for op_desc in sub_block.ops:
                if var in op_desc.output_arg_names:
                    for name in op_desc.input_arg_names:
                        sub_outputs.append(sub_block._var_recursive(name))

        # Step2: find op path of sub-block
        is_while = sub_block_op_desc.type in ["while"]
        sub_block_op_path = _find_op_path_(
            sub_block, sub_outputs, [], no_grad_set, op_path_dict, is_while
        )
        return sub_block_op_path
    return sub_block.ops


def _is_grad_op_(op):
    op_maker = core.op_proto_and_checker_maker
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    if op_maker.kOpRoleVarAttrName() in op.attr_names and int(
        op.all_attrs()[op_maker.kOpRoleAttrName()]
    ) == int(backward):
        return True
    return False


def _rename_grad_name_(name, grad_order):
    return 'grad/' * grad_order + name
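
# Illustrative example: _rename_grad_name_("x@GRAD", 2) returns
# "grad/grad/x@GRAD", the name of a second-order gradient of x;
# _strip_grad_suffix_ above undoes both decorations.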


def _append_backward_ops_(
    block,
    ops,
    target_vars,
    target_block,
    no_grad_dict,
    grad_to_var,
    callbacks=None,
    input_grad_names_set=None,
    op_path_dict=None,
    distop_context=None,
    rename_var_map=None,
    grad_op_id_to_fwd_op=None,
):
    """
    Create all grad ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose backward ops need to be added
        target_vars(list[Tensor]): the loss vars we want to calculate gradient.
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int)  block index
            val(set) a set of variable names. These variables have no gradient
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
        callbacks(callable object): a callable object used to decorate new generated grad ops
        input_grad_names_set(set): this set stores the gradient names generated
            by backward ops; input_grad_names_set can help to prune
            the unnecessary backward ops.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        rename_var_map(dict): used to associate target_grad var name with first grad_op input name.
            Only used for high-order gradients.
    """

    # Build the mapping between the forward op and backward op (Only for auto parallel)
    def update_distop_context(
        distop_context, op_grad_to_var, appending_grad_times
    ):
        distop_context.grad_var_to_var[appending_grad_times].update(
            op_grad_to_var
        )
        for op_desc in grad_op_desc:
            assert (
                op_desc.original_id() not in distop_context.grad_op_id_to_op_id
            )
            distop_context.grad_op_id_to_op_id[
                op_desc.original_id()
            ] = op.desc.original_id()

    if callbacks is not None:
        assert isinstance(callbacks, (list, tuple))
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")

    # grad_op_descs holds created grad_op, and will be appended to target_block
    grad_op_descs = []
    program = block.program

    if rename_var_map is None:
        rename_var_map = {}
    assert isinstance(rename_var_map, dict)

    if core._is_bwd_prim_enabled():
        composite_block = program.clone().current_block()
        # Infer shape for operators whose outputs haven't been created.
        for op in composite_block.ops:
            if not all(
                tuple(
                    composite_block._find_var_recursive(arg)
                    for arg in op.output_arg_names
                )
            ):
                infershape_for_composite(composite_block, op.desc)

    # create grad op descs by iterating over the forward ops in reverse
    for op in reversed(ops):
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
            sub_block = program.block(op._block_attr_id("sub_block"))
            grad_sub_block = program._create_block()
            grad_sub_block._set_forward_block_idx(sub_block.idx)
            # see the following comments for why we set None here.
            pre_input_grad_names_set = copy.copy(input_grad_names_set)
            input_grad_names_set = None
            sub_block_path = op_path_dict[op._block_attr_id("sub_block")]
            _append_backward_ops_(
                sub_block,
                sub_block_path,
                target_vars,
                grad_sub_block,
                no_grad_dict,
                grad_to_var,
                callbacks,
                input_grad_names_set,
                op_path_dict,
                grad_op_id_to_fwd_op=grad_op_id_to_fwd_op,
            )
            input_grad_names_set = pre_input_grad_names_set

            program._rollback()
            grad_sub_block_list.append(grad_sub_block.desc)
        # In primitive mode, a raw phi GradOp will be split into multiple small
        # primitive operators, and the split rules are defined at the c++ level,
        # see details: paddle/fluid/prim/api/manual/backward/composite_backward_api.h
        # It means that the output's shape and dtype of previous operators, which
        # may be used as the input of the next operators, must be known. Therefore,
        # we infer shape and dtype in a sandbox block (named composite_block) for
        # use at the c++ level.
        # For example:
        #   forward:
        #       z = multiply(x, y) // maybe broadcast in kernel
        #   backward:
        #       x_grad_unreduce = z_grad * y // maybe unreduce
        #       reduced_axes = get_reduced_axes(x_grad.shape, x.shape) // need known shape
        #       x_grad = reduce_sum(x_grad_unreduce)
        grad_op_desc = []
        op_grad_to_var = {}
        if core._is_bwd_prim_enabled():

            def find_op_index(block_desc, cur_op_desc):
                for idx in range(block_desc.op_size()):
                    if cur_op_desc == block_desc.op(idx):
                        return idx
                return -1
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                composite_block.desc.op(find_op_index(block.desc, op.desc)),
                no_grad_dict[composite_block.idx],
                grad_sub_block_list,
            )
            for desc in grad_op_desc:
                infershape_for_composite(composite_block, desc)
        else:
            # Getting op's corresponding grad_op
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, no_grad_dict[block.idx], grad_sub_block_list
            )
        # record the mapping between fwd and bwd
        if grad_op_id_to_fwd_op is not None:
            for op_desc in grad_op_desc:
                grad_op_id_to_fwd_op[op_desc.original_id()] = op

        # Build the mapping between the forward op and backward op (Only for auto parallel)
        if distop_context is not None:
            update_distop_context(
                distop_context, op_grad_to_var, program._appending_grad_times
            )
        else:
            default_ctx = getattr(
                paddle.distributed.auto_parallel.dist_context,
                '_g_default_distributed_context',
                None,
            )
            if default_ctx is not None:
                distop_context = default_ctx.dist_op_context
                update_distop_context(
                    distop_context,
                    op_grad_to_var,
                    program._appending_grad_times,
                )

        # Set device for grad_op according to forward Op
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        if op.desc.has_attr(device_attr_name):
            op_device = op.desc.attr(device_attr_name)
            for op_desc in grad_op_desc:
                op_desc._set_attr(device_attr_name, op_device)

        # Rename internal gradient variables in multiple backward
        # so that they have different names with previous backward.
        # For example:
        #  y = x * x, grad = fluid.gradients(fluid.gradients(y, x) + y * y, x)
        # In second-time backward, gradient variable names of partial
        # forward network (y * y) may be have same names with first-time
        # fluid.gradients(y, x).
        # So rename here before _addup_repetitive_outputs_.
        if program._appending_grad_times > 1:
            for op_desc in grad_op_desc:
T
Tongxin Bai 已提交
1458 1459 1460 1461
                forward_op_inputs = op.desc.input_arg_names()
                for name in op_desc.input_arg_names():
                    if name in rename_var_map and name not in forward_op_inputs:
                        op_desc._rename_input(name, rename_var_map[name])
                for name in op_desc.output_arg_names():
                    if "@GRAD" not in name:
                        continue
                    if block.desc.find_var(name.encode("ascii")):
                        new_name = _rename_grad_name_(
                            name, program._appending_grad_times
                        )
                        op_desc._rename_output(name, new_name)
                        rename_var_map[name] = new_name

                        if name in op_grad_to_var:
                            # Build the mapping between the grad var name and var name (Only for auto parallel)
                            if distop_context is not None:
                                distop_context.grad_var_to_var[
                                    program._appending_grad_times
                                ][new_name] = op_grad_to_var[name]
                            op_grad_to_var[new_name] = op_grad_to_var[name]
                            op_grad_to_var.pop(name)

        # If input_grad_names_set is not None, extend grad_op_descs only when
        # any input grad appears in the outputs of previous grad ops.
        # But this strategy is not suited for control-flow ops such as while:
        # for a while op, the grads may be generated in the next loop iteration.
        if input_grad_names_set is not None:
            is_grad_name = (
                lambda name: name.find(core.grad_var_suffix()) != -1
                or name in input_grad_names_set
            )
            is_append_grad = False
            for op_desc in grad_op_desc:
                input_grad_names = [
                    name
                    for name in op_desc.input_arg_names()
                    if is_grad_name(name)
                ]
                # Some gradient ops, such as increment, are not very standard:
                # there is no @GRAD in these ops' inputs.
                if len(input_grad_names) == 0:
                    is_append_grad = True
                    break

                if _some_in_set_(input_grad_names, input_grad_names_set):
                    grad_op_descs.append(op_desc)
                    is_append_grad = True
                    for name in op_desc.output_arg_names():
                        input_grad_names_set.add(name)
            if is_append_grad:
                grad_to_var.update(op_grad_to_var)
        else:
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)
    # record the mapping between grad var name and var name (Only for auto parallel)
    grad_var_to_var = None
    if distop_context is not None:
        grad_var_to_var = distop_context.grad_var_to_var[
            program._appending_grad_times
        ]
    # sum a parameter's gradient vars when multiple gradients were produced for it
    grad_op_descs = _addup_repetitive_outputs_(
        grad_op_descs,
        block.idx,
        grad_var_to_var,
        grad_op_id_to_fwd_op=grad_op_id_to_fwd_op,
    )

    # if all outputs of the grad op are in no_grad_set, then just remove and fill zero
    # if all inputs of the grad op are in no_grad_set, just remove this op
    grad_op_descs = _remove_no_grad_branch_(
        grad_op_descs,
        no_grad_dict[block.idx],
        grad_op_id_to_fwd_op,
        target_vars,
    )

    # remove some backward ops
    # TODO(Jiabin): Support this in prime later, it will prune add_grad, fix this problem
    if not core._is_bwd_prim_enabled():
        not_need_ops = _find_not_need_ops(
            grad_op_descs, ops, input_grad_names_set
        )
        grad_op_descs = [
            op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
        ]
    else:
        logging.debug("Running backward composite and disable find_not_need_ops")

    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for op_desc in grad_op_descs:
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
        grad_to_var["__current_op_desc__"] = new_op_desc
        if callbacks is not None:
            assert isinstance(callbacks, (list, tuple))
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)


def _is_grad_var_(var_name):
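    # Illustrative example: with the default grad suffix '@GRAD',
    # _is_grad_var_('fc_0.w_0@GRAD') is True and _is_grad_var_('fc_0.w_0') is False.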
    return core.grad_var_suffix() in var_name


# Find the op who holds the sub_block as its "sub_block" attr
def _find_parent_op_(sub_block):
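    # Illustrative example (hypothetical layout): if block 1 is the body held
    # by a while op created in block 0, passing program.block(1) returns that
    # while op's desc; for the root block this returns None.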
    sub_block_id = sub_block.idx

    if sub_block_id == 0:
        return None

    program = sub_block.program
    for block_id in range(program.num_blocks):
        block_desc = program.block(block_id).desc
        for op_idx in range(block_desc.op_size()):
            op = block_desc.op(op_idx)
            if (
                op.has_attr("sub_block")
                and op._block_attr_id("sub_block") == sub_block_id
            ):
                return op

    # NOTE(paddle-dev): When optimizer is added in conditional block,
    # sub_block may not be found.
    return None


def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
            val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable
    """
    ops_to_remove = []
    '''
    NOTE(paddle-dev): while_grad op may hold some inputs which are not found
    in the parent/forward block, and they are also the outputs of while_grad
    op. These kinds of inputs are the recursive outputs inside while_grad op.
    They should be considered as "already created" when scanning the inner
    ops of while_grad ops.
    '''
    parent_op = _find_parent_op_(block)
    parent_op_vars = []
    if parent_op is not None:
        input_args = parent_op.input_arg_names()
        output_args = parent_op.output_arg_names()
        for in_arg in input_args:
            if in_arg in output_args:
                parent_op_vars.append(in_arg)

    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
            sub_block = block.program.block(op_desc._block_attr_id("sub_block"))
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)

        grad_var_ins = [
            var for var in op_desc.input_arg_names() if _is_grad_var_(var)
        ]
        grad_var_outs = [
            var for var in op_desc.output_arg_names() if _is_grad_var_(var)
        ]

        inputs = [
1636 1637
            var
            for var in op_desc.input_arg_names()
1638 1639 1640
            if var != core.empty_var_name()
        ]
        outputs = [
1641 1642
            var
            for var in op_desc.output_arg_names()
1643 1644 1645
            if var != core.empty_var_name()
        ]

        # If the outputs of the grad op are empty, just remove it
        if not outputs:
            ops_to_remove.append(op_idx)
            continue
        else:
            '''
            If the output is not empty and there is any grad input, find
            whether there is any existing input. If not, just remove it.
            '''
            if grad_var_ins:
                existing_grad_var_ins = [
                    var
                    for var in grad_var_ins
                    if block.desc.has_var_recursive(var.encode())
                    or var in parent_op_vars
                ]
                if not existing_grad_var_ins:
                    '''
                    FIXME(paddle-dev, zengjinle): rnn_memory_helper_grad is used
                    in recurrent op. The input of this op does not even exist in
                    the program! Therefore, any dependency analysis would not
                    work for this op! If I do not add the following code, this op
                    would be pruned, and the calculation result would be wrong.
                    Maybe we should re-design this op later...
                    '''
                    if op_desc.type() not in ['rnn_memory_helper_grad']:
                        ops_to_remove.append(op_idx)
                        continue

        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
            if (
                block.desc.has_var_recursive(grad_var_name.encode())
                or grad_var_name == core.empty_var_name()
            ):
                continue
            block.desc.var(grad_var_name.encode())
            new_vars.add(grad_var_name)
            if grad_var_name not in grad_to_var:
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.check_attrs()
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)

        for arg in op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, block)

    for op_idx in reversed(ops_to_remove):
        block.desc._remove_op(op_idx, op_idx + 1)


def infershape_for_composite(block, grad_op_desc):
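    # Descriptive note: this appends grad_op_desc to `block` (tagged with the
    # Backward role), creates any still-missing output vars, and runs
    # infer_var_type/infer_shape so later composite ops see concrete
    # shapes and dtypes.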
    # pruning empty output
    if len(grad_op_desc.output_arg_names()) == 0:
        return

    # append op to block
    op_desc = block.desc.append_op()
    op_desc.copy_from(grad_op_desc)
    op_desc._set_attr(
        core.op_proto_and_checker_maker.kOpRoleAttrName(),
        core.op_proto_and_checker_maker.OpRole.Backward,
    )

    # create output var
    new_vars = set()
    # create new gradient variables
    for grad_var_name in op_desc.output_arg_names():
        if not (
            block.desc.has_var_recursive(grad_var_name.encode())
            or grad_var_name == core.empty_var_name()
        ):
            block.desc.var(grad_var_name.encode())
            new_vars.add(grad_var_name)

    # infer shape and infer dtype
    op_desc.check_attrs()
    op_desc.infer_var_type(block.desc)
    op_desc.infer_shape(block.desc)

    for arg in op_desc.output_arg_names():
        if arg in new_vars:
            _infer_var_data_type_shape_(arg, block)


def _rename_grad_(
    block, start_op_idx, grad_to_var, target_grad_map, skip_rename_var_list
):
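    # Descriptive note: renames @GRAD outputs created from op start_op_idx
    # onwards to fresh unique names (honoring target_grad_map and skipping
    # names in skip_rename_var_list), then keeps grad_to_var keys in sync.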
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
                op_desc._rename_input(name, var_map[name])

        for name in op_desc.output_arg_names():
            if "@GRAD" not in name:
                continue
            if block.desc.find_var(name.encode("ascii")):
                if name in skip_rename_var_list:
                    continue
                new_name = unique_name.generate(name)
                op_desc._rename_output(name, new_name)
                var_map[name] = new_name

    for g, ng in var_map.items():
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
        for var in list(block.vars.values()):
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict


def _get_son_parent_block_idx_dict(program, current_block_idx):
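    # Illustrative example (hypothetical nesting): with block 2 nested in
    # block 1 and block 1 nested in root block 0, current_block_idx=2 yields
    # OrderedDict([(2, 1), (1, 0), (0, -1)]): the path up to the root.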

    son_parent_block_idx_dict = collections.OrderedDict()
    while current_block_idx >= 0:
        parent_block_idx = program.block(current_block_idx).parent_idx
        son_parent_block_idx_dict[current_block_idx] = parent_block_idx
        current_block_idx = parent_block_idx

    return son_parent_block_idx_dict


def _get_no_grad_set_name(no_grad_set):
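    # Illustrative example: given a Parameter `w` and a name string 'fc_0.b_0',
    # _get_no_grad_set_name({w, 'fc_0.b_0'}) returns {w.name, 'fc_0.b_0'},
    # i.e. every member is normalized to a plain variable name.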
    no_grad_set_name = set()
    if no_grad_set is not None:
        if isinstance(no_grad_set, (set, list, tuple)):
            for i, no_grad_var in enumerate(no_grad_set):
                if isinstance(no_grad_var, framework.Variable):
                    no_grad_set_name.add(no_grad_var.name)
                elif isinstance(no_grad_var, str):
                    no_grad_set_name.add(no_grad_var)
                else:
                    raise TypeError(
                        "The type of no_grad_set's member must be paddle.fluid.Variable or str, but received %s."
                        % (type(no_grad_var))
                    )
        else:
            raise TypeError(
                "The type of no_grad_set should be set or list or tuple, but received {}".format(
                    type(no_grad_set)
                )
            )
    return no_grad_set_name


@framework.static_only
def append_backward(
    loss,
    parameter_list=None,
    no_grad_set=None,
    callbacks=None,
    checkpoints=None,
    distop_context=None,
):
    """
    :api_attr: Static Graph

    This function appends backward part to main_program.

    A complete neural network training is made up of forward and backward
    propagation. However, when we configure a network, we only need to
    specify its forward part. This function uses the chain rule to automatically
    generate the backward part according to the forward part.

    In most cases, users do not need to invoke this function manually.
    It will be automatically invoked by the optimizer's `minimize` function.

    Parameters:
        loss(Tensor): The loss Tensor of the network.
        parameter_list(list[Tensor|str]|tuple[Tensor|str], optional): List/Tuple of Parameters or Parameter.names
                                           that need to be updated by optimizers.
                                           If it is None, all parameters
                                           will be updated.
                                           Default: None.
        no_grad_set(set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All Tensors with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               If this parameter is not None, the Tensors or Tensor.names in this set will be added to the default set.
                               Default: None.
        callbacks(list[callable object]|tuple[callable object], optional): List/Tuple of callback functions.
                                               The callbacks are used for
                                               doing some custom jobs during
                                               backward part building. All
                                               callable objects in it will
                                               be invoked once each time a
                                               new gradient operator is added
                                               into the program. The callable
                                               object must have two input
                                               parameters: ``block`` and ``context`` .
                                               The ``block`` is the :ref:`api_guide_Block_en` which
                                               the new gradient operator will
                                               be added to. The ``context`` is a
                                               map, whose keys are gradient
                                               Tensor names and values are
                                               corresponding original :ref:`api_guide_tensor_en` .
                                               In addition to this, the ``context``
                                               has another special key-value pair:
                                               the key is string ``__current_op_desc__``
                                               and the value is the op_desc of the
                                               gradient operator who has just
                                               triggered the callable object.
                                               Default: None.

    Returns:
        list of tuple ( :ref:`api_guide_tensor_en` , :ref:`api_guide_tensor_en` ): Pairs of parameter and its corresponding gradients.
        The key is the parameter and the value is gradient Tensor.

    Raises:
        AssertionError: If ``loss`` is not an instance of Tensor.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[None, 13], dtype='int64')
            y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
            x_emb = paddle.static.nn.embedding(x, size=[100, 256])
            y_predict = paddle.static.nn.fc(x=x_emb, size=1, activation=None, name='my_fc')
            loss = F.square_error_cost(input=y_predict, label=y)
            avg_loss = paddle.mean(loss)

            # Get all weights in main_program, not including bias.
            all_weights = [param for param in paddle.static.default_main_program().block(0).all_parameters() if 'w_' in param.name]
            all_weights_name = [w.name for w in all_weights]

            # return all param_grads needed to be updated if parameter_list is None (the default).
            p_g_list1 = paddle.static.append_backward(loss=avg_loss)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

            # return the param_grads corresponding to parameter_list that can be list of param (Tensor).
            p_g_list2 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # parameter_list can be list of param.name (str).
            p_g_list3 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights_name)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # no_grad_set can be set of Tensors that means grad will be cut off from these Tensors.
            p_g_list4 = paddle.static.append_backward(loss=avg_loss, no_grad_set=set([x_emb]))
            # output: [(my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

            # no_grad_set can be set of Tensor.name when the Tensor is created inside layers and can't be specified explicitly.
            p_g_list5 = paddle.static.append_backward(loss=avg_loss, no_grad_set=set(['my_fc.b_0']))
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # return [] because all param_grads are filtered by no_grad_set.
            p_g_list6 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights, no_grad_set=set(all_weights))

    """
    grad_op_id_to_fwd_op = (
        {}
    )  # for cuda graph usage, recording the mapping between grad op original id and fwd op

    check_type(
        loss, 'loss', framework.Variable, 'paddle.static.append_backward'
    )

    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
        _find_loss_op_(loss)

    loss.op._set_attr(
        core.op_proto_and_checker_maker.kOpRoleAttrName(),
        int(core.op_proto_and_checker_maker.OpRole.Forward)
        | int(core.op_proto_and_checker_maker.OpRole.Loss),
    )

    if callbacks is not None:
        check_type(
            callbacks,
            'callbacks',
            (list, tuple),
            'paddle.static.append_backward',
        )

    program = loss.block.program
    root_block = program.block(0)
    current_block_idx = program.current_block_idx
    current_block = program.block(current_block_idx)

    is_in_control_flow = current_block_idx != 0

    # Double grad is not supported in sub-block (control flow)
    if not is_in_control_flow:
        # _appending_grad_times used for double grad
        program._appending_grad_times += 1

    if no_grad_set is None:
        no_grad_set = set()
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
    no_grad_dict = _get_stop_gradients_(program)
    # no_grad_set only contains vars in block 0
    # Todo(liym27): support vars in sub block
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))

    # Currently this only supports optimizer.minimize being called in a
    # switch branch, which may append_backward in a sub_block.
    # Note: while_loop is in control flow, but it makes no sense to call optimizer in while.
    # Todo: report error when it is in while_loop
    if is_in_control_flow:
        # create grad block if in switch control flow.
        target_grad_block = program._create_block(
            parent_idx=current_block.parent_idx
        )
        target_grad_block._set_forward_block_idx(current_block_idx)
        # after _create_block, program.current_block changes
    else:
        target_grad_block = root_block

    son_parent_block_idx_dict = _get_son_parent_block_idx_dict(
        program, current_block_idx
    )

    block_fwd_op_num_dict = {}  # block_id: fwd_op_num
    for idx in son_parent_block_idx_dict:
        block_fwd_op_num_dict[idx] = program.block(idx).desc.op_size()

    grad_to_var = dict()

    # pass the cuda_graph_attr to the fill_constant which generates the loss_grad
    op_desc = _create_loss_op_desc_(loss)
    grad_op_id_to_fwd_op[op_desc.original_id()] = loss.op
    target_grad_block.desc.append_op().copy_from(op_desc)

    for block_idx in son_parent_block_idx_dict:
        block = program.block(block_idx)

        block_no_grad_set = set(
            map(_strip_grad_suffix_, no_grad_dict[block_idx])
        )

        op_path_dict = dict()
        op_path = _find_op_path_(
            block, [loss], [], block_no_grad_set, op_path_dict
        )

        no_grad_vars = _find_no_grad_vars(
            block, op_path, [loss], block_no_grad_set
        )

        block_no_grad_set.update(no_grad_vars)
        no_grad_dict[block_idx].update(
            list(map(_append_grad_suffix_, block_no_grad_set))
        )

        input_grad_names_set = None
        # For double backward, input_grad_names is used for filtering
        # out some unused gradient ops.

        # TODO(liym27): need a better design.
        # not support double grad in control flow sub-block now.
        if not is_in_control_flow:
            if program._appending_grad_times > 1:
                input_grad_names_set = set([_append_grad_suffix_(loss.name)])

        # TODO: support _append_backward_ops_with_checkpoints_ in
        #  sub-block (control flow)
        is_recompute = False
        if (
            checkpoints is not None
            and isinstance(checkpoints, list)
            and len(checkpoints) > 0
        ):
            is_recompute = True
            (
                program_stat,
                checkpoint_names,
                vars_should_be_hold,
                recompute_segments,
            ) = _append_backward_ops_with_checkpoints_(
                root_block,
                op_path,
                [loss],
                root_block,
                no_grad_dict,
                grad_to_var,
                checkpoints,
                grad_op_id_to_fwd_op,
            )
        else:
            _append_backward_ops_(
                block,  # the block where forward ops are in
                op_path,
                [loss],
                target_grad_block,
                no_grad_dict,
                grad_to_var,
                callbacks,
                input_grad_names_set=input_grad_names_set,
                op_path_dict=op_path_dict,
                distop_context=distop_context,
                grad_op_id_to_fwd_op=grad_op_id_to_fwd_op,
            )

    grad_info_map = dict()

    # if in control flow, target_grad_block is a created new block which only contains grad ops,
    # so fwd_op_num is set to 0.
    fwd_op_num = (
        block_fwd_op_num_dict[current_block_idx]
        if not is_in_control_flow
        else 0
    )

    # Because append_backward may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(target_grad_block, fwd_op_num, grad_to_var, {}, [])

    _append_backward_vars_(
        target_grad_block, fwd_op_num, grad_to_var, grad_info_map
    )

    program.current_block_idx = current_block_idx
    program._sync_with_cpp()

    # for cuda graph, copy the cuda graph attr from forward op to backward op
    for op in target_grad_block.ops:
        if grad_op_id_to_fwd_op.get(op.desc.original_id(), None) is not None:
            fwd_op = grad_op_id_to_fwd_op[op.desc.original_id()]
            op._cuda_graph_attr = fwd_op._cuda_graph_attr

    if parameter_list is not None:
        check_type(
            parameter_list,
            'parameter_list',
            (list, tuple, set),
            'fluid.backward.append_backward',
        )
        parameters = []
        for i, param in enumerate(parameter_list):
            check_type(
                param,
                'parameter_list[%s]' % i,
                (framework.Variable, str),
                'fluid.backward.append_backward',
            )
            if isinstance(param, framework.Variable):
                parameters.append(param.name)
            elif isinstance(param, str):
                parameters.append(param)
    else:
        params = program.global_block().all_parameters()
        parameters = [param.name for param in params if param.trainable]

    params_and_grads = []
    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
    for param in parameters:
        if param not in grad_info_map:
            continue
        grad_info = grad_info_map[param]
        grad_block = grad_info[1]
        if not grad_block.has_var(grad_info[0]):
            raise ValueError(
                "grad block[{0}] did not have grad var {1}".format(
                    grad_info[1], grad_info[0]
                )
            )
        # Get the param var from the global block
        param_var = program.global_block().var(param)
        grad_var = grad_block.var(grad_info[0])
        if not is_in_control_flow:
            if loss.block.has_var(grad_info[0]):
                params_and_grads.append((param_var, grad_var))
            else:
                params_and_grads.append((param_var, None))
        else:
            params_and_grads.append((param_var, grad_var))

    for p, g in params_and_grads:
        if g is None:
            continue
        ops = (
            grad_block.ops if is_in_control_flow else program.global_block().ops
        )
        for op in reversed(ops):
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
        attr_val = [p.name, g.name]
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
        g.op._set_attr(op_role_var_attr_name, attr_val)

    if is_recompute:
        return params_and_grads, checkpoint_names
    else:
        return params_and_grads


def _as_list(x):
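    # Illustrative example: _as_list(None) -> [], _as_list(t) -> [t] for a
    # single Tensor, and _as_list((a, b)) -> [a, b].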
    if x is None:
        return []
    return list(x) if isinstance(x, Sequence) else [x]


def _is_ancestor_block(ancestor_block, block):
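    # Illustrative example: returns True when walking `block`'s parent_idx
    # chain reaches `ancestor_block`, e.g. for a while body nested (directly
    # or transitively) inside `ancestor_block`.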
    prog = block.program
    ancestor_idx = ancestor_block.idx
    parent_idx = block.parent_idx

    while parent_idx != -1:
        if parent_idx == ancestor_idx:
            return True
        parent_idx = prog.block(parent_idx).parent_idx

    return False


def _get_output_names(cur_block, targets):
    """
    In `cur_block`, get output names those linked to targets.
    NOTE:
    1. `targets` can be in `cur_block`;
    Usually, `targets` is in `cur_block`. However, considering control flow,
    2. `targets` may be in sub-block but `cur_block` is an ancestor of `targets[0].block`;
    3. `targets` may be in the block which is ancestor of `cur_block`.
    """

    block = targets[0].block if targets else cur_block
    current_output_names = set([out.name for out in targets])

    # 1. If `targets` is in cur_block or an ancestral block of `cur_block`
    if block.idx == cur_block.idx or _is_ancestor_block(block, cur_block):
        return current_output_names

    # 2. If `cur_block` is an ancestor of `targets[0].block`, run while loop
    prog = cur_block.program
    while block.idx != cur_block.idx:
        assert block.parent_idx != -1
        parent_block = prog.block(block.parent_idx)

        parent_block_output_names = set()
        for op in reversed(block.ops):
            if _some_in_set_(op.desc.output_arg_names(), current_output_names):
                for name in op.desc.input_arg_names():
                    current_output_names.add(name)
                    if not block.desc.find_var(
                        name.encode()
                    ) and parent_block.desc.find_var(name.encode()):
                        parent_block_output_names.add(name)

        block = parent_block
        current_output_names = parent_block_output_names

    return current_output_names


def _find_no_grad_vars(block, op_path, targets, no_grad_set):
    """
    Find the vars which are not used in the program, and
    which therefore belong to no_grad_var.
    """
    output_names = _get_output_names(block, targets)
    no_grad_var = []
    for i, op in reversed(list(enumerate(op_path))):
        # If the op has sub_block, it is too complicated to find the correct no_grad_var.
        if not op.has_attr("sub_block"):
            for out_var in op.desc.output_arg_names():
                if (
                    out_var not in output_names
                    and out_var not in op.desc.input_arg_names()
                    and not block.vars[out_var].stop_gradient
                ):
                    no_grad_var.append(out_var)
        for name in op.desc.input_arg_names():
            if name not in no_grad_set:
                output_names.add(name)
    return set(no_grad_var)


def _find_op_path_(
    block, targets, inputs, no_grad_set, op_path_dict=None, is_while=False
):
    """
    It is used to find the grad path in `block`.

    Args:
        block(Block): The block in which to get op path.
        targets(list[Variable]): The target variables.
        inputs(list[Variable]): The input variables.
        no_grad_set(set): The set of no grad var name. no_grad_set will be changed.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        is_while(bool): Whether or not `block` is a while block
    Return:
        The forward op path of block corresponding to backward op.
    """

    input_names = set([inp.name for inp in inputs])
    output_names = _get_output_names(block, targets)
    if op_path_dict is None:
        op_path_dict = dict()

    relevant_op_flags = [True] * len(block.ops)

    # All the inputs of the block are used if inputs is empty.
    if inputs:
        for i, op in enumerate(block.ops):
            if _some_in_set_(
                op.desc.input_arg_names(), input_names
            ) and core.has_non_empty_grad_op_maker(op.type):
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
        if op.has_attr("sub_block"):
            sub_block_id = op._block_attr_id("sub_block")
            sub_block = block.program.block(sub_block_id)
            sub_block_target_names = output_names & set(op.output_arg_names)
            sub_block_path = _get_sub_block_path(
                sub_block, op, set(), op_path_dict, sub_block_target_names
            )
            op_path_dict[sub_block_id] = sub_block_path

        if _some_in_set_(
            op.desc.output_arg_names(), output_names
        ) and core.has_non_empty_grad_op_maker(op.type):
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

    if is_while:
        # If block is a while block, deal with its ops specially again.
        # TODO(liym27): Consider special types of ops.
        for i, op in reversed(list(enumerate(block.ops))):
            if not relevant_op_flags[i] and _some_in_set_(
                op.desc.output_arg_names(), output_names
            ):
                relevant_op_flags[i] = True
                if core.has_non_empty_grad_op_maker(op.type):
                    for name in op.desc.input_arg_names():
                        if name not in no_grad_set:
                            output_names.add(name)

    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
                if name not in input_names and block.vars[name].stop_gradient:
                    no_grad_set.add(name)

    return op_path


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the gradients of targets to inputs.

    Args:
        targets(Tensor|list[Tensor]|tuple[Tensor]): The target Tensors
        inputs(Tensor|list[Tensor]|tuple[Tensor]): The input Tensors
        target_gradients (Tensor|list[Tensor]|tuple[Tensor], optional): The gradient Tensors
            of targets which have the same shape as targets. If None, ones will
            be created for them.
        no_grad_set(set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All Tensors with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               If this parameter is not None, the Tensors or Tensor.names in this set will be added to the default set.
                               Default: None.

    Return:
        (list[Tensor]): A list of gradients for inputs.
        If an input does not affect targets, the corresponding gradient Tensor
        will be None
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
    # increase appending gradients times
    prog._appending_grad_times += 1
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets"
        )

    if no_grad_set is None:
        no_grad_set = set()
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
    no_grad_dict = _get_stop_gradients_(prog)
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))

    fwd_op_num = block.desc.op_size()

    input_grad_names_set = set()

    target_grad_map = {}
    rename_var_map = {}
    skip_rename_var_list = []
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        grad_name = _append_grad_suffix_(target.name)
        if grad is None:
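            # No gradient supplied for this target: seed the backward pass
            # with ones of the target's shape via fill_any_like(value=1.0),
            # i.e. d(target)/d(target) == 1.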
            op_desc = _create_op_desc_(
                "fill_any_like",
                {"X": [target.name]},
                {"Out": [grad_name]},
                {
                    "value": 1.0,
                    "dtype": target.dtype,
                },
            )
            block.desc.append_op().copy_from(op_desc)
            block.program._sync_with_cpp()
            input_grad_names_set.add(grad_name)
            skip_rename_var_list.append(grad_name)
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s"
                    % (target.name, grad.name)
                )
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name
            input_grad_names_set.add(grad.name)
            rename_var_map[grad_name] = grad.name

    if core._is_bwd_prim_enabled():
        core._set_prim_target_grad_name(target_grad_map)

    # For double backward, input_grad_names is used to filter out some
    # unused gradient ops. rename_var_map is used to
    # associate target_grad var name with first grad_op input name.
    if prog._appending_grad_times == 1:
        input_grad_names_set = None
        rename_var_map = {}

    for input in inputs:
        if input.block.program != prog:
            raise "input must be in the same program as targets"

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))

    op_path_dict = dict()
    op_path = _find_op_path_(
        block, targets, inputs, block_no_grad_set, op_path_dict
    )

    # find no grad var by op_path
    no_grad_vars = _find_no_grad_vars(
        block, op_path, targets, block_no_grad_set
    )
2436 2437
    block_no_grad_set.update(no_grad_vars)

2438
    no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
2439 2440
    grad_to_var = dict()
    grad_info_map = dict()
2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451
    _append_backward_ops_(
        block,
        op_path,
        targets,
        block,
        no_grad_dict,
        grad_to_var,
        input_grad_names_set=input_grad_names_set,
        op_path_dict=op_path_dict,
        rename_var_map=rename_var_map,
    )

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(
        block, fwd_op_num, grad_to_var, target_grad_map, skip_rename_var_list
    )

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
    prog._sync_with_cpp()

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars


@framework.static_only
def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
    """

    Backpropagate the gradients of targets to inputs.

    Args:
        targets (Tensor|list[Tensor]|tuple[Tensor]): The target Tensors.
        inputs (Tensor|list[Tensor]|tuple[Tensor]): The input Tensors.
        target_gradients (Tensor|list[Tensor]|tuple[Tensor], optional): The gradient Tensor
            of targets which has the same shape as targets. If None, ones will
            be created for them.
        no_grad_set (set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
            should be ignored. All Tensors with ``stop_gradient=True`` from all blocks will
            be automatically added into this set. If this parameter is not None, the Tensors or Tensor.names
            in this set will be added to the default set. Default: None.

    Return:
        (list[Tensor]): A list of gradients for inputs.
        If an input does not affect targets, the corresponding gradient Tensor
        will be None.

    Examples:

        .. code-block:: python
            :name: code-example

            import paddle
            import paddle.nn.functional as F

            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
            x.stop_gradient = False
            y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
            y = F.relu(y)
            z = paddle.static.gradients([y], x)
            print(z) # [var x@GRAD : LOD_TENSOR.shape(-1, 2, 8, 8).dtype(float32).stop_gradient(False)]
    """
    check_type(
        targets,
        'targets',
        (framework.Variable, list, tuple),
        'paddle.static.gradients',
    )
    check_type(
        inputs,
        'inputs',
        (framework.Variable, list, tuple),
        'paddle.static.gradients',
    )
    check_type(
        target_gradients,
        'target_gradients',
        (framework.Variable, list, tuple, type(None)),
        'paddle.static.gradients',
    )
    outs = calc_gradient(targets, inputs, target_gradients, no_grad_set)
    return _as_list(outs)


@framework.static_only
def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None):
    """
    :api_attr: Static Graph

    Backpropagate the gradients of the program and apply the gradients with the given optimizer.

    Args:
        program (Program): The input program.
        optimizer (Optimizer): The optimizer to apply the gradients.
        inputs (Tensor|list[Tensor]|tuple[Tensor], optional): The input Tensors.
            If None, the inputs will be created from the input variables in the given program. Default: None.
        outputs (Tensor|list[Tensor]|tuple[Tensor], optional): The output Tensors.
            If None, the outputs will be created from the output variables in the given program. Default: None.

    Returns:
        tuple: A tuple ``(optimize_ops, params_grads)``. ``optimize_ops`` is a list of
            operators appended by gradients_with_optimizer, and ``params_grads`` is a
            list of (param, grad) variable pairs, where param is a ``Parameter`` and
            grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before running; see details in ``Executor``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.static as static

            paddle.enable_static()

            img = static.data(name='image', shape=[None, 784])
            pred = static.nn.fc(x=img, size=10, activation='relu')
            loss = paddle.mean(pred)
            opt = paddle.optimizer.SGD(learning_rate=0.01)

            opt_ops, param_grads = paddle.fluid.backward.gradients_with_optimizer(static.default_main_program(), opt)
            print(opt_ops)

    """
    check_type(
        program,
        'program',
        paddle.fluid.Program,
        'paddle.static.gradients_with_optimizer',
    )
    check_type(
        optimizer,
        'optimizer',
        paddle.optimizer.Optimizer,
        'paddle.static.gradients_with_optimizer',
    )

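    # If inputs/outputs are not given, infer them from the program: inputs are
    # variables consumed but never produced by any op, outputs are variables
    # produced but never consumed.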
    if inputs is None or outputs is None:
        in_set = set()
        out_set = set()
        for block in program.blocks:
            for op in block.ops:
                for name in op.input_arg_names:
                    in_set.add(block.vars[name])
                for name in op.output_arg_names:
                    out_set.add(block.vars[name])
        if inputs is None:
            inputs = list(in_set.difference(out_set))
        if outputs is None:
            outputs = list(out_set.difference(in_set))

    grads = gradients(outputs, inputs)

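    # Switch the default main program so the optimizer ops are appended to `program`.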
    with program_guard(program, None):
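        # Keep only the (Parameter, gradient) pairs whose gradient actually exists.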
        param_grads = [
            (param, grad)
            for param, grad in zip(inputs, grads)
            if isinstance(param, paddle.fluid.framework.Parameter)
            and grad is not None
        ]

        optimize_ops = optimizer.apply_gradients(param_grads)

    return optimize_ops, param_grads