#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from paddle.fluid import framework as framework
from . import core
import collections
import copy
import six
import logging
from .. import compat as cpt
from . import unique_name
from . import log_helper
import paddle.fluid
from .data_feeder import check_type

__all__ = [
    'append_backward',
    'gradients',
]

_logger = log_helper.get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')


class ProgramStats(object):
    def __init__(self, block, ops):
        self.block = block
        self.ops = ops
        self.op_deps = {}  # op-> in_ops, out_ops
        self.var_op_deps = {}  # var as input op, var as output op

    def get_input_nodes(self):
        input_names = []
        for name in self.var_op_deps:
            if len(self.var_op_deps[name]["var_as_output_ops"]) == 0 and \
               len(self.var_op_deps[name]["var_as_input_ops"]) > 0:
                if self.block.var(name).persistable:
                    continue
                input_names.append(name)
        for op in self.ops:
            if op.desc.type() == "read":
                input_names.extend(op.desc.output_arg_names())
        return input_names

    def get_reserved_vars(self):
        var_name = []
        for op in self.ops:
            if op.desc.type() == "seed":
                var_name.extend(op.desc.output_arg_names())
        return var_name

    def get_out_of_subgraph_vars(self, begin_op_idx, end_op_idx):
        var_name = []
        for i in range(begin_op_idx, end_op_idx, 1):
            for name in self.ops[i].desc.output_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_input_ops"]:
                        if idx >= end_op_idx:
                            var_name.append(name)
            for name in self.ops[i].desc.input_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_output_ops"]:
                        if idx < begin_op_idx:
                            var_name.append(name)
        return var_name

    def is_subgraph(self, var_group1, var_group2):
        # should traverse from var_group1 to var_group2
        # max op idx in var_group2
        # min op idx in var_group1
        min_op_idx = len(self.ops)
        max_op_idx = -1
        for name in var_group1:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group2:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group1:
            op_idx = self.var_op_deps[name]["var_as_input_ops"]
            for idx in op_idx:
                min_op_idx = min(min_op_idx, idx)
        for name in var_group2:
            op_idx = self.var_op_deps[name]["var_as_output_ops"]
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if min_op_idx >= max_op_idx:
            return False, min_op_idx, max_op_idx
        return True, min_op_idx, max_op_idx

    def build_stats(self):
        for i, op in enumerate(self.ops):
            self.op_deps[i] = {"in_ops": [], "out_ops": []}
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.op_deps[i]["in_ops"].extend(self.var_op_deps[name][
                        "var_as_output_ops"])
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_input_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = [i]
                    self.var_op_deps[name]["var_as_output_ops"] = []

            for j, name in enumerate(op.desc.output_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_output_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = []
                    self.var_op_deps[name]["var_as_output_ops"] = [i]

            for op_idx in self.op_deps[i]["in_ops"]:
                self.op_deps[op_idx]["out_ops"].extend([i])

    def sort_checkpoints(self, checkpoints_name):
        sorted_checkpoints = []
        for name in checkpoints_name:
            if name not in self.var_op_deps:
                _logger.debug(
                    "Recompute Optimizer: deleted %s from checkpoints, because it is not used in paddle program."
                    % name)
            elif self.var_op_deps[name]["var_as_output_ops"] == []:
                # input nodes
                sorted_checkpoints.append((name, -1))
            else:
                sorted_checkpoints.append(
                    (name, max(self.var_op_deps[name]["var_as_output_ops"])))
        sorted_checkpoints = sorted(sorted_checkpoints, key=lambda x: x[1])
        return [x[0] for x in sorted_checkpoints]

    def modify_forward_desc_for_recompute(self):
        op_types = [op.desc.type() for op in self.ops]
        if "dropout" not in op_types:
            return

        op_idx = 0
        while (op_idx < len(self.ops)):
            op = self.ops[op_idx]
            if op.desc.type() != "dropout":
                op_idx += 1
                continue
            # add a seed op so that the two dropout ops can generate the same output
            op_unique_name = unique_name.generate("seed")
            var_unique_name = unique_name.generate_with_ignorable_key(".".join(
                [op_unique_name, 'tmp']))
            added_var = self.block.create_var(
                name=var_unique_name,
                dtype='int32',
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False)
            seed = 0 if op.attr("fix_seed") is False else int(op.attr("seed"))
            added_op = self.block._insert_op(
                index=op.idx,
                type='seed',
                inputs={},
                outputs={'Out': [added_var]},
                attrs={'seed': seed})
            self.ops.insert(op_idx, added_op)
            # modify the dropout op desc so that it accepts a seed var as input
            op.desc.set_input("Seed", [var_unique_name])
            op.desc.remove_attr("fix_seed")
            op.desc.remove_attr("seed")
            self.block._sync_with_cpp()
            op_idx += 2
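
# A minimal usage sketch of ProgramStats (illustrative only; `main_block` is a
# hypothetical Block whose ops make up the forward pass):
#
#   stats = ProgramStats(main_block, main_block.ops)
#   stats.modify_forward_desc_for_recompute()  # give dropout ops explicit seed inputs
#   stats.build_stats()                        # fill op_deps / var_op_deps
#   feed_names = stats.get_input_nodes()       # non-persistable graph inputs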


def _pretty_op_desc_(op_desc, prefix):
    out_s = "%s\tname:[%s]\n%s    \tinputs:[%s]\n%s    \toutputs:[%s]" % \
            (prefix + "_op", str(op_desc.type()), prefix + "_input", " ".join(op_desc.input_arg_names()),
             prefix + "_output", " ".join(op_desc.output_arg_names()))
    return out_s


def _add_needed_descs_to_block(descs, block, main_block, in_memory_vars):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = \
            core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        if isinstance(desc, framework.Operator):
            desc = desc.desc
        if isinstance(desc, tuple):
            desc = desc[0]
        is_needed = False
        for name in desc.output_arg_names():
            if main_block.has_var(name) and main_block.var(name).persistable:
                continue
            if name not in in_memory_vars:
                is_needed = True
        if is_needed:
            new_op_desc = block.desc.append_op()
            new_op_desc.copy_from(desc)
            new_op_desc._set_attr(op_role_attr_name, backward)
            result_descs.append(new_op_desc)
    return result_descs


def _add_descs_to_block(descs, block):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = \
        core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        if isinstance(desc, framework.Operator):
            desc = desc.desc
        if isinstance(desc, tuple):
            desc = desc[0]
        new_op_desc = block.desc.append_op()
        new_op_desc.copy_from(desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
        result_descs.append(new_op_desc)
    return result_descs


def _find_loss_op_(loss):
    for op in reversed(loss.block.ops):
        assert isinstance(op, framework.Operator)
        if len(op.output_arg_names) == 1 and op.output_arg_names[
                0] == loss.name:
            loss.op = op
            break
    if loss.op is None:
        raise ValueError("loss.op is None. This should not happen.")


def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
    Traverse all ops in op_descs[begin_idx : end_idx];
    if any op has inputs or outputs named "old_name", rename them to "new_name".
    """
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
        end_idx = len(op_descs)
    for i in range(begin_idx, end_idx):
        op_desc = op_descs[i]
        if isinstance(op_desc, tuple):
            op_desc = op_desc[0]
        op_desc._rename_input(old_name, new_name)
        op_desc._rename_output(old_name, new_name)


def _create_op_desc_(op_type, inputs, outputs, attrs):
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
    """
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
    for para, args in six.iteritems(inputs):
        op_desc.set_input(
            para,
            list(
                map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg,
                    args)))
    for para, args in six.iteritems(outputs):
        op_desc.set_output(
            para,
            list(
                map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg,
                    args)))

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
    if op_device_attr_name not in attrs:
        attrs[op_device_attr_name] = ""
    for name, val in six.iteritems(attrs):
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
            op_desc._set_attr(name, val)
    return op_desc
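
# Illustrative call (the variable names are made up): build the desc of a sum
# op that accumulates two renamed gradient vars back into "x@GRAD", mirroring
# how _addup_repetitive_outputs_ below uses this helper:
#
#   op_desc = _create_op_desc_(
#       "sum",
#       {"X": ["x@GRAD@RENAME@block0@0", "x@GRAD@RENAME@block0@1"]},
#       {"Out": ["x@GRAD"]},
#       {"use_mkldnn": False})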


def _create_loss_op_desc_(loss):
    op_desc = _create_op_desc_(
        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
            "shape": [1],
            "value": 1.0,
            "dtype": loss.dtype,
            "force_cpu": False,
            core.op_proto_and_checker_maker.kOpRoleAttrName():
            int(core.op_proto_and_checker_maker.OpRole.Backward) |
            int(core.op_proto_and_checker_maker.OpRole.Loss),
            core.op_proto_and_checker_maker.kOpDeviceAttrName():
            loss.op.attr(core.op_proto_and_checker_maker.kOpDeviceAttrName())
        })
    return op_desc


def _infer_var_data_type_shape_(grad_var_name, block):
    """
    Infer the data type and shape of the given grad variable.
    """
    grad_var = block.desc.find_var(cpt.to_bytes(grad_var_name))
    fwd_name = _strip_grad_suffix_(grad_var_name)
    if block.desc.has_var_recursive(cpt.to_bytes(fwd_name)):
        fwd_var = block.desc.find_var_recursive(cpt.to_bytes(fwd_name))
        grad_var.set_dtype(fwd_var.dtype())
        grad_var.set_shape(fwd_var.shape())
    else:
        grad_var.set_dtype(core.VarDesc.VarType.FP32)


def _all_in_set_(cands, s):
    """
    Test if all elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c not in s:
            return False
    return True


def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    literal_set = cpt.to_text(s)
    literal_cands = cpt.to_text(cands)
    for c in literal_cands:
        if c in literal_set:
            return True
    return False


def _strip_grad_suffix_(name):
    """
    Strip the grad suffix from the given variable name
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
    name = cpt.to_text(name)
    pos = name.find(core.grad_var_suffix())
    return name[:pos] if pos != -1 else name


def _append_grad_suffix_(name):
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
    return cpt.to_text(name) + core.grad_var_suffix()


def _addup_repetitive_outputs_(op_descs, block_idx):
    """
    In backward part, an variable may be the output of more than one ops.
F
fengjiayi 已提交
375 376
    And one op may yield its multiple outputs to the same variable.
    In these cases, the variable should be the accumulation of all the outputs.
377 378
    `sum_op`s are added to implement the accumulate.
    """
    pending_sum_ops = []
    var_rename_count = collections.defaultdict(int)
    renamed_vars = collections.defaultdict(list)
    renamed_var_start_idx = collections.defaultdict(list)
    for idx, op_desc in enumerate(op_descs):
        for var_name in op_desc.input_arg_names():
            if "@GRAD" not in var_name:
                continue
            if len(renamed_vars[var_name]) > 1:
                pending_sum_ops.append((_create_op_desc_(
                    "sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]},
                    {"use_mkldnn": False}), idx))
                renamed_vars[var_name] = [var_name]
        for param_idx, param_name in enumerate(op_desc.output_names()):
            arg_names = op_desc.output(param_name)
            for arg_idx, var_name in enumerate(arg_names):
                if "@GRAD" not in var_name:
                    continue
                #if "@RENAME@" in var_name:
                #    continue
                if var_name == core.empty_var_name(
                ) or var_name in op_desc.input_arg_names():
                    # empty variable or inplace op
                    continue
                if len(renamed_vars[var_name]) == 0:
                    # it's the first time we get the variable
                    renamed_vars[var_name] = [var_name]
                    renamed_var_start_idx[var_name] = idx
                else:
                    if len(renamed_vars[var_name]) == 1:
                        new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \
                            str(var_rename_count[var_name])
                        var_rename_count[var_name] += 1
                        # rename original var_name
                        renamed_vars[var_name][0] = new_name
                        # before change: _rename_arg_(op_descs, var_name,
                        #                             new_name, 0, idx)
                        # rename the arg starting from the idx of its first
                        # appearance in the backward pass, not always from 0
                        _rename_arg_(op_descs, var_name, new_name,
                                     renamed_var_start_idx[var_name], idx)
                        _rename_arg_(pending_sum_ops, var_name, new_name)

                        for p in op_desc.output_names()[:param_idx]:
                            p_arg_names = op_desc.output(p)
                            if var_name in p_arg_names:
                                op_desc.set_output(p, [
                                    new_name if x == var_name else x
                                    for x in p_arg_names
                                ])

                        arg_names = [
                            new_name if x == var_name else x
                            for x in arg_names[:arg_idx]
                        ] + arg_names[arg_idx:]

                    new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \
                        str(var_rename_count[var_name])
                    var_rename_count[var_name] += 1
                    arg_names[arg_idx] = new_name
                    op_desc.set_output(param_name, arg_names)
                    renamed_vars[var_name].append(new_name)

    for var_name, inputs in six.iteritems(renamed_vars):
        if len(inputs) > 1:
            pending_sum_ops.append(
                (_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]},
                                  {"use_mkldnn": False}), len(op_descs)))
    # sum_op descs are sorted according to their insert position
    for p in reversed(pending_sum_ops):
        op_descs.insert(p[1], p[0])

    return op_descs
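
# A worked illustration of the renaming scheme above (hypothetical names):
# if two backward ops both write "x@GRAD", the two outputs are renamed to
# "x@GRAD@RENAME@block0@0" and "x@GRAD@RENAME@block0@1", and an op equivalent to
#
#   sum(X=["x@GRAD@RENAME@block0@0", "x@GRAD@RENAME@block0@1"], Out=["x@GRAD"])
#
# is inserted before the first consumer of "x@GRAD" (or at the end of the
# backward list) to accumulate them back into a single gradient.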


def _remove_no_grad_branch_(op_descs, no_grad_set):
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
        2. all grad inputs of the grad op are in 'no_grad_set'
    """

    def _op_can_be_removed_(op_desc, no_grad_set):
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
            return True
        if _all_in_set_([
                name for name in op_desc.input_arg_names()
                if name.find(core.grad_var_suffix()) != -1
        ], no_grad_set):
            no_grad_set.update(out_arg_names)
            return True
        return False

    # Remove ops whose outputs are all in no_grad_set
    op_descs = [
        op_desc for op_desc in op_descs
        if not _op_can_be_removed_(op_desc, no_grad_set)
    ]
    # Insert fill_zeros_like_op
    to_insert = []
    for idx, op_desc in enumerate(op_descs):
        for arg in op_desc.input_arg_names():
            # arg is a gradient var name marked as having no gradient
            if core.grad_var_suffix() in arg and arg in no_grad_set:
                x_in = _strip_grad_suffix_(arg)
                # the reason is that arg can be an input of another grad op,
                # and that op has not been removed
                to_insert.append((_create_op_desc_(
                    "fill_zeros_like", {"X": [x_in]}, {"Out": [arg]}, {}), idx))

    for p in reversed(to_insert):
        op_descs.insert(p[1], p[0])

    return op_descs
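
# Sketch of the zero-filling rule above (hypothetical names): if "y@GRAD" is in
# no_grad_set but a surviving grad op still consumes it, an op equivalent to
#
#   fill_zeros_like(X=["y"], Out=["y@GRAD"])
#
# is inserted before that consumer so it reads an all-zero gradient.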


def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
    """
    Pruning Program with Structural Analysis Method of Computational Graph.
    The nodes of the computational graph composed of backward OPS should be
    interconnected. If there are unconnected sub-graphs in the computational graph,
    these sub-graphs should be cut off.

    Args:
        grad_op_descs(list[core.OpDesc]): The candidate backward OpDescs.
        forward_ops(list[Operator]): The forward ops.
        input_grad_names_set(set): this set is used to store the gradients' name
            which is generated by backward ops, and input_grad_names_set can help
            to prune the unnecessary backward ops.

    Return:
        (set[core.OpDesc]): A set of OpDescs which should be pruned.
    """

    class Var(object):
        def __init__(self, var_name):
            self.var_name = var_name
            self.gen_op = None
            self.pending_ops = []

        def set_gen_op(self, gen_op):
            assert isinstance(gen_op, Op)
            assert self.gen_op is None
            self.gen_op = gen_op

        def add_pending_op(self, op):
            assert isinstance(op, Op)
            self.pending_ops.append(op)

    class Op(object):
        def __init__(self, op_desc):
            self.op_desc = op_desc
            self.inputs = []
            self.outputs = []

        def insert_input(self, var):
            assert isinstance(var, Var)
            self.inputs.append(var)

        def insert_output(self, var):
            assert isinstance(var, Var)
            self.outputs.append(var)

    var_versions = dict()

    def _create_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        else:
            var_versions[name].append(Var(name))
        return var_versions[name][-1]

    def _create_or_get_last_version_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        return var_versions[name][-1]

    def _create_op_node(op_desc):
        op_node = Op(op_desc)
        for input in op_desc.input_arg_names():
            var = _create_or_get_last_version_node(name=input)
            var.add_pending_op(op_node)
            op_node.insert_input(var)
        for output in op_desc.output_arg_names():
            var = _create_node(name=output)
            var.set_gen_op(op_node)
            op_node.insert_output(var)
        return op_node

    # Record the forward vars
    forward_vars_set = set() if input_grad_names_set is None else set(
        input_grad_names_set)
    for op in forward_ops:
        forward_vars_set.update(op.desc.input_arg_names())
        forward_vars_set.update(op.desc.output_arg_names())

    # Record the vars which are created during backward and are not generated by any op.
    backward_vars_set = set()
    # special_op_nodes are the candidate sub-graph head nodes.
    special_op_nodes = set()
    for op_desc in grad_op_descs:
        input_set = set(op_desc.input_arg_names())
        # The new_vars are created during backward and are not generated by any op.
        new_vars = input_set - forward_vars_set - backward_vars_set
        backward_vars_set.update(op_desc.output_arg_names())

        op_node = _create_op_node(op_desc)
        if len(new_vars) == len(input_set):
            special_op_nodes.add(op_node)

    not_need_op_descs = []
    # Start traversing all candidate sub-graph headers to check whether
    # they are connected to backward computational graphs, and if they are
    # not, list them in not_need_op_descs
    for special_op_node in special_op_nodes:
        op_list = [special_op_node]
        ready_vars = set(special_op_node.inputs)
        remove_ops = True
        candidate_ops = [special_op_node]
        while len(candidate_ops) > 0:
            op_node = candidate_ops.pop(0)
            if _all_in_set_(op_node.inputs, ready_vars):
                for out_var in op_node.outputs:
                    candidate_ops.extend(out_var.pending_ops)
                    op_list.extend(out_var.pending_ops)
                ready_vars.update(op_node.outputs)
            else:
                remove_ops = False
                break
        if remove_ops:
            not_need_op_descs.extend([node.op_desc for node in op_list])
    not_need_op_descs_set = set(not_need_op_descs)
    grad_op_descs_set = set(grad_op_descs)
    # If the backward computational graph is simply one sub-graph head, the
    # not_need_op_descs would be the whole graph; this check avoids that.
    if grad_op_descs_set == not_need_op_descs_set:
        return set()
    return not_need_op_descs_set
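
# Worked example of the pruning above (hypothetical descs): if grad_op_descs
# contains a relu_grad op whose only input "dB" is produced by no forward op
# and no other backward op, that op heads a disconnected island; the island's
# descs are collected here and filtered out by the caller.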


from .proto import framework_pb2


def serialize_op_decs(op_desc):
    protostr = op_desc.serialize_to_string()
    proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
    return proto.__str__()


def _append_backward_ops_with_checkpoints_(
        block, ops, target_block, no_grad_dict, grad_to_var, checkpoints):
    """
    Create grad ops with forward ops, and insert them into the given block.

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose forward recomputation backward ops need to be added
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int): block index
            val(set): a set of variable names. These variables have no gradient
        checkpoints: variables that the user defined as checkpoints for forward recomputation

    Algorithm:
        0) deal with forward recomputing program descs
        1) find ops between checkpoints, i.e. recompute_segments
        2) go through all forward ops and deduce all variables that will be held in memory
            a. variables that are used across segments will be held in memory
            b. output of the dropout op will be held in memory
            c. input variables will be held in memory
        3) go through each recompute_segment, add backward ops with forward recomputation
            a. add ops in the current recompute_segment as forward recomputation ops
            b. rename all non-checkpoint variables in the recomputation ops
            c. add backward ops for the current recomputation ops
            d. add sum ops for repetitive outputs
        4) remove the no-grad branch as in _remove_no_grad_branch_
        5) Note1: all appended ops' OpRole is Backward
        6) Note2: all variables with a new name should be returned so that _append_backward_vars_ can be called
        7) Note3: current forward recomputation backpropagation does not handle programs with subblocks
    """

    checkpoints_name = [x.name for x in checkpoints]
    checkpoints_name = list(set(checkpoints_name))
    local_block = block.program._create_block()
    buffer_block = block.program._create_block()
    # 0) deal with forward recomputing program descs
    program_stat = ProgramStats(block, ops)
    program_stat.modify_forward_desc_for_recompute()
    program_stat.build_stats()

    # 1) find ops between checkpoints, i.e. recompute_segments
    checkpoints_name = program_stat.sort_checkpoints(checkpoints_name)
    segments = []

    if len(checkpoints_name) == 1:
        # only one checkpoint
        max_op_idx = -1
        var_group = [checkpoints_name[0]]
        for name in var_group:
            if name not in program_stat.var_op_deps:
                break
            op_idx = program_stat.var_op_deps[name]["var_as_output_ops"]
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if max_op_idx > 0:
            segments.append([0, max_op_idx + 1])
    else:
        start_idx = 0
        while True:
            if start_idx >= len(checkpoints_name) - 1:
                break
            flag, min_idx, max_idx = program_stat.is_subgraph(
                [checkpoints_name[start_idx]],
                [checkpoints_name[start_idx + 1]])
            if flag:
                segments.append([min_idx, max_idx + 1])
            start_idx += 1

    if segments != [] and segments[0][0] != 0:
        recompute_segments = [[0, segments[0][0]]] + segments
    else:
        recompute_segments = segments

    # 2) go through all forward ops and deduce all variables that will be held in memory
    vars_should_be_hold = []
    # a. variables that are used across segments will be held in memory
    for segment in recompute_segments:
        vars_should_be_hold.extend(
            program_stat.get_out_of_subgraph_vars(segment[0], segment[1]))
    # b. output of seed op should be kept in memory
    vars_should_be_hold.extend(program_stat.get_reserved_vars())
    # c. input variables are checkpoints
    vars_should_be_hold.extend(program_stat.get_input_nodes())
    vars_should_be_hold = list(set(vars_should_be_hold))

    # 3) go through each recompute_segment, add backward ops with forward recomputation
    grad_op_descs = []
    var_name_dict = {}

    vars_in_memory = vars_should_be_hold + checkpoints_name

    max_calculated_op_position = len(ops)
    if recompute_segments == []:
        # if there is no recompute segment, add backward ops just like
        # the _append_backward_ops_ function does
        gap_ops = ops[0:max_calculated_op_position]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception("Recompute doesn't support ops with sub_block, "
                                "invoked op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, cpt.to_text(no_grad_dict[block.idx]), [])
            added_descs = _add_descs_to_block(grad_op_desc, local_block)
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

    for i, segment in enumerate(recompute_segments[::-1]):
        # add grad op for ops not in any segments
        gap_ops = ops[segment[1]:max_calculated_op_position]
        max_calculated_op_position = segment[0]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception("Recompute doesn't support ops with sub_block, "
                                "invoked op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, cpt.to_text(no_grad_dict[block.idx]), [])
            added_descs = _add_descs_to_block(grad_op_desc, local_block)
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

        ff_ops = ops[segment[0]:segment[1]]
        var_suffix = ".subprog_%d" % i

        for op in ff_ops:
            if op.has_attr("sub_block"):
                raise Exception("Recompute doesn't support ops with sub_block, "
                                "invoked op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            input_and_output_names = []
            input_and_output_names.extend(op.desc.input_arg_names())
            input_and_output_names.extend(op.desc.output_arg_names())
            for name in input_and_output_names:
                if block.var(name).persistable or name in checkpoints_name:
                    continue
                if name in vars_should_be_hold:
                    continue
                if name not in var_name_dict:
                    var_name_dict[name] = name + var_suffix
        # 3.a. add ops in current recompute_segment as forward recomputation ops
        buffer_descs = _add_needed_descs_to_block(ff_ops, buffer_block, block,
                                                  vars_in_memory)
        added_descs = _add_descs_to_block(ff_ops, local_block)

        # 3.b. rename all non-checkpoint variables in recomputation ops
        for key in var_name_dict:
            _rename_arg_(buffer_descs, key, var_name_dict[key])

        # added_descs should be in grad_op_descs because it is a backward op desc
        grad_op_descs.extend(buffer_descs)

        # 3.c. add backward ops of current recomputation ops
        for op_desc in reversed(added_descs):
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op_desc, cpt.to_text(no_grad_dict[block.idx]), [])
            for key in var_name_dict:
                _rename_arg_(grad_op_desc, key, var_name_dict[key])
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)

    # 3.d. add sum ops for repetitive outputs
    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs, block.idx)
    # 4) remove the no-grad branch as in _remove_no_grad_branch_
    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])
    added_descs = _add_descs_to_block(grad_op_descs, target_block)
    return program_stat, checkpoints_name, vars_should_be_hold, recompute_segments
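
# Hedged usage sketch (the surrounding names are assumptions, not part of this
# module): a recompute-style optimizer is expected to drive the helper above
# roughly as follows:
#
#   program_stat, ckpt_names, held_vars, segments = \
#       _append_backward_ops_with_checkpoints_(
#           block=main_block,
#           ops=main_block.ops,
#           target_block=main_block,
#           no_grad_dict=_get_stop_gradients_(main_program),
#           grad_to_var={},
#           checkpoints=user_checkpoint_vars)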


def _get_sub_block_path(sub_block, sub_block_op_desc, no_grad_set):
    """
    Get output vars in the sub-block which will be assigned to the parent block.
    It is used to find the grad path in the sub-block.
    """
    assert sub_block_op_desc.has_attr(
        "sub_block") and sub_block.idx == sub_block_op_desc._block_attr_id(
            "sub_block")
    # TODO(huihuangzheng): add support for recurrent op and while op
    if sub_block_op_desc.type == "conditional_block":
        sub_outputs = []
        sub_assign_to_out_ops = []
        for var in sub_block_op_desc.output_arg_names:
            for op_desc in sub_block.ops:
                if op_desc.type == "assign" and var in op_desc.output_arg_names:
                    sub_assign_to_out_ops.append(op_desc)
                    for name in op_desc.input_arg_names:
                        if sub_block.has_var(name):
                            sub_outputs.append(sub_block.var(name))

        sub_block_op_path = _find_op_path_(sub_block, sub_outputs, [],
                                           no_grad_set)
        # TODO better way than finding in list
        for op_desc in sub_assign_to_out_ops:
            if op_desc not in sub_block_op_path:
                sub_block_op_path.append(op_desc)
        return sub_block_op_path
    return sub_block.ops


def _append_backward_ops_(block,
                          ops,
                          target_block,
                          no_grad_dict,
                          grad_to_var,
                          callbacks=None,
                          input_grad_names_set=None):
    """
    Create all grad ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
842
        ops(Op): the forward operators whose backward ops need to be added
843
        target_block(Block): the block which is going to hold new generated grad ops
844
        no_grad_dict(dict):
845
            key(int)  block index
T
tianshuo78520a 已提交
846
            val(set) a set of variable names. These variables have no gradient
847 848 849
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
C
chengduo 已提交
850 851 852 853
        callbacks(callable object): a callable object used to decorate new generated grad ops
        input_grad_names_set(set): this set is used to store the gradients' name which is
            generated by backward ops, and input_grad_names_set can help to prune the unnecessary
            backward ops.
854
    """
    if callbacks is not None:
        assert (isinstance(callbacks, list))
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")

    # grad_op_descs holds the created grad ops and will be appended to target_block
    grad_op_descs = []
    program = block.program

    # add grad_op_desc by reversed ops
    for op in reversed(ops):
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
            sub_block = program.block(op._block_attr_id("sub_block"))
            grad_sub_block = program._create_block()
            grad_sub_block._set_forward_block_idx(sub_block.idx)
            # see the following comments for why we set None here
            pre_input_grad_names_set = copy.copy(input_grad_names_set)
            input_grad_names_set = None
            sub_block_path = _get_sub_block_path(sub_block, op,
                                                 no_grad_dict[sub_block.idx])
            _append_backward_ops_(sub_block, sub_block_path, grad_sub_block,
                                  no_grad_dict, grad_to_var, callbacks,
                                  input_grad_names_set)
            input_grad_names_set = pre_input_grad_names_set
            program._rollback()
            grad_sub_block_list.append(grad_sub_block.desc)

        # Getting op's corresponding grad_op
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            op.desc, cpt.to_text(no_grad_dict[block.idx]), grad_sub_block_list)
        # Set device for grad_op according to forward Op
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        if op.desc.has_attr(device_attr_name):
            op_device = op.desc.attr(device_attr_name)
            for op_desc in grad_op_desc:
                op_desc._set_attr(device_attr_name, op_device)
        # If input_grad_names_set is not None, extend grad_op_descs only when
        # any of its input grads appears in the outputs of previous grad ops.
        # But this strategy does not suit control flow such as the while op,
        # whose grads may be generated in the next loop iteration.
        if input_grad_names_set is not None:
            is_append_grad = False
            for op_desc in grad_op_desc:
                input_grad_names = [
                    name for name in op_desc.input_arg_names()
                    if name.find(core.grad_var_suffix()) != -1
                ]
                # some gradient ops, like increment, are not very standard:
                # there is no @GRAD in these ops' inputs.
                if len(input_grad_names) == 0:
                    is_append_grad = True
                    break

                if _some_in_set_(input_grad_names, input_grad_names_set):
                    grad_op_descs.append(op_desc)
                    is_append_grad = True
                    for name in op_desc.output_arg_names():
                        input_grad_names_set.add(name)
            if is_append_grad:
                grad_to_var.update(op_grad_to_var)
        else:
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)

    # sum a parameter's gradient vars when it has multiple gradient vars
    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs, block.idx)

    # if all outputs of the grad op are in no_grad_set, then remove the op and
    # fill zeros for its grads that are still consumed;
    # if all grad inputs of the grad op are in no_grad_set, just remove this op
    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])

    # remove backward ops that are not connected to the backward graph
    not_need_ops = _find_not_need_ops(grad_op_descs, ops, input_grad_names_set)

    grad_op_descs = [
        op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
    ]

    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for op_desc in grad_op_descs:
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
        grad_to_var["__current_op_desc__"] = new_op_desc
        if callbacks is not None:
            assert (isinstance(callbacks, list))
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)


def _is_grad_var_(var_name):
    return core.grad_var_suffix() in var_name


# Find the op who holds the sub_block as its "sub_block" attr
def _find_parent_op_(sub_block):
    sub_block_id = sub_block.idx

    if sub_block_id == 0:
        return None

    program = sub_block.program
    for block_id in six.moves.range(program.num_blocks):
        block_desc = program.block(block_id).desc
        for op_idx in six.moves.range(block_desc.op_size()):
            op = block_desc.op(op_idx)
            if op.has_attr("sub_block") and op._block_attr_id(
                    "sub_block") == sub_block_id:
                return op

    # NOTE(paddle-dev): When optimizer is added in conditional block, 
    # sub_block may not be found.
    return None


def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
            val(tuple): a tuple of (str, Block); str is the corresponding grad name, Block is the block containing the grad variable
    """
    ops_to_remove = []
    '''
    NOTE(paddle-dev): while_grad op may hold some inputs which are not found 
    in the parent/forward block, and they are also the outputs of while_grad 
    op. These kinds of inputs are the recursive outputs inside while_grad op. 
    They should be considered as "already created" when scanning the inner 
    ops of while_grad ops.  
    '''
    parent_op = _find_parent_op_(block)
    parent_op_vars = []
    if parent_op is not None:
        input_args = parent_op.input_arg_names()
        output_args = parent_op.output_arg_names()
        for in_arg in input_args:
            if in_arg in output_args:
                parent_op_vars.append(in_arg)

    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
            sub_block = block.program.block(op_desc._block_attr_id("sub_block"))
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)

        grad_var_ins = [
            var for var in op_desc.input_arg_names() if _is_grad_var_(var)
        ]
        grad_var_outs = [
            var for var in op_desc.output_arg_names() if _is_grad_var_(var)
        ]

        inputs = [
            var for var in op_desc.input_arg_names()
            if var != core.empty_var_name()
        ]
        outputs = [
            var for var in op_desc.output_arg_names()
            if var != core.empty_var_name()
        ]

        # If the outputs of the grad op are empty, just remove it
        if not outputs:
            ops_to_remove.append(op_idx)
            continue
        else:
            '''
            If the output is not empty and there is any grad input, find 
            whether there is any existing input. If not, just remove it.
            '''
            if grad_var_ins:
                existing_grad_var_ins = [
                    var for var in grad_var_ins
                    if block.desc.has_var_recursive(cpt.to_bytes(var)) or var in
                    parent_op_vars
                ]
                if not existing_grad_var_ins:
                    '''
                    FIXME(paddle-dev, zengjinle): rnn_memory_helper_grad is used
                    in recurrent op. The input of this op does not even exist in 
                    the program! Therefore, any dependency analysis would not 
                    work to this op! If I do not add the following code, this op
                    would be pruned, and the calculation result would be wrong. 
                    Maybe we should re-design this op later...  
                    '''
                    if op_desc.type() not in ['rnn_memory_helper_grad']:
                        ops_to_remove.append(op_idx)
                        continue

        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
            if block.desc.has_var_recursive(cpt.to_bytes(
                    grad_var_name)) or grad_var_name == core.empty_var_name():
                continue
            block.desc.var(cpt.to_bytes(grad_var_name))
            new_vars.add(grad_var_name)
            if grad_var_name not in grad_to_var:
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)
        for arg in op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, block)
    for op_idx in reversed(ops_to_remove):
        block.desc._remove_op(op_idx, op_idx + 1)

def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
                op_desc._rename_input(name, var_map[name])

        for name in op_desc.output_arg_names():
            if "@GRAD" not in name:
                continue
            if block.desc.find_var(name.encode("ascii")):
                new_name = unique_name.generate(name)
                op_desc._rename_output(name, new_name)
                var_map[name] = new_name

    for g, ng in six.iteritems(var_map):
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
        for var in list(block.vars.values()):
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict


def _get_son_parent_block_idx_dict(program, current_block_idx):
    son_parent_block_idx_dict = collections.OrderedDict()
    while current_block_idx >= 0:
        parent_block_idx = program.block(current_block_idx).parent_idx
        son_parent_block_idx_dict[current_block_idx] = parent_block_idx
        current_block_idx = parent_block_idx

    return son_parent_block_idx_dict
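
# For example (assuming block 0's parent_idx is -1): if block 3's parent is
# block 1 and block 1's parent is block 0, then
# _get_son_parent_block_idx_dict(program, 3) returns
# OrderedDict([(3, 1), (1, 0), (0, -1)]).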


def _get_no_grad_set_name(no_grad_set):
    no_grad_set_name = set()
    if no_grad_set is not None:
        if isinstance(no_grad_set, (set, list, tuple)):
            for i, no_grad_var in enumerate(no_grad_set):
                if isinstance(no_grad_var, framework.Variable):
                    no_grad_set_name.add(no_grad_var.name)
                elif isinstance(no_grad_var, six.string_types):
                    no_grad_set_name.add(no_grad_var)
                else:
                    raise TypeError(
                        "The type of no_grad_set's member must be paddle.fluid.Variable or str, but received %s."
                        % (type(no_grad_var)))
        else:
            raise TypeError(
                "The type of no_grad_set should be set or list or tuple, but received {}".
                format(type(no_grad_set)))
    return no_grad_set_name
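
# For instance (hypothetical names): given a Variable `w` with w.name ==
# "fc_0.w_0", _get_no_grad_set_name({w, "fc_0.b_0"}) returns
# {"fc_0.w_0", "fc_0.b_0"}: Variables are mapped to their .name and plain
# strings pass through unchanged.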


def append_backward(loss,
                    parameter_list=None,
                    no_grad_set=None,
                    callbacks=None,
                    checkpoints=None):
    """
    This function appends the backward part to the main program.

    A complete neural network training is made up of forward and backward
    propagation. However, when we configure a network, we only need to
    specify its forward part. This function uses the chain rule to automatically
    generate the backward part according to the forward part.

    In most cases, users do not need to invoke this function manually.
    It will be automatically invoked by the optimizer's `minimize` function.

    Parameters:
        loss( :ref:`api_guide_Variable_en` ): The loss variable of the network.
        parameter_list(list[Variable|str], optional): List of Parameters or Parameter.names
                                           that need to be updated by optimizers.
                                           If it is None, all parameters
                                           will be updated.
                                           Default: None.
        no_grad_set(set[Variable|str], optional): Set of Variables or Variable.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All variables with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               If this parameter is not None, the Variables or Variable.names in this set will be added to the default set.
                               Default: None.
        callbacks(list[callable object], optional): List of callback functions.
                                               The callbacks are used for
                                               doing some custom jobs during
                                               backward part building. All
                                               callable objects in it will
                                               be invoked once each time a
                                               new gradient operator is added
                                               into the program. The callable
                                               object must have two input
                                               parameters: 'block' and 'context'.
                                               The 'block' is the :ref:`api_guide_Block_en` which
                                               the new gradient operator will
                                               be added to. The 'context' is a
                                               map, whose keys are gradient
                                               variable names and values are
                                               corresponding original :ref:`api_guide_Variable_en` .
                                               In addition to this, the 'context'
                                               has another special key-value pair:
                                               the key is the string '__current_op_desc__'
                                               and the value is the op_desc of the
                                               gradient operator that has just
                                               triggered the callable object.
                                               Default: None.

    Returns:
        list of tuple ( :ref:`api_guide_Variable_en` , :ref:`api_guide_Variable_en` ): Pairs of parameter and its corresponding gradient.
        The key is the parameter and the value is the gradient variable.

    Raises:
        AssertionError: If `loss` is not an instance of Variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[None, 13], dtype='int64')
            y = fluid.data(name='y', shape=[None, 1], dtype='float32')
            x_emb = fluid.embedding(x, size=[100, 256])
            y_predict = fluid.layers.fc(input=x_emb, size=1, act=None, name='my_fc')
            loss = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_loss = fluid.layers.mean(loss)

            # Get all weights in main_program, not include bias.
            all_weights = [param for param in fluid.default_main_program().block(0).all_parameters() if 'w_' in param.name]
            all_weights_name = [w.name for w in all_weights]

            # return all param_grads needed to be updated if parameter_list set default None.
            p_g_list1 = fluid.backward.append_backward(loss=avg_loss)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

            # return the param_grads corresponding to parameter_list that can be list of param (Variable).
            p_g_list2 = fluid.backward.append_backward(loss=avg_loss, parameter_list=all_weights)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # parameter_list can be list of param.name (str).
            p_g_list3 = fluid.backward.append_backward(loss=avg_loss, parameter_list=all_weights_name)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # no_grad_set can be set of Variables that means grad will be cut off from these Variables.
            p_g_list4 = fluid.backward.append_backward(loss=avg_loss, no_grad_set=set([x_emb]))
            # output: [(my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

            # no_grad_set can be set of Variable.name when the Variable is created inside layers and can't be specified explicitly.
            p_g_list5 = fluid.backward.append_backward(loss=avg_loss, no_grad_set=set(['my_fc.b_0']))
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # return [] because all param_grads are filtered by no_grad_set.
            p_g_list6 = fluid.backward.append_backward(loss=avg_loss, parameter_list=all_weights, no_grad_set=set(all_weights))
1249

1250
    """
    check_type(loss, 'loss', framework.Variable,
               'fluid.backward.append_backward')

    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
        _find_loss_op_(loss)

    loss.op._set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
                      int(core.op_proto_and_checker_maker.OpRole.Forward) |
                      int(core.op_proto_and_checker_maker.OpRole.Loss))

    if callbacks is not None:
        check_type(callbacks, 'callbacks', list,
                   'fluid.backward.append_backward')

    program = loss.block.program
    root_block = program.block(0)
    current_block_idx = program.current_block_idx
    current_block = program.block(current_block_idx)

    is_in_control_flow = current_block_idx != 0

    # Double grad is not supported in sub-block (control flow)
    if not is_in_control_flow:
        # _appending_grad_times used for double grad
        program._appending_grad_times += 1

    if no_grad_set is None:
        no_grad_set = set()
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
    no_grad_dict = _get_stop_gradients_(program)
    # no_grad_set only contains vars in block 0
    # Todo(liym27): support vars in sub block
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))

    # Currently this is only to support optimizer.minimize in a switch branch,
    # which can call append_backward in a sub_block.
    # Note: while_loop is also control flow, but it makes no sense to call an
    # optimizer inside a while loop.
    # Todo: report an error when it is in while_loop.
    if is_in_control_flow:
        # create grad block if in switch control flow.
        target_grad_block = program._create_block(
            parent_idx=current_block.parent_idx)
        target_grad_block._set_forward_block_idx(current_block_idx)
        # after _create_block, program.current_block changes
    else:
        target_grad_block = root_block

    son_parent_block_idx_dict = _get_son_parent_block_idx_dict(
        program, current_block_idx)

    block_fwd_op_num_dict = {}  # block_id: fwd_op_num
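    # Every op appended to a block after this point will be a backward op;
    # these forward op counts tell _rename_grad_ and _append_backward_vars_
    # where the backward ops start.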
    for idx in son_parent_block_idx_dict:
        block_fwd_op_num_dict[idx] = program.block(idx).desc.op_size()

    grad_to_var = dict()

    op_desc = _create_loss_op_desc_(loss)
    target_grad_block.desc.append_op().copy_from(op_desc)

    for block_idx in son_parent_block_idx_dict:
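        # Blocks are visited from the current block up to the root; for each
        # block, find its forward op path and append the backward ops for it.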
        block = program.block(block_idx)

        block_no_grad_set = set(
            map(_strip_grad_suffix_, no_grad_dict[block_idx]))
        op_path = _find_op_path_(block, [loss], [], block_no_grad_set)

        no_grad_vars = _find_no_grad_vars(block, op_path, [loss],
                                          block_no_grad_set)

        block_no_grad_set.update(no_grad_vars)
        no_grad_dict[block_idx].update(
            list(map(_append_grad_suffix_, block_no_grad_set)))

        input_grad_names_set = None
        # For double backward, input_grad_names is used to filter out
        # some unused gradient ops.

        # Todo(liym27): need a better design.
        # Double grad is not supported in control flow sub-blocks for now.
        if not is_in_control_flow:
            if program._appending_grad_times > 1:
                input_grad_names_set = set([_append_grad_suffix_(loss.name)])

        # Todo: support _append_backward_ops_with_checkpoints_ in
        #  sub-block (control flow)
        if checkpoints is not None and \
                isinstance(checkpoints, list) and \
                len(checkpoints) > 0:
            program_stat, checkpoint_names, \
            vars_should_be_hold, \
            recompute_segments = \
                _append_backward_ops_with_checkpoints_(
                    root_block,
                    op_path,
                    root_block,
                    no_grad_dict,
                    grad_to_var,
                    checkpoints)
        else:
            _append_backward_ops_(
                block,  # the block where forward ops are in
                op_path,
                target_grad_block,
                no_grad_dict,
                grad_to_var,
                callbacks,
                input_grad_names_set=input_grad_names_set)

    grad_info_map = dict()

    # If in control flow, target_grad_block is a newly created block which only contains grad ops,
    # so fwd_op_num is set to 0.
    fwd_op_num = block_fwd_op_num_dict[
        current_block_idx] if not is_in_control_flow else 0

    # Because append_backward may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(target_grad_block, fwd_op_num, grad_to_var, {})

    _append_backward_vars_(target_grad_block, fwd_op_num, grad_to_var,
                           grad_info_map)

    program.current_block_idx = current_block_idx
    program._sync_with_cpp()

    if parameter_list is not None:
        check_type(parameter_list, 'parameter_list', (list, tuple, set),
                   'fluid.backward.append_backward')
        parameters = []
        for i, param in enumerate(parameter_list):
            check_type(param, 'parameter_list[%s]' % i, (framework.Variable,
                                                         six.string_types),
                       'fluid.backward.append_backward')
            if isinstance(param, framework.Variable):
                parameters.append(param.name)
            elif isinstance(param, six.string_types):
                parameters.append(param)
    else:
        params = program.global_block().all_parameters()
        parameters = [param.name for param in params if param.trainable]

    params_and_grads = []
    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
    for param in parameters:
        if cpt.to_text(param) not in grad_info_map:
            continue
        grad_info = grad_info_map[param]
        grad_block = grad_info[1]
        if not grad_block.has_var(grad_info[0]):
            raise ValueError("grad block[{0}] did not have grad var {1}".format(
                grad_info[1], grad_info[0]))
        # Get the param var from the global block
        param_var = program.global_block().var(param)
        grad_var = grad_block.var(grad_info[0])
        if not is_in_control_flow:
            if loss.block.has_var(grad_info[0]):
                params_and_grads.append((param_var, grad_var))
            else:
                params_and_grads.append((param_var, None))
        else:
            params_and_grads.append((param_var, grad_var))

    for p, g in params_and_grads:
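        # Find the op that produced this gradient and record the
        # (param_name, grad_name) pair on its op_role_var attribute so that
        # later passes (e.g. the optimizer) can locate the pair.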
        if g is None:
            continue
        ops = grad_block.ops if is_in_control_flow else program.global_block(
        ).ops
        for op in reversed(ops):
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
        attr_val = [p.name, g.name]
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
        g.op._set_attr(op_role_var_attr_name, attr_val)

    return params_and_grads


def _as_list(x):
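    """Normalize `x` into a list: None -> [], a sequence -> list(x), anything else -> [x]."""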
    if x is None:
        return []
    return list(x) if isinstance(x, collections.Sequence) else [x]


def _is_ancestor_block(ancestor_block, block):
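    """Return True if `ancestor_block` is an ancestor of `block` in the block tree."""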
    prog = block.program
    ancestor_idx = ancestor_block.idx
    parent_idx = block.parent_idx

    while parent_idx != -1:
        if parent_idx == ancestor_idx:
            return True
        parent_idx = prog.block(parent_idx).parent_idx

    return False


def _get_output_names(cur_block, targets):
    """
    In `cur_block`, get output names those linked to targets.
    NOTE:
    1. `targets` can be in `cur_block`;
    Usually, `targets` is in `cur_block`. However, considering control flow,
    2. `targets` may be in sub-block but `cur_block` is an ancestor of `targets[0].block`;
    3. `targets` may be in the block which is ancestor of `cur_block`.
    """

    block = targets[0].block if targets else cur_block
    prog = cur_block.program
    if _is_ancestor_block(block, cur_block):
        return set()

    current_output_names = set([out.name for out in targets])

    # if `cur_block` is an ancestor of `targets[0].block`, run while loop
    while block.idx != cur_block.idx:
        assert block.parent_idx != -1
        parent_block = prog.block(block.parent_idx)

        parent_block_output_names = set()
        for op in reversed(block.ops):
            if _some_in_set_(op.desc.output_arg_names(), current_output_names):
                for name in op.desc.input_arg_names():
                    current_output_names.add(name)
                    if not block.desc.find_var(cpt.to_bytes(name)) \
                            and parent_block.desc.find_var(cpt.to_bytes(name)):
                        parent_block_output_names.add(name)

        block = parent_block
        current_output_names = parent_block_output_names

    return current_output_names


def _find_no_grad_vars(block, op_path, targets, no_grad_set):
    """
    Find the vars which is not used in the program, and
1496
    those vars belong to no_grad_var.
1497
    """
    output_names = _get_output_names(block, targets)
    no_grad_var = []
    for i, op in reversed(list(enumerate(op_path))):
        # If the op has a sub_block, it is too complicated to find the correct no_grad_var.
        if not op.has_attr("sub_block"):
            for out_var in op.desc.output_arg_names():
                if out_var not in output_names and out_var not in op.desc.input_arg_names(
                ) and not block.vars[out_var].stop_gradient:
                    no_grad_var.append(out_var)
        for name in op.desc.input_arg_names():
            if name not in no_grad_set:
                output_names.add(name)
    return set(no_grad_var)


def _find_op_path_(block, outputs, inputs, no_grad_set):
    """
    no_grad_set will also be changed
    """
    input_names = set([inp.name for inp in inputs])
    output_names = _get_output_names(block, outputs)

    relevant_op_flags = [True] * len(block.ops)

    # All the inputs of the block are used if inputs is empty.
    if inputs:
        for i, op in enumerate(block.ops):
            if _some_in_set_(
                    op.desc.input_arg_names(),
                    input_names) and core.has_non_empty_grad_op_maker(op.type):
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
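        # Backward sweep: an op is kept only if some of its outputs reach
        # `output_names` and it has a non-empty grad op maker; its inputs
        # then become reachable outputs for the ops before it.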
        if _some_in_set_(
                op.desc.output_arg_names(),
                output_names) and core.has_non_empty_grad_op_maker(op.type):
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
                if name not in input_names and block.vars[name].stop_gradient:
                    no_grad_set.add(name)

    return op_path


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the gradients of targets to inputs.

    Args:
        targets(Variable|list[Variable]): The target variables.
        inputs(Variable|list[Variable]): The input variables.
        target_gradients (Variable|list[Variable], optional): The gradient variables
            of targets, which have the same shape as targets. If None, ones will
            be created for them.
        no_grad_set(set[Variable|str], optional): Set of Variables or Variable.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All variables with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               If this parameter is not None, the Variables or Variable.names in this set will be added to the default set.
                               Default: None.

    Returns:
        (list[Variable]): A list of gradients for inputs.
        If an input does not affect targets, the corresponding gradient variable
        will be None.
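
    Examples:
        .. code-block:: python

            # A minimal usage sketch; calc_gradient is the backend of the
            # public `gradients` wrapper below, and the layer names and
            # shapes here are illustrative only.
            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
            x.stop_gradient = False
            y = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
            z = fluid.backward.calc_gradient(y, x)
            print(z)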
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
    # increase appending gradients times
    prog._appending_grad_times += 1
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets")

    if no_grad_set is None:
        no_grad_set = set()
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
    no_grad_dict = _get_stop_gradients_(prog)
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))

    fwd_op_num = block.desc.op_size()

    input_grad_names_set = set()

    target_grad_map = {}
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        if grad is None:
            grad_name = _append_grad_suffix_(target.name)
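            # No gradient was given for this target: record its runtime shape
            # with a `shape` op, then fill_constant a ones tensor of that
            # shape to serve as the initial gradient.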
            target_shape = target.name + '_shape'
            block.desc.append_op().copy_from(
                _create_op_desc_("shape", {'Input': [target.name]},
                                 {"Out": [target_shape]}, {}))
            input_grad_names_set.add(target_shape)
            op_desc = _create_op_desc_("fill_constant",
                                       {"ShapeTensor": [target_shape]},
                                       {"Out": [grad_name]}, {
                                           "shape": target.shape,
                                           "value": 1.0,
                                           "dtype": target.dtype,
                                       })

            block.desc.append_op().copy_from(op_desc)
            input_grad_names_set.add(grad_name)
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s" % (
                        target.name, grad.name))
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name
            input_grad_names_set.add(grad.name)

    # For double backward, input_grad_names is used to filter out
    # some unused gradient ops.
    if prog._appending_grad_times == 1:
        input_grad_names_set = None

    for input in inputs:
        if input.block.program != prog:
            raise "input must be in the same program as targets"

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(block, targets, inputs, block_no_grad_set)
    no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
    grad_to_var = dict()
    grad_info_map = dict()
    _append_backward_ops_(
        block,
        op_path,
        block,
        no_grad_dict,
        grad_to_var,
        input_grad_names_set=input_grad_names_set)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map)

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
    prog._sync_with_cpp()

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars


def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the gradients of targets to inputs.

    Args:
        targets (Variable|list[Variable]): The target variables.
        inputs (Variable|list[Variable]): The input variables.
        target_gradients (Variable|list[Variable], optional): The gradient variables
            of targets, which have the same shape as targets. If None, ones will
            be created for them.
        no_grad_set (set[Variable|str], optional): Set of Variables or Variable.names in the :ref:`api_guide_Block_en` 0 whose gradients
            should be ignored. All variables with `stop_gradient=True` from all blocks will
            be automatically added into this set. If this parameter is not None, the Variables or Variable.names
            in this set will be added to the default set. Default: None.

    Returns:
        (list[Variable]): A list of gradients for inputs.
        If an input does not affect targets, the corresponding gradient variable
        will be None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[None,2,8,8], dtype='float32')
            x.stop_gradient=False
            y = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
            y = fluid.layers.relu(y)
            y = fluid.layers.conv2d(y, 4, 1, bias_attr=False)
            y = fluid.layers.relu(y)
            z = fluid.gradients([y], x)
            print(z)
    """
    check_type(targets, 'targets', (framework.Variable, list),
               'fluid.backward.gradients')
    check_type(inputs, 'inputs', (framework.Variable, list),
               'fluid.backward.gradients')
    check_type(target_gradients, 'target_gradients', (
        framework.Variable, list, type(None)), 'fluid.backward.gradients')

    outs = calc_gradient(targets, inputs, target_gradients, no_grad_set)
    return _as_list(outs)