#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .proto import framework_pb2

from paddle.fluid import framework as framework
from paddle.fluid import program_guard
from . import core
import collections
import copy
import logging
from .. import compat as cpt
from . import unique_name
from . import log_helper
import paddle.fluid
from .data_feeder import check_type
import warnings
try:
    from collections.abc import Sequence
except:
    from collections import Sequence

__all__ = [
    'append_backward',
    'gradients',
]

_logger = log_helper.get_logger(__name__,
                                logging.INFO,
                                fmt='%(asctime)s-%(levelname)s: %(message)s')


class ProgramStats(object):
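    """
    Dependency statistics over a list of forward ops in ``block``: which ops
    produce and which ops consume each variable. Used by
    _append_backward_ops_with_checkpoints_ below to split the forward program
    into recompute segments.
    """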

    def __init__(self, block, ops):
        self.block = block
        self.ops = ops
        self.op_deps = {}  # op-> in_ops, out_ops
        self.var_op_deps = {}  # var as input op, var as output op

    def get_input_nodes(self):
        input_names = []
        for name in self.var_op_deps:
            if len(self.var_op_deps[name]["var_as_output_ops"]) == 0 and \
                    len(self.var_op_deps[name]["var_as_input_ops"]) > 0:
                if self.block.var(name).persistable:
                    continue
                input_names.append(name)
        for op in self.ops:
            if op.desc.type() == "read":
                input_names.extend(op.desc.output_arg_names())
        return input_names

    def get_reserved_vars(self):
        var_name = []
        for op in self.ops:
            if op.desc.type() == "seed":
                var_name.extend(op.desc.output_arg_names())
        return var_name

    def get_out_of_subgraph_vars(self, begin_op_idx, end_op_idx):
        var_name = []
        for i in range(begin_op_idx, end_op_idx, 1):
            for name in self.ops[i].desc.output_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_input_ops"]:
                        if idx >= end_op_idx:
                            var_name.append(name)
            for name in self.ops[i].desc.input_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_output_ops"]:
                        if idx < begin_op_idx:
                            var_name.append(name)
        return var_name

    def is_subgraph(self, var_group1, var_group2):
        # should traverse from var_group1 to var_group2
        # max op idx in var_group2
        # min op idx in var_group1
        min_op_idx = len(self.ops)
        max_op_idx = -1
        for name in var_group1:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group2:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group1:
            op_idx = self.var_op_deps[name]["var_as_input_ops"]
            for idx in op_idx:
                min_op_idx = min(min_op_idx, idx)
        for name in var_group2:
            op_idx = self.var_op_deps[name]["var_as_output_ops"]
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if min_op_idx >= max_op_idx:
            return False, min_op_idx, max_op_idx

        return True, min_op_idx, max_op_idx

    def _update_segment_start(self, min_idx, pre_segment_end_idx):
        """
        persist vars of amp-related cast should be included in recompute segment
        """

        def is_amp_cast(op):
            return op.desc.type() == 'cast' and self.block.var(
                op.desc.input_arg_names()[0]).persistable

        idx_ = min_idx - 1
        updated_min_idx = min_idx
        while idx_ > pre_segment_end_idx:
            if is_amp_cast(self.ops[idx_]):
                _logger.info("found amp-cast op: {}, : {}".format(
                    self.ops[idx_].desc.type(),
                    self.ops[idx_].desc.input_arg_names()[0]))
                updated_min_idx = idx_
                idx_ -= 1
            else:
                break

        return updated_min_idx

    def build_stats(self):
        for i, op in enumerate(self.ops):
            self.op_deps[i] = {"in_ops": [], "out_ops": []}
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.op_deps[i]["in_ops"].extend(
                        self.var_op_deps[name]["var_as_output_ops"])
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_input_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = [i]
                    self.var_op_deps[name]["var_as_output_ops"] = []

            for j, name in enumerate(op.desc.output_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_output_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = []
                    self.var_op_deps[name]["var_as_output_ops"] = [i]

            for op_idx in self.op_deps[i]["in_ops"]:
                self.op_deps[op_idx]["out_ops"].extend([i])

    def sort_checkpoints(self, checkpoints_name):
        sorted_checkpoints = []
        for name in checkpoints_name:
            if name not in self.var_op_deps:
                _logger.info(
                    "Recompute Optimizer: deleted %s from checkpoints, because it is not used in paddle program."
                    % name)
            elif self.var_op_deps[name]["var_as_output_ops"] == []:
                # input nodes
                sorted_checkpoints.append((name, -1))
            else:
                sorted_checkpoints.append(
                    (name, max(self.var_op_deps[name]["var_as_output_ops"])))
        sorted_checkpoints = sorted(sorted_checkpoints, key=lambda x: x[1])
        return [x[0] for x in sorted_checkpoints]

    def modify_forward_desc_for_recompute(self):
        op_types = [op.desc.type() for op in self.ops]
        if "dropout" not in op_types:
            return

        op_idx = 0
        while op_idx < len(self.ops):
            op = self.ops[op_idx]
            if op.desc.type() != "dropout":
                op_idx += 1
                continue
            # a seed op has already been inserted before this dropout op
            if op.input('Seed') is not None and len(op.input('Seed')) == 1:
                op_idx += 1
                continue
            # add a seed op so that the two dropout ops can generate the same output
            op_unique_name = unique_name.generate("seed")
            var_unique_name = unique_name.generate_with_ignorable_key(".".join(
                [op_unique_name, 'tmp']))
            added_var = self.block.create_var(
                name=var_unique_name,
                dtype='int32',
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False)
            seed = 0 if op.attr("fix_seed") is False else int(op.attr("seed"))

            op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
            )
            op_device = ""
            if op.desc.has_attr(op_device_attr_name):
                op_device = op.desc.attr(op_device_attr_name)

            # Setting force_cpu of the seed op to True keeps its output in CPU memory,
            # which reduces the synchronous copy from GPU to CPU in dropout and avoids communication hangs.
            added_op = self.block._insert_op(index=op.idx,
                                             type='seed',
                                             inputs={},
                                             outputs={'Out': [added_var]},
                                             attrs={
                                                 'seed': seed,
                                                 'op_device': op_device,
                                                 'force_cpu': True
                                             })
            self.ops.insert(op_idx, added_op)
            # modify dropout op desc so that it accept a seed var as input
            op.desc.set_input("Seed", [var_unique_name])
            op.desc.remove_attr("fix_seed")
            op.desc.remove_attr("seed")
            self.block._sync_with_cpp()
            op_idx += 2


def _pretty_op_desc_(op_desc, prefix):
    out_s = "%s\tname:[%s]\n%s    \tinputs:[%s]\n%s    \toutputs:[%s]" % \
            (prefix + "_op", str(op_desc.type()), prefix + "_input", " ".join(op_desc.input_arg_names()),
             prefix + "_output", " ".join(op_desc.output_arg_names()))
    return out_s


def _add_needed_descs_to_block(descs,
                               block,
                               main_block,
                               in_memory_vars,
                               grad_op_id_to_fwd_op=None):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = \
        core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        origin_desc = desc
        origin_is_operator = False
        if isinstance(desc, framework.Operator):
            desc = desc.desc
            origin_is_operator = True
        if isinstance(desc, tuple):
            desc = desc[0]
        is_needed = False
        for name in desc.output_arg_names():
            if main_block.has_var(name) and main_block.var(name).persistable:
                continue
            if name not in in_memory_vars:
                is_needed = True
        if is_needed:
            if origin_is_operator and grad_op_id_to_fwd_op is not None:
                grad_op_id_to_fwd_op[desc.original_id()] = origin_desc
            new_op_desc = block.desc.append_op()
            new_op_desc.copy_from(desc)
            new_op_desc._set_attr(op_role_attr_name, backward)
            if desc.has_attr('op_device'):
                new_op_desc._set_attr('op_device', desc.attr('op_device'))
            result_descs.append(new_op_desc)
    return result_descs


def _add_descs_to_block(descs, block, grad_op_id_to_fwd_op=None):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = \
        core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        if isinstance(desc, framework.Operator):
            # for recompute, should record recompute ops
            if grad_op_id_to_fwd_op is not None:
                grad_op_id_to_fwd_op[desc.desc.original_id()] = desc
            desc = desc.desc
        if isinstance(desc, tuple):
            desc = desc[0]
        new_op_desc = block.desc.append_op()
        new_op_desc.copy_from(desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
        if desc.has_attr('op_device'):
            new_op_desc._set_attr('op_device', desc.attr('op_device'))
        result_descs.append(new_op_desc)
    return result_descs


def _find_loss_op_(loss):
    for op in reversed(loss.block.ops):
        assert isinstance(op, framework.Operator)
        if len(op.output_arg_names
               ) == 1 and op.output_arg_names[0] == loss.name:
            loss.op = op
            break
    if loss.op is None:
        raise ValueError("loss.op is None. Should not happen")


def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
    Traverse all ops in op_descs[begin_idx : end_idx],
    if any op has inputs/outputs named "old_name", rename it as 'new_name'
    """
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
        end_idx = len(op_descs)
    if isinstance(op_descs, (list, tuple)):
        for i in range(begin_idx, end_idx):
            op_desc = op_descs[i]
            if isinstance(op_desc, tuple):
                op_desc = op_desc[0]
            op_desc._rename_input(old_name, new_name)
            op_desc._rename_output(old_name, new_name)
    if isinstance(op_descs, collections.OrderedDict):
        for key, value in op_descs.items():
            if isinstance(value, (list, tuple)):
                for op_desc in value:
                    op_desc._rename_input(old_name, new_name)
                    op_desc._rename_output(old_name, new_name)


def _create_op_desc_(op_type, inputs, outputs, attrs):
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
    """
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
    for para, args in inputs.items():
        op_desc.set_input(
            para,
            list(
                map(lambda arg: arg.decode()
                    if isinstance(arg, bytes) else arg, args)))
    for para, args in outputs.items():
        op_desc.set_output(
            para,
            list(
                map(lambda arg: arg.decode()
                    if isinstance(arg, bytes) else arg, args)))

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
    if op_device_attr_name not in attrs:
        attrs[op_device_attr_name] = ""
    for name, val in attrs.items():
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
            op_desc._set_attr(name, val)
    return op_desc


def _create_loss_op_desc_(loss):
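    # Seed the backward pass: d(loss)/d(loss) == 1.0, written into the loss gradient
    # variable (loss.name + "@GRAD") by a fill_constant op.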
    op_desc = _create_op_desc_(
        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
            "shape": [1],
            "value":
            1.0,
            "dtype":
            loss.dtype,
            "force_cpu":
            False,
            core.op_proto_and_checker_maker.kOpRoleAttrName():
            int(core.op_proto_and_checker_maker.OpRole.Backward)
            | int(core.op_proto_and_checker_maker.OpRole.Loss),
            core.op_proto_and_checker_maker.kOpDeviceAttrName():
            loss.op.attr(core.op_proto_and_checker_maker.kOpDeviceAttrName())
        })
    return op_desc


def _infer_var_data_type_shape_(grad_var_name, block):
    """
    Infer the data type and shape of given grad variable
    """
    grad_var = block.desc.find_var(grad_var_name.encode())
    fwd_name = _strip_grad_suffix_(grad_var_name)
    if block.desc.has_var_recursive(fwd_name.encode()):
        fwd_var = block.desc.find_var_recursive(fwd_name.encode())
        grad_var.set_dtype(fwd_var.dtype())
        grad_var.set_shape(fwd_var.shape())
    else:
        # TODO(jiabin): Maybe we should not do this, since it may cause unexpected errors on dtype
        warnings.warn(
            "Set grad var: {} dtype to default FP32, since we can't find its related forward var"
            .format(grad_var_name))
        grad_var.set_dtype(core.VarDesc.VarType.FP32)


def _all_in_set_(cands, s):
    """
    Test if all elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if not c in s:
            return False
    return True


def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c in s:
            return True
    return False


def _strip_grad_suffix_(name):
    """
    Strip the grad suffix from the given variable name
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
    pos = name.find(core.grad_var_suffix())
    new_name = name[:pos] if pos != -1 else name
    new_pos = name.rfind('grad/')
    return new_name[new_pos + 5:] if new_pos != -1 else new_name


def _append_grad_suffix_(name):
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
    return name + core.grad_var_suffix()


def _accumulate_gradients_by_sum_op_(var_name,
                                     renamed_vars,
                                     pending_sum_ops,
                                     op_idx,
                                     op_device=""):
    """
    Use sum op to accumulate_gradients, the gradients are stored in renamed_vars.
    """
    if op_idx not in pending_sum_ops.keys():
        pending_sum_ops[op_idx] = []
    pending_sum_ops[op_idx].append(
        _create_op_desc_("sum", {"X": renamed_vars[var_name]},
                         {"Out": [var_name]}, {
                             "use_mkldnn": False,
                             "op_device": op_device
                         }))
    renamed_vars[var_name] = [var_name]


def _accumulate_gradients_by_add_ops_(var_name,
                                      renamed_vars,
                                      pending_sum_ops,
                                      op_idx,
                                      op_device=""):
    """
    Use several inplace add op to accumulate_gradients, the gradients are stored in renamed_vars.
    """
    if op_idx not in pending_sum_ops.keys():
        pending_sum_ops[op_idx] = []
    out_name = renamed_vars[var_name][0]
    for i in range(1, len(renamed_vars[var_name])):
        x_name = out_name
        y_name = renamed_vars[var_name][i]
        if i != len(renamed_vars[var_name]) - 1:
            out_name = var_name + '@ADD@' + str(i)
        else:
            out_name = var_name
        pending_sum_ops[op_idx].append(
            _create_op_desc_("grad_add", {
                "X": [x_name],
                "Y": [y_name]
            }, {"Out": [out_name]}, {
                "use_mkldnn": False,
                "op_device": op_device
            }))
    renamed_vars[var_name] = [var_name]


def _addup_repetitive_outputs_(op_descs,
                               block_idx,
                               grad_var_to_var=None,
                               grad_op_id_to_fwd_op=None):
    """
    In the backward pass, a variable may be the output of more than one op,
    and one op may write several of its outputs to the same variable.
    In these cases, the variable should be the accumulation of all those outputs.
    `sum_op`s are added to implement the accumulation.

    Args:
        grad_var_to_var(dict): used to build the mapping between grad var name and forward var name.
        Only for auto parallel.
    """

    _MAX_ADD_NUM_ = framework._global_flags()['FLAGS_max_inplace_grad_add']
    #pending_sum_ops = []
    pending_sum_ops = collections.OrderedDict()
    var_rename_count = collections.defaultdict(int)
    renamed_vars = collections.defaultdict(list)
    renamed_var_start_idx = collections.defaultdict(list)
    var_device = collections.defaultdict(str)
    for idx, op_desc in enumerate(op_descs):
        op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
        )
        op_device = ""
        if op_desc.has_attr(op_device_attr_name):
            op_device = op_desc.attr(op_device_attr_name)
        for var_name in op_desc.input_arg_names():
            if "@GRAD" not in var_name:
                continue
            if len(renamed_vars[var_name]) > 1:
                if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
                    _accumulate_gradients_by_sum_op_(var_name, renamed_vars,
                                                     pending_sum_ops, idx,
                                                     var_device[var_name])
                else:
                    _accumulate_gradients_by_add_ops_(var_name, renamed_vars,
                                                      pending_sum_ops, idx,
                                                      var_device[var_name])

        for param_idx, param_name in enumerate(op_desc.output_names()):
            arg_names = op_desc.output(param_name)
            for arg_idx, var_name in enumerate(arg_names):
                if "@GRAD" not in var_name:
                    continue
                # if "@RENAME@" in var_name:
                #    continue
                if var_name == core.empty_var_name(
                ) or var_name in op_desc.input_arg_names():
                    # empty variable or inplace op
                    continue
                if len(renamed_vars[var_name]) == 0:
                    # it's the first time we get the variable
                    renamed_vars[var_name] = [var_name]
                    renamed_var_start_idx[var_name] = idx
                else:
                    if len(renamed_vars[var_name]) == 1:
                        new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \
                            str(var_rename_count[var_name])
                        var_rename_count[var_name] += 1
                        # Build the mapping between the new_name and var_name (Only for auto parallel)
                        if grad_var_to_var is not None:
                            if var_name in grad_var_to_var:
                                grad_var_to_var[new_name] = grad_var_to_var[
                                    var_name]
                            else:
                                grad_var_to_var[new_name] = var_name
                        # rename original var_name
                        renamed_vars[var_name][0] = new_name
                        # before change: _rename_arg_(op_descs, var_name,
                        #                             new_name, 0, idx)
                        # rename arg from idx of the first appearance
                        # in backward, not always from 0
                        _rename_arg_(op_descs, var_name, new_name,
                                     renamed_var_start_idx[var_name], idx)
                        _rename_arg_(pending_sum_ops, var_name, new_name)

                        for p in op_desc.output_names()[:param_idx]:
                            p_arg_names = op_desc.output(p)
                            if var_name in p_arg_names:
                                op_desc.set_output(p, [
                                    new_name if x == var_name else x
                                    for x in p_arg_names
                                ])

                        arg_names = [
                            new_name if x == var_name else x
                            for x in arg_names[:arg_idx]
                        ] + arg_names[arg_idx:]

                    new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \
                        str(var_rename_count[var_name])
                    var_rename_count[var_name] += 1
                    # Build the mapping between the new_name and var_name (Only for auto parallel)
                    if grad_var_to_var is not None:
                        if var_name in grad_var_to_var:
                            grad_var_to_var[new_name] = grad_var_to_var[
                                var_name]
                        else:
                            grad_var_to_var[new_name] = var_name
                    arg_names[arg_idx] = new_name
                    op_desc.set_output(param_name, arg_names)
                    renamed_vars[var_name].append(new_name)
                    # record the latest device
                    var_device[var_name] = op_device

    for var_name, inputs in renamed_vars.items():
        if len(renamed_vars[var_name]) > 1:
            if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
                _accumulate_gradients_by_sum_op_(var_name, renamed_vars,
                                                 pending_sum_ops, len(op_descs),
                                                 var_device[var_name])
            else:
                _accumulate_gradients_by_add_ops_(var_name,
                                                  renamed_vars, pending_sum_ops,
                                                  len(op_descs),
                                                  var_device[var_name])

    op_descs_len = len(op_descs)
    # sum_op descs are sorted according to their insert position
    for key, value in collections.OrderedDict(
            reversed(list(pending_sum_ops.items()))).items():

        # NOTE(zhiqiu): Since reversed, the idx of op_descs to be inserted will remains correct.
        # For example, [0, 1, 2], and we want to insert 'a' at idx 1, 'b' at idx 2, and the expected result is [0, 1, 'a', 2, 'b'].
        # If reversed, we first insert 'b' at idx 2, it becomes [0, 1, 2, 'b'], and then insert 'a' at idx 1, it becomes [0, 1, 'a', 2, 'b'].
        # If not reverse, we first insert 'a' at idx 1, it becomes [0, 1, 'a', 2], and then insert 'b' at idx 2, it becomes [0, 1, 'a', 'b', 2].
        idx = key
        for i, op in enumerate(value):
            # update the mapping between fwd and bwd
            target_idx = idx - 1 if idx == op_descs_len else idx + i
            if grad_op_id_to_fwd_op is not None and grad_op_id_to_fwd_op.get(
                    op_descs[target_idx].original_id(), None) is not None:
                grad_op_id_to_fwd_op[op.original_id()] = grad_op_id_to_fwd_op[
                    op_descs[target_idx].original_id()]
            op_descs.insert(idx + i, op)

    return op_descs


def _remove_no_grad_branch_(op_descs,
                            no_grad_set,
                            grad_op_id_to_fwd_op=None,
                            target_vars=[]):
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
        2. all grad inputs of the grad op are in 'no_grad_set'
    NOTE: we will skip target_vars's grad name.
    """

    def _op_can_be_removed_(op_desc, no_grad_set):
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
            return True
        if _all_in_set_([
                name for name in op_desc.input_arg_names()
                if name.find(core.grad_var_suffix()) != -1
        ], no_grad_set):
            no_grad_set.update(set(out_arg_names) - target_grad_var_names)
            return True
        return False

    # Remove ops whose outputs are all in no_grad_dict
    target_grad_var_names = set(
        [var.name + core.grad_var_suffix() for var in target_vars])
    op_descs = [
        op_desc for op_desc in op_descs
        if not _op_can_be_removed_(op_desc, no_grad_set)
    ]
    # Insert fill_any_like_op with value 0
    to_insert = []
    for idx, op_desc in enumerate(op_descs):
        for arg in op_desc.input_arg_names():
            # arg is a gradient var name and arg should not have gradient
            if core.grad_var_suffix() in arg and arg in no_grad_set:
                x_in = _strip_grad_suffix_(arg)
                # the reason should be: arg can be input of another grad op
                # and the op is a not-to-remove op
                new_op_desc = _create_op_desc_("fill_any_like", {"X": [x_in]},
                                               {"Out": [arg]}, {
                                                   'value': 0,
                                                   'dtype': -1
                                               })
                # update the mapping between fwd and bwd
                if grad_op_id_to_fwd_op is not None and grad_op_id_to_fwd_op.get(
                        op_desc.original_id(), None) is not None:
                    grad_op_id_to_fwd_op[new_op_desc.original_id(
                    )] = grad_op_id_to_fwd_op[op_desc.original_id()]
                to_insert.append((new_op_desc, idx))

    list([op_descs.insert(p[1], p[0]) for p in reversed(to_insert)])

    return op_descs


def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
    """
    Pruning Program with Structural Analysis Method of Computational Graph.
    The nodes of the computational graph composed of backward OPS should be
    interconnected. If there are unconnected sub-graphs in the computational graph,
    these sub-graphs should be cut off.

    Args:
        grad_op_descs(list[core.OpDesc]): The candidate backward OpDescs.
        forward_ops(list[Operator]): The forward ops.
        input_grad_names_set(set): this set is used to store the gradients' name
            which is generated by backward ops, and input_grad_names_set can help
            to prune the unnecessary backward ops.

    Return:
        (set[core.OpDesc]): A set of OpDescs which should be pruned.
    """

    class Var(object):

        def __init__(self, var_name):
            self.var_name = var_name
            self.gen_op = None
            self.pendding_ops = []

        def set_gen_op(self, gen_op):
            assert isinstance(gen_op, Op)
            assert self.gen_op is None
            self.gen_op = gen_op

        def add_pending_op(self, op):
            assert isinstance(op, Op)
            self.pendding_ops.append(op)

    class Op(object):

        def __init__(self, op_desc):
            self.op_desc = op_desc
            self.inputs = []
            self.outputs = []

        def insert_input(self, var):
            assert isinstance(var, Var)
            self.inputs.append(var)

        def insert_output(self, var):
            assert isinstance(var, Var)
            self.outputs.append(var)

    var_versions = dict()

    def _create_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        else:
            var_versions[name].append(Var(name))
        return var_versions[name][-1]

    def _create_or_get_last_version_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        return var_versions[name][-1]

    def _create_op_node(op_desc):
        op_node = Op(op_desc)
        for input in op_desc.input_arg_names():
            var = _create_or_get_last_version_node(name=input)
            var.add_pending_op(op_node)
            op_node.insert_input(var)
        for output in op_desc.output_arg_names():
            var = _create_node(name=output)
            var.set_gen_op(op_node)
            op_node.insert_output(var)
        return op_node

    # Record the forward vars
    forward_vars_set = set() if input_grad_names_set is None else set(
        input_grad_names_set)
    for op in forward_ops:
        forward_vars_set.update(op.desc.input_arg_names())
        forward_vars_set.update(op.desc.output_arg_names())

    # Record the vars which are created during backward and is not generated by op.
    backward_vars_set = set()
    # special_op_nodes is the candidate sub-graph head node.
    special_op_nodes = set()
    for op_desc in grad_op_descs:
        input_set = set(op_desc.input_arg_names())
        # The new_vars are created during backward and is not generated by op.
        new_vars = input_set - forward_vars_set - backward_vars_set
        backward_vars_set.update(op_desc.output_arg_names())

        op_node = _create_op_node(op_desc)
        if len(new_vars) == len(input_set):
            special_op_nodes.add(op_node)

    not_need_op_descs = []
    # Start traversing all candidate sub-graph headers to check whether
    # they are connected to backward computational graphs, and if they are
    # not, list them in not_need_op_descs
    for special_op_node in special_op_nodes:
        op_list = [special_op_node]
        ready_vars = set(special_op_node.inputs)
        remove_ops = True
        candidate_ops = [special_op_node]
        while len(candidate_ops) > 0:
            op_node = candidate_ops.pop(0)
            if _all_in_set_(op_node.inputs, ready_vars):
                for out_var in op_node.outputs:
                    candidate_ops.extend(out_var.pendding_ops)
                    op_list.extend(out_var.pendding_ops)
                ready_vars.update(op_node.outputs)
            else:
                remove_ops = False
                break
        if remove_ops:
            not_need_op_descs.extend([node.op_desc for node in op_list])
    not_need_op_descs_set = set(not_need_op_descs)
    grad_op_descs_set = set(grad_op_descs)
    # If a backward computational graph is simply one sub-graph header, the
    # not_need_op_descs will be whole graph, this IF clause avoids it.
    if grad_op_descs_set == not_need_op_descs_set:
        return set()
    return not_need_op_descs_set


def serialize_op_decs(op_desc):
    protostr = op_desc.serialize_to_string()
    proto = framework_pb2.OpDesc.FromString(bytes(protostr))
    return proto.__str__()


def _append_backward_ops_with_checkpoints_(block,
                                           ops,
                                           target_vars,
                                           target_block,
                                           no_grad_dict,
                                           grad_to_var,
                                           checkpoints,
                                           grad_op_id_to_fwd_op=None):
    """
    Create grad ops with forward ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose forward recomputation backward ops need to be added
        target_vars(list[Tensor]): the vars whose gradients we want to calculate
        target_block(Block): the block which is going to hold newly generated grad ops
        no_grad_dict(dict):
            key(int) block index
            val(str): corresponding forward variable name
        checkpoints: variables that a user defined as checkpoint for forward recomputation

    Algorithms:
        0) deal with forward recomputing program descs
        1) find ops between checkpoints, i.e. recompute_segments
        2) go through all forward ops and deduce all variables that will be held in memory
            a. variables that are used across segments will be held in memory
            b. output of dropout op will be held in memory
            c. input variables will be held in memory
        3) go through each recompute_segments, add backward ops with forward recomputation
            a. add ops in current recompute_segment as forward recomputation ops
            b. rename all non-checkpoint variables in recomputation ops
            c. add backward ops of current recomputation ops
            d. add sum op for repetitive_outputs
        4) remove no grad branch as it is in _remove_no_grad_branch_
        5) Note1: all appended ops' OpRole are Backward
        6) Note2: all variables with new name should be returned so that _append_backward_vars_ can be called
        7) Note3: current forward recomputation backpropagation does not handle programs with subblock
    """

    checkpoints_name = [x.name for x in checkpoints]
    checkpoints_name = list(set(checkpoints_name))
    local_block = block.program._create_block()
    buffer_block = block.program._create_block()
    # 0) deal with forward recomputing program descs
    program_stat = ProgramStats(block, ops)
    program_stat.modify_forward_desc_for_recompute()
    program_stat.build_stats()

    # 1) find ops between checkpoints, i.e. recompute_segments
    checkpoints_name = program_stat.sort_checkpoints(checkpoints_name)
    segments = []

    if len(checkpoints_name) == 1:
        # only one checkpoint
        max_op_idx = -1
        var_group = [checkpoints_name[0]]
        for name in var_group:
            if name not in program_stat.var_op_deps:
                break
            op_idx = program_stat.var_op_deps[name]["var_as_output_ops"]
            # only count the last generate op
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if max_op_idx > 0:
            segments.append([0, max_op_idx + 1])
    else:
        start_idx = 0
        pre_segment_end_idx = -1
        while True:
            if start_idx >= len(checkpoints_name) - 1:
                break
            # min_idx: checkpoint_1' s input op
            # max_idx: checkpoint_2' s output op
            flag, min_idx, max_idx = program_stat.is_subgraph(
                [checkpoints_name[start_idx]],
                [checkpoints_name[start_idx + 1]])
            if flag:
                # max_idx + 1 since the exact and used segment end idx is max_idx
                min_idx = program_stat._update_segment_start(
                    min_idx, pre_segment_end_idx)
                segments.append([min_idx, max_idx + 1])
            else:
                _logger.info("Could not recompute op range [{}] - [{}] ".format(
                    min_idx, max_idx + 1))

            start_idx += 1

    if segments != [] and segments[0][0] != 0:
        recompute_segments = [[0, segments[0][0]]] + segments
    else:
        recompute_segments = segments

    for i, (idx1, idx2) in enumerate(recompute_segments):
        _logger.info("recompute segment[{}]".format(i))
        _logger.info("segment start op: [{}]: [{}]".format(
            ops[idx1].desc.type(), ops[idx1].desc.input_arg_names()))
        _logger.info("segment end op: [{}]: [{}]".format(
            ops[idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names()))

    # 2) go through all forward ops and deduce all variables that will be held in memory
    vars_should_be_hold = []
    # a. variables that are used across segments will be held in memory
    for segment in recompute_segments:
        vars_should_be_hold.extend(
            program_stat.get_out_of_subgraph_vars(segment[0], segment[1]))

    cross_vars = set(vars_should_be_hold) - set(checkpoints_name)
    _logger.info("found [{}] vars which cross recompute segment: [{}], better checkpoints might be set to reduce those vars".format( \
    len(cross_vars), cross_vars))

    # b. output of seed op should be kept in memory
    vars_should_be_hold.extend(program_stat.get_reserved_vars())
    # c. input variables are checkpoints
    vars_should_be_hold.extend(program_stat.get_input_nodes())
    vars_should_be_hold = list(set(vars_should_be_hold))

    # 3) go through each recompute_segments, add backward ops with forward recomputation
    grad_op_descs = []
    var_name_dict = {}

    vars_in_memory = vars_should_be_hold + checkpoints_name

    max_calculated_op_position = len(ops)
    device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
    if recompute_segments == []:
        gap_ops = ops[0:max_calculated_op_position]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception("Recompute doesn't support ops with sub_block, "
                                "invoked op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, no_grad_dict[block.idx], [])

            # record the mapping between fwd and bwd
            if grad_op_id_to_fwd_op is not None:
                for op_desc in grad_op_desc:
                    grad_op_id_to_fwd_op[op_desc.original_id()] = op

            # Set device for grad_op according to forward Op
            if op.desc.has_attr(device_attr_name):
                op_device = op.desc.attr(device_attr_name)
                for op_desc in grad_op_desc:
                    op_desc._set_attr(device_attr_name, op_device)
            added_descs = _add_descs_to_block(grad_op_desc, local_block,
                                              grad_op_id_to_fwd_op)
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

    for i, segment in enumerate(recompute_segments[::-1]):
        gap_ops = ops[segment[1]:max_calculated_op_position]
        max_calculated_op_position = segment[0]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception("Recompute doesn't support ops with sub_block, "
                                "invoked op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, no_grad_dict[block.idx], [])

            # record the mapping between fwd and bwd
            if grad_op_id_to_fwd_op is not None:
                for op_desc in grad_op_desc:
                    grad_op_id_to_fwd_op[op_desc.original_id()] = op

            # Set device for grad_op according to forward Op
            if op.desc.has_attr(device_attr_name):
                op_device = op.desc.attr(device_attr_name)
                for op_desc in grad_op_desc:
                    op_desc._set_attr(device_attr_name, op_device)
            added_descs = _add_descs_to_block(grad_op_desc, local_block,
                                              grad_op_id_to_fwd_op)
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

        ff_ops = ops[segment[0]:segment[1]]
        var_suffix = ".subprog_%d" % i

        for op in ff_ops:
            if op.has_attr("sub_block"):
                raise Exception("Recompute doesn't support ops with sub_block, "
                                "invoked op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            input_and_output_names = []
            input_and_output_names.extend(op.desc.input_arg_names())
            input_and_output_names.extend(op.desc.output_arg_names())
            for name in input_and_output_names:
                if block.var(name).persistable or name in checkpoints_name:
                    continue
                if name in vars_should_be_hold:
                    continue
                if name not in var_name_dict:
                    var_name_dict[name] = name + var_suffix

                    # we should create the rename var in subprog, otherwise its VarType will be BOOL
                    ref_var = block.program.global_block().var(name)
                    block.create_var(name=var_name_dict[name],
                                     shape=ref_var.shape,
                                     dtype=ref_var.dtype,
                                     type=ref_var.type,
                                     persistable=ref_var.persistable,
                                     stop_gradient=ref_var.stop_gradient)

        # 3.a. add ops in current recompute_segment as forward recomputation ops
        buffer_descs = _add_needed_descs_to_block(ff_ops, buffer_block, block,
                                                  vars_in_memory,
                                                  grad_op_id_to_fwd_op)
        added_descs = _add_descs_to_block(ff_ops, local_block,
                                          grad_op_id_to_fwd_op)

        # 3.b. rename all non-checkpoint variables in recomputation ops
        for key in var_name_dict:
            _rename_arg_(buffer_descs, key, var_name_dict[key])

        # added_descs should be in grad_op_descs because it is backward op desc
        grad_op_descs.extend(buffer_descs)

        # 3.c. add backward ops for all ops in current segment
        for op_desc in reversed(added_descs):
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op_desc, no_grad_dict[block.idx], [])

            # record the mapping between fwd and bwd
            if grad_op_id_to_fwd_op is not None:
                for g_op_desc in grad_op_desc:
                    grad_op_id_to_fwd_op[g_op_desc.original_id(
                    )] = grad_op_id_to_fwd_op[op_desc.original_id()]

            # Set device for grad_op according to forward Op
            if op_desc.has_attr(device_attr_name):
                op_device = op_desc.attr(device_attr_name)
                for g_op_desc in grad_op_desc:
                    g_op_desc._set_attr(device_attr_name, op_device)

            for key in var_name_dict:
                _rename_arg_(grad_op_desc, key, var_name_dict[key])
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)

    # 3.d. add sum op for repetitive_outputs
    grad_op_descs = _addup_repetitive_outputs_(
        grad_op_descs, block.idx, grad_op_id_to_fwd_op=grad_op_id_to_fwd_op)
    # 4) remove no grad branch as it is in _remove_no_grad_branch_
    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx],
                                            grad_op_id_to_fwd_op, target_vars)
    added_descs = _add_descs_to_block(grad_op_descs, target_block,
                                      grad_op_id_to_fwd_op)
    return program_stat, checkpoints_name, vars_should_be_hold, recompute_segments


def _get_sub_block_path(sub_block,
                        sub_block_op_desc,
                        no_grad_set,
                        op_path_dict,
                        sub_block_target_names=None):
    """
    Get output vars in subblock which will be assigned to parent block.
    It is used to find the grad path in subblock.

    Args:
        sub_block(Block): The sub-block in which to get op path.
        sub_block_op_desc: The op desc of the sub-block op such as 'while', 'conditional_block' and 'recurrent'.
        no_grad_set(set): The set of no grad var name. no_grad_set will be changed.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        sub_block_target_names(set): Target var names of sub-block.
    Return:
        The forward op path of sub-block corresponding to backward op.
1102
    """
1103

1104 1105 1106
    assert sub_block_op_desc.has_attr(
        "sub_block") and sub_block.idx == sub_block_op_desc._block_attr_id(
            "sub_block")
1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
    assert isinstance(sub_block_target_names, (set, type(None)))

    if sub_block_target_names is None:
        sub_block_target_names = sub_block_op_desc.output_arg_names

    # TODO(huihuangzheng): add support for recurrent op.
    if sub_block_op_desc.type in ["conditional_block", "while"]:
        # Step1: get the output vars in sub-block
        sub_outputs = [
            sub_block._var_recursive(var) for var in sub_block_target_names
        ]
        for var in sub_block_target_names:
            for op_desc in sub_block.ops:
                if var in op_desc.output_arg_names:
                    for name in op_desc.input_arg_names:
                        sub_outputs.append(sub_block._var_recursive(name))

        # Step2: find op path of sub-block
        is_while = sub_block_op_desc.type in ["while"]
        sub_block_op_path = _find_op_path_(sub_block, sub_outputs, [],
                                           no_grad_set, op_path_dict, is_while)
        return sub_block_op_path
    return sub_block.ops


def _is_grad_op_(op):
    op_maker = core.op_proto_and_checker_maker
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    if op_maker.kOpRoleVarAttrName() in op.attr_names and \
            int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):
        return True
    return False


def _rename_grad_name_(name, grad_order):
    return 'grad/' * grad_order + name


def _append_backward_ops_(block,
                          ops,
                          target_vars,
                          target_block,
                          no_grad_dict,
                          grad_to_var,
                          callbacks=None,
                          input_grad_names_set=None,
                          op_path_dict=None,
                          distop_context=None,
                          rename_var_map=None,
                          grad_op_id_to_fwd_op=None):
1157 1158 1159 1160 1161
    """
    Create all grad ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
1162
        ops(Op): the forward operators whose backward ops need to be added
1163
        target_vars(list[Tensor]): the loss vars we want to calculate gradient.
1164
        target_block(Block): the block which is going to hold new generated grad ops
1165
        no_grad_dict(dict):
            key(int)  block index
            val(set) a set of variable names. These variables have no gradient
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
        callbacks(callable object): a callable object used to decorate new generated grad ops
        input_grad_names_set(set): this set stores the names of gradients already
            generated by backward ops; it helps to prune unnecessary
            backward ops.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        rename_var_map(dict): used to associate target_grad var name with first grad_op input name.
            Only used for high-order gradients.
    """

    # Build the mapping between the forward op and backward op (Only for auto parallel)
    def update_distop_context(distop_context, op_grad_to_var,
                              appending_grad_times):
        distop_context.grad_var_to_var[appending_grad_times].update(
            op_grad_to_var)
        for op_desc in grad_op_desc:
            assert op_desc.original_id(
            ) not in distop_context.grad_op_id_to_op_id
            distop_context.grad_op_id_to_op_id[
                op_desc.original_id()] = op.desc.original_id()

    if callbacks is not None:
        assert (isinstance(callbacks, (list, tuple)))
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")

    # grad_op_descs holds created grad_op, and will be appended to target_block
    grad_op_descs = []
    program = block.program

    if rename_var_map is None:
        rename_var_map = {}
    assert isinstance(rename_var_map, dict)

    # add grad_op_desc by reversed ops
    for op in reversed(ops):
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
            sub_block = program.block(op._block_attr_id("sub_block"))
            grad_sub_block = program._create_block()
            grad_sub_block._set_forward_block_idx(sub_block.idx)
            # see following comments for why set None here.
            pre_input_grad_names_set = copy.copy(input_grad_names_set)
            input_grad_names_set = None
            sub_block_path = op_path_dict[op._block_attr_id("sub_block")]
            _append_backward_ops_(sub_block,
                                  sub_block_path,
                                  target_vars,
                                  grad_sub_block,
                                  no_grad_dict,
                                  grad_to_var,
                                  callbacks,
                                  input_grad_names_set,
                                  op_path_dict,
                                  grad_op_id_to_fwd_op=grad_op_id_to_fwd_op)
            input_grad_names_set = pre_input_grad_names_set

            program._rollback()
            grad_sub_block_list.append(grad_sub_block.desc)

        # Getting op's corresponding grad_op
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            op.desc, no_grad_dict[block.idx], grad_sub_block_list)

        # record the mapping between fwd and bwd
        if grad_op_id_to_fwd_op is not None:
            for op_desc in grad_op_desc:
                grad_op_id_to_fwd_op[op_desc.original_id()] = op

        # Build the mapping between the forward op and backward op (Only for auto parallel)
        if distop_context is not None:
            update_distop_context(distop_context, op_grad_to_var,
                                  program._appending_grad_times)
        else:
            default_ctx = getattr(paddle.distributed.auto_parallel.dist_context,
                                  '_g_default_distributed_context', None)
            if default_ctx is not None:
                distop_context = default_ctx.dist_op_context
                update_distop_context(distop_context, op_grad_to_var,
                                      program._appending_grad_times)

        # Set device for grad_op according to forward Op
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        if op.desc.has_attr(device_attr_name):
            op_device = op.desc.attr(device_attr_name)
            for op_desc in grad_op_desc:
                op_desc._set_attr(device_attr_name, op_device)

        # Rename internal gradient variables in multiple backward
        # so that they have different names with previous backward.
        # For example:
        #  y = x * x, grad = fluid.gradients(fluid.gradients(y, x) + y * y, x)
        # In second-time backward, gradient variable names of the partial
        # forward network (y * y) may have the same names as those from the
        # first-time fluid.gradients(y, x).
        # So rename here before _addup_repetitive_outputs_.
        if program._appending_grad_times > 1:
            for op_desc in grad_op_desc:
                forward_op_inputs = op.desc.input_arg_names()
                for name in op_desc.input_arg_names():
                    if name in rename_var_map and name not in forward_op_inputs:
                        op_desc._rename_input(name, rename_var_map[name])
                for name in op_desc.output_arg_names():
                    if "@GRAD" not in name:
                        continue
                    if block.desc.find_var(name.encode("ascii")):
                        new_name = _rename_grad_name_(
                            name, program._appending_grad_times)
                        op_desc._rename_output(name, new_name)
                        rename_var_map[name] = new_name

                        if name in op_grad_to_var:
                            # Build the mapping between the grad var name and var name (Only for auto parallel)
                            if distop_context is not None:
                                distop_context.grad_var_to_var[
                                    program._appending_grad_times][
                                        new_name] = op_grad_to_var[name]
                            op_grad_to_var[new_name] = op_grad_to_var[name]
                            op_grad_to_var.pop(name)

        # If input_grad_names_set is not None, extend grad_op_descs only when
        # any input grad is in the outputs of previous grad ops.
        # But this strategy does not suit while op in control flow:
        # for while op, the grads may be generated in the next loop iteration.
        if input_grad_names_set is not None:
            is_grad_name = lambda name: name.find(core.grad_var_suffix(
            )) != -1 or name in input_grad_names_set
            is_append_grad = False
            for op_desc in grad_op_desc:
                input_grad_names = [
                    name for name in op_desc.input_arg_names()
                    if is_grad_name(name)
                ]
                # Some gradient ops, like increment, are not standard:
                # there is no @GRAD in these ops' inputs.
                if len(input_grad_names) == 0:
                    is_append_grad = True
                    break

                if _some_in_set_(input_grad_names, input_grad_names_set):
                    grad_op_descs.append(op_desc)
                    is_append_grad = True
                    for name in op_desc.output_arg_names():
                        input_grad_names_set.add(name)
            if is_append_grad:
                grad_to_var.update(op_grad_to_var)
        else:
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)

    # record mapping between grad var name and var name (Only for auto parallel)
    grad_var_to_var = None
    if distop_context is not None:
        grad_var_to_var = distop_context.grad_var_to_var[
            program._appending_grad_times]
    # sum a parameter's gradient variables when it has multiple gradients
    grad_op_descs = _addup_repetitive_outputs_(
        grad_op_descs,
        block.idx,
        grad_var_to_var,
        grad_op_id_to_fwd_op=grad_op_id_to_fwd_op)

    # if all outputs of the grad op are in no_grad_set, then just remove and fill zero
    # if all inputs of the grad op are in no_grad_set, just remove this op
    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx],
                                            grad_op_id_to_fwd_op, target_vars)

    # remove some backward ops
    not_need_ops = _find_not_need_ops(grad_op_descs, ops, input_grad_names_set)

    grad_op_descs = [
        op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
    ]

    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for op_desc in grad_op_descs:
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
        grad_to_var["__current_op_desc__"] = new_op_desc
        if callbacks is not None:
            assert (isinstance(callbacks, (list, tuple)))
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)


def _is_grad_var_(var_name):
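    # A name is considered a grad var when it contains the grad suffix
    # (e.g. "@GRAD").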
    return core.grad_var_suffix() in var_name


# Find the op who holds the sub_block as its "sub_block" attr
def _find_parent_op_(sub_block):
    sub_block_id = sub_block.idx

    if sub_block_id == 0:
        return None

    program = sub_block.program
    for block_id in range(program.num_blocks):
        block_desc = program.block(block_id).desc
        for op_idx in range(block_desc.op_size()):
            op = block_desc.op(op_idx)
            if op.has_attr("sub_block") and op._block_attr_id(
                    "sub_block") == sub_block_id:
                return op

    # NOTE(paddle-dev): When optimizer is added in conditional block,
    # sub_block may not be found.
    return None


def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
            val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable
    """
    ops_to_remove = []
    '''
    NOTE(paddle-dev): while_grad op may hold some inputs which are not found
    in the parent/forward block, and they are also the outputs of while_grad
    op. These kinds of inputs are the recursive outputs inside while_grad op.
    They should be considered as "already created" when scanning the inner
    ops of while_grad ops.
    '''
    parent_op = _find_parent_op_(block)
    parent_op_vars = []
    if parent_op is not None:
        input_args = parent_op.input_arg_names()
        output_args = parent_op.output_arg_names()
        for in_arg in input_args:
            if in_arg in output_args:
                parent_op_vars.append(in_arg)

    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
            sub_block = block.program.block(op_desc._block_attr_id("sub_block"))
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)

        grad_var_ins = [
            var for var in op_desc.input_arg_names() if _is_grad_var_(var)
        ]
        grad_var_outs = [
            var for var in op_desc.output_arg_names() if _is_grad_var_(var)
        ]

        inputs = [
            var for var in op_desc.input_arg_names()
            if var != core.empty_var_name()
        ]
        outputs = [
            var for var in op_desc.output_arg_names()
            if var != core.empty_var_name()
        ]

        # If the outputs of the grad op are empty, just remove it
        if not outputs:
            ops_to_remove.append(op_idx)
            continue
        else:
            '''
            If the output is not empty and there is any grad input, find
            whether there is any existing input. If not, just remove it.
            '''
            if grad_var_ins:
                existing_grad_var_ins = [
                    var for var in grad_var_ins
                    if block.desc.has_var_recursive(var.encode())
                    or var in parent_op_vars
                ]
                if not existing_grad_var_ins:
                    '''
                    FIXME(paddle-dev, zengjinle): rnn_memory_helper_grad is used
                    in recurrent op. The input of this op does not even exist in
                    the program! Therefore, any dependency analysis would not
                    work for this op! If I do not add the following code, this op
                    would be pruned, and the calculation result would be wrong.
                    Maybe we should re-design this op later...
                    '''
                    if op_desc.type() not in ['rnn_memory_helper_grad']:
                        ops_to_remove.append(op_idx)
                        continue

        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
            if block.desc.has_var_recursive(grad_var_name.encode(
            )) or grad_var_name == core.empty_var_name():
                continue
            block.desc.var(grad_var_name.encode())
            new_vars.add(grad_var_name)
            if grad_var_name not in grad_to_var:
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.check_attrs()
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)

        for arg in op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_shape_(arg, block)

    for op_idx in reversed(ops_to_remove):
        block.desc._remove_op(op_idx, op_idx + 1)


def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
                op_desc._rename_input(name, var_map[name])

        for name in op_desc.output_arg_names():
            if "@GRAD" not in name:
                continue
            if block.desc.find_var(name.encode("ascii")):
                new_name = unique_name.generate(name)
                op_desc._rename_output(name, new_name)
                var_map[name] = new_name

    for g, ng in var_map.items():
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
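    # Collect, per block index, the (grad-suffixed) names of all variables
    # whose stop_gradient flag is True.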
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
        for var in list(block.vars.values()):
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict


def _get_son_parent_block_idx_dict(program, current_block_idx):
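    # Illustrative example: if block 2's parent is block 1, block 1's parent is
    # block 0, and block 0 has parent_idx -1, then current_block_idx=2 yields
    # OrderedDict([(2, 1), (1, 0), (0, -1)]).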

    son_parent_block_idx_dict = collections.OrderedDict()
    while current_block_idx >= 0:
        parent_block_idx = program.block(current_block_idx).parent_idx
        son_parent_block_idx_dict[current_block_idx] = parent_block_idx
        current_block_idx = parent_block_idx

    return son_parent_block_idx_dict


def _get_no_grad_set_name(no_grad_set):
    no_grad_set_name = set()
    if no_grad_set is not None:
        if isinstance(no_grad_set, (set, list, tuple)):
            for i, no_grad_var in enumerate(no_grad_set):
                if isinstance(no_grad_var, framework.Variable):
                    no_grad_set_name.add(no_grad_var.name)
                elif isinstance(no_grad_var, str):
                    no_grad_set_name.add(no_grad_var)
                else:
                    raise TypeError(
                        "The type of no_grad_set's member must be paddle.fluid.Variable or str, but received %s."
                        % (type(no_grad_var)))
        else:
            raise TypeError(
                "The type of no_grad_set should be set or list or tuple, but received {}"
                .format(type(no_grad_set)))
    return no_grad_set_name


@framework.static_only
def append_backward(loss,
                    parameter_list=None,
                    no_grad_set=None,
                    callbacks=None,
                    checkpoints=None,
                    distop_context=None):
    """
    :api_attr: Static Graph

    This function appends backward part to main_program.

    A complete neural network training is made up of forward and backward
    propagation. However, when we configure a network, we only need to
    specify its forward part. This function uses the chain rule to automatically
    generate the backward part according to the forward part.

    In most cases, users do not need to invoke this function manually.
    It will be automatically invoked by the optimizer's `minimize` function.

    Parameters:
        loss(Tensor): The loss Tensor of the network.
        parameter_list(list[Tensor|str]|tuple[Tensor|str], optional): List/Tuple of Parameters or Parameter.names
                                           that need to be updated by optimizers.
                                           If it is None, all parameters
                                           will be updated.
                                           Default: None.
        no_grad_set(set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All Tensors with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               If this parameter is not None, the Tensors or Tensor.names in this set will be added to the default set.
                               Default: None.
        callbacks(list[callable object]|tuple[callable object], optional): List/Tuple of callback functions.
                                               The callbacks are used for
                                               doing some custom jobs during
                                               backward part building. All
                                               callable objects in it will
                                               be invoked once each time a
                                               new gradient operator is added
                                               into the program. The callable
Z
zhangchunle 已提交
1602
                                               object must have two input
1603 1604
                                               parameters: ``block`` and ``context`` .
                                               The ``block`` is the :ref:`api_guide_Block_en` which
1605
                                               the new gradient operator will
1606
                                               be added to. The ``context`` is a
1607
                                               map, whose keys are gradient
1608 1609 1610
                                               Tensor names and values are
                                               corresponding original :ref:`api_guide_tensor_en` .
                                               In addition to this, the ``context``
1611
                                               has another special key-value pair:
1612
                                               the key is string ``__current_op_desc__``
1613 1614 1615
                                               and the value is the op_desc of the
                                               gradient operator who has just
                                               triggered the callable object.
1616
                                               Default: None.
F
fengjiayi 已提交
1617 1618

    Returns:
1619 1620
        list of tuple ( :ref:`api_guide_tensor_en` , :ref:`api_guide_tensor_en` ): Pairs of parameter and its corresponding gradients.
        The key is the parameter and the value is gradient Tensor.
F
fengjiayi 已提交
1621 1622

    Raises:
1623
        AssertionError: If ``loss`` is not an instance of Tensor.
F
fengjiayi 已提交
1624 1625 1626 1627

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[None, 13], dtype='int64')
            y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
            x_emb = paddle.static.nn.embedding(x, size=[100, 256])
            y_predict = paddle.static.nn.fc(x=x_emb, size=1, activation=None, name='my_fc')
            loss = F.square_error_cost(input=y_predict, label=y)
            avg_loss = paddle.mean(loss)

            # Get all weights in main_program, not include bias.
            all_weights = [param for param in paddle.static.default_main_program().block(0).all_parameters() if 'w_' in param.name]
            all_weights_name = [w.name for w in all_weights]

            # return all param_grads needed to be updated if parameter_list set default None.
            p_g_list1 = paddle.static.append_backward(loss=avg_loss)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

            # return the param_grads corresponding to parameter_list that can be list of param (Tensor).
            p_g_list2 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # parameter_list can be list of param.name (str).
            p_g_list3 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights_name)
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # no_grad_set can be set of Tensors that means grad will be cut off from these Tensors.
            p_g_list4 = paddle.static.append_backward(loss=avg_loss, no_grad_set=set([x_emb]))
            # output: [(my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

            # no_grad_set can be set of Tensor.name when the Tensor is created inside layers and can't be specified explicitly.
            p_g_list5 = paddle.static.append_backward(loss=avg_loss, no_grad_set=set(['my_fc.b_0']))
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # return [] because all param_grads are filtered by no_grad_set.
            p_g_list6 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights, no_grad_set=set(all_weights))

    """
    grad_op_id_to_fwd_op = {
    }  # for cuda graph usage, recording the mapping between grad op original id to fwd op

    check_type(loss, 'loss', framework.Variable,
               'paddle.static.append_backward')

    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
        _find_loss_op_(loss)

    loss.op._set_attr(
        core.op_proto_and_checker_maker.kOpRoleAttrName(),
        int(core.op_proto_and_checker_maker.OpRole.Forward)
        | int(core.op_proto_and_checker_maker.OpRole.Loss))

    if callbacks is not None:
        check_type(callbacks, 'callbacks', (list, tuple),
                   'paddle.static.append_backward')

    program = loss.block.program
    root_block = program.block(0)
    current_block_idx = program.current_block_idx
    current_block = program.block(current_block_idx)

    is_in_control_flow = current_block_idx != 0

    # Double grad is not supported in sub-block (control flow)
    if not is_in_control_flow:
        # _appending_grad_times used for double grad
        program._appending_grad_times += 1

    if no_grad_set is None:
        no_grad_set = set()
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
    no_grad_dict = _get_stop_gradients_(program)
    # no_grad_set only contains vars in block 0
    # Todo(liym27): support vars in sub block
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))

    # Currently this is only meant to support optimizer.minimize called inside
    # a switch branch, which appends backward ops in a sub_block.
    # Note: while_loop is also control flow, but it makes no sense to call an optimizer in while.
    # Todo: report error when it is in while_loop
    if is_in_control_flow:
        # create grad block if in switch control flow.
        target_grad_block = program._create_block(
            parent_idx=current_block.parent_idx)
        target_grad_block._set_forward_block_idx(current_block_idx)
        # after _create_block, program.current_block changes
    else:
        target_grad_block = root_block

    son_parent_block_idx_dict = _get_son_parent_block_idx_dict(
        program, current_block_idx)

    block_fwd_op_num_dict = {}  # block_id: fwd_op_num
    for idx in son_parent_block_idx_dict:
        block_fwd_op_num_dict[idx] = program.block(idx).desc.op_size()
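    # Recording the current op count per block lets us know, once grad ops have
    # been appended, where the backward part starts (used below as fwd_op_num).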

    grad_to_var = dict()

    # pass the cuda_graph_attr to the fill_constant which generates the loss_grad
    op_desc = _create_loss_op_desc_(loss)
    grad_op_id_to_fwd_op[op_desc.original_id()] = loss.op
    target_grad_block.desc.append_op().copy_from(op_desc)

    for block_idx in son_parent_block_idx_dict:
        block = program.block(block_idx)

        block_no_grad_set = set(
            map(_strip_grad_suffix_, no_grad_dict[block_idx]))

        op_path_dict = dict()
        op_path = _find_op_path_(block, [loss], [], block_no_grad_set,
                                 op_path_dict)

        no_grad_vars = _find_no_grad_vars(block, op_path, [loss],
                                          block_no_grad_set)

        block_no_grad_set.update(no_grad_vars)
        no_grad_dict[block_idx].update(
            list(map(_append_grad_suffix_, block_no_grad_set)))

        input_grad_names_set = None
        # For double backward, input_grad_names is used for filtering
        # out some unused gradient ops.

        # TODO(liym27): need a better design.
        # not support double grad in control flow sub-block now.
        if not is_in_control_flow:
            if program._appending_grad_times > 1:
                input_grad_names_set = set([_append_grad_suffix_(loss.name)])

        # TODO: support _append_backward_ops_with_checkpoints_ in
        #  sub-block (control flow)
        is_recompute = False
        if checkpoints is not None and \
                isinstance(checkpoints, list) and \
                len(checkpoints) > 0:
            is_recompute = True
            program_stat, checkpoint_names, \
                vars_should_be_hold, \
                recompute_segments = \
                _append_backward_ops_with_checkpoints_(
                    root_block,
                    op_path,
                    [loss],
                    root_block,
                    no_grad_dict,
                    grad_to_var,
                    checkpoints,
                    grad_op_id_to_fwd_op)
        else:
            _append_backward_ops_(
                block,  # the block where forward ops are in
                op_path,
                [loss],
                target_grad_block,
                no_grad_dict,
                grad_to_var,
                callbacks,
                input_grad_names_set=input_grad_names_set,
                op_path_dict=op_path_dict,
                distop_context=distop_context,
                grad_op_id_to_fwd_op=grad_op_id_to_fwd_op)

    grad_info_map = dict()

    # if in control flow, target_grad_block is a created new block which only contains grad ops,
    # so fwd_op_num is set to 0.
    fwd_op_num = block_fwd_op_num_dict[
        current_block_idx] if not is_in_control_flow else 0

    # Because append_backward may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(target_grad_block, fwd_op_num, grad_to_var, {})

    _append_backward_vars_(target_grad_block, fwd_op_num, grad_to_var,
                           grad_info_map)

    program.current_block_idx = current_block_idx
    program._sync_with_cpp()

    # for cuda graph, copy the cuda graph attr from forward op to backward op
    for op in target_grad_block.ops:
        if grad_op_id_to_fwd_op.get(op.desc.original_id(), None) is not None:
            fwd_op = grad_op_id_to_fwd_op[op.desc.original_id()]
            op._cuda_graph_attr = fwd_op._cuda_graph_attr

    if parameter_list is not None:
        check_type(parameter_list, 'parameter_list', (list, tuple, set),
                   'fluid.backward.append_backward')
        parameters = []
        for i, param in enumerate(parameter_list):
            check_type(param, 'parameter_list[%s]' % i,
                       (framework.Variable, str),
                       'fluid.backward.append_backward')
            if isinstance(param, framework.Variable):
                parameters.append(param.name)
            elif isinstance(param, str):
                parameters.append(param)
    else:
        params = program.global_block().all_parameters()
        parameters = [param.name for param in params if param.trainable]

    params_and_grads = []
    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
    for param in parameters:
        if param not in grad_info_map:
            continue
        grad_info = grad_info_map[param]
        grad_block = grad_info[1]
        if not grad_block.has_var(grad_info[0]):
            raise ValueError("grad block[{0}] did not have grad var {1}".format(
                grad_info[1], grad_info[0]))
        # Get the param var from the global block
        param_var = program.global_block().var(param)
        grad_var = grad_block.var(grad_info[0])
        if not is_in_control_flow:
            if loss.block.has_var(grad_info[0]):
                params_and_grads.append((param_var, grad_var))
            else:
                params_and_grads.append((param_var, None))
        else:
            params_and_grads.append((param_var, grad_var))

    for p, g in params_and_grads:
        if g is None:
            continue
        ops = grad_block.ops if is_in_control_flow else program.global_block(
        ).ops
        for op in reversed(ops):
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
        attr_val = [p.name, g.name]
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
        g.op._set_attr(op_role_var_attr_name, attr_val)

    if is_recompute:
        return params_and_grads, checkpoint_names
    else:
        return params_and_grads


def _as_list(x):
    if x is None:
        return []
    return list(x) if isinstance(x, Sequence) else [x]


def _is_ancestor_block(ancestor_block, block):
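    # Walk up the parent chain of `block`; return True if `ancestor_block` is
    # reached before the chain ends.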
    prog = block.program
    ancestor_idx = ancestor_block.idx
    parent_idx = block.parent_idx

    while parent_idx != -1:
        if parent_idx == ancestor_idx:
            return True
        parent_idx = prog.block(parent_idx).parent_idx

    return False


def _get_output_names(cur_block, targets):
    """
    In `cur_block`, get output names those linked to targets.
    NOTE:
    1. `targets` can be in `cur_block`;
    Usually, `targets` is in `cur_block`. However, considering control flow,
    2. `targets` may be in sub-block but `cur_block` is an ancestor of `targets[0].block`;
    3. `targets` may be in the block which is ancestor of `cur_block`.
    """

    block = targets[0].block if targets else cur_block
    current_output_names = set([out.name for out in targets])

    # 1. If `targets` in cur_block or the ancestral block of `cur_block`
    if block.idx == cur_block.idx or _is_ancestor_block(block, cur_block):
        return current_output_names

    # 2. If `cur_block` is an ancestor of `targets[0].block`, run while loop
    prog = cur_block.program
    while block.idx != cur_block.idx:
        assert block.parent_idx != -1
        parent_block = prog.block(block.parent_idx)

        parent_block_output_names = set()
        for op in reversed(block.ops):
            if _some_in_set_(op.desc.output_arg_names(), current_output_names):
                for name in op.desc.input_arg_names():
                    current_output_names.add(name)
                    if not block.desc.find_var(name.encode()) \
                            and parent_block.desc.find_var(name.encode()):
                        parent_block_output_names.add(name)

        block = parent_block
        current_output_names = parent_block_output_names

    return current_output_names


def _find_no_grad_vars(block, op_path, targets, no_grad_set):
    """
    Find the vars which are not used in the program, and
    those vars belong to no_grad_var.
    """
    output_names = _get_output_names(block, targets)
    no_grad_var = []
    for i, op in reversed(list(enumerate(op_path))):
        # If the op has sub_block, it is too complicated to find the correct no_grad_var.
        if not op.has_attr("sub_block"):
            for out_var in op.desc.output_arg_names():
                if out_var not in output_names and out_var not in op.desc.input_arg_names(
                ) and not block.vars[out_var].stop_gradient:
                    no_grad_var.append(out_var)
        for name in op.desc.input_arg_names():
            if name not in no_grad_set:
                output_names.add(name)
    return set(no_grad_var)


def _find_op_path_(block,
                   targets,
                   inputs,
                   no_grad_set,
                   op_path_dict=None,
                   is_while=False):
    """
    It is used to find the grad path in `block`.

    Args:
        block(Block): The block in which to get op path.
        targets(list[Variable]): The target variables.
        inputs(list[Variable]): The input variables.
        no_grad_set(set): The set of no grad var name. no_grad_set will be changed.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        is_while(bool): Whether or not `block` is while block
    Return:
        The forward op path of block corresponding to backward op.
    """

    input_names = set([inp.name for inp in inputs])
    output_names = _get_output_names(block, targets)
    if op_path_dict is None:
        op_path_dict = dict()

    relevant_op_flags = [True] * len(block.ops)

    # All the inputs of the block are used if inputs is empty,
    if inputs:
        for i, op in enumerate(block.ops):
            if _some_in_set_(op.desc.input_arg_names(),
                             input_names) and core.has_non_empty_grad_op_maker(
                                 op.type):
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
        if op.has_attr("sub_block"):
            sub_block_id = op._block_attr_id("sub_block")
            sub_block = block.program.block(sub_block_id)
            sub_block_target_names = output_names & set(op.output_arg_names)
            sub_block_path = _get_sub_block_path(sub_block, op, set(),
                                                 op_path_dict,
                                                 sub_block_target_names)
            op_path_dict[sub_block_id] = sub_block_path

        if _some_in_set_(op.desc.output_arg_names(),
                         output_names) and core.has_non_empty_grad_op_maker(
                             op.type):
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

    if is_while:
        # If block is while block, dealing with op specifically again.
        # TODO(liym27): Consider special types of ops.
        for i, op in reversed(list(enumerate(block.ops))):
            if relevant_op_flags[i] == False \
                    and _some_in_set_(op.desc.output_arg_names(), output_names):
                relevant_op_flags[i] = True

    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
                if name not in input_names and block.vars[name].stop_gradient:
                    no_grad_set.add(name)

    return op_path


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the gradients of targets to inputs.

    Args:
        targets(Tensor|list[Tensor]|tuple[Tensor]): The target Tensors
        inputs(Tensor|list[Tensor]|tuple[Tensor]): The input Tensors
        target_gradients (Tensor|list[Tensor]|tuple[Tensor], optional): The gradient Tensors
            of targets which has the same shape with targets, If None, ones will
            be created for them.
        no_grad_set(set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All Tensors with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               If this parameter is not None, the Tensors or Tensor.names in this set will be added to the default set.
                               Default: None.

    Return:
        (list[Tensor]): A list of gradients for inputs
        If an input does not affect targets, the corresponding gradient Tensor
        will be None
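
    Examples:
        .. code-block:: python

            # Illustrative sketch (mirrors the ``gradients`` example in this
            # file); ``calc_gradient`` is the helper that
            # ``paddle.static.gradients`` wraps.
            import paddle
            import paddle.nn.functional as F

            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
            x.stop_gradient = False
            y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
            y = F.relu(y)
            x_grad = paddle.fluid.backward.calc_gradient([y], x)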
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
    # increase appending gradients times
    prog._appending_grad_times += 1
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets")

    if no_grad_set is None:
        no_grad_set = set()
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
    no_grad_dict = _get_stop_gradients_(prog)
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))

    fwd_op_num = block.desc.op_size()

    input_grad_names_set = set()

    target_grad_map = {}
    rename_var_map = {}
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        grad_name = _append_grad_suffix_(target.name)
        if grad is None:
            target_shape = target.name + '_shape'
            block.desc.append_op().copy_from(
                _create_op_desc_("shape", {'Input': [target.name]},
                                 {"Out": [target_shape]}, {}))
            input_grad_names_set.add(target_shape)
            op_desc = _create_op_desc_("fill_constant",
                                       {"ShapeTensor": [target_shape]},
                                       {"Out": [grad_name]}, {
                                           "shape": target.shape,
                                           "value": 1.0,
                                           "dtype": target.dtype,
                                       })

            block.desc.append_op().copy_from(op_desc)
            input_grad_names_set.add(grad_name)
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s" %
                    (target.name, grad.name))
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name
            input_grad_names_set.add(grad.name)
            rename_var_map[grad_name] = grad.name

    # For double backward, input_grad_names is used to filter out some
    # unused gradient ops. rename_var_map is used to
    # associate target_grad var name with first grad_op input name.
    if prog._appending_grad_times == 1:
        input_grad_names_set = None
        rename_var_map = {}

    for input in inputs:
        if input.block.program != prog:
            raise ValueError("input must be in the same program as targets")

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))

    op_path_dict = dict()
    op_path = _find_op_path_(block, targets, inputs, block_no_grad_set,
                             op_path_dict)

    # find no grad var by op_path
    no_grad_vars = _find_no_grad_vars(block, op_path, targets,
                                      block_no_grad_set)
    block_no_grad_set.update(no_grad_vars)

    no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
    grad_to_var = dict()
    grad_info_map = dict()
    _append_backward_ops_(block,
                          op_path,
                          targets,
                          block,
                          no_grad_dict,
                          grad_to_var,
                          input_grad_names_set=input_grad_names_set,
                          op_path_dict=op_path_dict,
                          rename_var_map=rename_var_map)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map)

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
    prog._sync_with_cpp()

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars


@framework.static_only
def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
    """

    Backpropagate the gradients of targets to inputs.

    Args:
        targets (Tensor|list[Tensor]|tuple[Tensor]): The target Tensors.
        inputs (Tensor|list[Tensor]|tuple[Tensor]): The input Tensors.
        target_gradients (Tensor|list[Tensor]|tuple[Tensor], optional): The gradient Tensor
            of targets which has the same shape with targets, If None, ones will
            be created for them.
        no_grad_set (set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
            should be ignored. All Tensors with ``stop_gradient=True`` from all blocks will
            be automatically added into this set. If this parameter is not None, the Tensors or Tensor.names
            in this set will be added to the default set. Default: None.

    Return:
        (list[Tensor]): A list of gradients for inputs
        If an input does not affect targets, the corresponding gradient Tensor
        will be None.

    Examples:

        .. code-block:: python
          :name: code-example
            import paddle
            import paddle.nn.functional as F

            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
            x.stop_gradient=False
            y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
            y = F.relu(y)
            z = paddle.static.gradients([y], x)
            print(z) # [var x@GRAD : LOD_TENSOR.shape(-1, 2, 8, 8).dtype(float32).stop_gradient(False)]
    """
    check_type(targets, 'targets', (framework.Variable, list, tuple),
               'paddle.static.gradients')
    check_type(inputs, 'inputs', (framework.Variable, list, tuple),
               'paddle.static.gradients')
    check_type(target_gradients, 'target_gradients',
               (framework.Variable, list, tuple, type(None)),
               'paddle.static.gradients')
    outs = calc_gradient(targets, inputs, target_gradients, no_grad_set)
    return _as_list(outs)


@framework.static_only
def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None):
    """
    :api_attr: Static Graph

    Backpropagate the gradients of the program and apply the gradients with the given optimizer.

    Args:
        program (Program): The input program.
        optimizer (Optimizer): The optimizer to apply the gradients.
        inputs (Tensor|list[Tensor]|tuple[Tensor], optional): The input Tensors.
            If None, the inputs will be created from the input variables in the given program. Default:None.
        outputs (Tensor|list[Tensor]|tuple[Tensor], optional): The output Tensors.
            If None, the outputs will be created from the output variables in the given program. Default: None.

    Return:
        tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by gradients_with_optimizer and a list of (param, grad) variable pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.static as static

            paddle.enable_static()

            img = static.data(name='image', shape=[None, 784])
            pred = static.nn.fc(x=img, size=10, activation='relu')
            loss = paddle.mean(pred)
            opt = paddle.optimizer.SGD(learning_rate=0.01)
            opt_ops, pram_grads = paddle.fluid.backward.gradients_with_optimizer(static.default_main_program(), opt)
            print(opt_ops)

    """
    check_type(program, 'program', paddle.fluid.Program,
               'paddle.static.gradients_with_optimizer')
    check_type(optimizer, 'optimizer', paddle.optimizer.Optimizer,
               'paddle.static.gradients_with_optimizer')

    if inputs is None or outputs is None:
        in_set = set()
        out_set = set()
        for block in program.blocks:
            for op in block.ops:
                for name in op.input_arg_names:
                    in_set.add(block.vars[name])
                for name in op.output_arg_names:
                    out_set.add(block.vars[name])
        if inputs is None:
            inputs = list(in_set.difference(out_set))
        if outputs is None:
            outputs = list(out_set.difference(in_set))

    grads = gradients(outputs, inputs)

    with program_guard(program, None):
        pram_grads = [(pram, grad) for pram, grad in zip(inputs, grads)
                      if isinstance(pram, paddle.fluid.framework.Parameter)
                      and grad is not None]

        optimize_ops = optimizer.apply_gradients(pram_grads)

    return optimize_ops, pram_grads