backward.py 87.2 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
from __future__ import print_function
T
tangwei12 已提交
16
from .proto import framework_pb2
17

18
from paddle.fluid import framework as framework
19
from paddle.fluid import program_guard
F
update  
fengjiayi 已提交
20
from . import core
F
update  
fengjiayi 已提交
21
import collections
22
import copy
23
import six
24
import logging
M
minqiyang 已提交
25
from .. import compat as cpt
26
from . import unique_name
27
from . import log_helper
L
liym27 已提交
28
import paddle.fluid
29
from .data_feeder import check_type
30
import warnings
M
mapingshuo 已提交
31 32 33 34 35
__all__ = [
    'append_backward',
    'gradients',
]

36 37 38
_logger = log_helper.get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')

M
mapingshuo 已提交
39 40 41 42 43 44 45 46 47 48 49

class ProgramStats(object):
    def __init__(self, block, ops):
        self.block = block
        self.ops = ops
        self.op_deps = {}  # op-> in_ops, out_ops
        self.var_op_deps = {}  # var as input op, var as output op

    def get_input_nodes(self):
        input_names = []
        for name in self.var_op_deps:
50
            if len(self.var_op_deps[name]["var_as_output_ops"]) == 0 and \
T
tangwei12 已提交
51
                    len(self.var_op_deps[name]["var_as_input_ops"]) > 0:
M
mapingshuo 已提交
52 53 54 55 56 57 58 59 60 61 62
                if self.block.var(name).persistable:
                    continue
                input_names.append(name)
        for op in self.ops:
            if op.desc.type() == "read":
                input_names.extend(op.desc.output_arg_names())
        return input_names

    def get_reserved_vars(self):
        var_name = []
        for op in self.ops:
M
mapingshuo 已提交
63
            if op.desc.type() == "seed":
M
mapingshuo 已提交
64 65 66 67 68 69 70 71 72 73 74
                var_name.extend(op.desc.output_arg_names())
        return var_name

    def get_out_of_subgraph_vars(self, begin_op_idx, end_op_idx):
        var_name = []
        for i in range(begin_op_idx, end_op_idx, 1):
            for name in self.ops[i].desc.output_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_input_ops"]:
                        if idx >= end_op_idx:
                            var_name.append(name)
M
mapingshuo 已提交
75 76 77 78 79
            for name in self.ops[i].desc.input_arg_names():
                if name in self.var_op_deps:
                    for idx in self.var_op_deps[name]["var_as_output_ops"]:
                        if idx < begin_op_idx:
                            var_name.append(name)
M
mapingshuo 已提交
80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103
        return var_name

    def is_subgraph(self, var_group1, var_group2):
        # should traverse from var_group1 to var_group2
        # max op idx in var_group2
        # min op idx in var_group1
        min_op_idx = len(self.ops)
        max_op_idx = -1
        for name in var_group1:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group2:
            if name not in self.var_op_deps:
                return False, min_op_idx, max_op_idx
        for name in var_group1:
            op_idx = self.var_op_deps[name]["var_as_input_ops"]
            for idx in op_idx:
                min_op_idx = min(min_op_idx, idx)
        for name in var_group2:
            op_idx = self.var_op_deps[name]["var_as_output_ops"]
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if min_op_idx >= max_op_idx:
            return False, min_op_idx, max_op_idx
J
JZ-LIANG 已提交
104

M
mapingshuo 已提交
105 106
        return True, min_op_idx, max_op_idx

J
JZ-LIANG 已提交
107 108 109 110 111 112 113 114 115 116 117 118 119
    def _update_segment_start(self, min_idx, pre_segment_end_idx):
        """
        persist vars of amp-related cast should be included in recompute segment
        """

        def is_amp_cast(op):
            return op.desc.type() == 'cast' and self.block.var(
                op.desc.input_arg_names()[0]).persistable

        idx_ = min_idx - 1
        updated_min_idx = min_idx
        while idx_ > pre_segment_end_idx:
            if is_amp_cast(self.ops[idx_]):
120
                _logger.info("found amp-cast op: {}, : {}".format(self.ops[
J
JZ-LIANG 已提交
121 122 123 124 125 126 127 128 129
                    idx_].desc.type(), self.ops[idx_].desc.input_arg_names()[
                        0]))
                updated_min_idx = idx_
                idx_ -= 1
            else:
                break

        return updated_min_idx

M
mapingshuo 已提交
130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
    def build_stats(self):
        for i, op in enumerate(self.ops):
            self.op_deps[i] = {"in_ops": [], "out_ops": []}
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.op_deps[i]["in_ops"].extend(self.var_op_deps[name][
                        "var_as_output_ops"])
            for j, name in enumerate(op.desc.input_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_input_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = [i]
                    self.var_op_deps[name]["var_as_output_ops"] = []

            for j, name in enumerate(op.desc.output_arg_names()):
                if name in self.var_op_deps:
                    self.var_op_deps[name]["var_as_output_ops"].extend([i])
                else:
                    self.var_op_deps[name] = {}
                    self.var_op_deps[name]["var_as_input_ops"] = []
                    self.var_op_deps[name]["var_as_output_ops"] = [i]

            for op_idx in self.op_deps[i]["in_ops"]:
                self.op_deps[op_idx]["out_ops"].extend([i])

156 157 158 159
    def sort_checkpoints(self, checkpoints_name):
        sorted_checkpoints = []
        for name in checkpoints_name:
            if name not in self.var_op_deps:
160
                _logger.info(
161 162 163 164 165 166 167 168 169 170 171
                    "Recompute Optimizer: deleted %s from checkpoints, because it is not used in paddle program."
                    % name)
            elif self.var_op_deps[name]["var_as_output_ops"] == []:
                # input nodes
                sorted_checkpoints.append((name, -1))
            else:
                sorted_checkpoints.append(
                    (name, max(self.var_op_deps[name]["var_as_output_ops"])))
        sorted_checkpoints = sorted(sorted_checkpoints, key=lambda x: x[1])
        return [x[0] for x in sorted_checkpoints]

M
mapingshuo 已提交
172 173 174 175 176 177
    def modify_forward_desc_for_recompute(self):
        op_types = [op.desc.type() for op in self.ops]
        if "dropout" not in op_types:
            return

        op_idx = 0
178
        while op_idx < len(self.ops):
M
mapingshuo 已提交
179 180 181 182
            op = self.ops[op_idx]
            if op.desc.type() != "dropout":
                op_idx += 1
                continue
183 184 185 186
            # already insert seed op before dropout
            if op.input('Seed') is not None and len(op.input('Seed')) == 1:
                op_idx += 1
                continue
M
mapingshuo 已提交
187 188 189 190 191 192 193 194 195 196 197
            # add a seed op so that the two dropout op can generate same output
            op_unique_name = unique_name.generate("seed")
            var_unique_name = unique_name.generate_with_ignorable_key(".".join(
                [op_unique_name, 'tmp']))
            added_var = self.block.create_var(
                name=var_unique_name,
                dtype='int32',
                type=core.VarDesc.VarType.LOD_TENSOR,
                persistable=False,
                stop_gradient=False)
            seed = 0 if op.attr("fix_seed") is False else int(op.attr("seed"))
198 199 200 201 202 203 204

            op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
            )
            op_device = ""
            if op.desc.has_attr(op_device_attr_name):
                op_device = op.desc.attr(op_device_attr_name)

205
            # Setting the force_cpu of seed to true will make the output of seed in cpu memory,
206
            # reduce the synchronous copy from GPU to CPU in dropout, and reduce the communication hang
M
mapingshuo 已提交
207 208 209 210 211
            added_op = self.block._insert_op(
                index=op.idx,
                type='seed',
                inputs={},
                outputs={'Out': [added_var]},
212 213 214 215 216
                attrs={
                    'seed': seed,
                    'op_device': op_device,
                    'force_cpu': True
                })
M
mapingshuo 已提交
217 218 219 220 221 222 223 224
            self.ops.insert(op_idx, added_op)
            # modify dropout op desc so that it accept a seed var as input
            op.desc.set_input("Seed", [var_unique_name])
            op.desc.remove_attr("fix_seed")
            op.desc.remove_attr("seed")
            self.block._sync_with_cpp()
            op_idx += 2

M
mapingshuo 已提交
225 226 227 228 229 230 231 232 233 234 235 236 237

def _pretty_op_desc_(op_desc, prefix):
    out_s = "%s\tname:[%s]\n%s    \tinputs:[%s]\n%s    \toutputs:[%s]" % \
            (prefix + "_op", str(op_desc.type()), prefix + "_input", " ".join(op_desc.input_arg_names()),
             prefix + "_output", " ".join(op_desc.output_arg_names()))
    return out_s


def _add_needed_descs_to_block(descs, block, main_block, in_memory_vars):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = \
T
tangwei12 已提交
238
        core.op_proto_and_checker_maker.kOpRoleAttrName()
M
mapingshuo 已提交
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        if isinstance(desc, framework.Operator):
            desc = desc.desc
        if isinstance(desc, tuple):
            desc = desc[0]
        is_needed = False
        for name in desc.output_arg_names():
            if main_block.has_var(name) and main_block.var(name).persistable:
                continue
            if name not in in_memory_vars:
                is_needed = True
        if is_needed:
            new_op_desc = block.desc.append_op()
            new_op_desc.copy_from(desc)
            new_op_desc._set_attr(op_role_attr_name, backward)
255 256
            if desc.has_attr('op_device'):
                new_op_desc._set_attr('op_device', desc.attr('op_device'))
M
mapingshuo 已提交
257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275
            result_descs.append(new_op_desc)
    return result_descs


def _add_descs_to_block(descs, block):
    if len(descs) == 0:
        return []
    result_descs = []
    op_role_attr_name = \
        core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for desc in descs:
        if isinstance(desc, framework.Operator):
            desc = desc.desc
        if isinstance(desc, tuple):
            desc = desc[0]
        new_op_desc = block.desc.append_op()
        new_op_desc.copy_from(desc)
        new_op_desc._set_attr(op_role_attr_name, backward)
276 277
        if desc.has_attr('op_device'):
            new_op_desc._set_attr('op_device', desc.attr('op_device'))
M
mapingshuo 已提交
278 279 280 281 282 283 284 285 286 287 288 289 290
        result_descs.append(new_op_desc)
    return result_descs


def _find_loss_op_(loss):
    for op in reversed(loss.block.ops):
        assert isinstance(op, framework.Operator)
        if len(op.output_arg_names) == 1 and op.output_arg_names[
                0] == loss.name:
            loss.op = op
            break
    if loss.op is None:
        raise ValueError("loss.op is None. Should not happend")
291 292


293 294
def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
295
    Traverse all ops in op_descs[begin_idx : end_idx],
296 297
    if any op has inputs/outputs named "old_name", rename it as 'new_name'
    """
F
update  
fengjiayi 已提交
298 299 300
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
301
        end_idx = len(op_descs)
302 303 304 305 306 307 308 309 310 311 312 313 314
    if isinstance(op_descs, (list, tuple)):
        for i in range(begin_idx, end_idx):
            op_desc = op_descs[i]
            if isinstance(op_desc, tuple):
                op_desc = op_desc[0]
            op_desc._rename_input(old_name, new_name)
            op_desc._rename_output(old_name, new_name)
    if isinstance(op_descs, collections.OrderedDict):
        for key, value in op_descs.items():
            if isinstance(value, (list, tuple)):
                for op_desc in value:
                    op_desc._rename_input(old_name, new_name)
                    op_desc._rename_output(old_name, new_name)
F
update  
fengjiayi 已提交
315 316


F
fengjiayi 已提交
317
def _create_op_desc_(op_type, inputs, outputs, attrs):
318 319 320
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
    """
F
fengjiayi 已提交
321 322
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
M
minqiyang 已提交
323
    for para, args in six.iteritems(inputs):
324 325 326 327 328
        op_desc.set_input(
            para,
            list(
                map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg,
                    args)))
M
minqiyang 已提交
329
    for para, args in six.iteritems(outputs):
330 331 332 333 334
        op_desc.set_output(
            para,
            list(
                map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg,
                    args)))
Y
yuyang18 已提交
335 336

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
337
    op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
Y
yuyang18 已提交
338 339 340 341

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
342 343
    if op_device_attr_name not in attrs:
        attrs[op_device_attr_name] = ""
M
minqiyang 已提交
344
    for name, val in six.iteritems(attrs):
F
fengjiayi 已提交
345 346 347
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
W
Wu Yi 已提交
348
            op_desc._set_attr(name, val)
F
fengjiayi 已提交
349 350 351
    return op_desc


M
mapingshuo 已提交
352 353 354 355 356 357 358 359 360 361
def _create_loss_op_desc_(loss):
    op_desc = _create_op_desc_(
        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
            "shape": [1],
            "value": 1.0,
            "dtype": loss.dtype,
            "force_cpu": False,
            core.op_proto_and_checker_maker.kOpRoleAttrName():
            int(core.op_proto_and_checker_maker.OpRole.Backward) |
            int(core.op_proto_and_checker_maker.OpRole.Loss),
362 363
            core.op_proto_and_checker_maker.kOpDeviceAttrName():
            loss.op.attr(core.op_proto_and_checker_maker.kOpDeviceAttrName())
M
mapingshuo 已提交
364 365 366 367
        })
    return op_desc


368
def _infer_var_data_type_shape_(grad_var_name, block):
369
    """
370
    Infer the data type and shape of given grad variable
371
    """
M
minqiyang 已提交
372 373 374 375
    grad_var = block.desc.find_var(cpt.to_bytes(grad_var_name))
    fwd_name = _strip_grad_suffix_(grad_var_name)
    if block.desc.has_var_recursive(cpt.to_bytes(fwd_name)):
        fwd_var = block.desc.find_var_recursive(cpt.to_bytes(fwd_name))
F
fengjiayi 已提交
376
        grad_var.set_dtype(fwd_var.dtype())
377
        grad_var.set_shape(fwd_var.shape())
F
fengjiayi 已提交
378
    else:
379 380 381 382
        # TODO(jiabin): Maybe we should not to this to cause some unexpected error on dtype
        warnings.warn(
            "Set grad var: {} dtype to default FP32, since we can't find its related forward var".
            format(grad_var_name))
383
        grad_var.set_dtype(core.VarDesc.VarType.FP32)
F
fengjiayi 已提交
384 385


F
fengjiayi 已提交
386
def _all_in_set_(cands, s):
387 388 389
    """
    Test if all elements of 'cands' are in set 's'
    """
F
fengjiayi 已提交
390 391
    if len(cands) == 0:
        return False
F
fengjiayi 已提交
392 393 394 395 396 397
    for c in cands:
        if not c in s:
            return False
    return True


398 399 400 401 402 403
def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
M
minqiyang 已提交
404 405
    literal_set = cpt.to_text(s)
    literal_cands = cpt.to_text(cands)
M
minqiyang 已提交
406 407
    for c in literal_cands:
        if c in literal_set:
408 409 410 411
            return True
    return False


F
fengjiayi 已提交
412
def _strip_grad_suffix_(name):
413
    """
M
mapingshuo 已提交
414
    Strip the grad suffix from the given variable name
415 416 417
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
M
minqiyang 已提交
418
    name = cpt.to_text(name)
M
minqiyang 已提交
419
    pos = name.find(core.grad_var_suffix())
420 421 422
    new_name = name[:pos] if pos != -1 else name
    new_pos = name.rfind('grad/')
    return new_name[new_pos + 5:] if new_pos != -1 else new_name
F
fengjiayi 已提交
423 424 425


def _append_grad_suffix_(name):
426 427 428 429
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
M
minqiyang 已提交
430
    return cpt.to_text(name) + core.grad_var_suffix()
F
fengjiayi 已提交
431 432


T
tangwei12 已提交
433 434 435 436 437
def _accumulate_gradients_by_sum_op_(var_name,
                                     renamed_vars,
                                     pending_sum_ops,
                                     op_idx,
                                     op_device=""):
438 439 440 441 442 443
    """
    Use sum op to accumulate_gradients, the gradients are stored in renamed_vars.
    """
    if op_idx not in pending_sum_ops.keys():
        pending_sum_ops[op_idx] = []
    pending_sum_ops[op_idx].append(
T
tangwei12 已提交
444 445 446 447
        _create_op_desc_("sum", {"X": renamed_vars[var_name]}, {
            "Out": [var_name]
        }, {"use_mkldnn": False,
            "op_device": op_device}))
448 449 450
    renamed_vars[var_name] = [var_name]


T
tangwei12 已提交
451 452 453 454 455
def _accumulate_gradients_by_add_ops_(var_name,
                                      renamed_vars,
                                      pending_sum_ops,
                                      op_idx,
                                      op_device=""):
456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471
    """
    Use several inplace add op to accumulate_gradients, the gradients are stored in renamed_vars.
    """
    if op_idx not in pending_sum_ops.keys():
        pending_sum_ops[op_idx] = []
    out_name = renamed_vars[var_name][0]
    for i in range(1, len(renamed_vars[var_name])):
        x_name = out_name
        y_name = renamed_vars[var_name][i]
        if i != len(renamed_vars[var_name]) - 1:
            out_name = var_name + '@ADD@' + str(i)
        else:
            out_name = var_name
        pending_sum_ops[op_idx].append(
            _create_op_desc_("grad_add", {"X": [x_name],
                                          "Y": [y_name]}, {"Out": [out_name]},
T
tangwei12 已提交
472 473
                             {"use_mkldnn": False,
                              "op_device": op_device}))
474 475 476
    renamed_vars[var_name] = [var_name]


477
def _addup_repetitive_outputs_(op_descs, block_idx):
478 479
    """
    In backward part, an variable may be the output of more than one ops.
F
fengjiayi 已提交
480 481
    And one op may yield its multiple outputs to the same variable.
    In these cases, the variable should be the accumulation of all the outputs.
482 483
    `sum_op`s are added to implement the accumulate.
    """
484
    _MAX_ADD_NUM_ = framework._global_flags()['FLAGS_max_inplace_grad_add']
485 486
    #pending_sum_ops = []
    pending_sum_ops = collections.OrderedDict()
F
update  
fengjiayi 已提交
487
    var_rename_count = collections.defaultdict(int)
F
fengjiayi 已提交
488
    renamed_vars = collections.defaultdict(list)
489
    renamed_var_start_idx = collections.defaultdict(list)
490
    var_device = collections.defaultdict(str)
F
fengjiayi 已提交
491
    for idx, op_desc in enumerate(op_descs):
T
tangwei12 已提交
492 493 494 495 496
        op_device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName(
        )
        op_device = ""
        if op_desc.has_attr(op_device_attr_name):
            op_device = op_desc.attr(op_device_attr_name)
F
update  
fengjiayi 已提交
497
        for var_name in op_desc.input_arg_names():
M
mapingshuo 已提交
498 499
            if "@GRAD" not in var_name:
                continue
F
fengjiayi 已提交
500
            if len(renamed_vars[var_name]) > 1:
501
                if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
W
WangXi 已提交
502 503 504
                    _accumulate_gradients_by_sum_op_(var_name, renamed_vars,
                                                     pending_sum_ops, idx,
                                                     var_device[var_name])
505
                else:
W
WangXi 已提交
506 507 508
                    _accumulate_gradients_by_add_ops_(var_name, renamed_vars,
                                                      pending_sum_ops, idx,
                                                      var_device[var_name])
509

F
update  
fengjiayi 已提交
510
        for param_idx, param_name in enumerate(op_desc.output_names()):
F
fengjiayi 已提交
511 512
            arg_names = op_desc.output(param_name)
            for arg_idx, var_name in enumerate(arg_names):
M
mapingshuo 已提交
513 514
                if "@GRAD" not in var_name:
                    continue
T
tangwei12 已提交
515
                # if "@RENAME@" in var_name:
M
mapingshuo 已提交
516
                #    continue
F
fengjiayi 已提交
517 518 519 520 521 522 523
                if var_name == core.empty_var_name(
                ) or var_name in op_desc.input_arg_names():
                    # empty variable or inplace op
                    continue
                if len(renamed_vars[var_name]) == 0:
                    # it's the first time we get the variable
                    renamed_vars[var_name] = [var_name]
524
                    renamed_var_start_idx[var_name] = idx
F
fengjiayi 已提交
525 526
                else:
                    if len(renamed_vars[var_name]) == 1:
527
                        new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \
F
fengjiayi 已提交
528 529 530 531
                            str(var_rename_count[var_name])
                        var_rename_count[var_name] += 1
                        # rename original var_name
                        renamed_vars[var_name][0] = new_name
532 533 534 535 536 537
                        # before change: _rename_arg_(op_descs, var_name,
                        #                             new_name, 0, idx)
                        # rename arg from idx of the first appearance
                        # in backward, not always from 0
                        _rename_arg_(op_descs, var_name, new_name,
                                     renamed_var_start_idx[var_name], idx)
F
fengjiayi 已提交
538 539
                        _rename_arg_(pending_sum_ops, var_name, new_name)

F
update  
fengjiayi 已提交
540 541 542 543 544 545 546 547 548 549 550 551 552
                        for p in op_desc.output_names()[:param_idx]:
                            p_arg_names = op_desc.output(p)
                            if var_name in p_arg_names:
                                op_desc.set_output(p, [
                                    new_name if x == var_name else x
                                    for x in p_arg_names
                                ])

                        arg_names = [
                            new_name if x == var_name else x
                            for x in arg_names[:arg_idx]
                        ] + arg_names[arg_idx:]

553
                    new_name = var_name + "@RENAME@block" + str(block_idx) + "@" + \
T
tangwei12 已提交
554
                        str(var_rename_count[var_name])
F
fengjiayi 已提交
555
                    var_rename_count[var_name] += 1
F
fengjiayi 已提交
556 557 558
                    arg_names[arg_idx] = new_name
                    op_desc.set_output(param_name, arg_names)
                    renamed_vars[var_name].append(new_name)
W
WangXi 已提交
559
                    # record the latest device
560
                    var_device[var_name] = op_device
F
update  
fengjiayi 已提交
561

M
minqiyang 已提交
562
    for var_name, inputs in six.iteritems(renamed_vars):
563 564
        if len(renamed_vars[var_name]) > 1:
            if len(renamed_vars[var_name]) > _MAX_ADD_NUM_:
565 566 567
                _accumulate_gradients_by_sum_op_(
                    var_name, renamed_vars, pending_sum_ops,
                    len(op_descs), var_device[var_name])
568
            else:
569 570 571
                _accumulate_gradients_by_add_ops_(
                    var_name, renamed_vars, pending_sum_ops,
                    len(op_descs), var_device[var_name])
572

F
fengjiayi 已提交
573
    # sum_op descs are sorted according to their insert position
574 575 576 577 578 579 580 581 582 583
    for key, value in collections.OrderedDict(
            reversed(list(pending_sum_ops.items()))).items():

        # NOTE(zhiqiu): Since reversed, the idx of op_descs to be inserted will remains correct.
        # For example, [0, 1, 2], and we want to insert 'a' at idx 1, 'b' at idx 2, and the expected result is [0, 1, 'a', 2, 'b'].
        # If reversed, we first insert 'b' at idx 2, it becomes [0, 1, 2, 'b'], and then insert 'a' at idx 1, it becomes [0, 1, 'a', 2, 'b'].
        # If not reverse, we first insert 'a' at idx 1, it becomes [0, 1, 'a', 2], and then insert 'b' at idx 2, it becomes [0, 1, 'a', 'b', 2].
        idx = key
        for i, op in enumerate(value):
            op_descs.insert(idx + i, op)
F
fengjiayi 已提交
584 585 586 587 588

    return op_descs


def _remove_no_grad_branch_(op_descs, no_grad_set):
589 590 591 592
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
F
fengjiayi 已提交
593
        2. all grad inputs of the grad op are in 'no_grad_set'
594
    """
F
fengjiayi 已提交
595 596

    def _op_can_be_removed_(op_desc, no_grad_set):
F
fengjiayi 已提交
597 598
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
F
fengjiayi 已提交
599
            return True
600 601 602 603
        if _all_in_set_([
                name for name in op_desc.input_arg_names()
                if name.find(core.grad_var_suffix()) != -1
        ], no_grad_set):
F
fengjiayi 已提交
604
            no_grad_set.update(out_arg_names)
F
fengjiayi 已提交
605 606 607
            return True
        return False

F
fengjiayi 已提交
608
    # Remove ops whose outputs are all in no_grad_dict
609 610 611 612
    op_descs = [
        op_desc for op_desc in op_descs
        if not _op_can_be_removed_(op_desc, no_grad_set)
    ]
F
fengjiayi 已提交
613 614
    # Insert fill_zeros_like_op
    to_insert = []
F
fengjiayi 已提交
615
    for idx, op_desc in enumerate(op_descs):
F
fengjiayi 已提交
616
        for arg in op_desc.input_arg_names():
M
mapingshuo 已提交
617
            # arg is a gradient var name and arg should not have gradient
F
fengjiayi 已提交
618
            if core.grad_var_suffix() in arg and arg in no_grad_set:
619
                x_in = _strip_grad_suffix_(arg)
M
mapingshuo 已提交
620 621
                # the reason should be: arg can be input of another grad op
                # and the op is a not-to-remove op
622 623
                to_insert.append((_create_op_desc_(
                    "fill_zeros_like", {"X": [x_in]}, {"Out": [arg]}, {}), idx))
F
fengjiayi 已提交
624

625
    list([op_descs.insert(p[1], p[0]) for p in reversed(to_insert)])
F
fengjiayi 已提交
626 627 628 629

    return op_descs


C
chengduo 已提交
630 631 632 633 634 635 636 637 638 639 640 641 642 643 644
def _find_not_need_ops(grad_op_descs, forward_ops, input_grad_names_set):
    """
    Pruning Program with Structural Analysis Method of Computational Graph.
    The nodes of the computational graph composed of backward OPS should be
    interconnected. If there are unconnected sub-graphs in the computational graph,
    these sub-graphs should be cut off.

    Args:
        grad_op_descs(list[core.OpDesc]): The candidate backward OpDescs.
        forward_ops(list[Operator]): The forward ops.
        input_grad_names_set(set): this set is used to store the gradients' name
            which is generated by backward ops, and input_grad_names_set can help
            to prune the unnecessary backward ops.

    Return:
645
        (set[core.OpDesc]): A set of OpDescs which should be pruned.
C
chengduo 已提交
646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744
    """

    class Var(object):
        def __init__(self, var_name):
            self.var_name = var_name
            self.gen_op = None
            self.pendding_ops = []

        def set_gen_op(self, gen_op):
            assert isinstance(gen_op, Op)
            assert self.gen_op is None
            self.gen_op = gen_op

        def add_pending_op(self, op):
            assert isinstance(op, Op)
            self.pendding_ops.append(op)

    class Op(object):
        def __init__(self, op_desc):
            self.op_desc = op_desc
            self.inputs = []
            self.outputs = []

        def insert_input(self, var):
            assert isinstance(var, Var)
            self.inputs.append(var)

        def insert_output(self, var):
            assert isinstance(var, Var)
            self.outputs.append(var)

    var_versions = dict()

    def _create_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        else:
            var_versions[name].append(Var(name))
        return var_versions[name][-1]

    def _create_or_get_last_version_node(name):
        if name not in var_versions.keys():
            var_versions[name] = [Var(name)]
        return var_versions[name][-1]

    def _create_op_node(op_desc):
        op_node = Op(op_desc)
        for input in op_desc.input_arg_names():
            var = _create_or_get_last_version_node(name=input)
            var.add_pending_op(op_node)
            op_node.insert_input(var)
        for output in op_desc.output_arg_names():
            var = _create_node(name=output)
            var.set_gen_op(op_node)
            op_node.insert_output(var)
        return op_node

    # Record the forward vars
    forward_vars_set = set() if input_grad_names_set is None else set(
        input_grad_names_set)
    for op in forward_ops:
        forward_vars_set.update(op.desc.input_arg_names())
        forward_vars_set.update(op.desc.output_arg_names())

    # Record the vars which are created during backward and is not generated by op.
    backward_vars_set = set()
    # special_op_nodes is the candidate sub-graph head node.
    special_op_nodes = set()
    for op_desc in grad_op_descs:
        input_set = set(op_desc.input_arg_names())
        # The new_vars are created during backward and is not generated by op.
        new_vars = input_set - forward_vars_set - backward_vars_set
        backward_vars_set.update(op_desc.output_arg_names())

        op_node = _create_op_node(op_desc)
        if len(new_vars) == len(input_set):
            special_op_nodes.add(op_node)

    not_need_op_descs = []
    # Start traversing all candidate sub-graph headers to check whether
    # they are connected to backward computational graphs, and if they are
    # not, list them in not_need_op_descs
    for special_op_node in special_op_nodes:
        op_list = [special_op_node]
        ready_vars = set(special_op_node.inputs)
        remove_ops = True
        candidate_ops = [special_op_node]
        while len(candidate_ops) > 0:
            op_node = candidate_ops.pop(0)
            if _all_in_set_(op_node.inputs, ready_vars):
                for out_var in op_node.outputs:
                    candidate_ops.extend(out_var.pendding_ops)
                    op_list.extend(out_var.pendding_ops)
                ready_vars.update(op_node.outputs)
            else:
                remove_ops = False
                break
        if remove_ops:
            not_need_op_descs.extend([node.op_desc for node in op_list])
745 746 747
    not_need_op_descs_set = set(not_need_op_descs)
    grad_op_descs_set = set(grad_op_descs)
    # If a backward computational graph is simply one sub-graph header, the
748
    # not_need_op_descs will be whole graph, this IF clause avoids it.
749 750 751
    if grad_op_descs_set == not_need_op_descs_set:
        return set()
    return not_need_op_descs_set
C
chengduo 已提交
752 753


Y
Yang Yang 已提交
754 755
def serialize_op_decs(op_desc):
    protostr = op_desc.serialize_to_string()
M
minqiyang 已提交
756
    proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))
Y
Yang Yang 已提交
757 758 759
    return proto.__str__()


M
mapingshuo 已提交
760 761 762 763 764 765 766 767 768 769 770 771 772 773 774
def _append_backward_ops_with_checkpoints_(
        block, ops, target_block, no_grad_dict, grad_to_var, checkpoints):
    """
    Create grad ops with forward ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose forward recomputation backward ops need to be added
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int) block index
            val(str): corresponding forward variable name
        checkpoints: variables that a user defined as checkpoint for forward recomputation

    Algorithms:
M
mapingshuo 已提交
775
        0) deal with forward recomputing program descs
M
mapingshuo 已提交
776 777 778 779 780
        1) find ops between checkpoints, i.e. recompute_segments
        2) go through all forward ops and induct all variables that will be hold in memory
            a. variables that are used across segments will be held in memory
            b. output of dropout op will be held in memory
            c. input variables will be held in memory
M
mapingshuo 已提交
781 782 783
        3) go through each recompute_segments, add backward ops with forward recomputation
            a. add ops in current recompute_segment as forward recomputation ops
            b. rename all non-checkpoint variables in recomputation ops
M
mapingshuo 已提交
784 785
            c. add backward ops of current recomputation ops
            d. add sum op for repetitive_outputs
M
mapingshuo 已提交
786 787
        4) remove no grad branch as it is in _remove_no_grad_branch_
        5) Note1: all appended ops' OpRole are Backward
M
mapingshuo 已提交
788 789
        6) Note2: all variables with new name should be returned so that _append_backward_vars_ can be called
        7) Note3: current forward recomputation backpropagation does not handle programs with subblock
M
mapingshuo 已提交
790
    """
M
mapingshuo 已提交
791 792

    checkpoints_name = [x.name for x in checkpoints]
793
    checkpoints_name = list(set(checkpoints_name))
M
mapingshuo 已提交
794 795
    local_block = block.program._create_block()
    buffer_block = block.program._create_block()
796
    # 0) deal with forward recomputing program descs
M
mapingshuo 已提交
797
    program_stat = ProgramStats(block, ops)
M
mapingshuo 已提交
798
    program_stat.modify_forward_desc_for_recompute()
M
mapingshuo 已提交
799
    program_stat.build_stats()
M
mapingshuo 已提交
800 801

    # 1) find ops between checkpoints, i.e. recompute_segments
802
    checkpoints_name = program_stat.sort_checkpoints(checkpoints_name)
M
mapingshuo 已提交
803 804
    segments = []

805
    if len(checkpoints_name) == 1:
M
mapingshuo 已提交
806 807 808 809 810 811 812
        # only one checkpoint
        max_op_idx = -1
        var_group = [checkpoints_name[0]]
        for name in var_group:
            if name not in program_stat.var_op_deps:
                break
            op_idx = program_stat.var_op_deps[name]["var_as_output_ops"]
J
JZ-LIANG 已提交
813
            # only count the last generate op
M
mapingshuo 已提交
814 815 816 817 818 819
            for idx in op_idx:
                max_op_idx = max(max_op_idx, idx)
        if max_op_idx > 0:
            segments.append([0, max_op_idx + 1])
    else:
        start_idx = 0
J
JZ-LIANG 已提交
820
        pre_segment_end_idx = -1
M
mapingshuo 已提交
821 822 823
        while True:
            if start_idx >= len(checkpoints_name) - 1:
                break
J
JZ-LIANG 已提交
824 825
            # min_idx: checkpoint_1' s input op
            # max_idx: checkpoint_2' s output op
M
mapingshuo 已提交
826 827 828 829
            flag, min_idx, max_idx = program_stat.is_subgraph(
                [checkpoints_name[start_idx]],
                [checkpoints_name[start_idx + 1]])
            if flag:
J
JZ-LIANG 已提交
830 831 832
                # max_idx + 1 since the exact and used segment end idx is max_idx
                min_idx = program_stat._update_segment_start(
                    min_idx, pre_segment_end_idx)
M
mapingshuo 已提交
833
                segments.append([min_idx, max_idx + 1])
834 835 836
            else:
                _logger.info("Could not recompute op range [{}] - [{}] ".format(
                    min_idx, max_idx + 1))
J
JZ-LIANG 已提交
837

M
mapingshuo 已提交
838 839 840 841 842 843
            start_idx += 1

    if segments != [] and segments[0][0] != 0:
        recompute_segments = [[0, segments[0][0]]] + segments
    else:
        recompute_segments = segments
M
mapingshuo 已提交
844

J
JZ-LIANG 已提交
845
    for i, (idx1, idx2) in enumerate(recompute_segments):
846 847
        _logger.info("recompute segment[{}]".format(i))
        _logger.info("segment start op: [{}]: [{}]".format(ops[idx1].desc.type(
J
JZ-LIANG 已提交
848
        ), ops[idx1].desc.input_arg_names()))
849
        _logger.info("segment end op: [{}]: [{}]".format(ops[
J
JZ-LIANG 已提交
850
            idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names()))
851 852
        _logger.info("recompute segment[{}]".format(i))
        _logger.info("segment start op: [{}]: [{}]".format(ops[idx1].desc.type(
J
JZ-LIANG 已提交
853
        ), ops[idx1].desc.input_arg_names()))
854
        _logger.info("segment end op: [{}]: [{}]".format(ops[
J
JZ-LIANG 已提交
855 856
            idx2 - 1].desc.type(), ops[idx2 - 1].desc.input_arg_names()))

M
mapingshuo 已提交
857
    # 2) go through all forward ops and induct all variables that will be hold in memory
M
mapingshuo 已提交
858
    vars_should_be_hold = []
859
    # a. variables that are used across segments will be held in memory
M
mapingshuo 已提交
860 861 862
    for segment in recompute_segments:
        vars_should_be_hold.extend(
            program_stat.get_out_of_subgraph_vars(segment[0], segment[1]))
J
JZ-LIANG 已提交
863 864

    cross_vars = set(vars_should_be_hold) - set(checkpoints_name)
865
    _logger.info("found [{}] vars which cross recompute segment: [{}], better checkpoints might be set to reduce those vars".format( \
J
JZ-LIANG 已提交
866 867
    len(cross_vars), cross_vars))

M
mapingshuo 已提交
868
    # b. output of seed op should be kept in memory
M
mapingshuo 已提交
869
    vars_should_be_hold.extend(program_stat.get_reserved_vars())
M
mapingshuo 已提交
870
    # c. input variables are checkpoints
M
mapingshuo 已提交
871 872 873
    vars_should_be_hold.extend(program_stat.get_input_nodes())
    vars_should_be_hold = list(set(vars_should_be_hold))

M
mapingshuo 已提交
874
    # 3) go through each recompute_segments, add backward ops with forward recomputation
M
mapingshuo 已提交
875 876 877 878 879 880
    grad_op_descs = []
    var_name_dict = {}

    vars_in_memory = vars_should_be_hold + checkpoints_name

    max_calculated_op_position = len(ops)
881
    device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
M
mapingshuo 已提交
882 883 884 885 886 887 888 889 890
    if recompute_segments == []:
        gap_ops = ops[0:max_calculated_op_position]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception("Recompute don't support ops with sub_block"
                                "invoke op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, cpt.to_text(no_grad_dict[block.idx]), [])
891 892 893 894 895
            # Set device for grad_op according to forward Op
            if op.desc.has_attr(device_attr_name):
                op_device = op.desc.attr(device_attr_name)
                for op_desc in grad_op_desc:
                    op_desc._set_attr(device_attr_name, op_device)
M
mapingshuo 已提交
896 897 898 899 900 901 902 903 904 905 906 907 908 909
            added_descs = _add_descs_to_block(grad_op_desc, local_block)
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

    for i, segment in enumerate(recompute_segments[::-1]):
        gap_ops = ops[segment[1]:max_calculated_op_position]
        max_calculated_op_position = segment[0]
        for op in reversed(gap_ops):
            if op.has_attr("sub_block"):
                raise Exception("Recompute don't support ops with sub_block"
                                "invoke op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op.desc, cpt.to_text(no_grad_dict[block.idx]), [])
910 911 912 913 914
            # Set device for grad_op according to forward Op
            if op.desc.has_attr(device_attr_name):
                op_device = op.desc.attr(device_attr_name)
                for op_desc in grad_op_desc:
                    op_desc._set_attr(device_attr_name, op_device)
M
mapingshuo 已提交
915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936
            added_descs = _add_descs_to_block(grad_op_desc, local_block)
            grad_op_descs.extend(added_descs)
            grad_to_var.update(op_grad_to_var)

        ff_ops = ops[segment[0]:segment[1]]
        var_suffix = ".subprog_%d" % i

        for op in ff_ops:
            if op.has_attr("sub_block"):
                raise Exception("Recompute don't support ops with sub_block"
                                "invoke op: %s" %
                                _pretty_op_desc_(op.desc, "with_sub_block"))
            input_and_output_names = []
            input_and_output_names.extend(op.desc.input_arg_names())
            input_and_output_names.extend(op.desc.output_arg_names())
            for name in input_and_output_names:
                if block.var(name).persistable or name in checkpoints_name:
                    continue
                if name in vars_should_be_hold:
                    continue
                if name not in var_name_dict:
                    var_name_dict[name] = name + var_suffix
937 938 939 940 941 942 943 944 945 946 947

                    # we should create the rename var in subprog, otherwise its VarType will be BOOL
                    ref_var = block.program.global_block().var(name)
                    block.create_var(
                        name=var_name_dict[name],
                        shape=ref_var.shape,
                        dtype=ref_var.dtype,
                        type=ref_var.type,
                        persistable=ref_var.persistable,
                        stop_gradient=ref_var.stop_gradient)

M
mapingshuo 已提交
948
        # 3.a. add ops in current recompute_segment as forward recomputation ops
M
mapingshuo 已提交
949 950 951 952
        buffer_descs = _add_needed_descs_to_block(ff_ops, buffer_block, block,
                                                  vars_in_memory)
        added_descs = _add_descs_to_block(ff_ops, local_block)

M
mapingshuo 已提交
953
        # 3.b. rename all non-checkpoint variables in recomputation ops
M
mapingshuo 已提交
954 955 956 957 958 959
        for key in var_name_dict:
            _rename_arg_(buffer_descs, key, var_name_dict[key])

        # added_descs should be in grad_op_descs because it is backward op desc
        grad_op_descs.extend(buffer_descs)

960
        # 3.c. add backward ops for all ops in current segment
M
mapingshuo 已提交
961 962 963
        for op_desc in reversed(added_descs):
            grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
                op_desc, cpt.to_text(no_grad_dict[block.idx]), [])
964 965 966 967 968 969 970

            # Set device for grad_op according to forward Op
            if op_desc.has_attr(device_attr_name):
                op_device = op_desc.attr(device_attr_name)
                for g_op_desc in grad_op_desc:
                    g_op_desc._set_attr(device_attr_name, op_device)

M
mapingshuo 已提交
971 972 973 974 975
            for key in var_name_dict:
                _rename_arg_(grad_op_desc, key, var_name_dict[key])
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)

M
mapingshuo 已提交
976
    # 3.d. add sum op for repetitive_outputs
977
    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs, block.idx)
M
mapingshuo 已提交
978
    # 4) remove no grad branch as it is in _remove_no_grad_branch_
M
mapingshuo 已提交
979 980 981 982 983 984
    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])
    added_descs = _add_descs_to_block(grad_op_descs, target_block)
    return program_stat, checkpoints_name, vars_should_be_hold, recompute_segments


985 986 987 988 989
def _get_sub_block_path(sub_block,
                        sub_block_op_desc,
                        no_grad_set,
                        op_path_dict,
                        sub_block_target_names=None):
990 991
    """
    Get output vars in subblock which will be assigned to parent block.
992 993 994 995 996 997 998 999 1000 1001 1002 1003
    It is used to find the grad path in subblock.

    Args:
        sub_block(Block): The sub-block in which to get op path.
        sub_block_op_desc: The op desc of the sub-block op such as 'while', 'conditional_block' and 'recurrent'.
        no_grad_set(set): The set of no grad var name. no_grad_set will be changed.
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        sub_block_target_names(set): Target var names of sub-block.
    Return:
        The forward op path of sub-block corresponding to backward op.
1004
    """
1005

1006 1007 1008
    assert sub_block_op_desc.has_attr(
        "sub_block") and sub_block.idx == sub_block_op_desc._block_attr_id(
            "sub_block")
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
    assert isinstance(sub_block_target_names, (set, type(None)))

    if sub_block_target_names is None:
        sub_block_target_names = sub_block_op_desc.output_arg_names

    # TODO(huihuangzheng): add support for recurrent op.
    if sub_block_op_desc.type in ["conditional_block", "while"]:
        # Step1: get the output vars in sub-block
        sub_outputs = [
            sub_block._var_recursive(var) for var in sub_block_target_names
        ]
        for var in sub_block_target_names:
1021
            for op_desc in sub_block.ops:
1022
                if var in op_desc.output_arg_names:
1023
                    for name in op_desc.input_arg_names:
1024
                        sub_outputs.append(sub_block._var_recursive(name))
1025

1026 1027
        # Step2: find op path of sub-block
        is_while = sub_block_op_desc.type in ["while"]
1028
        sub_block_op_path = _find_op_path_(sub_block, sub_outputs, [],
1029
                                           no_grad_set, op_path_dict, is_while)
1030 1031 1032 1033
        return sub_block_op_path
    return sub_block.ops


1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046
def _is_grad_op_(op):
    op_maker = core.op_proto_and_checker_maker
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    if op_maker.kOpRoleVarAttrName() in op.attr_names and \
            int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):
        return True
    return False


def _rename_grad_name_(name, grad_order):
    return 'grad/' * grad_order + name


1047 1048
def _append_backward_ops_(block,
                          ops,
F
fengjiayi 已提交
1049 1050 1051
                          target_block,
                          no_grad_dict,
                          grad_to_var,
1052
                          callbacks=None,
1053
                          input_grad_names_set=None,
1054 1055
                          op_path_dict=None,
                          distop_context=None):
1056 1057 1058 1059 1060
    """
    Create all grad ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
1061
        ops(Op): the forward operators whose backward ops need to be added
1062
        target_block(Block): the block which is going to hold new generated grad ops
1063
        no_grad_dict(dict):
1064
            key(int)  block index
T
tianshuo78520a 已提交
1065
            val(set) a set of variable names. These variables have no gradient
1066 1067 1068
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
C
chengduo 已提交
1069 1070 1071 1072
        callbacks(callable object): a callable object used to decorate new generated grad ops
        input_grad_names_set(set): this set is used to store the gradients' name which is
            generated by backward ops, and input_grad_names_set can help to prune the unnecessary
            backward ops.
1073 1074 1075
        op_path_dict(dict): op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
1076
    """
Y
Yang Yang 已提交
1077
    if callbacks is not None:
1078
        assert (isinstance(callbacks, (list, tuple)))
Y
Yang Yang 已提交
1079 1080 1081
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")
F
fengjiayi 已提交
1082

F
fengjiayi 已提交
1083
    # grad_op_descs holds created grad_op, and will be appended to target_block
F
fengjiayi 已提交
1084 1085
    grad_op_descs = []
    program = block.program
1086

1087 1088
    rename_var_map = {}

1089
    # add grad_op_desc by reversed ops
1090
    for op in reversed(ops):
F
fengjiayi 已提交
1091 1092 1093
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
W
Wu Yi 已提交
1094
            sub_block = program.block(op._block_attr_id("sub_block"))
W
Wu Yi 已提交
1095
            grad_sub_block = program._create_block()
W
Wu Yi 已提交
1096
            grad_sub_block._set_forward_block_idx(sub_block.idx)
1097 1098 1099
            # see follwing comments for why set None here.
            pre_input_grad_names_set = copy.copy(input_grad_names_set)
            input_grad_names_set = None
1100
            sub_block_path = op_path_dict[op._block_attr_id("sub_block")]
1101
            _append_backward_ops_(sub_block, sub_block_path, grad_sub_block,
1102
                                  no_grad_dict, grad_to_var, callbacks,
1103
                                  input_grad_names_set, op_path_dict)
1104
            input_grad_names_set = pre_input_grad_names_set
Y
Yu Yang 已提交
1105

W
Wu Yi 已提交
1106
            program._rollback()
F
fengjiayi 已提交
1107 1108
            grad_sub_block_list.append(grad_sub_block.desc)

F
fengjiayi 已提交
1109
        # Getting op's corresponding grad_op
F
fengjiayi 已提交
1110
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
M
minqiyang 已提交
1111
            op.desc, cpt.to_text(no_grad_dict[block.idx]), grad_sub_block_list)
1112
        # Build the mapping between the forward op and bacckward op (Only for auto parallel)
1113 1114
        if distop_context is not None:
            for op_desc in grad_op_desc:
1115 1116
                assert op_desc.id() not in distop_context.grad_op_id_to_op_id
                distop_context.grad_op_id_to_op_id[op_desc.id()] = op.desc.id()
Y
Yang Yu 已提交
1117

1118 1119
        # Set device for grad_op according to forward Op
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
1120 1121 1122 1123
        if op.desc.has_attr(device_attr_name):
            op_device = op.desc.attr(device_attr_name)
            for op_desc in grad_op_desc:
                op_desc._set_attr(device_attr_name, op_device)
1124

1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151
        # Rename internal gradient variables in multiple backward
        # so that they have different names with previous backward.
        # For example:
        #  y = x * x, grad = fluid.gradients(fluid.gradients(y, x) + y * y, x)
        # In second-time backward, gradient variable names of partial
        # forward network (y * y) may be have same names with first-time
        # fluid.gradients(y, x).
        # So rename here before _addup_repetitive_outputs_.
        if program._appending_grad_times > 1:
            for op_desc in grad_op_desc:
                if not _is_grad_op_(op):
                    for name in op_desc.input_arg_names():
                        if name in rename_var_map:
                            op_desc._rename_input(name, rename_var_map[name])
                for name in op_desc.output_arg_names():
                    if "@GRAD" not in name:
                        continue
                    if block.desc.find_var(name.encode("ascii")):
                        new_name = _rename_grad_name_(
                            name, program._appending_grad_times)
                        op_desc._rename_output(name, new_name)
                        rename_var_map[name] = new_name

                        if name in op_grad_to_var:
                            op_grad_to_var[new_name] = op_grad_to_var[name]
                            op_grad_to_var.pop(name)

1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
        # If input_grad_names_set is not None, extend grad_op_descs only when
        # any input grad in outputs of previous grad ops.
        # But this strategy is not suited for while op for some control flow,
        # for example, for while op, the grads maybe generated in next loop.
        if input_grad_names_set is not None:
            is_append_grad = False
            for op_desc in grad_op_desc:
                input_grad_names = [
                    name for name in op_desc.input_arg_names()
                    if name.find(core.grad_var_suffix()) != -1
                ]
                # some code of gradient ops, like increment, are not very
                # standard, there is no @GRAD in these ops' inputs.
                if len(input_grad_names) == 0:
                    is_append_grad = True
                    break

                if _some_in_set_(input_grad_names, input_grad_names_set):
                    grad_op_descs.append(op_desc)
                    is_append_grad = True
                    for name in op_desc.output_arg_names():
                        input_grad_names_set.add(name)
            if is_append_grad:
                grad_to_var.update(op_grad_to_var)
        else:
            grad_op_descs.extend(grad_op_desc)
            grad_to_var.update(op_grad_to_var)
F
fengjiayi 已提交
1179

M
mapingshuo 已提交
1180
    # sum parameter's gradients' var given multiple var gradient
1181
    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs, block.idx)
F
fengjiayi 已提交
1182

M
mapingshuo 已提交
1183 1184
    # if all outputs of the grad op are in no_grad_set, then just remove and fill zero
    # if all inputs of the grad op are in no_grad_set, just remove this op
F
fengjiayi 已提交
1185 1186
    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])
F
fengjiayi 已提交
1187

M
mapingshuo 已提交
1188
    # remove some backward ops
C
chengduo 已提交
1189
    not_need_ops = _find_not_need_ops(grad_op_descs, ops, input_grad_names_set)
M
mapingshuo 已提交
1190

C
chengduo 已提交
1191 1192 1193
    grad_op_descs = [
        op_desc for op_desc in grad_op_descs if op_desc not in not_need_ops
    ]
1194

F
fengjiayi 已提交
1195
    # append op_desc in grad_op_descs to target_block
Y
yuyang18 已提交
1196 1197
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
F
update  
fengjiayi 已提交
1198
    for op_desc in grad_op_descs:
F
fengjiayi 已提交
1199 1200
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
1201 1202 1203 1204 1205 1206
        # Rebuild the mapping because new_op_desc has a differnt id (Only for auto parallel)
        if distop_context is not None:
            if op_desc.id() in distop_context.grad_op_id_to_op_id:
                distop_context.grad_op_id_to_op_id[new_op_desc.id(
                )] = distop_context.grad_op_id_to_op_id[op_desc.id()]
                distop_context.grad_op_id_to_op_id.pop(op_desc.id())
W
Wu Yi 已提交
1207
        new_op_desc._set_attr(op_role_attr_name, backward)
Y
Yang Yang 已提交
1208
        grad_to_var["__current_op_desc__"] = new_op_desc
Y
Yang Yang 已提交
1209
        if callbacks is not None:
1210
            assert (isinstance(callbacks, (list, tuple)))
Y
Yang Yang 已提交
1211 1212
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)
F
update  
fengjiayi 已提交
1213

F
fengjiayi 已提交
1214

1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234
def _is_grad_var_(var_name):
    return core.grad_var_suffix() in var_name


# Find the op who holds the sub_block as its "sub_block" attr
def _find_parent_op_(sub_block):
    sub_block_id = sub_block.idx

    if sub_block_id == 0:
        return None

    program = sub_block.program
    for block_id in six.moves.range(program.num_blocks):
        block_desc = program.block(block_id).desc
        for op_idx in six.moves.range(block_desc.op_size()):
            op = block_desc.op(op_idx)
            if op.has_attr("sub_block") and op._block_attr_id(
                    "sub_block") == sub_block_id:
                return op

1235
    # NOTE(paddle-dev): When optimizer is added in conditional block,
1236 1237 1238 1239
    # sub_block may not be found.
    return None


F
fengjiayi 已提交
1240
def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
1253
            val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable
1254
    """
1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271
    ops_to_remove = []
    '''
    NOTE(paddle-dev): while_grad op may hold some inputs which are not found 
    in the parent/forward block, and they are also the outputs of while_grad 
    op. These kinds of inputs are the recursive outputs inside while_grad op. 
    They should be considered as "already created" when scanning the inner 
    ops of while_grad ops.  
    '''
    parent_op = _find_parent_op_(block)
    parent_op_vars = []
    if parent_op is not None:
        input_args = parent_op.input_arg_names()
        output_args = parent_op.output_arg_names()
        for in_arg in input_args:
            if in_arg in output_args:
                parent_op_vars.append(in_arg)

F
fengjiayi 已提交
1272 1273 1274
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
W
Wu Yi 已提交
1275
            sub_block = block.program.block(op_desc._block_attr_id("sub_block"))
F
fengjiayi 已提交
1276
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)
1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293

        grad_var_ins = [
            var for var in op_desc.input_arg_names() if _is_grad_var_(var)
        ]
        grad_var_outs = [
            var for var in op_desc.output_arg_names() if _is_grad_var_(var)
        ]

        inputs = [
            var for var in op_desc.input_arg_names()
            if var != core.empty_var_name()
        ]
        outputs = [
            var for var in op_desc.output_arg_names()
            if var != core.empty_var_name()
        ]

1294
        # If the outputs of grad op is empty, just remove it
1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319
        if not outputs:
            ops_to_remove.append(op_idx)
            continue
        else:
            '''
            If the output is not empty and there is any grad input, find 
            whether there is any existing input. If not, just remove it.
            '''
            if grad_var_ins:
                existing_grad_var_ins = [
                    var for var in grad_var_ins
                    if block.desc.has_var_recursive(cpt.to_bytes(var)) or var in
                    parent_op_vars
                ]
                if not existing_grad_var_ins:
                    '''
                    FIXME(paddle-dev, zengjinle): rnn_memory_helper_grad is used
                    in recurrent op. The input of this op does not even exist in 
                    the program! Therefore, any dependency analysis would not 
                    work to this op! If I do not add the following code, this op
                    would be pruned, and the calculation result would be wrong. 
                    Maybe we should re-design this op later...  
                    '''
                    if op_desc.type() not in ['rnn_memory_helper_grad']:
                        ops_to_remove.append(op_idx)
1320
                        continue
1321

F
fengjiayi 已提交
1322 1323 1324
        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
M
minqiyang 已提交
1325 1326
            if block.desc.has_var_recursive(cpt.to_bytes(
                    grad_var_name)) or grad_var_name == core.empty_var_name():
F
fengjiayi 已提交
1327
                continue
M
minqiyang 已提交
1328
            block.desc.var(cpt.to_bytes(grad_var_name))
F
fengjiayi 已提交
1329
            new_vars.add(grad_var_name)
1330
            if grad_var_name not in grad_to_var:
F
fengjiayi 已提交
1331 1332 1333 1334 1335
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)
1336

F
fengjiayi 已提交
1337 1338
        for arg in op_desc.output_arg_names():
            if arg in new_vars:
1339
                _infer_var_data_type_shape_(arg, block)
F
update  
fengjiayi 已提交
1340

1341 1342 1343
    for op_idx in reversed(ops_to_remove):
        block.desc._remove_op(op_idx, op_idx + 1)

F
update  
fengjiayi 已提交
1344

1345 1346 1347 1348 1349 1350
def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
W
Wu Yi 已提交
1351
                op_desc._rename_input(name, var_map[name])
1352 1353

        for name in op_desc.output_arg_names():
M
mapingshuo 已提交
1354 1355
            if "@GRAD" not in name:
                continue
1356
            if block.desc.find_var(name.encode("ascii")):
Y
Yu Yang 已提交
1357
                new_name = unique_name.generate(name)
W
Wu Yi 已提交
1358
                op_desc._rename_output(name, new_name)
1359 1360
                var_map[name] = new_name

M
minqiyang 已提交
1361
    for g, ng in six.iteritems(var_map):
1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
1373
        for var in list(block.vars.values()):
1374 1375 1376 1377 1378 1379 1380
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict


1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391
def _get_son_parent_block_idx_dict(program, current_block_idx):

    son_parent_block_idx_dict = collections.OrderedDict()
    while current_block_idx >= 0:
        parent_block_idx = program.block(current_block_idx).parent_idx
        son_parent_block_idx_dict[current_block_idx] = parent_block_idx
        current_block_idx = parent_block_idx

    return son_parent_block_idx_dict


1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411
def _get_no_grad_set_name(no_grad_set):
    no_grad_set_name = set()
    if no_grad_set is not None:
        if isinstance(no_grad_set, (set, list, tuple)):
            for i, no_grad_var in enumerate(no_grad_set):
                if isinstance(no_grad_var, framework.Variable):
                    no_grad_set_name.add(no_grad_var.name)
                elif isinstance(no_grad_var, six.string_types):
                    no_grad_set_name.add(no_grad_var)
                else:
                    raise TypeError(
                        "The type of no_grad_set's member must be paddle.fluid.Variable or str, but received %s."
                        % (type(no_grad_var)))
        else:
            raise TypeError(
                "The type of no_grad_set should be set or list or tuple, but received {}".
                format(type(no_grad_set)))
    return no_grad_set_name


1412
@framework.static_only
M
mapingshuo 已提交
1413 1414 1415 1416
def append_backward(loss,
                    parameter_list=None,
                    no_grad_set=None,
                    callbacks=None,
1417 1418
                    checkpoints=None,
                    distop_context=None):
1419
    """
1420 1421
    :api_attr: Static Graph

1422
    This function appends backward part to main_program.
F
fengjiayi 已提交
1423

1424 1425
    A complete neural network training is made up of forward and backward
    propagation. However, when we configure a network, we only need to
1426 1427
    specify its forward part. This function uses the chain rule to automatically
    generate the backward part according to the forward part.
F
fengjiayi 已提交
1428

1429 1430
    In most cases, users do not need to invoke this function manually.
    It will be automatically invoked by the optimizer's `minimize` function.
F
fengjiayi 已提交
1431

1432
    Parameters:
1433
        loss(Tensor): The loss Tensor of the network.
1434
        parameter_list(list[Tensor|str]|tuple[Tensor|str], optional): List/Tuple of Parameters or Parameter.names
1435
                                           that need to be updated by optimizers.
1436
                                           If it is None, all parameters
F
fengjiayi 已提交
1437
                                           will be updated.
1438
                                           Default: None.
1439 1440
        no_grad_set(set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All Tensors with
1441
                               `stop_gradient=True` from all blocks will
F
fengjiayi 已提交
1442
                               be automatically added into this set.
1443
                               If this parameter is not None, the Tensors or Tensor.names in this set will be added to the default set.
1444
                               Default: None.
1445
        callbacks(list[callable object]|tuple[callable object], optional): List/Tuple of callback functions.
1446
                                               The callbacks are used for
1447 1448 1449 1450 1451 1452
                                               doing some custom jobs during
                                               backward part building. All
                                               callable objects in it will
                                               be invoked once each time a
                                               new gradient operator is added
                                               into the program. The callable
Z
zhangchunle 已提交
1453
                                               object must have two input
1454 1455
                                               parameters: ``block`` and ``context`` .
                                               The ``block`` is the :ref:`api_guide_Block_en` which
1456
                                               the new gradient operator will
1457
                                               be added to. The ``context`` is a
1458
                                               map, whose keys are gradient
1459 1460 1461
                                               Tensor names and values are
                                               corresponding original :ref:`api_guide_tensor_en` .
                                               In addition to this, the ``context``
1462
                                               has another special key-value pair:
1463
                                               the key is string ``__current_op_desc__``
1464 1465 1466
                                               and the value is the op_desc of the
                                               gradient operator who has just
                                               triggered the callable object.
1467
                                               Default: None.
F
fengjiayi 已提交
1468 1469

    Returns:
1470 1471
        list of tuple ( :ref:`api_guide_tensor_en` , :ref:`api_guide_tensor_en` ): Pairs of parameter and its corresponding gradients.
        The key is the parameter and the value is gradient Tensor.
F
fengjiayi 已提交
1472 1473

    Raises:
1474
        AssertionError: If ``loss`` is not an instance of Tensor.
F
fengjiayi 已提交
1475 1476 1477 1478

    Examples:
        .. code-block:: python

1479 1480
            import paddle
            import paddle.nn.functional as F
L
lujun 已提交
1481

1482 1483 1484 1485 1486
            paddle.enable_static()

            x = paddle.static.data(name='x', shape=[None, 13], dtype='int64')
            y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
            x_emb = paddle.static.nn.embedding(x, size=[100, 256])
1487
            y_predict = paddle.static.nn.fc(x=x_emb, size=1, activation=None, name='my_fc')
1488 1489
            loss = F.square_error_cost(input=y_predict, label=y)
            avg_loss = paddle.mean(loss)
1490 1491

            # Get all weights in main_program, not include bias.
1492
            all_weights = [param for param in paddle.static.default_main_program().block(0).all_parameters() if 'w_' in param.name]
1493 1494 1495
            all_weights_name = [w.name for w in all_weights]

            # return all param_grads needed to be updated if parameter_list set default None.
1496
            p_g_list1 = paddle.static.append_backward(loss=avg_loss)
1497 1498
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

1499 1500
            # return the param_grads corresponding to parameter_list that can be list of param (Tensor).
            p_g_list2 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights)
1501 1502 1503
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # parameter_list can be list of param.name (str).
1504
            p_g_list3 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights_name)
1505 1506
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

1507 1508
            # no_grad_set can be set of Tensors that means grad will be cut off from these Tensors.
            p_g_list4 = paddle.static.append_backward(loss=avg_loss, no_grad_set=set([x_emb]))
1509 1510
            # output: [(my_fc.w_0, my_fc.w_0@GRAD), (my_fc.b_0, my_fc.b_0@GRAD)]

1511 1512
            # no_grad_set can be set of Tensor.name when the Tensor is created inside layers and can't be specified explicitly.
            p_g_list5 = paddle.static.append_backward(loss=avg_loss, no_grad_set=set(['my_fc.b_0']))
1513 1514 1515
            # output: [(embedding_0.w_0, embedding_0.w_0@GRAD), (my_fc.w_0, my_fc.w_0@GRAD)]

            # return [] because all param_grads are filtered by no_grad_set.
1516
            p_g_list6 = paddle.static.append_backward(loss=avg_loss, parameter_list=all_weights, no_grad_set=set(all_weights))
1517

1518
    """
1519
    check_type(loss, 'loss', framework.Variable,
1520
               'paddle.static.append_backward')
Y
yuyang18 已提交
1521

Y
Fix bug  
yuyang18 已提交
1522 1523
    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
M
mapingshuo 已提交
1524
        _find_loss_op_(loss)
Y
Fix bug  
yuyang18 已提交
1525

W
Wu Yi 已提交
1526 1527 1528
    loss.op._set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
                      int(core.op_proto_and_checker_maker.OpRole.Forward) |
                      int(core.op_proto_and_checker_maker.OpRole.Loss))
Y
yuyang18 已提交
1529

Y
Yang Yang 已提交
1530
    if callbacks is not None:
1531
        check_type(callbacks, 'callbacks', (list, tuple),
1532
                   'paddle.static.append_backward')
Y
Yu Yang 已提交
1533

F
fengjiayi 已提交
1534
    program = loss.block.program
1535 1536 1537 1538 1539 1540 1541 1542 1543 1544
    root_block = program.block(0)
    current_block_idx = program.current_block_idx
    current_block = program.block(current_block_idx)

    is_in_control_flow = current_block_idx != 0

    # Double grad is not supported in sub-block (control flow)
    if not is_in_control_flow:
        # _appending_grad_times used for double grad
        program._appending_grad_times += 1
1545

F
fengjiayi 已提交
1546
    if no_grad_set is None:
1547
        no_grad_set = set()
1548 1549
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
1550
    no_grad_dict = _get_stop_gradients_(program)
1551 1552
    # no_grad_set only contains vars in block 0
    # Todo(liym27): support vars in sub block
1553
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))
Y
Yu Yang 已提交
1554

1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
    # Currently it is only to support the optimizer.minimize
    # in a switch branch, which can append_backward in a sub_block.
    # Note: while_loop is in control flow, but it makes no sense to call optimizer in while.
    # Todo: report error when it is in while_loop
    if is_in_control_flow:
        # create grad block if in switch control flow.
        target_grad_block = program._create_block(
            parent_idx=current_block.parent_idx)
        target_grad_block._set_forward_block_idx(current_block_idx)
        # after _create_block, program.current_block changes
    else:
        target_grad_block = root_block

    son_parent_block_idx_dict = _get_son_parent_block_idx_dict(
        program, current_block_idx)

    block_fwd_op_num_dict = {}  # block_id: fwd_op_num
    for idx in son_parent_block_idx_dict:
        block_fwd_op_num_dict[idx] = program.block(idx).desc.op_size()
F
fengjiayi 已提交
1574

F
fengjiayi 已提交
1575 1576
    grad_to_var = dict()

M
mapingshuo 已提交
1577
    op_desc = _create_loss_op_desc_(loss)
1578 1579 1580 1581 1582 1583 1584
    target_grad_block.desc.append_op().copy_from(op_desc)

    for block_idx in son_parent_block_idx_dict:
        block = program.block(block_idx)

        block_no_grad_set = set(
            map(_strip_grad_suffix_, no_grad_dict[block_idx]))
1585 1586 1587 1588

        op_path_dict = dict()
        op_path = _find_op_path_(block, [loss], [], block_no_grad_set,
                                 op_path_dict)
1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600

        no_grad_vars = _find_no_grad_vars(block, op_path, [loss],
                                          block_no_grad_set)

        block_no_grad_set.update(no_grad_vars)
        no_grad_dict[block_idx].update(
            list(map(_append_grad_suffix_, block_no_grad_set)))

        input_grad_names_set = None
        # For double backward, input_grad_names is used for filtering
        # some non-used gradients op(s).

1601
        # TODO(liym27): need a better design.
1602 1603 1604 1605 1606
        # not support double grad in control flow sub-block now.
        if not is_in_control_flow:
            if program._appending_grad_times > 1:
                input_grad_names_set = set([_append_grad_suffix_(loss.name)])

1607
        # TODO: support _append_backward_ops_with_checkpoints_ in
1608
        #  sub-block (control flow)
J
JZ-LIANG 已提交
1609
        is_recompute = False
1610 1611 1612
        if checkpoints != None and \
                isinstance(checkpoints, list) and \
                len(checkpoints) > 0:
J
JZ-LIANG 已提交
1613
            is_recompute = True
1614
            program_stat, checkpoint_names, \
T
tangwei12 已提交
1615 1616
                vars_should_be_hold, \
                recompute_segments = \
1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631
                _append_backward_ops_with_checkpoints_(
                    root_block,
                    op_path,
                    root_block,
                    no_grad_dict,
                    grad_to_var,
                    checkpoints)
        else:
            _append_backward_ops_(
                block,  # the block where forward ops are in
                op_path,
                target_grad_block,
                no_grad_dict,
                grad_to_var,
                callbacks,
1632
                input_grad_names_set=input_grad_names_set,
1633 1634
                op_path_dict=op_path_dict,
                distop_context=distop_context, )
1635 1636 1637 1638 1639 1640 1641 1642 1643

    grad_info_map = dict()

    # if in control flow, target_grad_block is a created new block which only contains grad ops,
    # so fwd_op_num is set to 0.
    fwd_op_num = block_fwd_op_num_dict[
        current_block_idx] if not is_in_control_flow else 0

    # Because append_backward may be called multiple times,
1644 1645
    # we need rename the internal gradient variables so that they have
    # different names.
1646
    _rename_grad_(target_grad_block, fwd_op_num, grad_to_var, {})
1647

1648 1649
    _append_backward_vars_(target_grad_block, fwd_op_num, grad_to_var,
                           grad_info_map)
F
fengjiayi 已提交
1650

F
fengjiayi 已提交
1651
    program.current_block_idx = current_block_idx
W
Wu Yi 已提交
1652
    program._sync_with_cpp()
F
fengjiayi 已提交
1653

1654
    if parameter_list is not None:
1655 1656
        check_type(parameter_list, 'parameter_list', (list, tuple, set),
                   'fluid.backward.append_backward')
1657 1658
        parameters = []
        for i, param in enumerate(parameter_list):
1659 1660 1661
            check_type(param, 'parameter_list[%s]' % i, (framework.Variable,
                                                         six.string_types),
                       'fluid.backward.append_backward')
1662 1663 1664 1665
            if isinstance(param, framework.Variable):
                parameters.append(param.name)
            elif isinstance(param, six.string_types):
                parameters.append(param)
1666
    else:
F
fengjiayi 已提交
1667
        params = program.global_block().all_parameters()
C
chengduo 已提交
1668
        parameters = [param.name for param in params if param.trainable]
1669

1670
    params_and_grads = []
1671
    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
1672
    for param in parameters:
M
minqiyang 已提交
1673
        if cpt.to_text(param) not in grad_info_map:
F
fengjiayi 已提交
1674
            continue
F
update  
fengjiayi 已提交
1675
        grad_info = grad_info_map[param]
F
fengjiayi 已提交
1676
        grad_block = grad_info[1]
1677 1678 1679 1680
        if not grad_block.has_var(grad_info[0]):
            raise ValueError("grad block[{0}] did not have grad var {1}".format(
                grad_info[1], grad_info[0]))
        # Get the param var from the global block
F
fengjiayi 已提交
1681
        param_var = program.global_block().var(param)
1682
        grad_var = grad_block.var(grad_info[0])
1683 1684 1685 1686 1687
        if not is_in_control_flow:
            if loss.block.has_var(grad_info[0]):
                params_and_grads.append((param_var, grad_var))
            else:
                params_and_grads.append((param_var, None))
1688
        else:
1689
            params_and_grads.append((param_var, grad_var))
Y
yuyang18 已提交
1690 1691 1692 1693

    for p, g in params_and_grads:
        if g is None:
            continue
1694 1695 1696
        ops = grad_block.ops if is_in_control_flow else program.global_block(
        ).ops
        for op in reversed(ops):
Y
yuyang18 已提交
1697 1698 1699 1700 1701 1702 1703
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
Y
yuyang18 已提交
1704
        attr_val = [p.name, g.name]
Y
yuyang18 已提交
1705 1706
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
W
Wu Yi 已提交
1707
        g.op._set_attr(op_role_var_attr_name, attr_val)
Y
yuyang18 已提交
1708

J
JZ-LIANG 已提交
1709 1710 1711 1712
    if is_recompute:
        return params_and_grads, checkpoint_names
    else:
        return params_and_grads
1713 1714 1715 1716 1717 1718 1719 1720


def _as_list(x):
    if x is None:
        return []
    return list(x) if isinstance(x, collections.Sequence) else [x]


1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
def _is_ancestor_block(ancestor_block, block):
    prog = block.program
    ancestor_idx = ancestor_block.idx
    parent_idx = block.parent_idx

    while parent_idx != -1:
        if parent_idx == ancestor_idx:
            return True
        parent_idx = prog.block(parent_idx).parent_idx

    return False


def _get_output_names(cur_block, targets):
    """
    In `cur_block`, get output names those linked to targets.
    NOTE:
    1. `targets` can be in `cur_block`;
    Usually, `targets` is in `cur_block`. However, considering control flow,
    2. `targets` may be in sub-block but `cur_block` is an ancestor of `targets[0].block`;
    3. `targets` may be in the block which is ancestor of `cur_block`.
    """

    block = targets[0].block if targets else cur_block
    current_output_names = set([out.name for out in targets])

1747 1748 1749 1750 1751 1752
    # 1. If `targets` in cur_block or the ancestral block of `cur_block`
    if block.idx == cur_block.idx or _is_ancestor_block(block, cur_block):
        return current_output_names

    # 2. If `cur_block` is an ancestor of `targets[0].block`, run while loop
    prog = cur_block.program
1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771
    while block.idx != cur_block.idx:
        assert block.parent_idx != -1
        parent_block = prog.block(block.parent_idx)

        parent_block_output_names = set()
        for op in reversed(block.ops):
            if _some_in_set_(op.desc.output_arg_names(), current_output_names):
                for name in op.desc.input_arg_names():
                    current_output_names.add(name)
                    if not block.desc.find_var(cpt.to_bytes(name)) \
                            and parent_block.desc.find_var(cpt.to_bytes(name)):
                        parent_block_output_names.add(name)

        block = parent_block
        current_output_names = parent_block_output_names

    return current_output_names


1772 1773 1774
def _find_no_grad_vars(block, op_path, targets, no_grad_set):
    """
    Find the vars which is not used in the program, and
1775
    those vars belong to no_grad_var.
1776
    """
1777
    output_names = _get_output_names(block, targets)
1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791
    no_grad_var = []
    for i, op in reversed(list(enumerate(op_path))):
        # If the op has sub_block, it is too complicated to find the correct no_grad_var.
        if not op.has_attr("sub_block"):
            for out_var in op.desc.output_arg_names():
                if out_var not in output_names and out_var not in op.desc.input_arg_names(
                ) and not block.vars[out_var].stop_gradient:
                    no_grad_var.append(out_var)
        for name in op.desc.input_arg_names():
            if name not in no_grad_set:
                output_names.add(name)
    return set(no_grad_var)


1792 1793 1794 1795 1796 1797
def _find_op_path_(block,
                   targets,
                   inputs,
                   no_grad_set,
                   op_path_dict=None,
                   is_while=False):
1798
    """
1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811
    It is used to find the grad path in `block`.

    Args:
        block(Block): The block in which to get op path.
        targets(list[Variable]): The target variables.
        inputs(list[Variable]): The input variables.
        no_grad_set(set): The set of no grad var name. no_grad_set will be changed.
        op_path_dict(dict): op_path_dict will be changed. op_path_dict will be changed.
            key(int) block index
            val(list) the op path of block(index)
        is_while(bool): Whether or not `block` is while block
    Return:
        The forward op path of block corresponding to backward op.
1812
    """
1813

1814
    input_names = set([inp.name for inp in inputs])
1815 1816 1817
    output_names = _get_output_names(block, targets)
    if op_path_dict is None:
        op_path_dict = dict()
1818 1819 1820 1821 1822 1823

    relevant_op_flags = [True] * len(block.ops)

    # All the inputs of the block are used if inputs is empty,
    if inputs:
        for i, op in enumerate(block.ops):
1824 1825 1826
            if _some_in_set_(
                    op.desc.input_arg_names(),
                    input_names) and core.has_non_empty_grad_op_maker(op.type):
1827 1828 1829 1830 1831 1832 1833
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
1834 1835 1836 1837 1838 1839 1840 1841 1842
        if op.has_attr("sub_block"):
            sub_block_id = op._block_attr_id("sub_block")
            sub_block = block.program.block(sub_block_id)
            sub_block_target_names = output_names & set(op.output_arg_names)
            sub_block_path = _get_sub_block_path(sub_block, op,
                                                 set(), op_path_dict,
                                                 sub_block_target_names)
            op_path_dict[sub_block_id] = sub_block_path

1843 1844 1845
        if _some_in_set_(
                op.desc.output_arg_names(),
                output_names) and core.has_non_empty_grad_op_maker(op.type):
1846 1847 1848 1849 1850 1851
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

1852 1853 1854 1855 1856
    if is_while:
        # If block is while block, dealing with op specifically again.
        # TODO(liym27): Consider special types of ops.
        for i, op in reversed(list(enumerate(block.ops))):
            if relevant_op_flags[i] == False \
T
tangwei12 已提交
1857
                    and _some_in_set_(op.desc.output_arg_names(), output_names):
1858 1859
                relevant_op_flags[i] = True

1860 1861 1862 1863 1864 1865 1866
    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
1867
                if name not in input_names and block.vars[name].stop_gradient:
1868 1869 1870 1871 1872 1873 1874
                    no_grad_set.add(name)

    return op_path


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
1875
    Backpropagate the gradients of targets to inputs.
1876 1877

    Args:
1878 1879 1880
        targets(Tensor|list[Tensor]|tuple[Tensor]): The target Tensors
        inputs(Tensor|list[Tensor]|tuple[Tensor]): The input Tensors
        target_gradients (Tensor|list[Tensor]|tuple[Tensor], optional): The gradient Tensors
1881 1882
            of targets which has the same shape with targets, If None, ones will
            be created for them.
1883 1884
        no_grad_set(set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
                               should be ignored. All Tensors with
1885 1886
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
1887
                               If this parameter is not None, the Tensors or Tensor.names in this set will be added to the default set.
1888
                               Default: None.
1889 1890

    Return:
1891 1892
        (list[Tensor]): A list of gradients for inputs
        If an input does not affect targets, the corresponding gradient Tensor
1893 1894 1895 1896 1897 1898 1899 1900
        will be None
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
1901 1902
    # increase appending gradients times
    prog._appending_grad_times += 1
1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets")

    if no_grad_set is None:
        no_grad_set = set()
1914 1915
    else:
        no_grad_set = _get_no_grad_set_name(copy.copy(no_grad_set))
1916
    no_grad_dict = _get_stop_gradients_(prog)
1917
    no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set)))
1918 1919 1920

    fwd_op_num = block.desc.op_size()

1921 1922
    input_grad_names_set = set()

1923 1924 1925 1926 1927
    target_grad_map = {}
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        if grad is None:
            grad_name = _append_grad_suffix_(target.name)
L
lvmengsi 已提交
1928 1929 1930 1931 1932
            target_shape = target.name + '_shape'
            block.desc.append_op().copy_from(
                _create_op_desc_("shape", {'Input': [target.name]},
                                 {"Out": [target_shape]}, {}))
            input_grad_names_set.add(target_shape)
L
liym27 已提交
1933
            op_desc = _create_op_desc_("fill_constant",
L
lvmengsi 已提交
1934
                                       {"ShapeTensor": [target_shape]},
1935
                                       {"Out": [grad_name]}, {
1936
                                           "shape": target.shape,
1937 1938 1939
                                           "value": 1.0,
                                           "dtype": target.dtype,
                                       })
L
liym27 已提交
1940

1941
            block.desc.append_op().copy_from(op_desc)
1942
            input_grad_names_set.add(grad_name)
1943 1944 1945 1946 1947 1948 1949 1950
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s" % (
                        target.name, grad.name))
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name
1951 1952 1953 1954 1955 1956
            input_grad_names_set.add(grad.name)

    # For double backward, input_grad_names is used for filter
    # some non-used gradients op.
    if prog._appending_grad_times == 1:
        input_grad_names_set = None
1957 1958 1959 1960 1961 1962

    for input in inputs:
        if input.block.program != prog:
            raise "input must be in the same program as targets"

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
1963 1964 1965 1966

    op_path_dict = dict()
    op_path = _find_op_path_(block, targets, inputs, block_no_grad_set,
                             op_path_dict)
1967 1968 1969 1970 1971 1972

    # find no grad var by op_path
    no_grad_vars = _find_no_grad_vars(block, op_path, targets,
                                      block_no_grad_set)
    block_no_grad_set.update(no_grad_vars)

1973
    no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set)))
1974 1975
    grad_to_var = dict()
    grad_info_map = dict()
1976 1977 1978 1979 1980 1981
    _append_backward_ops_(
        block,
        op_path,
        block,
        no_grad_dict,
        grad_to_var,
1982 1983
        input_grad_names_set=input_grad_names_set,
        op_path_dict=op_path_dict)
1984 1985 1986 1987 1988 1989 1990

    # Because calc_gradient may be called multiple times,
    # we need rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map)

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
W
Wu Yi 已提交
1991
    prog._sync_with_cpp()
1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars
2007 2008


2009
@framework.static_only
2010 2011
def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
    """
2012
    :api_attr: Static Graph
T
tangwei12 已提交
2013

2014 2015 2016
    Backpropagate the gradients of targets to inputs.

    Args:
2017 2018 2019
        targets (Tensor|list[Tensor]|tuple[Tensor]): The target Tensors.
        inputs (Tensor|list[Tensor]|tuple[Tensor]): The input Tensors.
        target_gradients (Tensor|list[Tensor]|tuple[Tensor], optional): The gradient Tensor
2020 2021
            of targets which has the same shape with targets, If None, ones will
            be created for them.
2022 2023 2024
        no_grad_set (set[Tensor|str], optional): Set of Tensors or Tensor.names in the :ref:`api_guide_Block_en` 0 whose gradients
            should be ignored. All Tensors with ``stop_gradient=True`` from all blocks will
            be automatically added into this set. If this parameter is not None, the Tensors or Tensor.names
2025
            in this set will be added to the default set. Default: None.
2026 2027

    Return:
2028 2029
        (list[Tensor]): A list of gradients for inputs
        If an input does not affect targets, the corresponding gradient Tensor
2030 2031 2032 2033 2034
        will be None.

    Examples:
        .. code-block:: python

2035 2036 2037 2038
            import paddle
            import paddle.nn.functional as F

            paddle.enable_static()
2039

2040
            x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
2041
            x.stop_gradient=False
2042 2043 2044 2045
            y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
            y = F.relu(y)
            z = paddle.static.gradients([y], x)
            print(z) # [var x@GRAD : fluid.VarType.LOD_TENSOR.shape(-1L, 2L, 8L, 8L).astype(VarType.FP32)]
2046
    """
2047
    check_type(targets, 'targets', (framework.Variable, list, tuple),
2048
               'paddle.static.gradients')
2049
    check_type(inputs, 'inputs', (framework.Variable, list, tuple),
2050
               'paddle.static.gradients')
2051
    check_type(target_gradients, 'target_gradients', (
2052
        framework.Variable, list, tuple, type(None)), 'paddle.static.gradients')
2053

2054 2055
    outs = calc_gradient(targets, inputs, target_gradients, no_grad_set)
    return _as_list(outs)
2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124


@framework.static_only
def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None):
    """
    :api_attr: Static Graph

    Backpropagate the gradients of the program and apply the gradients with the given optimizer.

    Args:
        program (Program): The input program.
        optimizer (Optimizer): The optimizer to apply the gradients.
        inputs (Tensor|list[Tensor]|tuple[Tensor], optional): The input Tensors.
            If None, the inputs will be created from the input variables in the given program. Default:None.
        outputs (Tensor|list[Tensor]|tuple[Tensor], optional): The output Tensors.
            If None, the outputs will be created from the output variables in the given program. Default: None.

    Return:
        tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by gradients_with_optimizer and a list of (param, grad) variable pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.static as static

            paddle.enable_static()

            img = static.data(name='image', shape=[None, 784])
            pred = static.nn.fc(x=img, size=10, activation='relu')
            loss = paddle.mean(pred)
            opt_ops, pram_grads = paddle.fluid.backward.gradients_with_optimizer(static.default_main_program(), opt)
            print(opt_ops)

    """
    check_type(program, 'program', paddle.fluid.Program,
               'paddle.static.gradients_with_optimizer')
    check_type(optimizer, 'optimizer', paddle.optimizer.Optimizer,
               'paddle.static.gradients_with_optimizer')

    if inputs is None or outputs is None:
        in_set = set()
        out_set = set()
        for block in program.blocks:
            for op in block.ops:
                for name in op.input_arg_names:
                    in_set.add(block.vars[name])
                for name in op.output_arg_names:
                    out_set.add(block.vars[name])
        if inputs is None:
            inputs = list(in_set.difference(out_set))
        if outputs is None:
            outputs = list(out_set.difference(in_set))

    grads = gradients(outputs, inputs)

    with program_guard(program, None):
        pram_grads = [(pram, grad) for pram, grad in zip(inputs, grads)
                      if isinstance(pram, paddle.fluid.framework.Parameter) and
                      grad is not None]

        optimize_ops = optimizer.apply_gradients(pram_grads)

    return optimize_ops, pram_grads