#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid import framework as framework
from . import core
import collections
import copy
import unique_name

__all__ = ['append_backward']


def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
    Traverse all ops in op_descs[begin_idx : end_idx],
    if any op has inputs/outputs named "old_name", rename them to "new_name"
    """
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
        end_idx = len(op_descs)
    for i in range(begin_idx, end_idx):
        op_desc = op_descs[i]
        if isinstance(op_desc, tuple):
            op_desc = op_desc[0]
        op_desc.rename_input(old_name, new_name)
        op_desc.rename_output(old_name, new_name)


def _create_op_desc_(op_type, inputs, outputs, attrs):
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
    """
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
    for para, args in inputs.iteritems():
        op_desc.set_input(para, args)
    for para, args in outputs.iteritems():
        op_desc.set_output(para, args)

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
    for name, val in attrs.iteritems():
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
            op_desc.set_attr(name, val)
    return op_desc
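
# Illustrative sketch (comments only, not executed): _create_op_desc_ builds a
# raw C++ OpDesc, e.g. the `sum` op that accumulates renamed gradients
# elsewhere in this file:
#
#     op_desc = _create_op_desc_(
#         "sum", {"X": ["x@GRAD@RENAME@0", "x@GRAD@RENAME@1"]},
#         {"Out": ["x@GRAD"]}, {"use_mkldnn": False})
#
# When no op role is passed in attrs, OpRole.Backward is attached by default.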


def _infer_var_data_type_(grad_var_name, block):
    """
    Infer the data type of the given grad variable
    """
    grad_var = block.desc.find_var(grad_var_name.encode("ascii"))
    fwd_name = _strip_grad_suffix_(grad_var_name.encode("ascii"))
    if block.desc.has_var_recursive(fwd_name):
        fwd_var = block.desc.find_var_recursive(fwd_name.encode("ascii"))
        grad_var.set_dtype(fwd_var.dtype())
    else:
        grad_var.set_dtype(core.VarDesc.VarType.FP32)


def _all_in_set_(cands, s):
    """
    Test if all elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c not in s:
            return False
    return True


def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c in s:
            return True
    return False


def _strip_grad_suffix_(name):
    """
    Strip the grad suffix from the given variable name
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
    pos = name.find(core.grad_var_suffix())
    return name[:pos] if pos != -1 else name


def _append_grad_suffix_(name):
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
    return name + core.grad_var_suffix()


def _addup_repetitive_outputs_(op_descs):
    """
    In the backward part, a variable may be the output of more than one op.
    A single op may also write several of its outputs to the same variable.
    In these cases, the variable should be the accumulation of all the outputs.
    `sum_op`s are added to implement the accumulation.
    """
    pending_sum_ops = []
    var_rename_count = collections.defaultdict(int)
    renamed_vars = collections.defaultdict(list)
    for idx, op_desc in enumerate(op_descs):
        for var_name in op_desc.input_arg_names():
            if len(renamed_vars[var_name]) > 1:
                pending_sum_ops.append((_create_op_desc_(
                    "sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]},
                    {"use_mkldnn": False}), idx))
                renamed_vars[var_name] = [var_name]
        for param_idx, param_name in enumerate(op_desc.output_names()):
            arg_names = op_desc.output(param_name)
            for arg_idx, var_name in enumerate(arg_names):
                if var_name == core.empty_var_name(
                ) or var_name in op_desc.input_arg_names():
                    # empty variable or inplace op
                    continue
                if len(renamed_vars[var_name]) == 0:
                    # it's the first time we get the variable
                    renamed_vars[var_name] = [var_name]
                else:
                    if len(renamed_vars[var_name]) == 1:
                        new_name = var_name + "@RENAME@" + \
                            str(var_rename_count[var_name])
                        var_rename_count[var_name] += 1
                        # rename original var_name
                        renamed_vars[var_name][0] = new_name
                        _rename_arg_(op_descs, var_name, new_name, 0, idx)
                        _rename_arg_(pending_sum_ops, var_name, new_name)

                        for p in op_desc.output_names()[:param_idx]:
                            p_arg_names = op_desc.output(p)
                            if var_name in p_arg_names:
                                op_desc.set_output(p, [
                                    new_name if x == var_name else x
                                    for x in p_arg_names
                                ])

                        arg_names = [
                            new_name if x == var_name else x
                            for x in arg_names[:arg_idx]
                        ] + arg_names[arg_idx:]

                    new_name = var_name + "@RENAME@" + \
                        str(var_rename_count[var_name])
                    var_rename_count[var_name] += 1
                    arg_names[arg_idx] = new_name
                    op_desc.set_output(param_name, arg_names)
                    renamed_vars[var_name].append(new_name)

    for var_name, inputs in renamed_vars.iteritems():
        if len(inputs) > 1:
            pending_sum_ops.append(
                (_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]},
                                  {"use_mkldnn": False}), len(op_descs)))
    # sum_op descs are sorted according to their insert position
    for p in reversed(pending_sum_ops):
        op_descs.insert(p[1], p[0])

    return op_descs
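
# Illustrative sketch (comments only, not executed): if two grad ops both
# write "x@GRAD", _addup_repetitive_outputs_ renames the duplicated outputs
# and appends a sum op, roughly:
#
#     grad_op_1: Out = ["x@GRAD"]   ->  grad_op_1: Out = ["x@GRAD@RENAME@0"]
#     grad_op_2: Out = ["x@GRAD"]   ->  grad_op_2: Out = ["x@GRAD@RENAME@1"]
#                                       sum: X = ["x@GRAD@RENAME@0",
#                                                 "x@GRAD@RENAME@1"],
#                                            Out = ["x@GRAD"]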


def _remove_no_grad_branch_(op_descs, no_grad_set):
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
        2. all grad inputs of the grad op are in 'no_grad_set'
    """

    def _op_can_be_removed_(op_desc, no_grad_set):
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
            return True
        if _all_in_set_(
                filter(lambda name: name.find(core.grad_var_suffix()) != -1,
                       op_desc.input_arg_names()), no_grad_set):
            no_grad_set.update(out_arg_names)
            return True
        return False

    # Remove ops whose outputs are all in no_grad_set
    op_descs = filter(
        lambda op_desc: not _op_can_be_removed_(op_desc, no_grad_set), op_descs)
    # Insert fill_zeros_like_op
    to_insert = []
    for idx, op_desc in enumerate(op_descs):
        for arg in op_desc.input_arg_names():
            if core.grad_var_suffix() in arg and arg in no_grad_set:
                to_insert.append((_create_op_desc_("fill_zeros_like", {
                    "X": [_strip_grad_suffix_(arg)]
                }, {"Out": [arg]}, {}), idx))

    map(lambda p: op_descs.insert(p[1], p[0]), reversed(to_insert))

    return op_descs
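
# Illustrative sketch (comments only, not executed): assume "w@GRAD" is in
# no_grad_set. A grad op whose outputs are all in the set (e.g. one that only
# produces "w@GRAD") is dropped. If a surviving grad op still reads "w@GRAD",
# a zero-filled tensor is provided instead:
#
#     fill_zeros_like: X = ["w"], Out = ["w@GRAD"]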


import proto.framework_pb2 as framework_pb2


def serialize_op_decs(op_desc):
    protostr = op_desc.serialize_to_string()
    proto = framework_pb2.OpDesc.FromString(str(protostr))
    return proto.__str__()


def _callback_lookup_(op):
    """
    Only used in _append_backward_ops_
    Build and return a callback function for a certain op. For example

    parallel_do:           AllReduce

    :param op:
    :return: callback function
    """
    if op.type == 'parallel_do' and op.attr('use_nccl'):
        all_vars = op.block.vars
        param_names = set(op.input('parameters'))
        param_names = filter(lambda name: all_vars[name].stop_gradient is False,
                             param_names)
        param_grad_names = [n + "@GRAD" for n in param_names]

        class ParallelDoCallBack(object):
            def __init__(self, param_grad_names, parallel_scopes_name):
                self.has_inserted_nccl_init = False
                self.param_grad_names = param_grad_names
                self.parallel_scopes_name = parallel_scopes_name

            def __call__(self, block, context):
                if not self.has_inserted_nccl_init:
                    op_desc = _create_op_desc_(
                        "ncclInit",
                        {"parallel_scopes": self.parallel_scopes_name},
                        {"Communicator": ['nccl_com__do_not_change_']}, {})
                    block.program.global_block().desc.append_op().copy_from(
                        op_desc)
                    self.has_inserted_nccl_init = True

                current_op_desc = context["__current_op_desc__"]
                for o_param in current_op_desc.output_names():
                    for o_argu in current_op_desc.output(o_param):
                        if o_argu in self.param_grad_names:
                            allreduce_out_name = o_argu + "__nccl_all_reduce__"
                            op_desc = _create_op_desc_(
                                "ncclReduce",
                                {
                                    "X": [o_argu],
                                    "Communicator":
                                    ['nccl_com__do_not_change_']
                                },
                                {"Out": [allreduce_out_name]},
                                {"reduction": "ncclSum",
                                 "root": 0}, )
                            block.desc.append_op().copy_from(op_desc)

                            op_desc = _create_op_desc_(
                                "assign", {"X": [allreduce_out_name]},
                                {"Out": [o_argu]}, {})
                            block.desc.append_op().copy_from(op_desc)

        return ParallelDoCallBack(param_grad_names,
                                  op.output("parallel_scopes"))
    else:
        return None


def _append_backward_ops_(block,
                          ops,
                          target_block,
                          no_grad_dict,
                          grad_to_var,
                          callbacks=None):
    """
    Create all grad ops, and insert them into given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose backward ops need to be added
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int): block index
            val(set): a set of variable names. These variables have no gradient
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
        callbacks(list[callable object]): callable objects used to decorate newly generated grad ops
    """
    if callbacks is not None:
        assert (isinstance(callbacks, list))
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")

    # grad_op_descs holds created grad_op, and will be appended to target_block
    grad_op_descs = []
    program = block.program
    for op in reversed(ops):
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
            sub_block = program.block(op.block_attr("sub_block"))
            grad_sub_block = program.create_block()
            grad_sub_block._set_forward_block_idx(sub_block.idx)
            cb = _callback_lookup_(op)
            if cb is not None:
                if callbacks is None:
                    new_callbacks = [cb]
                else:
                    new_callbacks = callbacks + [_callback_lookup_(op)]
                _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                      no_grad_dict, grad_to_var, new_callbacks)
            else:
                _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                      no_grad_dict, grad_to_var, callbacks)

            program.rollback()
            grad_sub_block_list.append(grad_sub_block.desc)

        # Getting op's corresponding grad_op
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            op.desc, no_grad_dict[block.idx], grad_sub_block_list)

        grad_op_descs.extend(grad_op_desc)
        grad_to_var.update(op_grad_to_var)

    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs)

    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])

    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for op_desc in grad_op_descs:
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
        new_op_desc.set_attr(op_role_attr_name, backward)
        grad_to_var["__current_op_desc__"] = new_op_desc
        if callbacks is not None:
            assert (isinstance(callbacks, list))
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)


def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
            val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable
    """
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
            sub_block = block.program.block(op_desc.block_attr("sub_block"))
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)
        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
            grad_var_name = grad_var_name.encode("ascii")
            if block.desc.has_var_recursive(
                    grad_var_name) or grad_var_name == core.empty_var_name():
                continue
            block.desc.var(grad_var_name)
            new_vars.add(grad_var_name)
            if grad_var_name not in grad_to_var:
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)
        # ncclInit doesn't need to set data_type
        if op_desc.type() == 'ncclInit':
            continue
        for arg in op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_(arg, block)


def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
                op_desc.rename_input(name, var_map[name])

        for name in op_desc.output_arg_names():
            if block.desc.find_var(name.encode("ascii")):
                new_name = unique_name.generate(name)
                op_desc.rename_output(name, new_name)
                var_map[name] = new_name

    for g, ng in var_map.iteritems():
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
        for var in block.vars.itervalues():
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict
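
# Illustrative sketch (comments only, not executed): for a single-block
# program whose variable "img" has stop_gradient=True, the result would be
#
#     _get_stop_gradients_(program) == {0: set(["img@GRAD"])}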


def append_backward(loss, parameter_list=None, no_grad_set=None,
                    callbacks=None):
    """
    Append backward part to main_program.

    A complete neural network training is made up of forward and backward 
    propagation. However, when we configure a network, we only need to 
    specify its forward part. The backward part is generated automatically
    according to the forward part by this function.

    In most cases, users do not need to invoke this function manually. It 
    will be automatically invoked by the optimizer's `minimize` function.

    Args:
        loss(Variable): The loss variable of the network.
        parameter_list(list[string]|None): Names of parameters that need 
                                           to be updated by optimizers. 
                                           If it is None, all parameters 
                                           will be updated.
                                           Default: None
        no_grad_set(set|None): Variables in the Block 0 whose gradients 
                               should be ignored. All variables with 
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               Default: None
        callbacks(list[callable object]|None): The callbacks are used for 
                                               doing some custom jobs during 
                                               backward part building. All 
                                               callable objects in it will 
                                               be invoked once each time a 
                                               new gradient operator is added 
                                               into the program. The callable 
                                               object must have two input
                                               parameters: 'block' and 'context'. 
                                               The 'block' is the block which 
                                               the new gradient operator will 
                                               be added to. The 'context' is a 
                                               map, whose keys are gradient 
                                               variable names and values are 
                                               corresponding original variables.
                                               In addition to this, the 'context' 
                                               has another special key-value pair: 
                                               the key is string '__current_op_desc__' 
                                               and the value is the op_desc of the 
                                               gradient operator who has just 
                                               triggered the callable object. 

    Returns:
        list[(Variable,Variable)]: Pairs of parameters and their
        corresponding gradients. In each pair, the first element is
        the parameter and the second is its gradient variable.

    Raises:
        AssertionError: If `loss` is not an instance of Variable.

    Examples:
        .. code-block:: python

            # network configuration code
            # ...
            avg_loss = fluid.layers.mean(loss)
            param_grad_list = fluid.backward.append_backward(loss=avg_loss)
    """
    assert isinstance(loss, framework.Variable)

    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
        for op in reversed(loss.block.ops):
            assert isinstance(op, framework.Operator)
            if len(op.output_arg_names) == 1 and op.output_arg_names[
                    0] == loss.name:
                loss.op = op
                break
        if loss.op is None:
            raise ValueError("loss.op is None. Should not happen")

    loss.op.set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
                     int(core.op_proto_and_checker_maker.OpRole.Forward) |
                     int(core.op_proto_and_checker_maker.OpRole.Loss))

    if callbacks is not None:
        assert isinstance(callbacks, list)

    program = loss.block.program
    if no_grad_set is None:
        no_grad_set = set()
    no_grad_set = copy.copy(no_grad_set)
    no_grad_dict = _get_stop_gradients_(program)
    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))

    grad_info_map = dict()
    root_block = program.block(0)

    fwd_op_num = root_block.desc.op_size()
    current_block_idx = program.current_block_idx
    grad_to_var = dict()

    op_desc = _create_op_desc_(
        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
            "shape": [1],
            "value": 1.0,
            "dtype": loss.dtype,
            "force_cpu": False,
            core.op_proto_and_checker_maker.kOpRoleAttrName():
            int(core.op_proto_and_checker_maker.OpRole.Backward) |
            int(core.op_proto_and_checker_maker.OpRole.Loss),
        })
    root_block.desc.append_op().copy_from(op_desc)

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set)
    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))

    _append_backward_ops_(root_block, op_path, root_block, no_grad_dict,
                          grad_to_var, callbacks)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(root_block, fwd_op_num, grad_to_var, {})

    _append_backward_vars_(root_block, fwd_op_num, grad_to_var, grad_info_map)

    program.current_block_idx = current_block_idx
    program._sync_with_cpp()
    # FIXME(zcd): prevent loss.grad optimized by mem_opt.
    loss.block.var(_append_grad_suffix_(loss.name)).persistable = True

    if parameter_list is not None:
        parameters = parameter_list
    else:
        params = program.global_block().all_parameters()
        parameters = [param.name for param in params]

    params_and_grads = []
    for param in parameters:
        if param not in grad_info_map:
            continue
        grad_info = grad_info_map[param]
        grad_block = grad_info[1]
        if not grad_block.has_var(grad_info[0]):
            raise ValueError("grad block[{0}] did not have grad var {1}".format(
                grad_info[1], grad_info[0]))
        # Get the param var from the global block
        param_var = program.global_block().var(param)
        grad_var = grad_block.var(grad_info[0])
        if loss.block.has_var(grad_info[0]):
            params_and_grads.append((param_var, grad_var))
        else:
            params_and_grads.append((param_var, None))

    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
    for p, g in params_and_grads:
        if g is None:
            continue
        for op in reversed(program.global_block().ops):
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
        attr_val = [p.name, g.name]
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
        g.op.set_attr(op_role_var_attr_name, attr_val)

    return params_and_grads


def _as_list(x):
    if x is None:
        return []
    return list(x) if isinstance(x, collections.Sequence) else [x]


def _find_op_path_(block, outputs, inputs, no_grad_set):
    """
    no_grad_set will also be changed
    """
    input_names = set([inp.name for inp in inputs])
    output_names = set([out.name for out in outputs])

    relevant_op_flags = [True] * len(block.ops)

    # All the inputs of the block are used if inputs is empty,
    if inputs:
        for i, op in enumerate(block.ops):
            if _some_in_set_(op.desc.input_arg_names(), input_names):
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
        if _some_in_set_(op.desc.output_arg_names(), output_names):
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
                if name not in input_names:
                    no_grad_set.add(name)

    return op_path
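
# Illustrative sketch (comments only, not executed): given a forward block
#
#     mul:             X = ["x"], Y = ["w"], Out = ["tmp"]
#     elementwise_add: X = ["tmp"], Y = ["b"], Out = ["loss"]
#     uniform_random:  Out = ["noise"]
#
# _find_op_path_(block, [loss], [], set()) keeps only the first two ops,
# because "noise" does not reach the requested output "loss".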


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the gradients of targets to inputs.

    Args:
        targets(Variable|list[Variable]): The target variables
        inputs(Variable|list[Variable]): The input variables
        target_gradients(Variable|list[Variable]|None): The gradient variables
            of targets, which must have the same shapes as targets. If None,
            ones will be created for them.
        no_grad_set(set[string]): The names of variables that have no gradients
            in Block 0. All variables with `stop_gradient=True` from all blocks
            will be automatically added.

    Return:
        (list[Variable]): list of gradients for inputs
        If an input does not affect targets, the corresponding gradient variable
        will be None
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets")

    if no_grad_set is None:
        no_grad_set = set()
    no_grad_set = copy.copy(no_grad_set)
    no_grad_dict = _get_stop_gradients_(prog)
    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))

    fwd_op_num = block.desc.op_size()

    target_grad_map = {}
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        if grad is None:
            grad_name = _append_grad_suffix_(target.name)
            op_desc = _create_op_desc_("fill_constant_batch_size_like",
                                       {"Input": [target.name]},
                                       {"Out": [grad_name]}, {
                                           "shape": target.shape,
                                           "value": 1.0,
                                           "dtype": target.dtype,
                                           'input_dim_idx': 0,
                                           'output_dim_idx': 0
                                       })
            block.desc.append_op().copy_from(op_desc)
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s" % (
                        target.name, grad.name))
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name

    for input in inputs:
        if input.block.program != prog:
            raise ValueError("input must be in the same program as targets")

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(block, targets, inputs, block_no_grad_set)
    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))
    grad_to_var = dict()
    grad_info_map = dict()
    _append_backward_ops_(block, op_path, block, no_grad_dict, grad_to_var)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map)

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
    prog._sync_with_cpp()

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars
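
# Illustrative usage sketch (comments only; the layer calls below are
# hypothetical and only show the intended call pattern):
#
#     x = fluid.layers.data(name='x', shape=[13], dtype='float32')
#     y = fluid.layers.fc(input=x, size=1)
#     loss = fluid.layers.mean(y)
#     # gradient of loss w.r.t. x; a single Variable is returned because
#     # only one input is given
#     dx = fluid.backward.calc_gradient(loss, x)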