#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid import framework as framework
from . import core
import collections
import copy
import unique_name

__all__ = ['append_backward']


def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
    Traverse all ops in op_descs[begin_idx : end_idx],
    if any op has an input or output named "old_name", rename it to "new_name"
    """
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
        end_idx = len(op_descs)
    for i in range(begin_idx, end_idx):
        op_desc = op_descs[i]
        if isinstance(op_desc, tuple):
            op_desc = op_desc[0]
        op_desc.rename_input(old_name, new_name)
        op_desc.rename_output(old_name, new_name)


def _create_op_desc_(op_type, inputs, outputs, attrs):
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
    """
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
    for para, args in inputs.iteritems():
        op_desc.set_input(para, args)
    for para, args in outputs.iteritems():
        op_desc.set_output(para, args)

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
    for name, val in attrs.iteritems():
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
            op_desc.set_attr(name, val)
    return op_desc


def _infer_var_data_type_(grad_var_name, block):
    """
    Infer the data type of the given grad variable
    """
    grad_var = block.desc.find_var(grad_var_name.encode("ascii"))
    fwd_name = _strip_grad_suffix_(grad_var_name.encode("ascii"))
    if block.desc.has_var_recursive(fwd_name):
        fwd_var = block.desc.find_var_recursive(fwd_name.encode("ascii"))
        grad_var.set_dtype(fwd_var.dtype())
    else:
        grad_var.set_dtype(core.VarDesc.VarType.FP32)


def _all_in_set_(cands, s):
    """
    Test if all elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c not in s:
            return False
    return True


def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c in s:
            return True
    return False


def _strip_grad_suffix_(name):
    """
    Strip the grad suffix from the given variable name
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
    pos = name.find(core.grad_var_suffix())
    return name[:pos] if pos != -1 else name


def _append_grad_suffix_(name):
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
    return name + core.grad_var_suffix()


def _addup_repetitive_outputs_(op_descs):
    """
    In the backward pass, a variable may be the output of more than one op.
    In this case, the variable should be the accumulation of all those outputs.
    `sum_op`s are added to implement the accumulation.
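    For example (an illustrative sketch of the scheme implemented below): if
    two grad ops both write x@GRAD, their outputs are renamed to
    x@GRAD@RENAME@0 and x@GRAD@RENAME@1, and a sum_op computing x@GRAD from
    the renamed variables is inserted before the next op that reads x@GRAD.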
    """
    pending_sum_ops = []
    var_rename_count = collections.defaultdict(int)
    renamed_vars = collections.defaultdict(list)
    for idx, op_desc in enumerate(op_descs):
        for var_name in op_desc.input_arg_names():
            if len(renamed_vars[var_name]) > 1:
                pending_sum_ops.append((_create_op_desc_(
                    "sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]},
                    {"use_mkldnn": False}), idx))
                renamed_vars[var_name] = [var_name]
        for var_name in op_desc.output_arg_names():
            if var_name == core.empty_var_name(
            ) or var_name in op_desc.input_arg_names():
                # empty variable or inplace op
                continue
            if len(renamed_vars[var_name]) == 0:
                # it's the first time we get the variable
                renamed_vars[var_name] = [var_name]
            else:
                if len(renamed_vars[var_name]) == 1:
                    new_name = var_name + "@RENAME@" + \
                        str(var_rename_count[var_name])
                    var_rename_count[var_name] += 1
                    # rename original var_name
                    renamed_vars[var_name][0] = new_name
                    _rename_arg_(op_descs, var_name, new_name, 0, idx)
                    _rename_arg_(pending_sum_ops, var_name, new_name)

                new_name = var_name + "@RENAME@" + \
                    str(var_rename_count[var_name])
                var_rename_count[var_name] += 1
                op_desc.rename_output(var_name, new_name)
                renamed_vars[var_name].append(new_name)
    for var_name, inputs in renamed_vars.iteritems():
        if len(inputs) > 1:
            pending_sum_ops.append(
                (_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]},
                                  {"use_mkldnn": False}), len(op_descs)))
    # sum_op descs are sorted according to their insert position
    for p in reversed(pending_sum_ops):
        op_descs.insert(p[1], p[0])

    return op_descs


def _remove_no_grad_branch_(op_descs, no_grad_set):
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
        2. all grad inputs of the grad op are in 'no_grad_set'
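    For example (illustrative): if every gradient input of a grad op is in
    'no_grad_set', the op is removed and its outputs are added to
    'no_grad_set'; any remaining op that still reads such a removed gradient
    is fed zeros through an inserted fill_zeros_like op.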
    """

    def _op_can_be_removed_(op_desc, no_grad_set):
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
            return True
        if _all_in_set_(
                filter(lambda name: name.find(core.grad_var_suffix()) != -1,
                       op_desc.input_arg_names()), no_grad_set):
            no_grad_set.update(out_arg_names)
            return True
        return False

    # Remove ops whose outputs are all in no_grad_set
    op_descs = filter(
        lambda op_desc: not _op_can_be_removed_(op_desc, no_grad_set), op_descs)
    # Insert fill_zeros_like_op
    to_insert = []
    for idx, op_desc in enumerate(op_descs):
        for arg in op_desc.input_arg_names():
            if core.grad_var_suffix() in arg and arg in no_grad_set:
                to_insert.append((_create_op_desc_("fill_zeros_like", {
                    "X": [_strip_grad_suffix_(arg)]
                }, {"Out": [arg]}, {}), idx))

    map(lambda p: op_descs.insert(p[1], p[0]), reversed(to_insert))

    return op_descs


import proto.framework_pb2 as framework_pb2


def serialize_op_decs(op_desc):
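    """
    Serialize an OpDesc into a human-readable protobuf text string
    (a small debugging helper).
    """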
    protostr = op_desc.serialize_to_string()
    proto = framework_pb2.OpDesc.FromString(str(protostr))
    return proto.__str__()


def _callback_lookup_(op):
    """
    Only used in _append_backward_ops_
    Build and return a callback function for a certain op. For example

    parallel_do:           AllReduce

    :param op:
    :return: callback function
    """
    if op.type == 'parallel_do' and op.attr('use_nccl'):
        all_vars = op.block.vars
        param_names = set(op.input('parameters'))
        param_names = filter(lambda name: all_vars[name].stop_gradient is False,
                             param_names)
        param_grad_names = [n + "@GRAD" for n in param_names]

        class ParallelDoCallBack(object):
            def __init__(self, param_grad_names, parallel_scopes_name):
                self.has_inserted_nccl_init = False
                self.param_grad_names = param_grad_names
                self.parallel_scopes_name = parallel_scopes_name

            def __call__(self, block, context):
                if not self.has_inserted_nccl_init:
                    op_desc = _create_op_desc_(
                        "ncclInit",
                        {"parallel_scopes": self.parallel_scopes_name},
                        {"Communicator": ['nccl_com__do_not_change_']}, {})
                    block.program.global_block().desc.append_op().copy_from(
                        op_desc)
                    self.has_inserted_nccl_init = True

                current_op_desc = context["__current_op_desc__"]
                for o_param in current_op_desc.output_names():
                    for o_argu in current_op_desc.output(o_param):
                        if o_argu in self.param_grad_names:
                            allreduce_out_name = o_argu + "__nccl_all_reduce__"
                            op_desc = _create_op_desc_(
                                "ncclReduce",
                                {
                                    "X": [o_argu],
                                    "Communicator":
                                    ['nccl_com__do_not_change_']
                                },
                                {"Out": [allreduce_out_name]},
                                {"reduction": "ncclSum",
                                 "root": 0}, )
                            block.desc.append_op().copy_from(op_desc)

                            op_desc = _create_op_desc_(
                                "assign", {"X": [allreduce_out_name]},
                                {"Out": [o_argu]}, {})
                            block.desc.append_op().copy_from(op_desc)

        return ParallelDoCallBack(param_grad_names,
                                  op.output("parallel_scopes"))
    else:
        return None


def _append_backward_ops_(block,
                          ops,
                          target_block,
                          no_grad_dict,
                          grad_to_var,
                          callbacks=None):
    """
    Create all grad ops, and insert them into the given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose backward ops need to be added
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int): block index
            val(set): a set of variable names. These variables have no gradient
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
        callbacks(list[callable object]): callable objects used to decorate newly generated grad ops
    """
    if callbacks is not None:
        assert (isinstance(callbacks, list))
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")

    # grad_op_descs holds the created grad ops, and will be appended to target_block
    grad_op_descs = []
    program = block.program
    for op in reversed(ops):
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
            sub_block = program.block(op.block_attr("sub_block"))
            grad_sub_block = program.create_block()
            grad_sub_block.set_forward_block_idx(sub_block.idx)
            cb = _callback_lookup_(op)
            if cb is not None:
                if callbacks is None:
                    new_callbacks = [cb]
                else:
                    new_callbacks = callbacks + [_callback_lookup_(op)]
                _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                      no_grad_dict, grad_to_var, new_callbacks)
            else:
                _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                      no_grad_dict, grad_to_var, callbacks)

            program.rollback()
            grad_sub_block_list.append(grad_sub_block.desc)

        # Getting op's corresponding grad_op
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            op.desc, no_grad_dict[block.idx], grad_sub_block_list)

        grad_op_descs.extend(grad_op_desc)
        grad_to_var.update(op_grad_to_var)

    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs)

    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])

    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for op_desc in grad_op_descs:
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
        new_op_desc.set_attr(op_role_attr_name, backward)
        grad_to_var["__current_op_desc__"] = new_op_desc
        if callbacks is not None:
            assert (isinstance(callbacks, list))
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)


def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
            val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable
    """
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
            sub_block = block.program.block(op_desc.block_attr("sub_block"))
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)
        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
            grad_var_name = grad_var_name.encode("ascii")
            if block.desc.has_var_recursive(
                    grad_var_name) or grad_var_name == core.empty_var_name():
                continue
            block.desc.var(grad_var_name)
            new_vars.add(grad_var_name)
            if not grad_to_var.has_key(grad_var_name):
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)
        # ncclInit doesn't need to set data_type
        if op_desc.type() == 'ncclInit':
            continue
        for arg in op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_(arg, block)


def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
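    """
    Rename the gradient variables produced by block.ops[start_op_idx:] so that
    names do not clash when backward is built more than once (e.g. repeated
    calls to calc_gradient). 'target_grad_map' maps a target's default grad
    name to the name of a user-provided gradient variable.
    """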
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
                op_desc.rename_input(name, var_map[name])

        for name in op_desc.output_arg_names():
            if block.desc.find_var(name.encode("ascii")):
                new_name = unique_name.generate(name)
                op_desc.rename_output(name, new_name)
                var_map[name] = new_name

    for g, ng in var_map.iteritems():
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
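    """
    Collect, for every block in the program, the grad-suffixed names of all
    variables whose stop_gradient flag is True.
    """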
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
        for var in block.vars.itervalues():
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict


def append_backward(loss, parameter_list=None, no_grad_set=None,
                    callbacks=None):
    """
    Append backward part to main_program.

    A complete neural network training process is made up of forward and
    backward propagation. However, when we configure a network, we only
    need to specify its forward part. The backward part is generated
    automatically according to the forward part by this function.

    In most cases, users do not need to invoke this function manually. It 
    will be automatically invoked by the optimizer's `minimize` function.

    Args:
        loss(Variable): The loss variable of the network.
        parameter_list(list[string]|None): Names of parameters that need 
                                           to be updated by optimizers. 
                                           If it is None, all parameters 
                                           will be updated.
                                           Default: None
        no_grad_set(set|None): Variables in Block 0 whose gradients
                               should be ignored. All variables with
                               `stop_gradient=True` from all blocks will
                               be automatically added into this set.
                               Default: None
        callbacks(list[callable object]|None): The callbacks are used for 
                                               doing some custom jobs during 
                                               backward part building. All 
                                               callable objects in it will 
                                               be invoked once each time a 
                                               new gradient operator is added 
                                               into the program. The callable 
                                               object must have two input
                                               parameters: 'block' and 'context'. 
                                               The 'block' is the block which 
                                               the new gradient operator will 
                                               be added to. The 'context' is a 
                                               map, whose keys are gradient 
                                               variable names and values are 
                                               corresponding original variables.
                                               In addition to this, the 'context' 
                                               has another special key-value pair: 
                                               the key is string '__current_op_desc__' 
                                               and the value is the op_desc of the 
                                               gradient operator who has just 
                                               triggered the callable object. 

    Returns:
        list[(Variable,Variable)]: Pairs of parameters and their
        corresponding gradient variables. In each pair, the first element
        is the parameter and the second is its gradient variable.

    Raises:
        AssertionError: If `loss` is not an instance of Variable.

    Examples:
        .. code-block:: python

            # network configuration code
            # ...
            avg_loss = fluid.layers.mean(loss)
            param_grad_list = fluid.backward.append_backward(loss=avg_loss)
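
            # A callback sketch (illustrative, not from the original example):
            # each callback is invoked with the block that just received a new
            # gradient op and a context dict whose '__current_op_desc__' entry
            # is the op_desc of that gradient op.
            def show_grad_op(block, context):
                print(context["__current_op_desc__"].type())

            param_grad_list = fluid.backward.append_backward(
                loss=avg_loss, callbacks=[show_grad_op])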
    """
    assert isinstance(loss, framework.Variable)

    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
        for op in reversed(loss.block.ops):
            assert isinstance(op, framework.Operator)
            if len(op.output_arg_names) == 1 and op.output_arg_names[
                    0] == loss.name:
                loss.op = op
                break
        if loss.op is None:
            raise ValueError("loss.op is None. Should not happen")

    loss.op.set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
                     int(core.op_proto_and_checker_maker.OpRole.Forward) |
                     int(core.op_proto_and_checker_maker.OpRole.Loss))

    if callbacks is not None:
        assert isinstance(callbacks, list)

    program = loss.block.program
    if no_grad_set is None:
        no_grad_set = set()
    no_grad_set = copy.copy(no_grad_set)
    no_grad_dict = _get_stop_gradients_(program)
    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))

    grad_info_map = dict()
    root_block = program.block(0)

    fwd_op_num = root_block.desc.op_size()
    current_block_idx = program.current_block_idx
    grad_to_var = dict()

    op_desc = _create_op_desc_(
        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
            "shape": [1],
            "value": 1.0,
            "dtype": loss.dtype,
            "force_cpu": False,
            core.op_proto_and_checker_maker.kOpRoleAttrName():
            int(core.op_proto_and_checker_maker.OpRole.Backward) |
            int(core.op_proto_and_checker_maker.OpRole.Loss),
        })
    root_block.desc.append_op().copy_from(op_desc)

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set)
    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))

    _append_backward_ops_(root_block, op_path, root_block, no_grad_dict,
                          grad_to_var, callbacks)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(root_block, fwd_op_num, grad_to_var, {})

    _append_backward_vars_(root_block, fwd_op_num, grad_to_var, grad_info_map)

    program.current_block_idx = current_block_idx
    program.sync_with_cpp()
    # FIXME(zcd): prevent loss.grad from being optimized by mem_opt.
    loss.block.var(_append_grad_suffix_(loss.name)).persistable = True

    if parameter_list is not None:
        parameters = parameter_list
    else:
        params = program.global_block().all_parameters()
        parameters = [param.name for param in params]

    params_and_grads = []
    for param in parameters:
        if param not in grad_info_map:
            continue
        grad_info = grad_info_map[param]
        grad_block = grad_info[1]
        if not grad_block.has_var(grad_info[0]):
            raise ValueError("grad block[{0}] did not have grad var {1}".format(
                grad_info[1], grad_info[0]))
        # Get the param var from the global block
        param_var = program.global_block().var(param)
        grad_var = grad_block.var(grad_info[0])
        if loss.block.has_var(grad_info[0]):
            params_and_grads.append((param_var, grad_var))
        else:
            params_and_grads.append((param_var, None))

    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
    for p, g in params_and_grads:
        if g is None:
            continue
        for op in reversed(program.global_block().ops):
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
        attr_val = [p.name, g.name]
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
        g.op.set_attr(op_role_var_attr_name, attr_val)

    return params_and_grads


def _as_list(x):
    if x is None:
        return []
    return list(x) if isinstance(x, collections.Sequence) else [x]


def _find_op_path_(block, outputs, inputs, no_grad_set):
    """
    Find the operators in 'block' that are needed to compute 'outputs' from
    'inputs'. Note that 'no_grad_set' will also be updated in place.
    """
    input_names = set([inp.name for inp in inputs])
    output_names = set([out.name for out in outputs])

    relevant_op_flags = [True] * len(block.ops)

    # If inputs is empty, all the inputs of the block are considered used.
    if inputs:
        for i, op in enumerate(block.ops):
            if _some_in_set_(op.desc.input_arg_names(), input_names):
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
        if _some_in_set_(op.desc.output_arg_names(), output_names):
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
                if name not in input_names:
                    no_grad_set.add(name)

    return op_path


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the gradients of targets to inputs.

    Args:
        targets(Variable|list[Variable]): The target variables
        inputs(Variable|list[Variable]): The input variables
        target_gradients(Variable|list[Variable]|None): The gradient variables
            of targets, which must have the same length and shapes as targets.
            If None, ones will be used as the initial gradients. Default: None
        no_grad_set(set[string]): The names of variables that have no gradients
            in Block 0. All variables with `stop_gradient=True` from all blocks
            will be automatically added.

    Return:
        (list[Variable]): list of gradients for inputs
        If an input does not affect targets, the corresponding gradient variable
        will be None
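
    Examples:
        .. code-block:: python

            # A minimal usage sketch; the layer calls and variable names here
            # are illustrative assumptions, not part of this module.
            import paddle.fluid as fluid

            x = fluid.layers.data(
                name='x', shape=[2], dtype='float32', stop_gradient=False)
            y = fluid.layers.fc(input=x, size=1)
            loss = fluid.layers.mean(y)
            x_grad = fluid.backward.calc_gradient(targets=loss, inputs=x)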
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets")

    if no_grad_set is None:
        no_grad_set = set()
    no_grad_set = copy.copy(no_grad_set)
    no_grad_dict = _get_stop_gradients_(prog)
    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))

    fwd_op_num = block.desc.op_size()

    target_grad_map = {}
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        if grad is None:
            grad_name = _append_grad_suffix_(target.name)
            op_desc = _create_op_desc_("fill_constant_batch_size_like",
                                       {"Input": [target.name]},
                                       {"Out": [grad_name]}, {
                                           "shape": target.shape,
                                           "value": 1.0,
                                           "dtype": target.dtype,
                                           'input_dim_idx': 0,
                                           'output_dim_idx': 0
                                       })
            block.desc.append_op().copy_from(op_desc)
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s" % (
                        target.name, grad.name))
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name

    for input in inputs:
        if input.block.program != prog:
            raise ValueError("input must be in the same program as targets")

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(block, targets, inputs, block_no_grad_set)
    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))
    grad_to_var = dict()
    grad_info_map = dict()
    _append_backward_ops_(block, op_path, block, no_grad_dict, grad_to_var)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map)

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
    prog.sync_with_cpp()

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars