#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid import framework as framework
from . import core
import collections
import copy
import unique_name

__all__ = [
    'append_backward',
    'calc_gradient',
]


def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):
    """
29
    Traverse all ops in op_descs[begin_idx : end_idx],
30 31
    if any op has inputs/outputs named "old_name", rename it as 'new_name'
    """
F
update  
fengjiayi 已提交
32 33 34
    if begin_idx is None:
        begin_idx = 0
    if end_idx is None:
35
        end_idx = len(op_descs)
F
update  
fengjiayi 已提交
36
    for i in range(begin_idx, end_idx):
37
        op_desc = op_descs[i]
F
fengjiayi 已提交
38 39 40 41
        if isinstance(op_desc, tuple):
            op_desc = op_desc[0]
        op_desc.rename_input(old_name, new_name)
        op_desc.rename_output(old_name, new_name)
F
update  
fengjiayi 已提交
42 43


def _create_op_desc_(op_type, inputs, outputs, attrs):
    """
    Create a C++ OpDesc object with specified inputs, outputs and attributes.
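    e.g. (an illustrative call, mirroring how this helper is used below):
        _create_op_desc_("sum", {"X": ["x@GRAD@RENAME@0", "x@GRAD@RENAME@1"]},
                         {"Out": ["x@GRAD"]}, {"use_mkldnn": False})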
    """
    op_desc = core.OpDesc()
    op_desc.set_type(op_type)
    for para, args in inputs.iteritems():
        op_desc.set_input(para, args)
    for para, args in outputs.iteritems():
        op_desc.set_output(para, args)

    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()

    if op_role_attr_name not in attrs:
        attrs[
            op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward
    for name, val in attrs.iteritems():
        if isinstance(val, framework.Block):
            op_desc.set_block_attr(name, val.desc)
        else:
            op_desc.set_attr(name, val)
    return op_desc


def _infer_var_data_type_(grad_var_name, block):
    """
    Infer the data type of the given grad variable
    """
    grad_var = block.desc.find_var(grad_var_name.encode("ascii"))
    fwd_name = _strip_grad_suffix_(grad_var_name.encode("ascii"))
    if block.desc.has_var_recursive(fwd_name):
        fwd_var = block.desc.find_var_recursive(fwd_name.encode("ascii"))
        grad_var.set_dtype(fwd_var.dtype())
    else:
        grad_var.set_dtype(core.VarDesc.VarType.FP32)


def _all_in_set_(cands, s):
    """
    Test if all elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c not in s:
            return False
    return True


def _some_in_set_(cands, s):
    """
    Test if some elements of 'cands' are in set 's'
    """
    if len(cands) == 0:
        return False
    for c in cands:
        if c in s:
            return True
    return False


def _strip_grad_suffix_(name):
    """
    Strip the grad suffix from the given variable name
    e.g. x@GRAD ==> x
         y@GRAD@RENAME@1 ==> y
    """
    pos = name.find(core.grad_var_suffix())
    return name[:pos] if pos != -1 else name


def _append_grad_suffix_(name):
    """
    Append grad suffix to the given variable name
    e.g. x ==> x@GRAD
    """
    return name + core.grad_var_suffix()


def _addup_repetitive_outputs_(op_descs):
    """
    In the backward pass, a variable may be the output of more than one op.
    In this case, the variable should be the accumulation of all those outputs.
    `sum_op`s are added to implement the accumulation.
    """
    pending_sum_ops = []
    var_rename_count = collections.defaultdict(int)
    renamed_vars = collections.defaultdict(list)
    for idx, op_desc in enumerate(op_descs):
        for var_name in op_desc.input_arg_names():
            if len(renamed_vars[var_name]) > 1:
                pending_sum_ops.append((_create_op_desc_(
                    "sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]},
                    {"use_mkldnn": False}), idx))
                renamed_vars[var_name] = [var_name]
        for var_name in op_desc.output_arg_names():
            if var_name == core.empty_var_name(
            ) or var_name in op_desc.input_arg_names():
                # empty variable or inplace op
                continue
            if len(renamed_vars[var_name]) == 0:
                # it's the first time we get the variable
                renamed_vars[var_name] = [var_name]
            else:
                if len(renamed_vars[var_name]) == 1:
                    new_name = var_name + "@RENAME@" + \
                               str(var_rename_count[var_name])
                    var_rename_count[var_name] += 1
                    # rename original var_name
                    renamed_vars[var_name][0] = new_name
                    _rename_arg_(op_descs, var_name, new_name, 0, idx)
                    _rename_arg_(pending_sum_ops, var_name, new_name)

                new_name = var_name + "@RENAME@" + \
                           str(var_rename_count[var_name])
                var_rename_count[var_name] += 1
                op_desc.rename_output(var_name, new_name)
                renamed_vars[var_name].append(new_name)
    for var_name, inputs in renamed_vars.iteritems():
        if len(inputs) > 1:
            pending_sum_ops.append(
                (_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]},
                                  {"use_mkldnn": False}), len(op_descs)))
    # sum_op descs are sorted according to their insert position
    for p in reversed(pending_sum_ops):
        op_descs.insert(p[1], p[0])

    return op_descs


def _remove_no_grad_branch_(op_descs, no_grad_set):
    """
    Remove unnecessary grad ops
    A grad op can be removed in two cases:
        1. all outputs of the grad op are in 'no_grad_set'
        2. all grad inputs of the grad op are in 'no_grad_set'
    """

    def _op_can_be_removed_(op_desc, no_grad_set):
        out_arg_names = op_desc.output_arg_names()
        if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set):
            return True
        if _all_in_set_(
                filter(lambda name: name.find(core.grad_var_suffix()) != -1,
                       op_desc.input_arg_names()), no_grad_set):
            no_grad_set.update(out_arg_names)
            return True
        return False

    # Remove ops whose outputs are all in no_grad_dict
    op_descs = filter(
        lambda op_desc: not _op_can_be_removed_(op_desc, no_grad_set), op_descs)
    # Insert fill_zeros_like_op
    to_insert = []
    for idx, op_desc in enumerate(op_descs):
        for arg in op_desc.input_arg_names():
            if core.grad_var_suffix() in arg and arg in no_grad_set:
                to_insert.append((_create_op_desc_("fill_zeros_like", {
                    "X": [_strip_grad_suffix_(arg)]
                }, {"Out": [arg]}, {}), idx))

    map(lambda p: op_descs.insert(p[1], p[0]), reversed(to_insert))

    return op_descs


import proto.framework_pb2 as framework_pb2


def serialize_op_decs(op_desc):
    protostr = op_desc.serialize_to_string()
    proto = framework_pb2.OpDesc.FromString(str(protostr))
    return proto.__str__()


def _callback_lookup_(op):
    """
    Only used in _append_backward_ops_
    Build and return a callback function for a certain op. For example

    parallel_do:           AllReduce

    :param op:
    :return: callback function
    """
    if op.type == 'parallel_do' and op.attr('use_nccl'):
        all_vars = op.block.vars
        param_names = set(op.input('parameters'))
        param_names = filter(lambda name: all_vars[name].stop_gradient is False,
                             param_names)
        param_grad_names = [n + "@GRAD" for n in param_names]

        class ParallelDoCallBack(object):
            def __init__(self, param_grad_names, parallel_scopes_name):
                self.has_inserted_nccl_init = False
                self.param_grad_names = param_grad_names
                self.parallel_scopes_name = parallel_scopes_name

            def __call__(self, block, context):
                if not self.has_inserted_nccl_init:
                    op_desc = _create_op_desc_(
                        "ncclInit",
                        {"parallel_scopes": self.parallel_scopes_name},
                        {"Communicator": ['nccl_com__do_not_change_']}, {})
                    block.program.global_block().desc.append_op().copy_from(
                        op_desc)
                    self.has_inserted_nccl_init = True

                current_op_desc = context["__current_op_desc__"]
                for o_param in current_op_desc.output_names():
                    for o_argu in current_op_desc.output(o_param):
                        if o_argu in self.param_grad_names:
                            allreduce_out_name = o_argu + "__nccl_all_reduce__"
                            op_desc = _create_op_desc_(
                                "ncclReduce",
                                {
                                    "X": [o_argu],
                                    "Communicator":
                                    ['nccl_com__do_not_change_']
                                },
                                {"Out": [allreduce_out_name]},
                                {"reduction": "ncclSum",
                                 "root": 0}, )
                            block.desc.append_op().copy_from(op_desc)

                            op_desc = _create_op_desc_(
                                "assign", {"X": [allreduce_out_name]},
                                {"Out": [o_argu]}, {})
                            block.desc.append_op().copy_from(op_desc)

        return ParallelDoCallBack(param_grad_names,
                                  op.output("parallel_scopes"))
    else:
        return None


def _append_backward_ops_(block,
                          ops,
                          target_block,
                          no_grad_dict,
                          grad_to_var,
                          callbacks=None):
    """
    Create all grad ops, and insert them into the given block

    Args:
        block(Block): the block where forward ops are
        ops(Op): the forward operators whose backward ops need to be added
        target_block(Block): the block which is going to hold new generated grad ops
        no_grad_dict(dict):
            key(int)  block index
            val(set) a set of variable names. These variables have no gradient
        grad_to_var(dict)(output argument):
            key(str): grad variable name
            val(str): corresponding forward variable name
        callbacks(list of callable objects): objects used to decorate new generated grad ops
    """
    if callbacks is not None:
        assert (isinstance(callbacks, list))
        for cb in callbacks:
            if not hasattr(cb, '__call__'):
                raise ValueError("'callback' must be a callable object.")

    # grad_op_descs holds created grad_op, and will be appended to target_block
    grad_op_descs = []
    program = block.program
    for op in reversed(ops):
        grad_sub_block_list = []
        # If the op has its own sub-block, deal with the sub-block first
        if op.has_attr("sub_block"):
            sub_block = program.block(op.block_attr("sub_block"))
            grad_sub_block = program.create_block()
            grad_sub_block.set_forward_block_idx(sub_block.idx)
            cb = _callback_lookup_(op)
            if cb is not None:
                if callbacks is None:
                    new_callbacks = [cb]
                else:
                    new_callbacks = callbacks + [_callback_lookup_(op)]
                _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                      no_grad_dict, grad_to_var, new_callbacks)
            else:
                _append_backward_ops_(sub_block, sub_block.ops, grad_sub_block,
                                      no_grad_dict, grad_to_var, callbacks)

            program.rollback()
            grad_sub_block_list.append(grad_sub_block.desc)

        # Getting op's corresponding grad_op
        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
            op.desc, no_grad_dict[block.idx], grad_sub_block_list)

        grad_op_descs.extend(grad_op_desc)
        grad_to_var.update(op_grad_to_var)

    grad_op_descs = _addup_repetitive_outputs_(grad_op_descs)

    grad_op_descs = _remove_no_grad_branch_(grad_op_descs,
                                            no_grad_dict[block.idx])

    # append op_desc in grad_op_descs to target_block
    op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
    backward = core.op_proto_and_checker_maker.OpRole.Backward
    for op_desc in grad_op_descs:
        new_op_desc = target_block.desc.append_op()
        new_op_desc.copy_from(op_desc)
        new_op_desc.set_attr(op_role_attr_name, backward)
        grad_to_var["__current_op_desc__"] = new_op_desc
        if callbacks is not None:
            assert (isinstance(callbacks, list))
            for cb in callbacks:
                cb(block=target_block, context=grad_to_var)


def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map):
    """
    Create new variables required by backward pass.

    Args:
        block(Block): the block where new variables will be created
        start_op_idx(int): Only variables required by ops in block.ops[start_op_idx : ] will be created
        grad_to_var(dict):
            key(str): grad variable name
            val(str): corresponding forward variable name
            In most cases, this dict is generated by _append_backward_ops_()
        grad_info_map(dict)(output argument):
            key(str): forward variable name
            val(tuple): a tuple of (str, Block), str is the corresponding grad name, Block is the block containing grad variable
    """
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        if op_desc.has_attr("sub_block"):
            sub_block = block.program.block(op_desc.block_attr("sub_block"))
            _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map)
        new_vars = set()
        # create new gradient variables
        for grad_var_name in op_desc.output_arg_names():
            grad_var_name = grad_var_name.encode("ascii")
            if block.desc.has_var_recursive(
                    grad_var_name) or grad_var_name == core.empty_var_name():
                continue
            block.desc.var(grad_var_name)
            new_vars.add(grad_var_name)
            if grad_var_name not in grad_to_var:
                continue
            grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block)
        # infer_shape and infer_type
        op_desc.infer_var_type(block.desc)
        op_desc.infer_shape(block.desc)
        # ncclInit doesn't need to set data_type
        if op_desc.type() == 'ncclInit':
            continue
        for arg in op_desc.output_arg_names():
            if arg in new_vars:
                _infer_var_data_type_(arg, block)


def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
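    """
    Rename gradient outputs of the ops in block.ops[start_op_idx:]:
    inputs listed in target_grad_map are redirected to the user-provided
    gradient variables, and outputs whose names already exist in the block
    are given fresh unique names so that repeated backward calls do not
    collide. grad_to_var is updated to track the renamed variables.
    """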
    var_map = copy.copy(target_grad_map)
    for op_idx in range(start_op_idx, block.desc.op_size()):
        op_desc = block.desc.op(op_idx)
        for name in op_desc.input_arg_names():
            if name in var_map:
                op_desc.rename_input(name, var_map[name])

        for name in op_desc.output_arg_names():
            if block.desc.find_var(name.encode("ascii")):
                new_name = unique_name.generate(name)
                op_desc.rename_output(name, new_name)
                var_map[name] = new_name

    for g, ng in var_map.iteritems():
        if g in grad_to_var:
            grad_to_var[ng] = grad_to_var[g]
            grad_to_var.pop(g)


def _get_stop_gradients_(program):
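    """
    Collect, for every block of the program, the gradient names
    (variable name + grad suffix) of all variables marked with
    stop_gradient=True, keyed by block index.
    """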
    no_grad_dict = dict()
    assert isinstance(program, framework.Program)
    for block in program.blocks:
        assert isinstance(block, framework.Block)
        block_no_grad_set = set()
        for var in block.vars.itervalues():
            assert isinstance(var, framework.Variable)
            if var.stop_gradient:
                block_no_grad_set.add(_append_grad_suffix_(var.name))
        no_grad_dict[block.idx] = block_no_grad_set
    return no_grad_dict


def append_backward(loss, parameter_list=None, no_grad_set=None,
                    callbacks=None):
    """
    Append backward part to main_program

    Args:
        loss(Variable): The variable generated by the cost function.
        parameter_list(list[string]): Parameters that need to be updated by
            optimizer. If None, it means all parameters need to be updated.
        no_grad_set(set): Variables that have no gradients in Block 0.
            All variables with `stop_gradient=True` from all blocks will be
            automatically added.
        callbacks(list of callable objects): callables used to decorate the
            newly generated grad ops. If None, no decoration is applied.

    Return:
        (list[(Variable,Variable)]): list of (parameter, gradient) pair.
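
    Examples:
        A minimal usage sketch (illustrative only, not part of the original
        file); it builds a small regression network with ``fluid.layers`` and
        then appends its backward pass:

        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            y_predict = fluid.layers.fc(input=x, size=1)
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_loss = fluid.layers.mean(cost)

            # Returns a list of (parameter, gradient) pairs for the optimizer.
            param_grads = fluid.backward.append_backward(loss=avg_loss)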
    """
    assert isinstance(loss, framework.Variable)

    if loss.op is None:
        # the loss is from a cloned program. Find loss op manually.
        for op in reversed(loss.block.ops):
            assert isinstance(op, framework.Operator)
            if len(op.output_arg_names) == 1 and op.output_arg_names[
                    0] == loss.name:
                loss.op = op
                break
        if loss.op is None:
            raise ValueError("loss.op is None. Should not happen")

    loss.op.set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(),
                     int(core.op_proto_and_checker_maker.OpRole.Forward) |
                     int(core.op_proto_and_checker_maker.OpRole.Loss))

    if callbacks is not None:
        assert isinstance(callbacks, list)

    program = loss.block.program
    if no_grad_set is None:
        no_grad_set = set()
    no_grad_set = copy.copy(no_grad_set)
    no_grad_dict = _get_stop_gradients_(program)
    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))

    grad_info_map = dict()
    root_block = program.block(0)

    fwd_op_num = root_block.desc.op_size()
    current_block_idx = program.current_block_idx
    grad_to_var = dict()

    op_desc = _create_op_desc_(
        "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, {
            "shape": [1],
            "value": 1.0,
            "dtype": loss.dtype,
            "force_cpu": False,
            core.op_proto_and_checker_maker.kOpRoleAttrName():
            int(core.op_proto_and_checker_maker.OpRole.Backward) |
            int(core.op_proto_and_checker_maker.OpRole.Loss),
        })
    root_block.desc.append_op().copy_from(op_desc)

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set)
    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))

    _append_backward_ops_(root_block, op_path, root_block, no_grad_dict,
                          grad_to_var, callbacks)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(root_block, fwd_op_num, grad_to_var, {})

    _append_backward_vars_(root_block, fwd_op_num, grad_to_var, grad_info_map)

    program.current_block_idx = current_block_idx
    program.sync_with_cpp()
    # FIXME(zcd): prevent loss.grad optimized by mem_opt.
    loss.block.var(_append_grad_suffix_(loss.name)).persistable = True

    if parameter_list is not None:
        parameters = parameter_list
    else:
        params = program.global_block().all_parameters()
        parameters = [param.name for param in params]

    params_and_grads = []
    for param in parameters:
        if param not in grad_info_map:
            continue
        grad_info = grad_info_map[param]
        grad_block = grad_info[1]
        if not grad_block.has_var(grad_info[0]):
            raise ValueError("grad block[{0}] did not have grad var {1}".format(
                grad_info[1], grad_info[0]))
        # Get the param var from the global block
        param_var = program.global_block().var(param)
        grad_var = grad_block.var(grad_info[0])
        if loss.block.has_var(grad_info[0]):
            params_and_grads.append((param_var, grad_var))
        else:
            params_and_grads.append((param_var, None))

    op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
    for p, g in params_and_grads:
        if g is None:
            continue
        for op in reversed(program.global_block().ops):
            assert isinstance(op, framework.Operator)
            if g.name in op.output_arg_names:
                g.op = op
                break

        if g.op is None:
            raise ValueError("Unexpected branch")
        attr_val = [p.name, g.name]
        if g.op.has_attr(op_role_var_attr_name):
            attr_val.extend(g.op.attr(op_role_var_attr_name))
        g.op.set_attr(op_role_var_attr_name, attr_val)

    return params_and_grads


def _as_list(x):
    if x is None:
        return []
    return list(x) if isinstance(x, collections.Sequence) else [x]


def _find_op_path_(block, outputs, inputs, no_grad_set):
    """
    no_grad_set will also be changed
    """
    input_names = set([inp.name for inp in inputs])
    output_names = set([out.name for out in outputs])

    relevant_op_flags = [True] * len(block.ops)

    # All the inputs of the block are used if inputs is empty,
    if inputs:
        for i, op in enumerate(block.ops):
            if _some_in_set_(op.desc.input_arg_names(), input_names):
                for name in op.desc.output_arg_names():
                    if name not in no_grad_set:
                        input_names.add(name)
            else:
                relevant_op_flags[i] = False

    for i, op in reversed(list(enumerate(block.ops))):
        if _some_in_set_(op.desc.output_arg_names(), output_names):
            for name in op.desc.input_arg_names():
                if name not in no_grad_set:
                    output_names.add(name)
        else:
            relevant_op_flags[i] = False

    op_path = [
        block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
    ]

    if inputs:
        for op in op_path:
            for name in op.desc.input_arg_names():
                if name not in input_names:
                    no_grad_set.add(name)

    return op_path


def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None):
    """
    Backpropagate the graidents of targets to inputs.

    Args:
        targets(Variable|list[Variable]): The target variables
        inputs(Variable|list[Variable]): The input variables
        no_grad_set(set[string]): The names of variables that have no gradients
            in Block 0. All variables with `stop_gradient=True` from all blocks
            will be automatically added.

    Return:
        (list[Variable]): list of gradients for inputs
        If an input does not affect targets, the corresponding gradient variable
        will be None
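
    Examples:
        A minimal usage sketch (illustrative only, not part of the original
        file); ``x`` and the small network stand for any variables built
        earlier in the default program:

        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.layers.data(name='x', shape=[4], dtype='float32')
            y = fluid.layers.fc(input=x, size=2)
            loss = fluid.layers.reduce_sum(y)

            # d(loss)/d(x); a single Variable is returned because only one
            # input is given.
            dx = fluid.backward.calc_gradient(loss, x)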
    """
    targets = _as_list(targets)
    inputs = _as_list(inputs)
    target_gradients = _as_list(target_gradients)

    block = targets[0].block
    prog = block.program
    block_idx = block.idx

    if not target_gradients:
        target_gradients = [None] * len(targets)

    if len(targets) != len(target_gradients):
        raise ValueError(
            "Should have the same number of target_gradients as targets")

    if no_grad_set is None:
        no_grad_set = set()
    no_grad_set = copy.copy(no_grad_set)
    no_grad_dict = _get_stop_gradients_(prog)
    no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set))

    fwd_op_num = block.desc.op_size()

    target_grad_map = {}
    for i, grad in enumerate(target_gradients):
        target = targets[i]
        if grad is None:
            grad_name = _append_grad_suffix_(target.name)
            op_desc = _create_op_desc_("fill_constant_batch_size_like",
                                       {"Input": [target.name]},
                                       {"Out": [grad_name]}, {
                                           "shape": target.shape,
                                           "value": 1.0,
                                           "dtype": target.dtype,
                                           'input_dim_idx': 0,
                                           'output_dim_idx': 0
                                       })
            block.desc.append_op().copy_from(op_desc)
        else:
            if target.block.idx != block_idx or target.block.program != prog:
                raise ValueError("all targets must be in the same block")
            if target.shape != grad.shape:
                raise ValueError(
                    "The shapes of target and grad are different: %s %s" % (
                        target.name, grad.name))
            target_grad_map[_append_grad_suffix_(target.name)] = grad.name

    for input in inputs:
        if input.block.program != prog:
            raise ValueError("input must be in the same program as targets")

    block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0]))
    op_path = _find_op_path_(block, targets, inputs, block_no_grad_set)
    no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set))
    grad_to_var = dict()
    grad_info_map = dict()
    _append_backward_ops_(block, op_path, block, no_grad_dict, grad_to_var)

    # Because calc_gradient may be called multiple times,
    # we need to rename the internal gradient variables so that they have
    # different names.
    _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map)

    _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map)
    prog.sync_with_cpp()

    grad_vars = []
    for input_var in inputs:
        if input_var.name not in grad_info_map:
            grad_vars.append(None)
        else:
            grad_info = grad_info_map[input_var.name]
            grad_block = grad_info[1]
            grad_var = grad_block.var(grad_info[0])
            grad_vars.append(grad_var)

    if len(grad_vars) == 1:
        return grad_vars[0]
    else:
        return grad_vars