diff --git a/python/paddle/v2/framework/backward.py b/python/paddle/v2/framework/backward.py
index 6827792cb351243f926aeca5f37324dc987d6a79..678efd5d20585355a684bb2df16fdb57a69e0eeb 100644
--- a/python/paddle/v2/framework/backward.py
+++ b/python/paddle/v2/framework/backward.py
@@ -19,8 +19,20 @@ def append_backward_ops(loss, parameter_list=None, no_grad_set=None):
     :rtype: list[Variable]
     """
     assert isinstance(loss, framework.Variable)
-    param_grad_map = loss.block.program.append_backward(loss, no_grad_set or
-                                                        set())
+
+    if no_grad_set is None:
+        program = loss.block.program
+        assert isinstance(program, framework.Program)
+        no_grad_set = list()
+        for block in program.blocks:
+            assert isinstance(block, framework.Block)
+            for var in block.vars.itervalues():
+                assert isinstance(var, framework.Variable)
+                if var.stop_gradient:
+                    no_grad_set.append(var.name)
+        no_grad_set = set(no_grad_set)
+
+    param_grad_map = loss.block.program.append_backward(loss, no_grad_set)
     if parameter_list is not None:
         parameters = parameter_list
     else:
diff --git a/python/paddle/v2/framework/framework.py b/python/paddle/v2/framework/framework.py
index a26d8b517db0f23c3189604c65a74f602b87e542..dd23c47961b6b27ffca32dc7fe496d715411c858 100644
--- a/python/paddle/v2/framework/framework.py
+++ b/python/paddle/v2/framework/framework.py
@@ -21,6 +21,7 @@ class Variable(object):
                  dtype=None,
                  lod_level=None,
                  persistable=None,
+                 stop_gradient=False,
                  **kwargs):
         self.block = block
 
@@ -89,6 +90,7 @@ class Variable(object):
         self.block.vars[name] = self
 
         self.op = None
+        self.stop_gradient = stop_gradient
 
     def __str__(self):
         protostr = self.desc.serialize_to_string()
diff --git a/python/paddle/v2/framework/layers.py b/python/paddle/v2/framework/layers.py
index 967a85f1a5dccc10b265fd96050dd70bd9653b62..0739b2d2e2964f7f3565baf068a80c89938d193a 100644
--- a/python/paddle/v2/framework/layers.py
+++ b/python/paddle/v2/framework/layers.py
@@ -99,7 +99,7 @@ def data(name,
         shape = [-1] + shape  # append batch size as -1
 
     return helper.create_global_variable(
-        name=name, shape=shape, dtype=data_type, type=type)
+        name=name, shape=shape, dtype=data_type, type=type, stop_gradient=True)
 
 
 def _convert_(name):
diff --git a/python/paddle/v2/framework/tests/test_recurrent_op.py b/python/paddle/v2/framework/tests/test_recurrent_op.py
index d2c43168aa309a6e1bb8c1ae93d159620e5a270f..001de349d1f62e661335e63f7519c59697a1030c 100644
--- a/python/paddle/v2/framework/tests/test_recurrent_op.py
+++ b/python/paddle/v2/framework/tests/test_recurrent_op.py
@@ -125,11 +125,13 @@ class RecurrentOpTest1(unittest.TestCase):
             name='x',
             append_batch_size=False,
             **self.p_info)
+        x.stop_gradient = False
         h_boot = data(
             shape=[self.input_dim],
             data_type='float32',
             name='h_boot',
             **self.p_info)
+        h_boot.stop_gradient = False
 
         rnn = StaticRNN(main_program=self.main_program)
         with rnn.step():
@@ -256,11 +258,13 @@ class RecurrentOpTest2(RecurrentOpTest1):
             name='x',
             append_batch_size=False,
             **self.p_info)
+        x.stop_gradient = False
         h_boot = data(
             shape=[self.input_dim],
             data_type='float32',
             name='h_boot',
             **self.p_info)
+        h_boot.stop_gradient = False
 
         rnn = StaticRNN(main_program=self.main_program)
         with rnn.step():
@@ -353,18 +357,21 @@ class RecurrentOpTest3(RecurrentOpTest1):
             name='x',
             append_batch_size=False,
             **self.p_info)
+        x.stop_gradient = False
         h_boot1 = data(
             shape=[self.batch_size, self.input_dim],
             data_type='float32',
             name='h_boot1',
             append_batch_size=False,
             **self.p_info)
+        h_boot1.stop_gradient = False
         h_boot2 = data(
             shape=[self.batch_size, self.input_dim],
             data_type='float32',
             name='h_boot2',
             append_batch_size=False,
             **self.p_info)
+        h_boot2.stop_gradient = False
 
         rnn = StaticRNN(main_program=self.main_program)
         with rnn.step():
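
For reviewers, a minimal usage sketch of the flag this patch introduces. The script below is hypothetical and not part of the diff; it assumes the fc and mean layers that live alongside data() in paddle.v2.framework.layers. data() now marks its output variable with stop_gradient=True, so append_backward_ops excludes it when no no_grad_set is passed, and a caller that does want input gradients (as the recurrent-op tests above do) resets the flag by hand before building the backward pass.

# Hypothetical sketch, assuming the fc and mean layers from
# paddle.v2.framework.layers; only data() and append_backward_ops
# are shown in this diff.
import paddle.v2.framework.layers as layers
from paddle.v2.framework.backward import append_backward_ops

x = layers.data(name='x', shape=[13], data_type='float32')
# data() now creates x with stop_gradient=True, so by default x is
# collected into the implicit no_grad_set and gets no gradient ops.
# A caller that wants d(cost)/d(x) opts back in, as the recurrent-op
# tests do for their inputs:
x.stop_gradient = False

y = layers.fc(input=x, size=1)
cost = layers.mean(x=y)

# With no_grad_set=None, append_backward_ops now scans every block for
# variables whose stop_gradient flag is set and excludes them.
param_grads = append_backward_ops(cost)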