diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 905f499dfce96644765003abf449639f8ee9f445..05731baad24716325fa7b458a9c1959f4b292be0 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -7170,6 +7170,8 @@ class Parameter(Variable, metaclass=ParameterMetaClass):
         )
         self.trainable = kwargs.get('trainable', True)
 
+        self.stop_gradient = not self.trainable
+
         self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
 
         self.regularizer = kwargs.get('regularizer', None)
diff --git a/test/legacy_test/test_trainable.py b/test/legacy_test/test_trainable.py
index 5f6968af51096fa5f8780d35ea1b89052d622339..07501923885a99c92589db73345fc9453e1202fe 100644
--- a/test/legacy_test/test_trainable.py
+++ b/test/legacy_test/test_trainable.py
@@ -20,6 +20,8 @@ from simple_nets import init_data
 import paddle
 from paddle import fluid
 
+paddle.enable_static()
+
 
 def test_trainable():
     x = paddle.static.data(name='image', shape=[-1, 784], dtype='float32')
@@ -68,12 +70,12 @@ class TestTrainable(unittest.TestCase):
         self.check_trainable(
             test_trainable,
             feed_dict,
-            op_count={'adam': 1, 'scale': 0, 'mul_grad': 1},
+            op_count={'adam': 1, 'scale': 0, 'mul_grad': 0},
         )
         self.check_trainable(
             test_trainable,
             feed_dict,
-            op_count={'adamax': 1, 'scale': 1, 'mul_grad': 1},
+            op_count={'adamax': 1, 'scale': 1, 'mul_grad': 0},
             optimizer=paddle.optimizer.Adamax(learning_rate=0.2),
         )
 
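Below is a minimal sketch (not part of the patch) of the behavior the `framework.py` change enforces: a static-graph `Parameter` created with `trainable=False` should now also report `stop_gradient=True`, so the backward pass emits no `*_grad` ops for it — which is why the test's expected `mul_grad` count drops from 1 to 0. The layer and parameter names here (`frozen_w`) are hypothetical, chosen for illustration only.

```python
# Sketch only: assumes a Paddle build that includes the framework.py
# change above. The parameter name 'frozen_w' is hypothetical.
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32')
    # Mark the fc weight as non-trainable via its ParamAttr.
    w_attr = paddle.ParamAttr(name='frozen_w', trainable=False)
    y = paddle.static.nn.fc(x, size=2, weight_attr=w_attr)

    frozen_w = main_prog.global_block().var('frozen_w')
    # With stop_gradient now derived from trainable, the two flags agree,
    # and backward skips this parameter entirely.
    assert frozen_w.trainable is False
    assert frozen_w.stop_gradient is True
```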