diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 5f3111f363ccc14de4dd3f067097a19eabb83662..b07d0131a32c3f2744854a17b180ae714d532f80 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1522,13 +1522,17 @@ class Program(object):
             >>> with program.lr_schedule_guard():
             >>>     lr = lr * decay
         """
+
+        tmp_role = self._current_role
+        tmp_var = self._op_role_var
+
         OpRole = core.op_proto_and_checker_maker.OpRole
         self._current_role = OpRole.LRSched
         # TODO(typhoonzero): how to set target learning rate var
         self._op_role_var = []
         yield
-        self._op_role_var = []
-        self._current_role = OpRole.Forward
+        self._op_role_var = tmp_var
+        self._current_role = tmp_role
 
     def __str__(self):
         """
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 73e3a43af4b98ee8e83a208c397d59e9025b8420..fb6770f025a5ed51856d7cf5f2c24c8c3f682edb 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -15,7 +15,7 @@ from __future__ import print_function
 
 import re
 from collections import defaultdict
-from paddle.fluid.framework import Program, Variable, name_scope
+from paddle.fluid.framework import Program, Variable, name_scope, default_main_program
 from . import framework
 from . import layers
 from .backward import append_backward
@@ -111,7 +111,8 @@ class Optimizer(object):
         if param_lr == 1.0:
             return self._global_learning_rate()
         else:
-            return self._global_learning_rate() * param_lr
+            with default_main_program()._lr_schedule_guard():
+                return self._global_learning_rate() * param_lr
 
     def _create_accumulators(self, block, parameters):
         """Create all accumulators needed by the parameters
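
Note: the framework.py hunk changes _lr_schedule_guard from hard-resetting the op role to Forward on exit into a save/restore guard, and the optimizer.py hunk relies on that by wrapping the parameter-specific learning-rate scaling in the guard so the multiply op gets the LRSched role. The following is a minimal standalone sketch of that save/restore pattern, not the Paddle implementation; _RoleHolder and its role constants are hypothetical stand-ins for Program and OpRole.

    from contextlib import contextmanager


    class _RoleHolder(object):
        # Hypothetical stand-in for Program; the constants only loosely mirror OpRole.
        FORWARD = 0
        LR_SCHED = 2

        def __init__(self):
            self._current_role = self.FORWARD
            self._op_role_var = []

        @contextmanager
        def lr_schedule_guard(self):
            # Save whatever role/vars are currently active so nesting cannot clobber them.
            tmp_role = self._current_role
            tmp_var = self._op_role_var
            self._current_role = self.LR_SCHED
            self._op_role_var = []
            yield
            # Restore the saved state instead of hard-resetting to FORWARD / [].
            self._op_role_var = tmp_var
            self._current_role = tmp_role


    holder = _RoleHolder()
    with holder.lr_schedule_guard():
        assert holder._current_role == holder.LR_SCHED
        with holder.lr_schedule_guard():
            pass
        # With the old reset-to-Forward behaviour this assertion would fail.
        assert holder._current_role == holder.LR_SCHED
    assert holder._current_role == holder.FORWARD

The restore-on-exit behaviour is what makes the optimizer.py change safe: _create_param_lr may already be running while some other role is active, and the guard now hands that role back when the scaling op has been emitted.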