From c505c4dbea0714f02d1c200735c5c9acece1b474 Mon Sep 17 00:00:00 2001
From: Zhou Wei <52485244+zhouwei25@users.noreply.github.com>
Date: Wed, 1 Jul 2020 11:03:53 +0800
Subject: [PATCH] add new API: optimizer.set_lr (#24455)

* add new api: optimizer.set_lr, test=develop

* add API doc and example code for optimizer.set_lr,test=develop

* add API doc and example code for optimizer.set_lr,test=develop

* Modified doc to :api_attr: imperative,test=develop
---
 python/paddle/fluid/optimizer.py                 | 80 ++++++++++++++++++-
 .../unittests/test_imperative_optimizer.py       | 40 ++++++++++
 2 files changed, 118 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 04271d715f7..28403284eb8 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -296,11 +296,87 @@ class Optimizer(object):
             dtype='float32' if self._dtype is None else self._dtype,
             persistable=True)
 
+    @framework.dygraph_only
+    def set_lr(self, value):
+        """
+        :api_attr: imperative
+
+        Manually set the learning rate of the optimizer. If the optimizer uses
+        LearningRateDecay, this API cannot be invoked, because the two would conflict.
+
+        Args:
+            value (float|Variable): the value of the learning rate.
+
+        Returns:
+            None
+
+        Examples:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+
+                with fluid.dygraph.guard():
+                    linear = fluid.dygraph.nn.Linear(10, 10)
+
+                    adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())
+
+                    # set the learning rate manually with a python float value
+                    lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
+                    for i in range(5):
+                        adam.set_lr(lr_list[i])
+                        lr = adam.current_step_lr()
+                        print("current lr is {}".format(lr))
+                    # Print:
+                    #    current lr is 0.2
+                    #    current lr is 0.3
+                    #    current lr is 0.4
+                    #    current lr is 0.5
+                    #    current lr is 0.6
+
+                    # set the learning rate manually with a framework Variable
+                    lr_var = fluid.layers.create_global_var(
+                        shape=[1], value=0.7, dtype='float32')
+                    adam.set_lr(lr_var)
+                    lr = adam.current_step_lr()
+                    print("current lr is {}".format(lr))
+                    # Print:
+                    #    current lr is 0.7
+
+        """
+        if not isinstance(value, (framework.Variable, float)):
+            raise TypeError(
+                "The type of 'value' in optimizer.set_lr must be (float, Variable), but received %s."
+                % (type(value)))
+        if isinstance(self._learning_rate, LearningRateDecay):
+            raise RuntimeError(
+                "optimizer's learning rate can't be a LearningRateDecay when invoking this API, because this would lead to a conflict."
+            )
+        if isinstance(value, float):
+            self._learning_rate = value
+            current_lr = self._global_learning_rate()
+            if current_lr is not None:
+                global_block = framework.default_main_program().global_block()
+                global_block.append_op(
+                    type='fill_constant',
+                    outputs={'Out': [current_lr]},
+                    attrs={
+                        'dtype': current_lr.dtype,
+                        'shape': list(current_lr.shape),
+                        'value': float(value)
+                    },
+                    stop_gradient=True)
+        else:
+            assert len(value.shape) == 1 and value.shape[
+                0] == 1, "optimizer's learning rate must be a 1-D Tensor with shape [1]"
+            self._learning_rate_map[framework.default_main_program()] = value
+
     @framework.dygraph_only
     def current_step_lr(self):
         """
-        .. note::
-          **This API is ONLY available in Dygraph mode**
+        :api_attr: imperative
 
         Get the learning rate of the current step. The return value is the same for every step
         when LearningRateDecay is not used; otherwise, the learning rate of the current step is returned.
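Beyond the docstring example above, the new API lends itself to hand-rolled schedules. The sketch below is not part of the patch; it assumes only the fluid dygraph APIs already exercised in this patch, and the base rate and warm-up length are illustrative values. It shows set_lr driving a simple linear warm-up, step by step, with a plain Python float.

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        linear = fluid.dygraph.nn.Linear(10, 10)
        adam = fluid.optimizer.Adam(0.001, parameter_list=linear.parameters())

        base_lr = 0.001      # illustrative base learning rate
        warmup_steps = 5     # illustrative warm-up length

        for step in range(10):
            # Ramp the rate up linearly for the first few steps, then
            # leave it at the base rate; set_lr takes a plain float here.
            if step < warmup_steps:
                adam.set_lr(base_lr * float(step + 1) / warmup_steps)

            x = fluid.dygraph.to_variable(
                np.random.uniform(-0.1, 0.1, [4, 10]).astype('float32'))
            loss = fluid.layers.reduce_mean(linear(x))
            loss.backward()
            adam.minimize(loss)
            adam.clear_gradients()
            print(step, adam.current_step_lr())

Because set_lr refuses to run on an optimizer built with LearningRateDecay, a schedule like this has to own the learning rate entirely rather than layer on top of a built-in decay.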
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
index 71d6c101d00..d3017c31c70 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
@@ -428,6 +428,46 @@ class TestOptimizerLearningRate(unittest.TestCase):
 
                 self.assertTrue(np.allclose(lr, ret[i], rtol=1e-06, atol=0.0))
 
+    def test_set_lr(self):
+        with fluid.dygraph.guard():
+            a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
+
+            linear = fluid.dygraph.nn.Linear(10, 10)
+
+            a = fluid.dygraph.to_variable(a)
+
+            b = linear(a)
+
+            loss = fluid.layers.reduce_mean(b)
+
+            adam = fluid.optimizer.Adam(
+                0.1, parameter_list=linear.parameters())
+
+            lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
+            for i in range(5):
+                adam.set_lr(lr_list[i])
+                adam.minimize(loss)
+                lr = adam.current_step_lr()
+                self.assertTrue(
+                    np.allclose(
+                        lr, lr_list[i], rtol=1e-06, atol=0.0))
+
+            lr_var = fluid.layers.create_global_var(
+                shape=[1], value=0.7, dtype='float32')
+            adam.set_lr(lr_var)
+            adam.minimize(loss)
+            lr = adam.current_step_lr()
+            self.assertTrue(np.allclose(lr, 0.7, rtol=1e-06, atol=0.0))
+
+            with self.assertRaises(RuntimeError):
+                adam = fluid.optimizer.Adam(
+                    fluid.dygraph.NaturalExpDecay(
+                        learning_rate=0.1,
+                        decay_steps=3,
+                        decay_rate=0.5,
+                        staircase=True),
+                    parameter_list=linear.parameters())
+                adam.set_lr(0.01)
+
 
 class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
     def get_optimizer_dygraph(self, parameter_list):
--
GitLab
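The unit test above covers the float path, the Variable path, and the RuntimeError raised for LearningRateDecay optimizers. One path it does not exercise is the type check itself; the short sanity check below is not part of the patch or its tests and only assumes the fluid dygraph APIs used above: passing anything other than a Python float or a framework Variable, such as an int, raises the TypeError defined in set_lr.

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        linear = fluid.dygraph.nn.Linear(10, 10)
        adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())
        try:
            # An int is neither a Python float nor a framework Variable,
            # so the isinstance check in set_lr rejects it.
            adam.set_lr(1)
        except TypeError as e:
            print("set_lr rejected the value as expected:", e)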