Unverified · Commit c505c4db authored by Zhou Wei, committed by GitHub

add new API: optimizer.set_lr (#24455)

* add new api: optimizer.set_lr, test=develop

* add API doc and example code for optimizer.set_lr,test=develop

* add API doc and example code for optimizer.set_lr,test=develop

* Modified doc to :api_attr: imperative,test=develop
Parent 6bf7df47
@@ -296,11 +296,87 @@ class Optimizer(object):
                dtype='float32' if self._dtype is None else self._dtype,
                persistable=True)
    @framework.dygraph_only
    def set_lr(self, value):
        """
        :api_attr: imperative

        Set the value of the learning rate manually in the optimizer. If the optimizer uses
        LearningRateDecay, this API cannot be invoked, because it would lead to a conflict.

        Args:
            value (float|Variable): the value of the learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                with fluid.dygraph.guard():
                    linear = fluid.dygraph.nn.Linear(10, 10)
                    adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

                    # set the learning rate manually by a python float value
                    lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                    for i in range(5):
                        adam.set_lr(lr_list[i])
                        lr = adam.current_step_lr()
                        print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.2
                    #    current lr is 0.3
                    #    current lr is 0.4
                    #    current lr is 0.5
                    #    current lr is 0.6

                    # set the learning rate manually by a framework Variable
                    lr_var = fluid.layers.create_global_var(
                        shape=[1], value=0.7, dtype='float32')
                    adam.set_lr(lr_var)
                    lr = adam.current_step_lr()
                    print("current lr is {}".format(lr))
                    # Print:
                    #    current lr is 0.7
        """
        if not isinstance(value, (framework.Variable, float)):
            raise TypeError(
                "The type of 'value' in optimizer.set_lr must be float or Variable, but received %s."
                % (type(value)))
        if isinstance(self._learning_rate, LearningRateDecay):
            raise RuntimeError(
                "optimizer's learning rate can't be LearningRateDecay when invoking this API, "
                "because it would lead to a conflict.")
        if isinstance(value, float):
            self._learning_rate = value
            current_lr = self._global_learning_rate()
            if current_lr is not None:
                # Overwrite the value of the existing global learning rate
                # variable in place with a fill_constant op.
                global_block = framework.default_main_program().global_block()
                global_block.append_op(
                    type='fill_constant',
                    outputs={'Out': [current_lr]},
                    attrs={
                        'dtype': current_lr.dtype,
                        'shape': list(current_lr.shape),
                        'value': float(value)
                    },
                    stop_gradient=True)
        else:
            assert len(value.shape) == 1 and value.shape[
                0] == 1, "optimizer's learning rate must be a 1-D Tensor with shape [1]"
            self._learning_rate_map[framework.default_main_program()] = value
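
Since set_lr accepts a plain Python float in dygraph mode, it can drive any hand-rolled schedule from the training loop. The following is a minimal, hypothetical sketch (not part of the commit) of a step-decay loop built on the APIs shown in this diff; the layer, epoch count, and decay factor are illustrative only.

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(
            np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32"))
        linear = fluid.dygraph.nn.Linear(10, 10)
        adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

        base_lr, decay_every, decay_factor = 0.1, 3, 0.5  # illustrative values
        for epoch in range(9):
            # Halve the learning rate every `decay_every` epochs via set_lr.
            adam.set_lr(base_lr * decay_factor**(epoch // decay_every))
            loss = fluid.layers.reduce_mean(linear(x))
            loss.backward()
            adam.minimize(loss)
            linear.clear_gradients()
            print("epoch {}: lr = {}".format(epoch, adam.current_step_lr()))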
    @framework.dygraph_only
    def current_step_lr(self):
        """
        :api_attr: imperative

        Get the current step learning rate. The return value is the same for every step when LearningRateDecay
        is not used; otherwise, return the learning rate of the current step.
......
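
The Variable branch of set_lr only installs the given tensor as the program's learning rate after checking that it is 1-D with a single element. Below is a minimal sketch of both sides of that check, assuming the same dygraph setup as the docstring example; the [2]-shaped variable exists only to trip the assertion.

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        linear = fluid.dygraph.nn.Linear(10, 10)
        adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

        # Accepted: a 1-D Variable with a single element.
        ok_lr = fluid.layers.create_global_var(
            shape=[1], value=0.05, dtype='float32')
        adam.set_lr(ok_lr)

        # Rejected: any other shape trips the shape assertion inside set_lr.
        bad_lr = fluid.layers.create_global_var(
            shape=[2], value=0.05, dtype='float32')
        try:
            adam.set_lr(bad_lr)
        except AssertionError as e:
            print("rejected:", e)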
@@ -428,6 +428,46 @@ class TestOptimizerLearningRate(unittest.TestCase):
                self.assertTrue(np.allclose(lr, ret[i], rtol=1e-06, atol=0.0))
    def test_set_lr(self):
        with fluid.dygraph.guard():
            a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")

            linear = fluid.dygraph.nn.Linear(10, 10)

            a = fluid.dygraph.to_variable(a)

            b = linear(a)

            loss = fluid.layers.reduce_mean(b)

            adam = fluid.optimizer.Adam(0.1, parameter_list=linear.parameters())

            lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
            for i in range(5):
                adam.set_lr(lr_list[i])
                adam.minimize(loss)
                lr = adam.current_step_lr()
                self.assertTrue(
                    np.allclose(
                        lr, lr_list[i], rtol=1e-06, atol=0.0))

            lr_var = fluid.layers.create_global_var(
                shape=[1], value=0.7, dtype='float32')
            adam.set_lr(lr_var)
            adam.minimize(loss)
            lr = adam.current_step_lr()
            self.assertTrue(np.allclose(lr, 0.7, rtol=1e-06, atol=0.0))

            with self.assertRaises(RuntimeError):
                adam = fluid.optimizer.Adam(
                    fluid.dygraph.NaturalExpDecay(
                        learning_rate=0.1,
                        decay_steps=3,
                        decay_rate=0.5,
                        staircase=True),
                    parameter_list=linear.parameters())
                adam.set_lr(0.01)
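
The test above covers the float path, the Variable path, and the RuntimeError raised when a LearningRateDecay schedule is present. A hypothetical companion case (not part of the commit) that would exercise the remaining TypeError guard could look like this, assuming the same test class and imports:

    def test_set_lr_type_error(self):
        # Hypothetical companion test: set_lr rejects values that are neither
        # float nor Variable with a TypeError.
        with fluid.dygraph.guard():
            linear = fluid.dygraph.nn.Linear(10, 10)
            adam = fluid.optimizer.Adam(
                0.1, parameter_list=linear.parameters())
            with self.assertRaises(TypeError):
                adam.set_lr("0.5")  # strings are not accepted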
class TestImperativeMomentumOptimizer(TestImperativeOptimizerBase):
    def get_optimizer_dygraph(self, parameter_list):
......