Unverified commit 316afbb2
Authored Jul 09, 2020 by Zhou Wei; committed by GitHub on Jul 09, 2020
add new API:LambdaDecay,test=develop (#25367)
Parent: 39d85bfb
Showing 2 changed files with 95 additions and 1 deletion (+95 −1)
python/paddle/fluid/dygraph/learning_rate_scheduler.py  (+68 −1)
python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py  (+27 −0)
python/paddle/fluid/dygraph/learning_rate_scheduler.py
@@ -23,7 +23,7 @@ from ..data_feeder import check_type
 __all__ = [
     'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
     'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay', 'LinearLrWarmup',
-    'ReduceLROnPlateau', 'StepDecay', 'MultiStepDecay'
+    'ReduceLROnPlateau', 'StepDecay', 'MultiStepDecay', 'LambdaDecay'
 ]
@@ -1086,3 +1086,70 @@ class MultiStepDecay(_LearningRateEpochDecay):
                 return self.base_lr * (decay_rate**i)
         return self.base_lr * (decay_rate**len(self.milestones))
+
+
+class LambdaDecay(_LearningRateEpochDecay):
+    """
+    :api_attr: imperative
+
+    Sets the learning rate of ``optimizer`` to the initial lr times a multiplicative
+    factor, and this multiplicative factor is computed by the function ``lr_lambda``,
+    which receives ``epoch``.
+
+    The algorithm can be described as the code below.
+
+    .. code-block:: text
+
+        learning_rate = 0.5        # init learning_rate
+        lr_lambda = lambda epoch: 0.95 ** epoch
+
+        learning_rate = 0.5        # epoch 0
+        learning_rate = 0.475      # epoch 1
+        learning_rate = 0.45125    # epoch 2
+
+    Parameters:
+        learning_rate (float|int): The initial learning rate. It can be set to a python float or int number.
+        lr_lambda (function): A function which computes a multiplicative factor given an integer parameter
+            ``epoch``; the initial learning rate is then multiplied by this factor.
+
+    Returns:
+        None.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
+            import numpy as np
+            with fluid.dygraph.guard():
+                x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+                linear = fluid.dygraph.Linear(10, 10)
+                input = fluid.dygraph.to_variable(x)
+                scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x)
+                adam = fluid.optimizer.Adam(learning_rate=scheduler, parameter_list=linear.parameters())
+
+                for epoch in range(6):
+                    for batch_id in range(5):
+                        out = linear(input)
+                        loss = fluid.layers.reduce_mean(out)
+                        adam.minimize(loss)
+                    scheduler.epoch()
+
+                    print("epoch:%d, current lr is %f" % (epoch, adam.current_step_lr()))
+                    # epoch:0, current lr is 0.5
+                    # epoch:1, current lr is 0.475
+                    # epoch:2, current lr is 0.45125
+    """
+
+    def __init__(self, learning_rate, lr_lambda):
+        if not callable(lr_lambda):
+            raise TypeError(
+                "The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s."
+                % type(lr_lambda))
+
+        self.lr_lambda = lr_lambda
+        super(LambdaDecay, self).__init__(learning_rate)
+
+    def get_lr(self):
+        base_lr = self.create_lr_var(self.base_lr)
+
+        return self.base_lr * self.lr_lambda(self.epoch_num)
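For orientation, the rule added above reduces to lr(epoch) = initial_lr * lr_lambda(epoch). The following standalone snippet is not part of the commit; it is a minimal sketch in plain Python that reproduces the numbers quoted in the docstring:

    # Sketch only: verify the decay values cited in the LambdaDecay docstring.
    initial_lr = 0.5
    lr_lambda = lambda epoch: 0.95 ** epoch

    for epoch in range(3):
        # lr(epoch) = initial_lr * lr_lambda(epoch)
        print("epoch:%d, lr is %f" % (epoch, initial_lr * lr_lambda(epoch)))
    # epoch:0, lr is 0.500000
    # epoch:1, lr is 0.475000
    # epoch:2, lr is 0.451250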
python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py
@@ -116,6 +116,10 @@ def step_decay(global_step, learning_rate, step_size, decay_rate=0.1):
     return learning_rate * math.pow(decay_rate, global_step // step_size)
 
 
+def lambda_decay(global_step, learning_rate, lr_lambda):
+    return learning_rate * lr_lambda(global_step)
+
+
 class TestLearningRateDecayDygraph(unittest.TestCase):
     def test_NoamDecay(self):
         with fluid.dygraph.guard():
@@ -217,6 +221,29 @@ class TestLearningRateDecayDygraph(unittest.TestCase):
         with self.assertRaises(ValueError):
             lr = fluid.dygraph.MultiStepDecay(2.0, [20, 30, 50])
+
+    def test_LambdaDecay(self):
+        with fluid.dygraph.guard():
+            learning_rate = 0.5
+            lr_lambda = lambda x: 0.95**x
+            scheduler = fluid.dygraph.LambdaDecay(learning_rate, lr_lambda)
+
+            linear = fluid.dygraph.nn.Linear(10, 10)
+            adam = fluid.optimizer.Adam(
+                scheduler, parameter_list=linear.parameters())
+
+            for epoch in range(30):
+                right_result = lambda_decay(epoch, learning_rate, lr_lambda)
+                fluid_result = scheduler().numpy()[0]
+                scheduler.epoch()
+                self.assertAlmostEqual(
+                    right_result,
+                    fluid_result,
+                    msg='Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}'.
+                    format(epoch, right_result, fluid_result))
+
+            with self.assertRaises(TypeError):
+                lr = fluid.dygraph.LambdaDecay(learning_rate, "test")
 
 
 class TestLearningRateDecay(unittest.TestCase):
     def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs):
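To exercise the new test on its own, one option is to build a suite containing only test_LambdaDecay. This is a minimal sketch, assuming Paddle is installed and the test file's directory is on the import path; the runner setup is illustrative and not part of the commit:

    # Sketch only: run just the newly added test case.
    import unittest

    from test_learning_rate_scheduler import TestLearningRateDecayDygraph

    suite = unittest.TestSuite()
    suite.addTest(TestLearningRateDecayDygraph("test_LambdaDecay"))
    unittest.TextTestRunner(verbosity=2).run(suite)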