BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit c4cd99f3, authored Jan 10, 2021 by WangXi, committed via GitHub on Jan 10, 2021.
fix adamw apply gradient (#30130) (#30207)
Parent: 6d1fb79d

Showing 3 changed files with 44 additions and 88 deletions:

- python/paddle/fluid/tests/unittests/test_adamw_op.py (+6, -4)
- python/paddle/optimizer/adam.py (+2, -0)
- python/paddle/optimizer/adamw.py (+36, -84)
python/paddle/fluid/tests/unittests/test_adamw_op.py

```diff
@@ -29,6 +29,8 @@ class TestAdamWOp(unittest.TestCase):
             parameters=linear.parameters(),
             apply_decay_param_fun=lambda name: True,
             weight_decay=0.01)
-        out = linear(a)
-        out.backward()
-        adam.step()
+
+        for _ in range(2):
+            out = linear(a)
+            out.backward()
+            adam.step()
```
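For context, here is a minimal dygraph usage sketch of the pattern this test exercises; the input shape and learning rate are illustrative assumptions rather than values taken from the hunk above:

```python
import numpy as np
import paddle

# Small linear layer plus an AdamW optimizer that decays every parameter.
a = paddle.to_tensor(np.random.uniform(-1, 1, (2, 13)).astype("float32"))  # assumed shape
linear = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.AdamW(
    learning_rate=0.01,  # assumed value
    parameters=linear.parameters(),
    apply_decay_param_fun=lambda name: True,  # decay all parameters
    weight_decay=0.01)

# Running more than one step is the point of the test change above: the
# decoupled weight decay has to be applied correctly on every step.
for _ in range(2):
    out = linear(a)
    out.backward()
    adam.step()
    adam.clear_grad()
```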
python/paddle/optimizer/adam.py

```diff
@@ -16,6 +16,7 @@ from .optimizer import Optimizer
 from ..fluid import core
 from ..fluid import framework
 from ..fluid.framework import Variable
+from ..fluid.dygraph import base as imperative_base
 import paddle
@@ -247,6 +248,7 @@ class Adam(Optimizer):
         return adam_op

+    @imperative_base.no_grad
     @framework.dygraph_only
     def step(self):
         """
```
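The two added lines import the internal imperative helpers and wrap `Adam.step` in `no_grad`, so the parameter update itself is not recorded for autograd. A minimal sketch of the same idea using the public `paddle.no_grad` context, assuming it behaves like the internal decorator used here:

```python
import numpy as np
import paddle

linear = paddle.nn.Linear(4, 4)
opt = paddle.optimizer.Adam(learning_rate=0.001, parameters=linear.parameters())

x = paddle.to_tensor(np.random.rand(2, 4).astype("float32"))
loss = paddle.mean(linear(x))
loss.backward()

# The forward/backward pass above is tracked for autograd; the update below
# is not, which is what the no_grad decorator on step() enforces internally.
with paddle.no_grad():
    opt.step()
opt.clear_grad()
```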
python/paddle/optimizer/adamw.py

```diff
@@ -129,6 +129,7 @@ class AdamW(Adam):
         self._params_name = set()
         self._apply_decay_param_fun = apply_decay_param_fun
         self._coeff = coeff
+        self._lr_to_coeff = dict()
         super(AdamW, self).__init__(
             learning_rate=learning_rate,
             parameters=parameters,
@@ -139,97 +140,48 @@ class AdamW(Adam):
             name=name,
             lazy_mode=lazy_mode)

-    def _scale_parameters(self, params_and_grads):
+    def _append_decoupled_weight_decay(self, block, param_and_grad):
         """
-        Adds weight decay ops.
-            scaled_parameter = parameter * coeff
+        Add decoupled weight decay op.
+            parameter = parameter - parameter * coeff * lr

         Args:
-            params_and_grads: A list of (parameters, gradients) pairs,
+            block: block in which variable is to be created
+            param_and_grad: (parameters, gradients) pairs,
                 the parameters need to decay.
         Raises:
             Exception: The type of coeff and parameter is not consistent.
         """
-        scaled_params = []
-        for param, grad in params_and_grads:
-            # If no gradient then we don't need to do anything
-            if grad is None:
-                continue
-            if self._apply_decay_param_fun is not None \
-                    and not self._apply_decay_param_fun(param.name):
-                continue
+        param, grad = param_and_grad

-            if isinstance(self._coeff, float):
-                assert param.dtype is not paddle.fluid.core.VarDesc.VarType.FP32, \
-                    "the type of coeff(float) and parameter(%s) is not consistent."%(self._coeff.dtype)
-            else:
-                assert self._coeff.dtype == param.dtype, \
-                    "the type of coeff(%s) and parameter(%s) is not consistent."%(self._coeff.dtype, param.dtype)
-            if isinstance(self._learning_rate, float):
-                learning_rate = self._learning_rate
-            else:
-                learning_rate = self._learning_rate()
-            with param.block.program._optimized_guard(
-                    [param, grad]), framework.name_scope('weight decay'):
-                scaled_params.append(
-                    (param, grad, param * self._coeff * learning_rate))
-                if param.name not in self._params_name:
-                    self._params_name.add(param.name)
-                param = param * self._coeff
-        return scaled_params
+        if self._apply_decay_param_fun is not None \
+                and not self._apply_decay_param_fun(param.name):
+            return

-    @imperative_base.no_grad
-    def minimize(self,
-                 loss,
-                 startup_program=None,
-                 parameters=None,
-                 no_grad_set=None):
-        parameters = parameters if parameters \
-            else self._parameter_list
-
-        params_grads = self.backward(
-            loss=loss,
-            startup_program=startup_program,
-            parameters=parameters,
-            no_grad_set=no_grad_set)
-        scaled_params = self._scale_parameters(params_grads)
-        for p_grad_sgrad in scaled_params:
-            param, grad, scaled_param = p_grad_sgrad
-            with param.block.program._optimized_guard(
-                    [param, grad]), framework.name_scope('weight decay'):
-                updated_param = paddle.fluid.layers.elementwise_sub(
-                    x=param, y=scaled_param)
-                paddle.fluid.layers.assign(input=updated_param, output=param)
-
-        optimize_ops = self._apply_optimize(
-            loss=loss,
-            params_grads=params_grads,
-            startup_program=startup_program)
-        return optimize_ops, params_grads
+        if isinstance(self._coeff, float):
+            assert param.dtype is not paddle.fluid.core.VarDesc.VarType.FP32, \
+                "the type of coeff(float) and parameter(%s) is not consistent."%(self._coeff.dtype)
+        else:
+            assert self._coeff.dtype == param.dtype, \
+                "the type of coeff(%s) and parameter(%s) is not consistent."%(self._coeff.dtype, param.dtype)

-    @framework.dygraph_only
-    @imperative_base.no_grad
-    def step(self):
-        self._dtype = None
-        params_grads = []
-        for param in self._parameter_list:
-            if not param.trainable:
-                continue
-            if param._grad_ivar() is not None:
-                grad_var = param._grad_ivar()
-                params_grads.append((param, grad_var))
-
-        scaled_params = self._scale_parameters(params_grads)
-        for p_grad_sgrad in scaled_params:
-            param, grad, scaled_param = p_grad_sgrad
-            with param.block.program._optimized_guard(
-                    [param, grad]), framework.name_scope('weight decay'):
-                updated_param = paddle.fluid.layers.elementwise_sub(
-                    x=param, y=scaled_param)
-                paddle.fluid.layers.assign(input=updated_param, output=param)
-
-        self._apply_optimize(
-            loss=None, startup_program=None, params_grads=params_grads)
+        # NOTE. We add this function to the _append_optimize_op(),
+        # for we must make sure _create_param_lr() be called after
+        # optimizer._create_global_learning_rate().
+        learning_rate = self._create_param_lr(param_and_grad)
+        with block.program._optimized_guard(
+                [param, grad]), framework.name_scope('weight decay'):
+            self._params_name.add(param.name)
+
+            # If it has been calculated, the result will be reused
+            decay_coeff = self._lr_to_coeff.get(learning_rate, None)
+            if decay_coeff is None:
+                decay_coeff = 1.0 - learning_rate * self._coeff
+                self._lr_to_coeff[learning_rate] = decay_coeff
+
+            scaled_param = param * decay_coeff
+            paddle.fluid.layers.assign(input=scaled_param, output=param)
+
+    def _append_optimize_op(self, block, param_and_grad):
+        self._append_decoupled_weight_decay(block, param_and_grad)
+        return super(AdamW, self)._append_optimize_op(block, param_and_grad)

     def __str__(self):
         return " ".join(["Weight Decay, params:", ",".join(self._params_name)])
```
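The net effect of the rewrite: instead of overriding `minimize`/`step` and subtracting `param * coeff * lr` with `elementwise_sub`, AdamW now scales each parameter by a cached `decay_coeff = 1 - lr * coeff` inside `_append_optimize_op`, right before the regular Adam update. A plain-NumPy sketch of why the two forms are equivalent (hypothetical values, not taken from the diff):

```python
import numpy as np

lr, coeff = 0.01, 0.01  # hypothetical learning rate and weight-decay coefficient
param = np.random.rand(3).astype("float32")

# Old formulation: parameter = parameter - parameter * coeff * lr
old_style = param - param * coeff * lr

# New formulation: scale by a coefficient that can be memoized per learning rate,
# as _lr_to_coeff does in the new code.
decay_coeff = 1.0 - lr * coeff
new_style = param * decay_coeff

assert np.allclose(old_style, new_style)
```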