PaddlePaddle / PaddleClas
Commit 17fd1bc2
Authored Apr 22, 2022 by HydrogenSulfate

refine code

Parent: 15242df1
Showing 2 changed files with 42 additions and 31 deletions:

  ppcls/engine/train/utils.py  +1  -1
  ppcls/optimizer/__init__.py  +41 -30
ppcls/engine/train/utils.py

@@ -39,7 +39,7 @@ def update_loss(trainer, loss_dict, batch_size):
 def log_info(trainer, batch_size, epoch_id, iter_id):
     lr_msg = ", ".join([
-        "lr_{}: {:.8f}".format(i + 1, lr.get_lr())
+        "lr({}): {:.8f}".format(lr.__class__.__name__, lr.get_lr())
         for i, lr in enumerate(trainer.lr_sch)
     ])
     metric_msg = ", ".join([
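For illustration, here is a minimal, hedged sketch (not part of the commit) of how the new lr message renders; FakeScheduler is a hypothetical stand-in for a scheduler exposing get_lr(), where the real code iterates trainer.lr_sch:

    # Hypothetical scheduler stand-in for demonstration only.
    class FakeScheduler:
        def __init__(self, lr):
            self._lr = lr

        def get_lr(self):
            return self._lr

    lr_sch = [FakeScheduler(0.1), FakeScheduler(0.01)]
    lr_msg = ", ".join([
        "lr({}): {:.8f}".format(lr.__class__.__name__, lr.get_lr())
        for i, lr in enumerate(lr_sch)
    ])
    print(lr_msg)
    # lr(FakeScheduler): 0.10000000, lr(FakeScheduler): 0.01000000

Compared with the old "lr_1: ..." format, the message now names the scheduler class instead of a positional index.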
ppcls/optimizer/__init__.py
@@ -44,10 +44,9 @@ def build_lr_scheduler(lr_config, epochs, step_each_epoch):
 # model_list is None in static graph
 def build_optimizer(config, epochs, step_each_epoch, model_list=None):
-    config = copy.deepcopy(config)
-    optim_config = config["Optimizer"]
+    optim_config = copy.deepcopy(config)
     if isinstance(optim_config, dict):
-        # convert {'name': xxx, **optim_cfg} to [{name: {scope: xxx, **optim_cfg}}]
+        # convert {'name': xxx, **optim_cfg} to [{'name': {'scope': xxx, **optim_cfg}}]
         optim_name = optim_config.pop("name")
         optim_config: List[Dict[str, Dict]] = [{
             optim_name: {
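As a hedged sketch of what this normalization does (the page truncates the hunk here; the "all" default scope is an assumption based on the comment and the scope handling later in the diff), a single-optimizer dict is wrapped into a one-element list keyed by the optimizer name:

    import copy
    from typing import Dict, List

    # Invented single-optimizer config; keys mirror a typical PaddleClas
    # "Optimizer" section, values are illustrative only.
    config = {"name": "Momentum", "momentum": 0.9,
              "lr": {"name": "Cosine", "learning_rate": 0.04}}

    optim_config = copy.deepcopy(config)
    if isinstance(optim_config, dict):
        optim_name = optim_config.pop("name")
        # Assumed default scope "all" for the single-optimizer case.
        optim_config: List[Dict[str, Dict]] = [{
            optim_name: {"scope": "all", **optim_config}
        }]
    print(optim_config)
    # [{'Momentum': {'scope': 'all', 'momentum': 0.9, 'lr': {...}}}]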
@@ -61,19 +60,19 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
     """NOTE:
     Currently only support optim objets below.
     1. single optimizer config.
-    2. next level uner Arch, such as Arch.backbone, Arch.neck, Arch.head.
-    3. loss which has parameters, such as CenterLoss.
+    2. model(entire Arch), backbone, neck, head.
+    3. loss(entire Loss), specific loss listed in ppcls/loss/__init__.py.
     """
     for optim_item in optim_config:
-        # optim_cfg = {optim_name: {scope: xxx, **optim_cfg}}
+        # optim_cfg = {optim_name: {'scope': xxx, **optim_cfg}}
         # step1 build lr
         optim_name = list(optim_item.keys())[0]  # get optim_name
         optim_scope = optim_item[optim_name].pop('scope')  # get optim_scope
         optim_cfg = optim_item[optim_name]  # get optim_cfg
         lr = build_lr_scheduler(optim_cfg.pop('lr'), epochs, step_each_epoch)
-        logger.debug("build lr ({}) for scope ({}) success..".format(
-            lr, optim_scope))
+        logger.info("build lr ({}) for scope ({}) success..".format(
+            lr.__class__.__name__, optim_scope))
         # step2 build regularization
         if 'regularizer' in optim_cfg and optim_cfg['regularizer'] is not None:
             if 'weight_decay' in optim_cfg:
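For context, a hedged sketch of the normalized list form this loop consumes; the optimizer entries and hyperparameters are invented, but the scope values follow the docstring above:

    # Invented multi-optimizer config: one optimizer for the model,
    # one for a parameterized loss such as CenterLoss.
    optim_config = [
        {"Momentum": {"scope": "model",
                      "lr": {"name": "Cosine", "learning_rate": 0.04},
                      "momentum": 0.9}},
        {"SGD": {"scope": "CenterLoss",
                 "lr": {"name": "Constant", "learning_rate": 0.5}}},
    ]

    for optim_item in optim_config:
        optim_name = list(optim_item.keys())[0]            # e.g. "Momentum"
        optim_scope = optim_item[optim_name].pop("scope")  # e.g. "model"
        print(optim_name, "->", optim_scope)
    # Momentum -> model
    # SGD -> CenterLoss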
@@ -84,8 +83,8 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
             reg_name = reg_config.pop('name') + 'Decay'
             reg = getattr(paddle.regularizer, reg_name)(**reg_config)
             optim_cfg["weight_decay"] = reg
-            logger.debug("build regularizer ({}) for scope ({}) success..".
-                         format(reg, optim_scope))
+            logger.info("build regularizer ({}) for scope ({}) success..".
+                        format(reg.__class__.__name__, optim_scope))
         # step3 build optimizer
         if 'clip_norm' in optim_cfg:
             clip_norm = optim_cfg.pop('clip_norm')
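The regularizer lookup appends "Decay" to the configured name, so a config entry {'name': 'L2', 'coeff': 1e-4} resolves to paddle.regularizer.L2Decay. A minimal sketch (requires paddlepaddle; the config values are illustrative):

    import paddle

    # "L2" from the config becomes the class name "L2Decay".
    reg_config = {"name": "L2", "coeff": 1e-4}
    reg_name = reg_config.pop("name") + "Decay"
    reg = getattr(paddle.regularizer, reg_name)(**reg_config)
    print(reg.__class__.__name__)  # L2Decay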
@@ -93,30 +92,42 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         else:
             grad_clip = None
         optim_model = []
-        for i in range(len(model_list)):
-            if len(model_list[i].parameters()) == 0:
-                continue
-            if optim_scope == "all":
-                # optimizer for all
-                optim_model.append(model_list[i])
-            else:
-                if optim_scope.endswith("Loss"):
-                    # optimizer for loss
-                    for m in model_list[i].sublayers(True):
-                        if m.__class_name == optim_scope:
-                            optim_model.append(m)
-                else:
-                    # opmizer for module in model, such as backbone, neck, head...
-                    if hasattr(model_list[i], optim_scope):
-                        optim_model.append(getattr(model_list[i], optim_scope))
-        assert len(optim_model) == 1, \
-            "Invalid optim model for optim scope({}), number of optim_model={}".format(optim_scope, len(optim_model))
+        # for static graph
+        if model_list is None:
+            optim = getattr(optimizer, optim_name)(
+                learning_rate=lr, grad_clip=grad_clip,
+                **optim_cfg)(model_list=optim_model)
+            return optim, lr
+        # for dynamic graph
+        if optim_scope == "all":
+            optim_model = model_list
+        elif optim_scope == "model":
+            optim_model = [model_list[0], ]
+        elif optim_scope in ["backbone", "neck", "head"]:
+            optim_model = [getattr(model_list[0], optim_scope, None), ]
+        elif optim_scope == "loss":
+            optim_model = [model_list[1], ]
+        else:
+            optim_model = [
+                model_list[1].loss_func[i]
+                for i in range(len(model_list[1].loss_func))
+                if model_list[1].loss_func[i].__class__.__name__ == optim_scope
+            ]
+        optim_model = [
+            optim_model[i] for i in range(len(optim_model))
+            if (optim_model[i] is not None) and
+            (len(optim_model[i].parameters()) > 0)
+        ]
+        assert len(optim_model) > 0, \
+            f"optim_model is empty for optim_scope({optim_scope})"
         optim = getattr(optimizer, optim_name)(
             learning_rate=lr, grad_clip=grad_clip,
             **optim_cfg)(model_list=optim_model)
-        logger.debug("build optimizer ({}) for scope ({}) success..".format(
-            optim, optim_scope))
+        logger.info("build optimizer ({}) for scope ({}) success..".format(
+            optim.__class__.__name__, optim_scope))
         optim_list.append(optim)
         lr_list.append(lr)
     return optim_list, lr_list
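To make the new dynamic-graph dispatch concrete, a standalone hedged sketch follows; Arch and CombinedLoss are invented stand-ins, and model_list is assumed to be [architecture, loss container] as the scope handling above suggests:

    import paddle.nn as nn

    # Invented stand-ins: model_list[0] is the architecture,
    # model_list[1] holds the loss functions.
    class Arch(nn.Layer):
        def __init__(self):
            super().__init__()
            self.backbone = nn.Linear(8, 8)
            self.head = nn.Linear(8, 4)

    class CombinedLoss(nn.Layer):
        def __init__(self):
            super().__init__()
            self.loss_func = [nn.CrossEntropyLoss()]

    model_list = [Arch(), CombinedLoss()]

    def resolve_scope(optim_scope):
        # Mirrors the dispatch added in this commit.
        if optim_scope == "all":
            optim_model = model_list
        elif optim_scope == "model":
            optim_model = [model_list[0], ]
        elif optim_scope in ["backbone", "neck", "head"]:
            optim_model = [getattr(model_list[0], optim_scope, None), ]
        elif optim_scope == "loss":
            optim_model = [model_list[1], ]
        else:
            optim_model = [
                f for f in model_list[1].loss_func
                if f.__class__.__name__ == optim_scope
            ]
        # Drop missing modules and modules without trainable parameters.
        return [m for m in optim_model
                if m is not None and len(m.parameters()) > 0]

    print(len(resolve_scope("backbone")))  # 1 (the Linear backbone)
    print(len(resolve_scope("neck")))      # 0 (this Arch has no neck)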