s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit ea5f339a (unverified)
Authored by Wenyu on May 23, 2022; committed via GitHub on May 23, 2022
reorg optimizer (#6016)
Parent: 549df290
Showing 3 changed files with 123 additions and 87 deletions (+123 −87)
ppdet/optimizer/__init__.py    +16   −0
ppdet/optimizer/ema.py         +107  −0
ppdet/optimizer/optimizer.py   +0    −87
ppdet/optimizer/__init__.py
new file mode 100644
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .optimizer import *
from .ema import ModelEMA
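
The package __init__ above re-exports the contents of the two new submodules, so downstream code does not need to know about the split. The sketch below is not part of the commit; it only illustrates that, thanks to the re-exports, both import paths resolve to the same class:

# Both import paths resolve to the same class after this commit.
from ppdet.optimizer import ModelEMA as EMAFromPackage       # via the package re-export
from ppdet.optimizer.ema import ModelEMA as EMAFromModule    # direct module path

assert EMAFromPackage is EMAFromModule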
ppdet/optimizer/ema.py
new file mode 100644
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import paddle
import weakref


class ModelEMA(object):
    """
    Exponential Weighted Average for Deep Neural Networks
    Args:
        model (nn.Layer): The detector model.
        decay (float): The decay used for updating the EMA parameters.
            The EMA parameters are updated with the formula:
            `ema_param = decay * ema_param + (1 - decay) * cur_param`.
            Default is 0.9998.
        ema_decay_type (str): type in ['threshold', 'normal', 'exponential'],
            'threshold' as default.
        cycle_epoch (int): The interval (in epochs) at which to reset
            ema_param and step. Default is -1, which means no reset. Its
            function is to add a regularizing effect to the EMA; it is set
            according to experience and is effective when the total number
            of training epochs is large.
    """

    def __init__(self,
                 model,
                 decay=0.9998,
                 ema_decay_type='threshold',
                 cycle_epoch=-1):
        self.step = 0
        self.epoch = 0
        self.decay = decay
        self.state_dict = dict()
        for k, v in model.state_dict().items():
            self.state_dict[k] = paddle.zeros_like(v)
        self.ema_decay_type = ema_decay_type
        self.cycle_epoch = cycle_epoch

        self._model_state = {
            k: weakref.ref(p)
            for k, p in model.state_dict().items()
        }

    def reset(self):
        self.step = 0
        self.epoch = 0
        for k, v in self.state_dict.items():
            self.state_dict[k] = paddle.zeros_like(v)

    def resume(self, state_dict, step=0):
        for k, v in state_dict.items():
            if k in self.state_dict:
                self.state_dict[k] = v
        self.step = step

    def update(self, model=None):
        if self.ema_decay_type == 'threshold':
            decay = min(self.decay, (1 + self.step) / (10 + self.step))
        elif self.ema_decay_type == 'exponential':
            decay = self.decay * (1 - math.exp(-(self.step + 1) / 2000))
        else:
            decay = self.decay
        self._decay = decay

        if model is not None:
            model_dict = model.state_dict()
        else:
            model_dict = {k: p() for k, p in self._model_state.items()}
            assert all(
                [v is not None for _, v in model_dict.items()]), 'python gc.'

        for k, v in self.state_dict.items():
            v = decay * v + (1 - decay) * model_dict[k]
            v.stop_gradient = True
            self.state_dict[k] = v
        self.step += 1

    def apply(self):
        if self.step == 0:
            return self.state_dict
        state_dict = dict()
        for k, v in self.state_dict.items():
            if self.ema_decay_type != 'exponential':
                v = v / (1 - self._decay**self.step)
            v.stop_gradient = True
            state_dict[k] = v
        self.epoch += 1
        if self.cycle_epoch > 0 and self.epoch == self.cycle_epoch:
            self.reset()

        return state_dict
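
For context, ModelEMA is driven from a training loop: update() is called after each optimizer step (with no argument it reads the current weights through the weak references captured in __init__), and apply() returns the de-biased averaged weights once per epoch for evaluation or checkpointing. The sketch below is an assumption-laden illustration, not part of this commit; the detector, optimizer, and data loader are placeholders.

import copy

from ppdet.optimizer import ModelEMA


def train_with_ema(model, optimizer, train_loader, num_epochs):
    # `model` is assumed to be a paddle.nn.Layer detector whose forward
    # returns a dict containing a 'loss' tensor in training mode.
    ema = ModelEMA(model, decay=0.9998, ema_decay_type='threshold')

    for _ in range(num_epochs):
        for data in train_loader:
            loss = model(data)['loss']
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            ema.update()  # fold the current weights into the running average

        # Load the (de-biased) EMA weights into a copy of the detector for
        # evaluation or checkpointing; apply() also advances ema.epoch and
        # may trigger a reset once cycle_epoch is reached.
        eval_model = copy.deepcopy(model)
        eval_model.set_state_dict(ema.apply())

    return ema

Calling update() without a model argument is the reason __init__ stores weak references: the EMA can track the live parameters without holding a second strong reference to every tensor.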
ppdet/optimizer.py → ppdet/optimizer/optimizer.py
@@ -18,7 +18,6 @@ from __future__ import print_function
 import sys
 import math
-import weakref
 import paddle
 import paddle.nn as nn
@@ -360,89 +359,3 @@ class OptimizerBuilder():
                   parameters=params,
                   grad_clip=grad_clip,
                   **optim_args)
-
-
-class ModelEMA(object):
-    """
-    Exponential Weighted Average for Deep Neural Networks
-    Args:
-        model (nn.Layer): The detector model.
-        decay (float): The decay used for updating the EMA parameters.
-            The EMA parameters are updated with the formula:
-            `ema_param = decay * ema_param + (1 - decay) * cur_param`.
-            Default is 0.9998.
-        ema_decay_type (str): type in ['threshold', 'normal', 'exponential'],
-            'threshold' as default.
-        cycle_epoch (int): The interval (in epochs) at which to reset
-            ema_param and step. Default is -1, which means no reset. Its
-            function is to add a regularizing effect to the EMA; it is set
-            according to experience and is effective when the total number
-            of training epochs is large.
-    """
-
-    def __init__(self,
-                 model,
-                 decay=0.9998,
-                 ema_decay_type='threshold',
-                 cycle_epoch=-1):
-        self.step = 0
-        self.epoch = 0
-        self.decay = decay
-        self.state_dict = dict()
-        for k, v in model.state_dict().items():
-            self.state_dict[k] = paddle.zeros_like(v)
-        self.ema_decay_type = ema_decay_type
-        self.cycle_epoch = cycle_epoch
-
-        self._model_state = {
-            k: weakref.ref(p)
-            for k, p in model.state_dict().items()
-        }
-
-    def reset(self):
-        self.step = 0
-        self.epoch = 0
-        for k, v in self.state_dict.items():
-            self.state_dict[k] = paddle.zeros_like(v)
-
-    def resume(self, state_dict, step=0):
-        for k, v in state_dict.items():
-            if k in self.state_dict:
-                self.state_dict[k] = v
-        self.step = step
-
-    def update(self, model=None):
-        if self.ema_decay_type == 'threshold':
-            decay = min(self.decay, (1 + self.step) / (10 + self.step))
-        elif self.ema_decay_type == 'exponential':
-            decay = self.decay * (1 - math.exp(-(self.step + 1) / 2000))
-        else:
-            decay = self.decay
-        self._decay = decay
-
-        if model is not None:
-            model_dict = model.state_dict()
-        else:
-            model_dict = {k: p() for k, p in self._model_state.items()}
-            assert all(
-                [v is not None for _, v in model_dict.items()]), 'python gc.'
-
-        for k, v in self.state_dict.items():
-            v = decay * v + (1 - decay) * model_dict[k]
-            v.stop_gradient = True
-            self.state_dict[k] = v
-        self.step += 1
-
-    def apply(self):
-        if self.step == 0:
-            return self.state_dict
-        state_dict = dict()
-        for k, v in self.state_dict.items():
-            if self.ema_decay_type != 'exponential':
-                v = v / (1 - self._decay**self.step)
-            v.stop_gradient = True
-            state_dict[k] = v
-        self.epoch += 1
-        if self.cycle_epoch > 0 and self.epoch == self.cycle_epoch:
-            self.reset()
-
-        return state_dict