PaddlePaddle / PaddleClas
Commit a43aac32
Authored Sep 03, 2020 by littletomatodonkey
fix optimizer
Parent: b17fbac3

Showing 2 changed files with 118 additions and 12 deletions:

    ppcls/optimizer/optimizer.py    +118  -10
    tools/program.py                  +0   -2
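In short: ppcls/optimizer/optimizer.py replaces the direct paddle.fluid.optimizer / paddle.fluid.regularizer imports with local L1Decay, L2Decay, Momentum, and RMSProp wrapper classes that OptimizerBuilder now resolves by name from this module, and tools/program.py drops the per-batch 'reader_time' meter from the training loop.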
ppcls/optimizer/optimizer.py

@@ -16,18 +16,124 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import paddle.fluid.optimizer as pfopt
-import paddle.fluid.regularizer as pfreg
+import sys
+import paddle.fluid as fluid
 
 __all__ = ['OptimizerBuilder']
 
 
+class L1Decay(object):
+    """
+    L1 Weight Decay Regularization, which encourages the weights to be sparse.
+
+    Args:
+        factor(float): regularization coeff. Default: 0.0.
+    """
+
+    def __init__(self, factor=0.0):
+        super(L1Decay, self).__init__()
+        self.regularization_coeff = factor
+
+    def __call__(self):
+        reg = fluid.regularizer.L1Decay(
+            regularization_coeff=self.regularization_coeff)
+        return reg
+
+
+class L2Decay(object):
+    """
+    L2 Weight Decay Regularization, which encourages the weights to stay small.
+
+    Args:
+        factor(float): regularization coeff. Default: 0.0.
+    """
+
+    def __init__(self, factor=0.0):
+        super(L2Decay, self).__init__()
+        self.regularization_coeff = factor
+
+    def __call__(self):
+        reg = fluid.regularizer.L2Decay(
+            regularization_coeff=self.regularization_coeff)
+        return reg
+
+
+class Momentum(object):
+    """
+    Simple Momentum optimizer with velocity state.
+
+    Args:
+        learning_rate (float|Variable) - The learning rate used to update parameters.
+            Can be a float value or a Variable with one float value as data element.
+        momentum (float) - Momentum factor.
+        regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
+    """
+
+    def __init__(self,
+                 learning_rate,
+                 momentum,
+                 parameter_list=None,
+                 regularization=None,
+                 **args):
+        super(Momentum, self).__init__()
+        self.learning_rate = learning_rate
+        self.momentum = momentum
+        self.parameter_list = parameter_list
+        self.regularization = regularization
+
+    def __call__(self):
+        opt = fluid.optimizer.Momentum(
+            learning_rate=self.learning_rate,
+            momentum=self.momentum,
+            parameter_list=self.parameter_list,
+            regularization=self.regularization)
+        return opt
+
+
+class RMSProp(object):
+    """
+    Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method.
+
+    Args:
+        learning_rate (float|Variable) - The learning rate used to update parameters.
+            Can be a float value or a Variable with one float value as data element.
+        momentum (float) - Momentum factor.
+        rho (float) - rho value in equation.
+        epsilon (float) - avoid division by zero, default is 1e-6.
+        regularization (WeightDecayRegularizer, optional) - The strategy of regularization.
+    """
+
+    def __init__(self,
+                 learning_rate,
+                 momentum,
+                 rho=0.95,
+                 epsilon=1e-6,
+                 parameter_list=None,
+                 regularization=None,
+                 **args):
+        super(RMSProp, self).__init__()
+        self.learning_rate = learning_rate
+        self.momentum = momentum
+        self.rho = rho
+        self.epsilon = epsilon
+        self.parameter_list = parameter_list
+        self.regularization = regularization
+
+    def __call__(self):
+        opt = fluid.optimizer.RMSProp(
+            learning_rate=self.learning_rate,
+            momentum=self.momentum,
+            rho=self.rho,
+            epsilon=self.epsilon,
+            parameter_list=self.parameter_list,
+            regularization=self.regularization)
+        return opt
+
+
 class OptimizerBuilder(object):
     """
-    Build optimizer with fluid api in fluid.layers.optimizer,
-    such as fluid.layers.optimizer.Momentum()
+    Build optimizer
+    https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/optimizer_cn.html
+    https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/regularizer_cn.html
 
     Args:
         function(str): optimizer name of learning rate
...
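Each wrapper class above stores its constructor arguments and defers creating the underlying fluid object until the instance is called; for example, L2Decay(factor=1e-4)() returns fluid.regularizer.L2Decay(regularization_coeff=1e-4). This call-to-build pattern is what allows OptimizerBuilder, in the next hunk, to look the wrappers up by name and invoke them uniformly.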
@@ -43,13 +149,15 @@ class OptimizerBuilder(object):
         self.params = params
         # create regularizer
         if regularizer is not None:
+            mod = sys.modules[__name__]
             reg_func = regularizer['function'] + 'Decay'
-            reg_factor = regularizer['factor']
-            reg = getattr(pfreg, reg_func)(reg_factor)
+            del regularizer['function']
+            reg = getattr(mod, reg_func)(**regularizer)()
             self.params['regularization'] = reg
 
     def __call__(self, learning_rate, parameter_list):
-        opt = getattr(pfopt, self.function)
+        mod = sys.modules[__name__]
+        opt = getattr(mod, self.function)
         return opt(learning_rate=learning_rate,
                    parameter_list=parameter_list,
-                   **self.params)
+                   **self.params)()
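To see the rebuilt flow end to end, here is a minimal, hypothetical usage sketch. The keyword names function, params, and regularizer follow the attributes visible in the diff; the concrete values (momentum 0.9, factor 1e-4, learning rate 0.001) and the model variable are illustrative assumptions, not part of this commit:

# Hypothetical usage sketch; values are illustrative, not from this commit.
# In __init__, 'L2' + 'Decay' is resolved to the L2Decay wrapper via
# sys.modules[__name__], so self.params['regularization'] ends up holding
# a fluid.regularizer.L2Decay instance.
builder = OptimizerBuilder(
    function='Momentum',
    params={'momentum': 0.9},
    regularizer={'function': 'L2', 'factor': 1e-4})

# __call__ looks up the Momentum wrapper by name, instantiates it with the
# stored params, and calls it immediately (note the trailing '()' added by
# this commit), returning a fluid.optimizer.Momentum bound to the model's
# parameters.
optimizer = builder(learning_rate=0.001,
                    parameter_list=model.parameters())  # model: assumed dygraph Layer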
tools/program.py

@@ -276,7 +276,6 @@ def run(dataloader, config, net, optimizer=None, epoch=0, mode='train'):
         ("lr", AverageMeter('lr', 'f', need_avg=False)),
         ("batch_time", AverageMeter('elapse', '.3f')),
-        ('reader_time', AverageMeter('reader', '.3f')),
     ]
     if not use_mix:
         topk_name = 'top{}'.format(config.topk)
...
@@ -287,7 +286,6 @@ def run(dataloader, config, net, optimizer=None, epoch=0, mode='train'):
     tic = time.time()
     for idx, batch in enumerate(dataloader()):
-        metric_list['reader_time'].update(time.time() - tic)
         batch_size = len(batch[0])
         feeds = create_feeds(batch, use_mix)
         fetchs = create_fetchs(feeds, net, config, mode)
...
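The two deletions in this file are coupled: once the 'reader_time' entry is removed from metric_list, the per-batch metric_list['reader_time'].update(...) call would raise a KeyError, so it is removed along with it.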