PaddlePaddle / PaddleHub — commit a585092b
Authored on Apr 28, 2020 by wuzewu
Parent: 77443f64

Update strategy

Showing 4 changed files with 39 additions and 26 deletions (+39 -26)
demo/object_detection/train_faster_rcnn.py   +1  -1
demo/object_detection/train_ssd.py           +1  -1
demo/object_detection/train_yolo.py          +1  -1
paddlehub/finetune/strategy.py               +36 -23
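The three object-detection demos switch their fine-tuning optimizer from Adam to Momentum. To make that possible, the strategy classes in paddlehub/finetune/strategy.py gain a **kwargs parameter that is forwarded to the underlying fluid.optimizer constructor, so optimizer-specific arguments such as momentum can ride through any strategy. A minimal sketch of the new call, taken from the demo change below:

    import paddlehub as hub

    # Extra keyword arguments now flow through DefaultFinetuneStrategy
    # (and the other strategies) to fluid.optimizer.Momentum.
    strategy = hub.finetune.strategy.DefaultFinetuneStrategy(
        learning_rate=0.00025, optimizer_name="momentum", momentum=0.9)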
demo/object_detection/train_faster_rcnn.py

@@ -63,7 +63,7 @@ def finetune(args):
         enable_memory_optim=False,
         checkpoint_dir=args.checkpoint_dir,
         strategy=hub.finetune.strategy.DefaultFinetuneStrategy(
-            learning_rate=0.00025, optimizer_name="adam"))
+            learning_rate=0.00025, optimizer_name="momentum", momentum=0.9))
 
     task = hub.FasterRCNNTask(
         data_reader=data_reader,
demo/object_detection/train_ssd.py

@@ -45,7 +45,7 @@ def finetune(args):
         enable_memory_optim=False,
         checkpoint_dir=args.checkpoint_dir,
         strategy=hub.finetune.strategy.DefaultFinetuneStrategy(
-            learning_rate=0.00025, optimizer_name="adam"))
+            learning_rate=0.00025, optimizer_name="momentum", momentum=0.9))
 
     task = hub.SSDTask(
         data_reader=data_reader,
demo/object_detection/train_yolo.py

@@ -45,7 +45,7 @@ def finetune(args):
         enable_memory_optim=False,
         checkpoint_dir=args.checkpoint_dir,
         strategy=hub.finetune.strategy.DefaultFinetuneStrategy(
-            learning_rate=0.00025, optimizer_name="adam"))
+            learning_rate=0.00025, optimizer_name="momentum", momentum=0.9))
 
     task = hub.YOLOTask(
         data_reader=data_reader,
paddlehub/finetune/strategy.py

@@ -133,39 +133,39 @@ def set_gradual_unfreeze(depth_params_dict, unfreeze_depths):
 
 
 class DefaultStrategy(object):
-    def __init__(self, learning_rate=1e-4, optimizer_name="adam"):
+    def __init__(self, learning_rate=1e-4, optimizer_name="adam", **kwargs):
         self.learning_rate = learning_rate
         self._optimizer_name = optimizer_name
         if self._optimizer_name.lower() == "sgd":
             self.optimizer = fluid.optimizer.SGD(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "adagrad":
             self.optimizer = fluid.optimizer.Adagrad(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "adamax":
             self.optimizer = fluid.optimizer.Adamax(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "decayedadagrad":
             self.optimizer = fluid.optimizer.DecayedAdagrad(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "ftrl":
             self.optimizer = fluid.optimizer.Ftrl(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "larsmomentum":
             self.optimizer = fluid.optimizer.LarsMomentum(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "momentum":
             self.optimizer = fluid.optimizer.Momentum(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "decayedadagrad":
             self.optimizer = fluid.optimizer.DecayedAdagrad(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         elif self._optimizer_name.lower() == "rmsprop":
             self.optimizer = fluid.optimizer.RMSPropOptimizer(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
         else:
             self.optimizer = fluid.optimizer.Adam(
-                learning_rate=self.learning_rate)
+                learning_rate=self.learning_rate, **kwargs)
 
     def execute(self, loss, data_reader, config, dev_count):
         if self.optimizer is not None:
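Every branch of the optimizer dispatch above now forwards **kwargs verbatim to the matching fluid.optimizer constructor. A minimal sketch of what this enables (parameter values are illustrative, not part of the commit):

    from paddlehub.finetune.strategy import DefaultStrategy

    # fluid.optimizer.Momentum requires a `momentum` argument, which the
    # old signature had no way to accept; it now rides along in **kwargs.
    strategy = DefaultStrategy(
        learning_rate=1e-3, optimizer_name="momentum", momentum=0.9)
    # strategy.optimizer is fluid.optimizer.Momentum(
    #     learning_rate=1e-3, momentum=0.9)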
@@ -186,10 +186,13 @@ class CombinedStrategy(DefaultStrategy):
                  learning_rate=1e-4,
                  scheduler=None,
                  regularization=None,
-                 clip=None):
+                 clip=None,
+                 **kwargs):
         super(CombinedStrategy, self).__init__(
-            optimizer_name=optimizer_name, learning_rate=learning_rate)
-
+            optimizer_name=optimizer_name,
+            learning_rate=learning_rate,
+            **kwargs)
+        self.kwargs = kwargs
         # init set
         self.scheduler = {
             "warmup": 0.0,

@@ -379,7 +382,9 @@ class CombinedStrategy(DefaultStrategy):
 
         # set optimizer
         super(CombinedStrategy, self).__init__(
-            optimizer_name=self._optimizer_name, learning_rate=scheduled_lr)
+            optimizer_name=self._optimizer_name,
+            learning_rate=scheduled_lr,
+            **self.kwargs)
 
         # discriminative learning rate
         # based on layer
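CombinedStrategy stores the extra arguments in self.kwargs at construction time and replays them when it rebuilds the optimizer with the scheduled learning rate, so optimizer-specific settings survive LR scheduling. A sketch under that reading (`beta1` is an illustrative Adam argument, not part of the commit):

    from paddlehub.finetune.strategy import AdamWeightDecayStrategy

    # `beta1` is forwarded to fluid.optimizer.Adam both at construction
    # and again when the optimizer is re-created with the scheduled lr.
    strategy = AdamWeightDecayStrategy(
        learning_rate=5e-5, weight_decay=0.01, beta1=0.8)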
@@ -568,7 +573,8 @@ class AdamWeightDecayStrategy(CombinedStrategy):
                  lr_scheduler="linear_decay",
                  warmup_proportion=0.1,
                  weight_decay=0.01,
-                 optimizer_name="adam"):
+                 optimizer_name="adam",
+                 **kwargs):
         scheduler = {"warmup": warmup_proportion}
         if lr_scheduler == "noam_decay":
             scheduler["noam_decay"] = True

@@ -587,14 +593,16 @@ class AdamWeightDecayStrategy(CombinedStrategy):
             learning_rate=learning_rate,
             scheduler=scheduler,
             regularization=regularization,
-            clip=clip)
+            clip=clip,
+            **kwargs)
 
 
 class L2SPFinetuneStrategy(CombinedStrategy):
     def __init__(self,
                  learning_rate=1e-4,
                  optimizer_name="adam",
-                 regularization_coeff=1e-3):
+                 regularization_coeff=1e-3,
+                 **kwargs):
         scheduler = {}
         regularization = {"L2SP": regularization_coeff}
         clip = {}

@@ -603,14 +611,16 @@ class L2SPFinetuneStrategy(CombinedStrategy):
             learning_rate=learning_rate,
             scheduler=scheduler,
             regularization=regularization,
-            clip=clip)
+            clip=clip,
+            **kwargs)
 
 
 class DefaultFinetuneStrategy(CombinedStrategy):
     def __init__(self,
                  learning_rate=1e-4,
                  optimizer_name="adam",
-                 regularization_coeff=1e-3):
+                 regularization_coeff=1e-3,
+                 **kwargs):
         scheduler = {}
         regularization = {"L2": regularization_coeff}
         clip = {}

@@ -620,7 +630,8 @@ class DefaultFinetuneStrategy(CombinedStrategy):
             learning_rate=learning_rate,
             scheduler=scheduler,
             regularization=regularization,
-            clip=clip)
+            clip=clip,
+            **kwargs)
 
 
 class ULMFiTStrategy(CombinedStrategy):

@@ -632,7 +643,8 @@ class ULMFiTStrategy(CombinedStrategy):
                  dis_blocks=3,
                  factor=2.6,
                  frz_blocks=3,
-                 params_layer=None):
+                 params_layer=None,
+                 **kwargs):
         scheduler = {
             "slanted_triangle": {

@@ -656,4 +668,5 @@ class ULMFiTStrategy(CombinedStrategy):
             learning_rate=learning_rate,
             scheduler=scheduler,
             regularization=regularization,
-            clip=clip)
+            clip=clip,
+            **kwargs)
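Taken together, every public strategy (AdamWeightDecayStrategy, L2SPFinetuneStrategy, DefaultFinetuneStrategy, ULMFiTStrategy) now accepts optimizer keyword arguments and forwards them through CombinedStrategy. For example (illustrative values, mirroring the demo change above):

    from paddlehub.finetune.strategy import ULMFiTStrategy

    # Momentum's required `momentum` argument passes through **kwargs.
    strategy = ULMFiTStrategy(
        learning_rate=1e-4, optimizer_name="momentum", momentum=0.9)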