PaddlePaddle / PaddleHub
Commit 03b66704
Authored on April 30, 2019 by zhangxuefei
Fix the bug that the linear_warmup_decay function lacks a definition
Parent: 5e6cc426

Showing 2 changed files with 34 additions and 2 deletions (+34 -2)
paddlehub/finetune/optimization.py  +26 -1
paddlehub/finetune/strategy.py      +8  -1
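Before this commit, selecting the 'linear_decay' scheduler failed because adam_weight_decay_optimization (in optimization.py, below) called linear_warmup_decay, which was never defined in the module. A minimal, self-contained illustration of that failure mode; the dispatch function here is a hypothetical stand-in, not PaddleHub code:

# Hypothetical stand-in for the scheduler dispatch in optimization.py:
# referencing a function that was never defined raises NameError when the
# branch is actually taken.
def pick_scheduled_lr(scheduler, learning_rate, warmup_steps):
    if scheduler == 'noam_decay':
        return learning_rate  # placeholder branch
    elif scheduler == 'linear_decay':
        # Before this commit, linear_warmup_decay had no definition anywhere.
        return linear_warmup_decay(learning_rate, warmup_steps)
    else:
        raise ValueError("Unknown learning rate scheduler")


try:
    pick_scheduled_lr('linear_decay', 1e-4, 100)
except NameError as err:
    print(err)  # name 'linear_warmup_decay' is not defined

The first diff below adds the missing definition and passes main_program through to it instead of num_train_steps.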
paddlehub/finetune/optimization.py

@@ -19,6 +19,8 @@ from __future__ import print_function
 
 import numpy as np
 import paddle.fluid as fluid
+import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
+from paddle.fluid.layers import control_flow
 
 
 def adam_weight_decay_optimization(loss,
@@ -35,7 +37,7 @@ def adam_weight_decay_optimization(loss,
                                            warmup_steps)
     elif scheduler == 'linear_decay':
         scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
-                                           num_train_steps)
+                                           main_program)
     else:
         raise ValueError("Unkown learning rate scheduler, should be "
                          "'noam_decay' or 'linear_decay'")
@@ -76,3 +78,26 @@ def adam_weight_decay_optimization(loss,
             fluid.layers.assign(output=param, input=updated_param)
 
     return scheduled_lr
+
+
+def linear_warmup_decay(init_lr, num_warmup_steps, main_program):
+    with main_program._lr_schedule_guard():
+        global_step = lr_scheduler._decay_step_counter()
+
+        lr = fluid.layers.create_global_var(
+            shape=[1],
+            value=0.0,
+            dtype='float32',
+            persistable=True,
+            name="learning_rate")
+
+        with control_flow.Switch() as switch:
+            with switch.case(global_step < num_warmup_steps):
+                decayed_lr = init_lr * global_step * 1.0 / num_warmup_steps
+                fluid.layers.assign(decayed_lr, lr)
+            with switch.default():
+                last_value_var = fluid.layers.fill_constant(
+                    shape=[1], dtype='float32', value=float(init_lr))
+                fluid.layers.assign(last_value_var, lr)
+
+        return lr
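Numerically, the Switch block added above is a linear ramp from 0 to init_lr over the first num_warmup_steps steps, followed by a constant init_lr; despite the function's name, no decay phase is implemented inside this block. A plain-Python sketch of the same rule, handy for sanity-checking values outside a Paddle program (the helper name and sample numbers are mine, not part of the commit):

def linear_warmup_lr(init_lr, num_warmup_steps, global_step):
    """Learning rate at global_step under the rule added above:
    ramp linearly from 0 to init_lr over num_warmup_steps, then hold init_lr."""
    if global_step < num_warmup_steps:
        return init_lr * global_step * 1.0 / num_warmup_steps
    return float(init_lr)


# Example: base LR 5e-5 with 100 warmup steps.
for step in (0, 50, 100, 200):
    print(step, linear_warmup_lr(5e-5, 100, step))
# 0 0.0
# 50 2.5e-05
# 100 5e-05
# 200 5e-05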
paddlehub/finetune/strategy.py

@@ -89,7 +89,7 @@ class AdamWeightDecayStrategy(DefaultStrategy):
     def __init__(self,
                  learning_rate=1e-4,
                  lr_scheduler="linear_decay",
-                 warmup_proportion=0.0,
+                 warmup_proportion=0.1,
                  weight_decay=0.01,
                  optimizer_name="adam"):
         super(AdamWeightDecayStrategy, self).__init__(
@@ -118,6 +118,13 @@ class AdamWeightDecayStrategy(DefaultStrategy):
         # calculate wamrup step
         dev_count = self._get_dev_count(config)
         num_train_examples = data_reader.get_num_examples(phase='train')
+        data_reader.data_generator(
+            batch_size=config.batch_size, phase='train', shuffle=True)
+        data_reader.data_generator(
+            batch_size=config.batch_size, phase='val', shuffle=False)
+        data_reader.data_generator(
+            batch_size=config.batch_size, phase='dev', shuffle=False)
+        num_train_examples = data_reader.get_num_examples(phase='train')
         max_train_steps = config.num_epoch * num_train_examples // config.batch_size // dev_count
         warmup_steps = int(max_train_steps * self.warmup_proportion)
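Two details in the strategy.py hunk are worth spelling out. The data_generator calls are added before num_train_examples is read again, presumably so that get_num_examples(phase='train') reflects the reader's state once its generators exist; and warmup_proportion now defaults to 0.1 instead of 0.0, so warmup is on by default. The step arithmetic itself is plain integer math; a self-contained sketch with illustrative numbers (none of these figures come from the commit):

# Illustrative numbers only: 10,000 training examples, 3 epochs,
# batch size 32, a single device, and the new default warmup_proportion=0.1.
num_train_examples = 10000
num_epoch = 3
batch_size = 32
dev_count = 1
warmup_proportion = 0.1

max_train_steps = num_epoch * num_train_examples // batch_size // dev_count
warmup_steps = int(max_train_steps * warmup_proportion)

print(max_train_steps, warmup_steps)  # 937 93

With these numbers, roughly the first tenth of the 937 training steps run under the linear warmup ramp defined in optimization.py, and the learning rate then stays at its base value.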