PaddlePaddle / PaddleHub

Commit d7cfc311
Authored on Mar 30, 2019 by Zeyu Chen

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleHub into develop

Parents: 014f4f40, 3eed05b4
Showing 1 changed file with 36 additions and 3 deletions

paddle_hub/finetune/finetune.py (+36, -3)
@@ -21,6 +21,7 @@ import time
 import paddle
 import paddle.fluid as fluid
+from visualdl import LogWriter

 from paddle_hub.tools.logger import logger
 from paddle_hub.finetune.optimization import bert_finetune
@@ -46,6 +47,8 @@ def _finetune_model(task,
     with_memory_optimization = config.with_memory_optimization
     checkpoint_dir = config.checkpoint_dir
     checkpoint_path = os.path.join(checkpoint_dir, CKPT_FILE)
+    log_writter = LogWriter(
+        os.path.join(checkpoint_dir, "vdllog"), sync_cycle=10)
     with fluid.program_guard(main_program, startup_program):
         if use_cuda:
@@ -93,7 +96,17 @@ def _finetune_model(task,
         exe.run(fluid.default_startup_program())
         step = 0
         last_epoch = 0
+        best_eval_acc = 0
         logger.info("Finetune start")
+
+        # add visualdl scalar
+        with log_writter.mode("train") as logw:
+            train_loss_scalar = logw.scalar(tag="loss[train]")
+            train_acc_scalar = logw.scalar(tag="accuracy[train]")
+
+        with log_writter.mode("evaluate") as logw:
+            eval_loss_scalar = logw.scalar(tag="loss[evaluate]")
+            eval_acc_scalar = logw.scalar(tag="accuracy[evaluate]")
+
         train_time_begin = time.time()
         for index in range(last_epoch, epoch):
             train_reader = data_processor.data_generator(
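The hunk above is the core of the change: four scalar series are registered up front, one loss/accuracy pair per mode, and the loops below feed them step by step. Pulled out of the fine-tune code, the VisualDL 1.x cycle this diff relies on looks roughly like the sketch below; the log directory and tag are illustrative rather than taken from the commit, and sync_cycle=10 asks the writer to sync records to disk roughly every 10 writes.

# Minimal sketch of the VisualDL 1.x scalar API used in this diff.
# The directory and tag names here are illustrative assumptions.
from visualdl import LogWriter

log_writer = LogWriter("./vdllog", sync_cycle=10)

# mode() namespaces the tags, so "train" and "evaluate" series for the
# same metric can be compared side by side in the VisualDL board.
with log_writer.mode("train") as logw:
    train_loss = logw.scalar(tag="loss")

for step in range(100):
    fake_loss = 1.0 / (step + 1)  # stand-in for a real training loss
    train_loss.add_record(step, fake_loss)

The VisualDL board is then pointed at the same log directory to plot the curves; the exact CLI invocation varies across VisualDL releases.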
@@ -108,6 +121,7 @@ def _finetune_model(task,
                 accuracy_sum += accuracy_v * len(batch)
                 loss_sum += loss_v * len(batch)

+                # print log
                 if step % config.log_interval == 0:
                     train_time_used = time.time() - train_time_begin
                     speed = config.log_interval / train_time_used
@@ -115,6 +129,13 @@ def _finetune_model(task,
                     logger.info(
                         "step %d: loss=%.5f acc=%.5f [step/sec: %.2f]" %
                         (step, loss_sum / size, accuracy_sum / size, speed))
+
+                    # record visualdl log
+                    record_step = step
+                    train_loss_scalar.add_record(record_step, loss_sum / size)
+                    train_acc_scalar.add_record(record_step,
+                                                accuracy_sum / size)
+
                     size = accuracy_sum = loss_sum = 0

                 if step % config.save_ckpt_interval == 0:
@@ -128,12 +149,21 @@ def _finetune_model(task,
                         last_model_dir=model_save_dir)

                 if eval_model and step % config.eval_interval == 0:
-                    eval(
+                    eval_loss, eval_acc, eval_perf = evaluate(
                         task,
                         data_processor,
                         feed_list,
                         phase="validate",
                         config=config)
+                    record_step = step
+                    eval_loss_scalar.add_record(record_step, eval_loss)
+                    eval_acc_scalar.add_record(record_step, eval_acc)
+                    if eval_acc > best_eval_acc:
+                        best_eval_acc = eval_acc
+                        model_save_dir = os.path.join(
+                            checkpoint_dir, "model_best")
+                        fluid.io.save_persistables(exe, dirname=model_save_dir)
+
             # update model and checkpoint
             model_save_dir = os.path.join(checkpoint_dir, "model_latest")
             fluid.io.save_persistables(exe, dirname=model_save_dir)
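This hunk also introduces a keep-best policy next to the rolling checkpoint: "model_latest" is overwritten on every save, while "model_best" is rewritten only when validation accuracy strictly improves on best_eval_acc (which starts at 0, so the first evaluation with positive accuracy always produces a "model_best"). The same idea, reduced to a framework-agnostic sketch with hypothetical names; save_fn stands in for the fluid.io.save_persistables calls above and is not PaddleHub API.

# Hypothetical keep-best helper illustrating the pattern in this hunk.
class BestKeeper:
    def __init__(self):
        self.best_acc = 0.0

    def update(self, eval_acc, save_fn):
        save_fn("model_latest")        # rolling copy, always refreshed
        if eval_acc > self.best_acc:   # strict improvement only
            self.best_acc = eval_acc
            save_fn("model_best")      # kept until beaten

keeper = BestKeeper()
keeper.update(0.81, lambda name: print("saving", name))  # saves both
keeper.update(0.79, lambda name: print("saving", name))  # latest only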
@@ -144,7 +174,8 @@ def _finetune_model(task,
                 last_model_dir=model_save_dir)

         # eval before end
         if eval_model:
-            eval(task, data_processor, feed_list, phase="test", config=config)
+            evaluate(
+                task, data_processor, feed_list, phase="test", config=config)

         logger.info("Finetune finished")
@@ -156,7 +187,7 @@ def finetune(task, data_processor, feed_list, config=None):
     _finetune_model(task, data_processor, feed_list, config, eval_model=False)


-def eval(task, data_processor, feed_list, phase="test", config=None):
+def evaluate(task, data_processor, feed_list, phase="test", config=None):
     inference_program = task.inference_program()
     main_program = task.main_program()
     loss = task.variable("loss")
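The rename from eval to evaluate (the function now also returns its metrics, as the final hunk shows) fixes a latent wart as a side effect: a module-level function named eval shadows Python's builtin of the same name everywhere in this module. A quick illustration of the hazard the new name avoids:

# With the old name, the bare name `eval` inside this module no longer
# reaches the builtin.
def eval(expr):
    return "custom eval of " + expr

print(eval("1 + 1"))  # -> "custom eval of 1 + 1", not 2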
@@ -181,3 +212,5 @@ def eval(task, data_processor, feed_list, phase="test", config=None):
     eval_speed = index / eval_time_used
     logger.info("[Evaluation] loss=%.5f acc=%.5f [step/sec: %.2f]" %
                 (loss_sum / size, accuracy_sum / size, eval_speed))
+
+    return loss_sum / size, accuracy_sum / size, eval_speed