PaddlePaddle / PaddleHub
Commit f3e063ca
Committed Mar 26, 2019 by Zeyu Chen

add hub task and finetune_and_eval_v1 interface

Parent: f98797ef
Showing 2 changed files with 170 additions and 5 deletions (+170 -5)
paddle_hub/__init__.py          +1    -1
paddle_hub/finetune/task.py     +169  -4
paddle_hub/__init__.py

@@ -26,4 +26,4 @@ from .tools.logger import logger
 from .tools.paddle_helper import connect_program
 from .io.type import DataType
 from .hub_server import default_hub_server
-from .finetune.task import append_mlp_classifier
+from .finetune.task import append_mlp_classifier, finetune_and_eval
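After this change both entry points are re-exported from the package root. A minimal illustration of what the new import line enables, assuming a PaddleHub build that contains this commit:

# append_mlp_classifier builds the classification head and returns a Task;
# finetune_and_eval consumes that Task (see paddle_hub/finetune/task.py below).
from paddle_hub import append_mlp_classifier, finetune_and_eval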
paddle_hub/finetune/task.py

@@ -13,7 +13,14 @@
# limitations under the License.
import os
import collections
import paddle.fluid as fluid
import time
import numpy as np
import multiprocessing

from paddle_hub.tools.logger import logger
from paddle_hub.finetune.optimization import bert_optimization

__all__ = ['append_mlp_classifier']
@@ -45,9 +52,167 @@ def append_mlp_classifier(feature, label, num_classes=2, hidden_units=None):
        input=probs, label=label, total=num_example)
-    # TODO: encapsulate to Task
-    return loss, probs, accuracy, num_example
+    graph_var_dict = {
+        "loss": loss,
+        "probs": probs,
+        "accuracy": accuracy,
+        "num_example": num_example
+    }
+
+    task = Task("text_classification", graph_var_dict)
+
+    return task
def finetune_and_eval(train_program,
                      startup_program,
                      task,
                      feed_list,
                      data_processor,
                      config=None):
    if config.use_cuda:
        place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))
        dev_count = fluid.core.get_cuda_device_count()
    else:
        place = fluid.CPUPlace()
        dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
    # data generator
    data_generator = {
        'train': data_processor.data_generator(
            batch_size=config.batch_size,
            phase='train',
            epoch=config.epoch,
            shuffle=False),
        'test': data_processor.data_generator(
            batch_size=config.batch_size, phase='test', shuffle=False),
        'dev': data_processor.data_generator(
            batch_size=config.batch_size, phase='dev', shuffle=False)
    }
    # hub.finetune_and_eval start here
    # TODO: to simplify
    loss = task.variable("loss")
    probs = task.variable("probs")
    accuracy = task.variable("accuracy")
    num_example = task.variable("num_example")

    num_train_examples = data_processor.get_num_examples(phase='train')
    if config.in_tokens:
        max_train_steps = config.epoch * num_train_examples // (
            config.batch_size // config.max_seq_len) // dev_count
    else:
        max_train_steps = config.epoch * num_train_examples // config.batch_size // dev_count

    warmup_steps = int(max_train_steps * config.warmup_proportion)
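    # Worked example with illustrative values (not from the commit): with
    # epoch=3, num_train_examples=10000, batch_size=32, dev_count=1 and
    # in_tokens=False, max_train_steps = 3 * 10000 // 32 // 1 = 937, and
    # warmup_proportion=0.1 gives warmup_steps = int(937 * 0.1) = 93.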
    # clone test program before optimize
    test_program = train_program.clone(for_test=True)

    bert_optimization(loss, warmup_steps, max_train_steps,
                      config.learning_rate, train_program,
                      config.weight_decay)
    # memory optimization
    fluid.memory_optimize(
        input_program=train_program,
        skip_opt_set=[
            # skip task graph variable memory optimization
            loss.name, probs.name, accuracy.name, num_example.name
        ])
    # run startup on the device selected from config above
    exe = fluid.Executor(place)
    exe.run(startup_program)
    feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
    # Training block
    # prepare training dataset
    train_data_generator = data_generator['train']
    total_loss, total_acc, total_num_example = [], [], []
    step = 0
    time_begin = time.time()
    train_time_used = 0.0
    for example in train_data_generator():
        step += 1
        train_time_begin = time.time()
        np_loss, np_acc, np_num_example = exe.run(
            program=train_program,
            feed=feeder.feed([example]),
            fetch_list=[loss, accuracy, num_example])
        train_time_used += time.time() - train_time_begin
        # Statistic Block
        total_loss.extend(np_loss * np_num_example)
        total_acc.extend(np_acc * np_num_example)
        total_num_example.extend(np_num_example)

        if step % config.stat_interval == 0:
            # get training progress
            accum_num_example = np.sum(total_num_example)
            print("step {}: loss={:.5f} acc={:.5f} [step/sec: {:.2f}]".format(
                step,
                np.sum(total_loss) / accum_num_example,
                np.sum(total_acc) / accum_num_example,
                config.stat_interval / train_time_used))
            # reset statistic variables
            total_loss, total_acc, total_num_example = [], [], []
            train_time_used = 0.0
        # Evaluation block
        if step % config.eval_interval == 0:
            print("Evaluation start")
            total_loss, total_acc, total_num_example = [], [], []
            dev_data_generator = data_generator['dev']
            eval_step = 0
            eval_time_begin = time.time()
            for example in dev_data_generator():
                eval_step += 1
                np_loss, np_acc, np_num_example = exe.run(
                    program=test_program,
                    feed=feeder.feed([example]),
                    fetch_list=[loss, accuracy, num_example])
                total_loss.extend(np_loss * np_num_example)
                total_acc.extend(np_acc * np_num_example)
                total_num_example.extend(np_num_example)
            eval_time_used = time.time() - eval_time_begin
            accum_num_example = np.sum(total_num_example)
            print("[Evaluation] loss={:.5f} acc={:.5f} [step/sec: {:.2f}]".format(
                np.sum(total_loss) / accum_num_example,
                np.sum(total_acc) / accum_num_example,
                eval_step / eval_time_used))
        if step % config.eval_interval == 0:
            # Final Test Block
            total_loss, total_acc, total_num_example = [], [], []
            test_data_generator = data_generator['test']
            for example in test_data_generator():
                np_loss, np_acc, np_num_example = exe.run(
                    program=test_program,
                    feed=feeder.feed([example]),
                    fetch_list=[loss, accuracy, num_example])
                total_loss.extend(np_loss * np_num_example)
                total_acc.extend(np_acc * np_num_example)
                total_num_example.extend(np_num_example)
            accum_num_example = np.sum(total_num_example)
            print("[Final Test] loss={:.5f} acc={:.5f}".format(
                np.sum(total_loss) / accum_num_example,
                np.sum(total_acc) / accum_num_example))
class Task(object):
    def __init__(self, task_type, graph_var_dict):
        self.task_type = task_type
        self.graph_var_dict = graph_var_dict

    def variable(self, var_name):
        if var_name in self.graph_var_dict:
            return self.graph_var_dict[var_name]

        raise KeyError("var_name {} not in task graph".format(var_name))
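Task is a thin named container over graph variables; finetune_and_eval looks them up by name through task.variable(). A minimal stand-alone sketch of that contract, using plain strings in place of the fluid variables that append_mlp_classifier registers:

# Hypothetical usage of the Task class above; the string values stand in
# for real fluid graph variables.
task = Task("text_classification", {"loss": "loss_var", "probs": "probs_var"})
print(task.variable("loss"))  # -> loss_var
task.variable("accuracy")     # raises KeyError: var_name accuracy not in task graph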