PaddlePaddle / PALM
Commit cae26c4c
Authored on Feb 18, 2020 by wangxiao1021

fix bugs

Parent: d56288b8
Showing 5 changed files with 27 additions and 7 deletions (+27, -7):
examples/multi-task/predict-intent.py    +2  -2
examples/multi-task/predict-slot.py      +2  -2
paddlepalm/head/mrc.py                   +11 -3
paddlepalm/multihead_trainer.py          +5  -0
paddlepalm/trainer.py                    +7  -0
examples/multi-task/predict-intent.py

@@ -46,7 +46,7 @@ if __name__ == '__main__':
     trainer.build_predict_forward(pred_ernie, cls_pred_head)

     # step 6: load pretrained model
-    pred_model_path = './outputs/ckpt.step9282'
+    pred_model_path = './outputs/ckpt.step4641'
     pred_ckpt = trainer.load_ckpt(pred_model_path)

     # step 7: fit prepared reader and data
@@ -54,4 +54,4 @@ if __name__ == '__main__':
     # step 8: predict
     print('predicting..')
-    trainer.predict(print_steps=print_steps, output_dir=pred_output)
\ No newline at end of file
+    trainer.predict(print_steps=print_steps, output_dir=pred_output)
examples/multi-task/predict-slot.py

@@ -47,7 +47,7 @@ if __name__ == '__main__':
     trainer_seq_label.build_predict_forward(pred_ernie, seq_label_pred_head)

     # step 6: load pretrained model
-    pred_model_path = './outputs/ckpt.step9282'
+    pred_model_path = './outputs/ckpt.step4641'
     pred_ckpt = trainer_seq_label.load_ckpt(pred_model_path)

     # step 7: fit prepared reader and data
@@ -55,4 +55,4 @@ if __name__ == '__main__':
     # step 8: predict
     print('predicting..')
-    trainer_seq_label.predict(print_steps=print_steps, output_dir=pred_output)
\ No newline at end of file
+    trainer_seq_label.predict(print_steps=print_steps, output_dir=pred_output)
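Note on the two script changes above: PALM saves training checkpoints into directories named ckpt.step<N> under the output directory (see the trainer.py hunks further down), so the hard-coded step suffix in these predict scripts has to name a checkpoint that actually exists; this commit updates it from step 9282 to step 4641. As a hedged sketch (not part of this commit), the hard-coded path could instead be resolved at run time; latest_ckpt below is a hypothetical helper:

# Hypothetical helper (not in this commit): resolve the newest ckpt.step<N>
# directory under the training output directory instead of hard-coding N.
import os
import re

def latest_ckpt(output_dir='./outputs'):
    steps = [int(m.group(1))
             for m in (re.match(r'ckpt\.step(\d+)$', name)
                       for name in os.listdir(output_dir))
             if m]
    assert steps, 'no ckpt.step* directory found under ' + output_dir
    return os.path.join(output_dir, 'ckpt.step%d' % max(steps))

# pred_model_path = latest_ckpt()   # would resolve to './outputs/ckpt.step4641' here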
paddlepalm/head/mrc.py

@@ -22,6 +22,14 @@ import math
 import six
 import paddlepalm.tokenizer.ernie_tokenizer as tokenization
 import json
+import sys
+import io
+if sys.version[0] == '2':
+    reload(sys)
+    sys.setdefaultencoding('utf-8')
+else:
+    import importlib
+    importlib.reload(sys)

 RawResult = collections.namedtuple("RawResult",
     ["unique_id", "start_logits", "end_logits"])
@@ -361,15 +369,15 @@ def _write_predictions(all_examples, all_features, all_results, n_best_size,
-    with open(output_prediction_file, "w") as writer:
+    with io.open(output_prediction_file, "w", encoding='utf-8') as writer:
         writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n")

-    with open(output_nbest_file, "w") as writer:
+    with io.open(output_nbest_file, "w", encoding='utf-8') as writer:
         writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n")

     if with_negative:
-        with open(output_null_log_odds_file, "w") as writer:
+        with io.open(output_null_log_odds_file, "w", encoding='utf-8') as writer:
             writer.write(json.dumps(scores_diff_json, indent=4, ensure_ascii=False) + "\n")
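The mrc.py changes make prediction writing UTF-8 safe on both Python 2 and 3: json.dumps(..., ensure_ascii=False) keeps non-ASCII answer text as-is, and Python 2's built-in open() cannot reliably write such text, so the file handles are switched to io.open(..., encoding='utf-8') (the reload/setdefaultencoding block covers the remaining Python 2 string handling). A minimal sketch of the pattern, with a made-up path and payload:

# Minimal sketch (made-up data and path): write non-ASCII JSON portably.
# io.open() takes an explicit encoding and behaves the same on Python 2 and 3,
# whereas plain open() on Python 2 can raise UnicodeEncodeError here.
import io
import json

predictions = {"qid-0": u"北京"}   # non-ASCII prediction text
with io.open('/tmp/predictions.json', 'w', encoding='utf-8') as writer:
    writer.write(json.dumps(predictions, indent=4, ensure_ascii=False) + u"\n")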
paddlepalm/multihead_trainer.py

@@ -7,6 +7,7 @@ from paddlepalm.utils import reader_helper
 import numpy as np
 from paddlepalm.distribute import gpu_dev_count, data_feeder, decode_fake
 import time
+import sys

 dev_count = 1 if gpu_dev_count <= 1 else gpu_dev_count
 VERBOSE = False
@@ -157,6 +158,7 @@ class MultiHeadTrainer(Trainer):
                 max_train_steps = int(num_epochs * t.mix_ratio * base_steps_pur_epoch)
                 if not t._as_auxilary:
                     print('{}: expected train steps {}.'.format(t.name, max_train_steps))
+                    sys.stdout.flush()
                     self._finish_steps[t.name] = max_train_steps
                     self._finish[t.name] = False
                 else:
@@ -176,6 +178,7 @@ class MultiHeadTrainer(Trainer):
             joint_shape_and_dtypes.append(t._shape_and_dtypes)

         print('Estimated overall train steps {}.'.format(global_steps))
+        sys.stdout.flush()
         self._overall_train_steps = global_steps

         iterator_fn = reader_helper.create_multihead_iterator_fn(iterators, prefixes, joint_shape_and_dtypes, \
@@ -199,6 +202,7 @@ class MultiHeadTrainer(Trainer):
         if trainers[task_name]._cur_train_step == self._finish_steps[task_name]:
             if not silent:
                 print(task_name + ' train finish!')
+                sys.stdout.flush()
             self._finish[task_name] = True
         flags = list(set(self._finish.values()))
         return len(flags) == 1 and flags[0] == True
@@ -236,6 +240,7 @@ class MultiHeadTrainer(Trainer):
                         (self._trainers[task_id]._cur_train_step - 1) % self._trainers[task_id]._steps_pur_epoch + 1, \
                         self._trainers[task_id]._steps_pur_epoch, self._trainers[task_id]._cur_train_epoch, \
                         loss, print_steps / time_cost))
+                    sys.stdout.flush()
                     time_begin = time.time()

                 self._check_save()
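The additions in multihead_trainer.py are all of one kind: an explicit sys.stdout.flush() after each progress print. When training logs are redirected to a file or a pipe, CPython block-buffers stdout, so printed progress can lag well behind the actual training step; flushing pushes each line out immediately. A small illustration (not from this commit):

# Without the flush, this progress line may sit in the stdout buffer until the
# buffer fills or the process exits, when stdout is redirected to a file.
import sys
import time

print('step 100/4641, loss: 0.532')
sys.stdout.flush()   # make the line visible immediately in the redirected log
time.sleep(5)        # stand-in for long-running training work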
paddlepalm/trainer.py

@@ -18,6 +18,7 @@ import os
 import json
 from paddle import fluid
 import time
+import sys
 import numpy as np
 import paddlepalm.utils.basic_helper as helper
 from paddlepalm.utils import reader_helper, saver
@@ -546,9 +547,11 @@ class Trainer(object):
             if self._save_predict:
                 self._save(save_path, suffix='pred.step'+str(self._cur_train_step))
                 print('predict model has been saved at '+os.path.join(save_path, 'pred.step'+str(self._cur_train_step)))
+                sys.stdout.flush()
             if self._save_ckpt:
                 fluid.io.save_persistables(self._exe, os.path.join(save_path, 'ckpt.step'+str(self._cur_train_step)), self._train_prog)
                 print('checkpoint has been saved at '+os.path.join(save_path, 'ckpt.step'+str(self._cur_train_step)))
+                sys.stdout.flush()
             return True
         else:
             return False
@@ -608,6 +611,7 @@ class Trainer(object):
                     print("step {}/{} (epoch {}), loss: {:.3f}, speed: {:.2f} steps/s".format(
                         (self._cur_train_step - 1) % self._steps_pur_epoch + 1, self._steps_pur_epoch, self._cur_train_epoch,
                         loss, print_steps / time_cost))
+                    sys.stdout.flush()
                     time_begin = time.time()
                 # self._check_save()
             # if cur_task.train_finish and cur_task.cur_train_step + cur_task.cur_train_epoch * cur_task.steps_pur_epoch == cur_task.expected_train_steps:
@@ -653,6 +657,7 @@ class Trainer(object):
                     print("batch {}/{}, speed: {:.2f} steps/s".format(cur_predict_step, self._pred_steps_pur_epoch, print_steps / time_cost))
+                    sys.stdout.flush()
                     time_begin = time.time()

         if self._pred_head.epoch_inputs_attrs:
@@ -816,6 +821,7 @@ class Trainer(object):
         with open(os.path.join(dirpath, '__conf__'), 'w') as writer:
             writer.write(json.dumps(conf, indent=1))
         print(self._name + ': predict model saved at ' + dirpath)
+        sys.stdout.flush()

     def _load(self, infer_model_path=None):
@@ -827,5 +833,6 @@ class Trainer(object):
         pred_prog, self._pred_input_varname_list, self._pred_fetch_var_list = \
             fluid.io.load_inference_model(infer_model_path, self._exe)
         print(self._name + ': inference model loaded from ' + infer_model_path)
+        sys.stdout.flush()
         return pred_prog
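The trainer.py hunks apply the same flush-after-print pattern to checkpoint and prediction logging. An alternative worth noting (an assumption on my part, not what this commit does) is to centralize the flush in a tiny logging helper so new messages cannot forget it:

# Hypothetical alternative: one helper that prints and flushes in a single call.
from __future__ import print_function
import sys

def log(*args):
    print(*args)
    sys.stdout.flush()

log('checkpoint has been saved at ./outputs/ckpt.step4641')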