PaddlePaddle / ERNIE — commit a2b70aa6
Unverified commit a2b70aa6, authored on Nov 15, 2021 by Meiyim; committed via GitHub on Nov 15, 2021.
Fix for pd22 (#763)
* fix-load-pretrained model
* update readme
* fix path
Parent: ff89c2a6
Showing 11 changed files with 21 additions and 21 deletions (+21 −21):
README.zh.md                               +1 −1
demo/distill/distill.py                    +1 −1
demo/finetune_classifier.py                +3 −3
demo/finetune_classifier_distributed.py    +3 −3
demo/finetune_mrc.py                       +2 −2
demo/finetune_ner.py                       +2 −2
demo/finetune_sentiment_analysis.py        +3 −3
demo/pretrain/pretrain.py                  +1 −1
demo/seq2seq/decode.py                     +1 −1
demo/seq2seq/finetune_seq2seq.py           +3 −3
ernie/modeling_ernie.py                    +1 −1
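Every Python change in this commit is the same mechanical fix: paths handed to `P.save` / `P.load` are now wrapped in `str(...)`. The demos build checkpoint paths with `pathlib` (`args.save_dir / 'ckpt.bin'` yields a `Path`, not a `str`), and Paddle 2.2 apparently no longer accepts `Path` objects at these call sites. A minimal sketch of the pattern, not taken from the repo (the helper names are illustrative; `paddle` is aliased to `P` to match the demos):

```python
from pathlib import Path

import paddle as P  # the ERNIE demos alias paddle as P


def save_checkpoint(model, save_dir):
    # `save_dir / 'ckpt.bin'` is a pathlib.Path; Paddle 2.2's P.save
    # expects a plain string path, hence the str() conversion.
    ckpt = Path(save_dir) / 'ckpt.bin'
    P.save(model.state_dict(), str(ckpt))


def load_checkpoint(model, ckpt_path):
    # Symmetric str() conversion on the load side.
    sd = P.load(str(Path(ckpt_path)))
    model.set_state_dict(sd)
```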
README.zh.md

````diff
@@ -173,7 +173,7 @@ data/xnli
 - 使用 `动态图` 模型进行finetune:
 ```script
-python3 ./ernie_d/demo/finetune_classifier.py \
+python3 ./demo/finetune_classifier.py \
    --from_pretrained ernie-1.0 \
    --data_dir ./data/xnli
 ```
````
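The README fix simply drops the `ernie_d/` prefix so the sample command matches the actual repository layout.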
demo/distill/distill.py

```diff
@@ -153,7 +153,7 @@ if not os.path.exists('./teacher_model.bin'):
         if step % 100 == 0:
             f1 = evaluate_teacher(teacher_model, dev_ds)
             print('teacher f1: %.5f' % f1)
-    P.save(teacher_model.state_dict(), './teacher_model.bin')
+    P.save(teacher_model.state_dict(), str('./teacher_model.bin'))
 else:
     state_dict = P.load('./teacher_model.bin')
     teacher_model.set_state_dict(state_dict)
```
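Note that `str('./teacher_model.bin')` wraps a value that is already a string, so this particular change is a no-op; the `str(...)` conversion appears to have been applied uniformly to every save/load call site.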
demo/finetune_classifier.py

```diff
@@ -162,7 +162,7 @@ model = ErnieModelForSequenceClassification.from_pretrained(
 if args.init_checkpoint is not None:
     log.info('loading checkpoint from %s' % args.init_checkpoint)
-    sd = P.load(args.init_checkpoint)
+    sd = P.load(str(args.init_checkpoint))
     model.set_state_dict(sd)
 
 g_clip = P.nn.ClipGradByGlobalNorm(1.0)  #experimental
@@ -238,9 +238,9 @@ with LogWriter(
                 log_writer.add_scalar('eval/acc', acc, step=step)
                 log.debug('acc %.5f' % acc)
                 if args.save_dir is not None:
-                    P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+                    P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
     if args.save_dir is not None:
-        P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+        P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
     if args.inference_model_dir is not None:
         class InferenceModel(ErnieModelForSequenceClassification):
```
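Expressions like `args.save_dir / 'ckpt.bin'` only work because the demos parse path flags into `pathlib.Path` objects. Roughly, and purely as a hypothetical sketch (the real flag definitions live in each demo's argument parser):

```python
import argparse
from pathlib import Path

parser = argparse.ArgumentParser()
# Parsing straight to Path is what makes `args.save_dir / 'ckpt.bin'`
# legal -- and what makes the str() wrapping above necessary.
parser.add_argument('--save_dir', type=Path, default=None)
parser.add_argument('--init_checkpoint', type=Path, default=None)
args = parser.parse_args()
```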
demo/finetune_classifier_distributed.py

```diff
@@ -128,7 +128,7 @@ model = ErnieModelForSequenceClassification.from_pretrained(
 if args.init_checkpoint is not None:
     log.info('loading checkpoint from %s' % args.init_checkpoint)
-    sd = P.load(args.init_checkpoint)
+    sd = P.load(str(args.init_checkpoint))
     model.set_state_dict(sd)
 
 model = P.DataParallel(model)
@@ -195,11 +195,11 @@ with P.amp.auto_cast(enable=args.use_amp):
                 #log_writer.add_scalar('eval/acc', acc, step=step)
                 log.debug('acc %.5f' % acc)
                 if args.save_dir is not None:
-                    P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+                    P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
         # exit
         if step > args.max_steps:
             break
 if args.save_dir is not None and env.dev_id == 0:
-    P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+    P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
 log.debug('done')
```
demo/finetune_mrc.py

```diff
@@ -145,7 +145,7 @@ def train(model, train_dataset, dev_dataset, dev_examples, dev_features,
             log.debug('[step %d] eval result: f1 %.5f em %.5f' %
                       (step, f1, em))
         if env.dev_id == 0 and args.save_dir is not None:
-            P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+            P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
         if step > max_steps:
             break
@@ -244,4 +244,4 @@ if __name__ == "__main__":
               tokenizer, args)
     log.debug('final eval result: f1 %.5f em %.5f' % (f1, em))
     if env.dev_id == 0 and args.save_dir is not None:
-        P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+        P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
```
demo/finetune_ner.py

```diff
@@ -249,10 +249,10 @@ with LogWriter(
                 log.debug('eval f1: %.5f' % f1)
                 log_writer.add_scalar('eval/f1', f1, step=step)
                 if args.save_dir is not None:
-                    P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+                    P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
     f1 = evaluate(model, dev_ds)
     log.debug('final eval f1: %.5f' % f1)
     log_writer.add_scalar('eval/f1', f1, step=step)
     if args.save_dir is not None:
-        P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+        P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
```
demo/finetune_sentiment_analysis.py

```diff
@@ -177,9 +177,9 @@ if not args.eval:
                 log.debug('acc %.5f' % acc)
                 if args.save_dir is not None:
                     P.save(model.state_dict(),
-                           args.save_dir / 'ckpt.bin')
+                           str(args.save_dir / 'ckpt.bin'))
     if args.save_dir is not None:
-        P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+        P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
 else:
     feature_column = propeller.data.FeatureColumns([
         propeller.data.TextColumn(
@@ -189,7 +189,7 @@ else:
             tokenizer=tokenizer.tokenize),
     ])
-    sd = P.load(args.init_checkpoint)
+    sd = P.load(str(args.init_checkpoint))
     model.set_dict(sd)
     model.eval()
```
demo/pretrain/pretrain.py

```diff
@@ -394,7 +394,7 @@ if __name__ == '__main__':
             log.debug(msg)
         if step % 1000 == 0 and env.dev_id == 0:
             log.debug('saveing...')
-            P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+            P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
         if step > args.max_steps:
             break
     log.info('done')
```
demo/seq2seq/decode.py

```diff
@@ -401,7 +401,7 @@ if __name__ == '__main__':
     rev_dict[tokenizer.pad_id] = ''  # replace [PAD]
     rev_dict[tokenizer.unk_id] = ''  # replace [PAD]
-    sd = P.load(args.save_dir)
+    sd = P.load(str(args.save_dir))
     ernie.set_state_dict(sd)
 
     def map_fn(src_ids):
```
demo/seq2seq/finetune_seq2seq.py

```diff
@@ -308,7 +308,7 @@ def seq2seq(model, tokenizer, args):
             log.debug(msg)
         if args.save_dir is not None and step % 1000 == 0 and env.dev_id == 0:
-            P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+            P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
         if args.predict_output_dir is not None and step > args.skip_eval_steps and step % args.eval_steps == 0:
             assert args.predict_output_dir.exists(), \
@@ -320,7 +320,7 @@ def seq2seq(model, tokenizer, args):
             evaluate(model, dev_ds, step, args)
     if args.save_dir is not None:
-        P.save(model.state_dict(), args.save_dir / 'ckpt.bin')
+        P.save(model.state_dict(), str(args.save_dir / 'ckpt.bin'))
 
 if __name__ == '__main__':
@@ -414,7 +414,7 @@ if __name__ == '__main__':
     if args.init_checkpoint is not None:
         log.info('loading checkpoint from %s' % args.init_checkpoint)
-        sd = P.load(args.init_checkpoint)
+        sd = P.load(str(args.init_checkpoint))
         ernie.set_state_dict(sd)
 
     seq2seq(ernie, tokenizer, args)
```
ernie/modeling_ernie.py

```diff
@@ -290,7 +290,7 @@ class PretrainedModel(object):
         # log.debug('load pretrained weight from program state')
         # F.io.load_program_state(param_path) #buggy in dygraph.gurad, push paddle to fix
         if state_dict_path.exists():
-            m = P.load(state_dict_path)
+            m = P.load(str(state_dict_path))
             for k, v in model.state_dict().items():
                 if k not in m:
                     log.warn('param:%s not set in pretrained model, skip' % k)
```
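Here the `state_dict_path.exists()` check on the line above confirms the path is already a `pathlib.Path`, so `P.load` gets the same `str(...)` treatment as the demo scripts.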