Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
models
提交
a7e48513
M
models
项目概览
PaddlePaddle
/
models
大约 1 年 前同步成功
通知
222
Star
6828
Fork
2962
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
602
列表
看板
标记
里程碑
合并请求
255
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
models
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
602
Issue
602
列表
看板
标记
里程碑
合并请求
255
合并请求
255
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
a7e48513
编写于
12月 09, 2020
作者:
T
taixiurong
提交者:
GitHub
12月 09, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Add BERT support xpu (#5001)
上级
d0e40f2e
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
61 additions
and
3 deletions
+61
-3
PaddleNLP/pretrain_language_models/BERT/config/xpu_bert_config.yaml
...pretrain_language_models/BERT/config/xpu_bert_config.yaml
+35
-0
PaddleNLP/pretrain_language_models/BERT/run_classifier.py
PaddleNLP/pretrain_language_models/BERT/run_classifier.py
+16
-3
PaddleNLP/pretrain_language_models/BERT/utils/args.py
PaddleNLP/pretrain_language_models/BERT/utils/args.py
+10
-0
未找到文件。
PaddleNLP/pretrain_language_models/BERT/config/xpu_bert_config.yaml
0 → 100644
浏览文件 @
a7e48513
# task_name for train.
task_name: "XNLI"
# use cuda for train
use_cuda: false
# use xpu for train
use_xpu: true
# do train
do_train: true
# do val
do_val: true
# do test
do_test: true
# batch size
batch_size: 16
# in_tokens
in_tokens: false
# init pretraining params for train
init_pretraining_params: 'chinese_L-12_H-768_A-12/params'
# xpu use data XNLI1.0
data_dir: 'data/XNLI1.0/'
# vocab_path
vocab_path: 'chinese_L-12_H-768_A-12/vocab.txt'
# checkpoints
checkpoints: './save/checkpoints'
save_steps: 100
weight_decay: 0.01
warmup_proportion: 0.1
validation_steps: 100
epoch: 1
max_seq_len: 128
learning_rate: 5e-5
skip_steps: 10
num_iteration_per_drop_scope: 10
verbose: true
bert_config_path: 'chinese_L-12_H-768_A-12/bert_config.json'
PaddleNLP/pretrain_language_models/BERT/run_classifier.py
浏览文件 @
a7e48513
...
@@ -38,7 +38,7 @@ import reader.cls as reader
...
@@ -38,7 +38,7 @@ import reader.cls as reader
from
model.bert
import
BertConfig
from
model.bert
import
BertConfig
from
model.classifier
import
create_model
from
model.classifier
import
create_model
from
optimization
import
optimization
from
optimization
import
optimization
from
utils.args
import
ArgumentGroup
,
print_arguments
,
check_cuda
,
check_version
from
utils.args
import
ArgumentGroup
,
print_arguments
,
check_cuda
,
check_
xpu
,
check_
version
from
utils.init
import
init_pretraining_params
,
init_checkpoint
from
utils.init
import
init_pretraining_params
,
init_checkpoint
from
utils.cards
import
get_cards
from
utils.cards
import
get_cards
import
dist_utils
import
dist_utils
...
@@ -101,6 +101,7 @@ run_type_g.add_arg("is_profiler", int, 0, "the profiler
...
@@ -101,6 +101,7 @@ run_type_g.add_arg("is_profiler", int, 0, "the profiler
run_type_g
.
add_arg
(
"max_iter"
,
int
,
0
,
"the max batch nums to train. (used for benchmark)"
)
run_type_g
.
add_arg
(
"max_iter"
,
int
,
0
,
"the max batch nums to train. (used for benchmark)"
)
run_type_g
.
add_arg
(
"use_cuda"
,
bool
,
True
,
"If set, use GPU for training."
)
run_type_g
.
add_arg
(
"use_cuda"
,
bool
,
True
,
"If set, use GPU for training."
)
run_type_g
.
add_arg
(
"use_xpu"
,
bool
,
True
,
"If set, use XPU for training."
)
run_type_g
.
add_arg
(
"use_fast_executor"
,
bool
,
False
,
"If set, use fast parallel executor (in experiment)."
)
run_type_g
.
add_arg
(
"use_fast_executor"
,
bool
,
False
,
"If set, use fast parallel executor (in experiment)."
)
run_type_g
.
add_arg
(
"shuffle"
,
bool
,
True
,
""
)
run_type_g
.
add_arg
(
"shuffle"
,
bool
,
True
,
""
)
run_type_g
.
add_arg
(
"num_iteration_per_drop_scope"
,
int
,
1
,
"Ihe iteration intervals to clean up temporary variables."
)
run_type_g
.
add_arg
(
"num_iteration_per_drop_scope"
,
int
,
1
,
"Ihe iteration intervals to clean up temporary variables."
)
...
@@ -148,10 +149,17 @@ def get_device_num():
...
@@ -148,10 +149,17 @@ def get_device_num():
def
main
(
args
):
def
main
(
args
):
bert_config
=
BertConfig
(
args
.
bert_config_path
)
bert_config
=
BertConfig
(
args
.
bert_config_path
)
bert_config
.
print_config
()
bert_config
.
print_config
()
if
args
.
use_xpu
:
paddle
.
enable_static
()
if
args
.
use_cuda
:
if
args
.
use_cuda
:
place
=
fluid
.
CUDAPlace
(
int
(
os
.
getenv
(
'FLAGS_selected_gpus'
,
'0'
)))
place
=
fluid
.
CUDAPlace
(
int
(
os
.
getenv
(
'FLAGS_selected_gpus'
,
'0'
)))
dev_count
=
get_device_num
()
dev_count
=
get_device_num
()
elif
args
.
use_xpu
:
xpu_id
=
int
(
os
.
getenv
(
'FLAGS_selected_xpus'
,
'0'
))
place
=
fluid
.
XPUPlace
(
xpu_id
)
dev_count
=
len
([
place
])
else
:
else
:
place
=
fluid
.
CPUPlace
()
place
=
fluid
.
CPUPlace
()
dev_count
=
int
(
os
.
environ
.
get
(
'CPU_NUM'
,
multiprocessing
.
cpu_count
()))
dev_count
=
int
(
os
.
environ
.
get
(
'CPU_NUM'
,
multiprocessing
.
cpu_count
()))
...
@@ -311,8 +319,12 @@ def main(args):
...
@@ -311,8 +319,12 @@ def main(args):
train_data_generator
=
fluid
.
contrib
.
reader
.
distributed_batch_reader
(
train_data_generator
=
fluid
.
contrib
.
reader
.
distributed_batch_reader
(
train_data_generator
)
train_data_generator
)
train_compiled_program
=
fluid
.
CompiledProgram
(
train_program
).
with_data_parallel
(
if
args
.
use_xpu
:
loss_name
=
loss
.
name
,
build_strategy
=
build_strategy
)
train_compiled_program
=
train_program
else
:
train_compiled_program
=
fluid
.
CompiledProgram
(
train_program
).
with_data_parallel
(
loss_name
=
loss
.
name
,
build_strategy
=
build_strategy
)
train_data_loader
.
set_batch_generator
(
train_data_generator
,
place
)
train_data_loader
.
set_batch_generator
(
train_data_generator
,
place
)
...
@@ -447,5 +459,6 @@ def main(args):
...
@@ -447,5 +459,6 @@ def main(args):
if
__name__
==
'__main__'
:
if
__name__
==
'__main__'
:
print_arguments
(
args
)
print_arguments
(
args
)
check_cuda
(
args
.
use_cuda
)
check_cuda
(
args
.
use_cuda
)
check_xpu
(
args
.
use_xpu
)
check_version
()
check_version
()
main
(
args
)
main
(
args
)
PaddleNLP/pretrain_language_models/BERT/utils/args.py
浏览文件 @
a7e48513
...
@@ -61,6 +61,16 @@ def check_cuda(use_cuda, err = \
...
@@ -61,6 +61,16 @@ def check_cuda(use_cuda, err = \
except
Exception
as
e
:
except
Exception
as
e
:
pass
pass
def check_xpu(use_xpu, err = \
    "\nYou can not set use_xpu = True in the model because you are using paddlepaddle-cpu or paddlepaddle-gpu.\n \
Please: 1. Install paddlepaddle-xpu to run your models on XPU or 2. Set use_xpu = False to run models on CPU.\n"):
    """Abort with a helpful message when XPU execution is requested but unavailable.

    Args:
        use_xpu: Whether the user asked to run the model on an XPU device.
        err: Message printed before exiting when the check fails.

    Exits the process with status 1 if ``use_xpu`` is truthy while the
    installed paddle build reports no XPU support. ``sys.exit`` raises
    ``SystemExit`` (a ``BaseException``), so it is not swallowed by the
    ``except Exception`` below.
    """
    try:
        # Idiomatic truthiness test instead of `== True` / `== False`.
        if use_xpu and not fluid.is_compiled_with_xpu():
            print(err)
            sys.exit(1)
    except Exception:
        # Best-effort, mirroring check_cuda above: older paddle builds may
        # lack fluid.is_compiled_with_xpu, and this probe must never be the
        # reason a run crashes.
        pass
def
check_version
():
def
check_version
():
"""
"""
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录