PaddlePaddle / PaddleHub
Commit 8a69e360
Author: Zeyu Chen
Date: Mar 27, 2019
Parent: 3b308d24

remove useless files of bert-cls
Showing 3 changed files with 5 additions and 31 deletions (+5 -31)

demo/bert-cls/finetune_with_hub.py      +3  -23
demo/bert-cls/run_fintune_with_hub.sh   +1  -8
paddle_hub/finetune/task.py             +1  -0
demo/bert-cls/finetune_with_hub.py

```diff
@@ -18,7 +18,6 @@ from __future__ import division
 from __future__ import print_function

 import os
-import collections
 import time
 import argparse
 import numpy as np
@@ -29,20 +28,13 @@ import paddle.fluid as fluid
 import paddle_hub as hub
 import reader.cls as reader
-from model.bert import BertConfig
-from model.classifier import create_model_with_hub, create_model
 from utils.args import ArgumentGroup, print_arguments
-from utils.init import init_pretraining_params, init_checkpoint
+from paddle_hub.finetune.config import FinetuneConfig

 # yapf: disable
 parser = argparse.ArgumentParser(__doc__)
 model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
 model_g.add_arg("bert_config_path", str, None, "Path to the json file for bert model config.")
-model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
-model_g.add_arg("init_pretraining_params", str, None,
-                "Init pre-training params which preforms fine-tuning from. If the "
-                "arg 'init_checkpoint' has been set, this argument wouldn't be valid.")
-model_g.add_arg("checkpoints", str, "checkpoints", "Path to save checkpoints.")

 train_g = ArgumentGroup(parser, "training", "training options.")
 train_g.add_arg("epoch", int, 3, "Number of epoches for fine-tuning.")
@@ -52,9 +44,7 @@ train_g.add_arg("lr_scheduler", str, "linear_warmup_decay",
 train_g.add_arg("weight_decay", float, 0.01, "Weight decay rate for L2 regularizer.")
 train_g.add_arg("warmup_proportion", float, 0.1,
                 "Proportion of training steps to perform linear learning rate warmup for.")
-train_g.add_arg("save_steps", int, 10000, "The steps interval to save checkpoints.")
 train_g.add_arg("validation_steps", int, 1000, "The steps interval to evaluate model performance.")
-train_g.add_arg("use_fp16", bool, False, "Whether to use fp16 mixed precision training.")
 train_g.add_arg("loss_scaling", float, 1.0,
                 "Loss scaling factor for mixed precision training, only valid when use_fp16 is enabled.")
@@ -76,10 +66,6 @@ data_g.add_arg("random_seed", int, 0, "Random seed.")
 run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
 run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
-run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
-run_type_g.add_arg("num_iteration_per_drop_scope", int, 1, "Ihe iteration intervals to clean up temporary variables.")
-run_type_g.add_arg("task_name", str, None,
-                   "The name of task to perform fine-tuning, should be in {'xnli', 'mnli', 'cola', 'mrpc'}.")

 args = parser.parse_args()
 # yapf: enable.
@@ -121,17 +107,11 @@ def test_hub_api(args, config):
     hub.finetune_and_eval(task, feed_list, processor, config)

-FinetuneConfig = collections.namedtuple(
-    'FinetuneConfig',
-    ['stat_interval', 'eval_interval', 'use_cuda', 'learning_rate',
-     'weight_decay', 'in_tokens', 'epoch', 'batch_size', 'max_seq_len',
-     'warmup_proportion'])

 if __name__ == '__main__':
     print_arguments(args)
     config = FinetuneConfig(
-        stat_interval=10,
-        eval_interval=100,
+        stat_interval=args.skip_steps,
+        eval_interval=args.validation_steps,
         use_cuda=True,
         learning_rate=args.learning_rate,
         weight_decay=args.weight_decay,
         ...
```
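The demo now takes FinetuneConfig from paddle_hub.finetune.config instead of defining its own namedtuple. The config module itself is not part of this commit, so the following is only a minimal sketch of what it presumably exports, assuming it carries the same fields as the namedtuple deleted above:

```python
# Hypothetical sketch of paddle_hub/finetune/config.py, assumed to mirror the
# namedtuple removed from demo/bert-cls/finetune_with_hub.py in this commit.
import collections

FinetuneConfig = collections.namedtuple('FinetuneConfig', [
    'stat_interval',       # steps between training-statistics reports
    'eval_interval',       # steps between evaluations on the dev set
    'use_cuda',            # run fine-tuning on GPU if True
    'learning_rate',
    'weight_decay',
    'in_tokens',           # interpret batch_size as a token count
    'epoch',
    'batch_size',
    'max_seq_len',
    'warmup_proportion',
])
```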
demo/bert-cls/run_fintune_with_hub.sh

```diff
-export FLAGS_enable_parallel_graph=1
-export FLAGS_sync_nccl_allreduce=1
 export CUDA_VISIBLE_DEVICES=6

 BERT_BASE_PATH="chinese_L-12_H-768_A-12"
 TASK_NAME='chnsenticorp'
 DATA_PATH=chnsenticorp_data
-CKPT_PATH=chn_checkpoints
 rm -rf $CKPT_PATH

-python -u finetune_with_hub.py --task_name ${TASK_NAME} \
+python -u finetune_with_hub.py \
                   --use_cuda true \
                   --batch_size 4096 \
                   --in_tokens true \
-                  --init_pretraining_params ${BERT_BASE_PATH}/params \
                   --data_dir ${DATA_PATH} \
                   --vocab_path ${BERT_BASE_PATH}/vocab.txt \
-                  --checkpoints ${CKPT_PATH} \
-                  --save_steps 100 \
                   --weight_decay 0.01 \
                   --warmup_proportion 0.0 \
                   --validation_steps 50 \
                   --epoch 3 \
                   --max_seq_len 128 \
-                  --bert_config_path ${BERT_BASE_PATH}/bert_config.json \
                   --learning_rate 5e-5 \
                   --skip_steps 10
```
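Since the script no longer passes --task_name, --init_pretraining_params, --checkpoints, or --save_steps, the remaining flags map almost one-to-one onto the FinetuneConfig fields. Below is a self-contained, hypothetical sketch of that mapping; the FinetuneConfig stand-in and the literal values simply echo the script above, whereas in the real demo they come from the argparse flags:

```python
import collections

# Local stand-in for paddle_hub.finetune.config.FinetuneConfig (assumed layout).
FinetuneConfig = collections.namedtuple('FinetuneConfig', [
    'stat_interval', 'eval_interval', 'use_cuda', 'learning_rate',
    'weight_decay', 'in_tokens', 'epoch', 'batch_size', 'max_seq_len',
    'warmup_proportion'])

# Hypothetical values copied from run_fintune_with_hub.sh.
config = FinetuneConfig(
    stat_interval=10,         # --skip_steps 10
    eval_interval=50,         # --validation_steps 50
    use_cuda=True,            # --use_cuda true
    learning_rate=5e-5,       # --learning_rate 5e-5
    weight_decay=0.01,        # --weight_decay 0.01
    in_tokens=True,           # --in_tokens true
    epoch=3,                  # --epoch 3
    batch_size=4096,          # --batch_size 4096
    max_seq_len=128,          # --max_seq_len 128
    warmup_proportion=0.0)    # --warmup_proportion 0.0

print(config)

# In finetune_with_hub.py the resulting config is handed to PaddleHub:
#     hub.finetune_and_eval(task, feed_list, processor, config)
```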
paddle_hub/finetune/task.py

```diff
@@ -21,6 +21,7 @@ import multiprocessing
 from paddle_hub.tools.logger import logger
 from paddle_hub.finetune.optimization import bert_optimization
+from paddle_hub.finetune.config import FinetuneConfig

 __all__ = ['append_mlp_classifier']
```