Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleHub
提交
50d1027b
P
PaddleHub
项目概览
PaddlePaddle
/
PaddleHub
大约 1 年 前同步成功
通知
280
Star
12117
Fork
2091
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
200
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleHub
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
200
Issue
200
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
50d1027b
编写于
4月 02, 2019
作者:
Z
Zeyu Chen
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
update typo and remove useless variables
上级
14535dd4
变更
4
隐藏空白更改
内联
并排
Showing
4 changed files
with
10 additions
and
118 deletions
+10
-118
demo/bert-cls/finetune_with_hub.py
demo/bert-cls/finetune_with_hub.py
+0
-96
demo/bert-cls/run_fintune_with_hub.sh
demo/bert-cls/run_fintune_with_hub.sh
+0
-18
paddle_hub/__init__.py
paddle_hub/__init__.py
+1
-1
paddle_hub/finetune/config.py
paddle_hub/finetune/config.py
+9
-3
未找到文件。
demo/bert-cls/finetune_with_hub.py
已删除
100644 → 0
浏览文件 @
14535dd4
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
os
import
time
import
argparse
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
import
paddle_hub
as
hub
# yapf: disable
def _build_arg_parser():
    """Build the command-line parser for the BERT fine-tuning demo.

    All training hyper-parameters (epochs, learning rate, batch size,
    sequence length) and paths (module dir, data dir, checkpoint dir)
    come in through these flags.
    """
    builder = argparse.ArgumentParser(__doc__)
    builder.add_argument("--num_epoch", type=int, default=3,
                         help="Number of epoches for fine-tuning.")
    builder.add_argument("--learning_rate", type=float, default=5e-5,
                         help="Learning rate used to train with warmup.")
    builder.add_argument("--hub_module_dir", type=str, default=None,
                         help="PaddleHub module directory")
    builder.add_argument("--lr_scheduler", type=str,
                         default="linear_warmup_decay",
                         help="scheduler of learning rate.",
                         choices=['linear_warmup_decay', 'noam_decay'])
    builder.add_argument("--weight_decay", type=float, default=0.01,
                         help="Weight decay rate for L2 regularizer.")
    builder.add_argument("--data_dir", type=str, default=None,
                         help="Path to training data.")
    builder.add_argument("--checkpoint_dir", type=str, default=None,
                         help="Directory to model checkpoint")
    builder.add_argument("--max_seq_len", type=int, default=512,
                         help="Number of words of the longest seqence.")
    builder.add_argument("--batch_size", type=int, default=32,
                         help="Total examples' number in batch for training.")
    return builder


parser = _build_arg_parser()
args = parser.parse_args()
# yapf: enable.
# yapf: enable.
if __name__ == '__main__':
    # Optimization strategy specialised for BERT fine-tuning
    # (applies the configured L2 weight decay).
    strategy = hub.BERTFinetuneStrategy(weight_decay=args.weight_decay)

    # Bundle all run-time knobs for finetune_and_eval: logging/eval/
    # checkpoint intervals plus the CLI hyper-parameters.
    config = hub.FinetuneConfig(
        log_interval=10,
        eval_interval=100,
        save_ckpt_interval=200,
        checkpoint_dir=args.checkpoint_dir,
        learning_rate=args.learning_rate,
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        strategy=strategy)

    # Load the pre-trained PaddleHub BERT module from a local directory.
    module = hub.Module(module_dir=args.hub_module_dir)

    # Use BERTTokenizeReader to tokenize the dataset according to the
    # model's own vocabulary.
    reader = hub.reader.BERTTokenizeReader(
        dataset=hub.dataset.ChnSentiCorp(),  # download chnsenticorp dataset
        vocab_path=module.get_vocab_path(),
        max_seq_len=args.max_seq_len)
    num_labels = len(reader.get_labels())

    # Fetch the module's "tokens" signature: input tensors, output
    # tensors and the underlying program (trainable for fine-tuning).
    input_dict, output_dict, program = module.context(
        sign_name="tokens", trainable=True, max_seq_len=args.max_seq_len)

    with fluid.program_guard(program):
        label = fluid.layers.data(name="label", shape=[1], dtype='int64')

        # "pooled_output" is the sentence-level representation for
        # classification; use "sequence_outputs" for token-level output.
        pooled_output = output_dict["pooled_output"]

        # The feed list must cover every tensor the BERT module needs,
        # plus the classification label.
        feed_list = [
            input_dict["input_ids"].name,
            input_dict["position_ids"].name,
            input_dict["segment_ids"].name,
            input_dict["input_mask"].name,
            label.name,
        ]

        # Define a classification fine-tune task by attaching an MLP
        # head on top of the pooled output.
        cls_task = hub.append_mlp_classifier(
            pooled_output, label, num_classes=num_labels)

        # Fine-tune and evaluate via PaddleHub's API; training,
        # evaluation, testing and model saving all happen automatically.
        hub.finetune_and_eval(
            task=cls_task,
            data_reader=reader,
            feed_list=feed_list,
            config=config)
demo/bert-cls/run_fintune_with_hub.sh
已删除
100644 → 0
浏览文件 @
14535dd4
#!/bin/bash
# Launch BERT fine-tuning on the ChnSentiCorp task via PaddleHub.
# Fixes over the original: variable expansions are quoted so paths
# containing spaces do not break word-splitting, the flag style is
# consistent (space-separated everywhere instead of a lone
# `--hub_module_dir=...`), and `set -e` aborts on the first failure.
set -e

export CUDA_VISIBLE_DEVICES=5

DATA_PATH=./chnsenticorp_data
HUB_MODULE_DIR="./hub_module/bert_chinese_L-12_H-768_A-12.hub_module"
#HUB_MODULE_DIR="./hub_module/ernie_stable.hub_module"
CKPT_DIR="./ckpt"
#rm -rf $CKPT_DIR

python -u finetune_with_hub.py \
    --batch_size 32 \
    --hub_module_dir "${HUB_MODULE_DIR}" \
    --data_dir "${DATA_PATH}" \
    --weight_decay 0.01 \
    --checkpoint_dir "${CKPT_DIR}" \
    --num_epoch 3 \
    --max_seq_len 128 \
    --learning_rate 5e-5
paddle_hub/__init__.py
浏览文件 @
50d1027b
...
@@ -34,7 +34,7 @@ from .io.type import DataType
...
@@ -34,7 +34,7 @@ from .io.type import DataType
from
.finetune.network
import
append_mlp_classifier
from
.finetune.network
import
append_mlp_classifier
from
.finetune.finetune
import
finetune_and_eval
from
.finetune.finetune
import
finetune_and_eval
from
.finetune.config
import
Finetune
Config
from
.finetune.config
import
Run
Config
from
.finetune.task
import
Task
from
.finetune.task
import
Task
from
.finetune.strategy
import
BERTFinetuneStrategy
from
.finetune.strategy
import
BERTFinetuneStrategy
from
.finetune.strategy
import
DefaultStrategy
from
.finetune.strategy
import
DefaultStrategy
...
...
paddle_hub/finetune/config.py
浏览文件 @
50d1027b
...
@@ -15,10 +15,12 @@
...
@@ -15,10 +15,12 @@
import
time
import
time
from
.strategy
import
DefaultStrategy
from
.strategy
import
DefaultStrategy
from
paddle_hub.common.utils
import
md5
from
datetime
import
datetime
from
paddle_hub.common.logger
import
logger
class
FinetuneConfig
(
object
):
class
RunConfig
(
object
):
""" This class specifies the configurations for PaddleHub to finetune """
""" This class specifies the configurations for PaddleHub to finetune """
def
__init__
(
self
,
def
__init__
(
self
,
...
@@ -45,9 +47,13 @@ class FinetuneConfig(object):
...
@@ -45,9 +47,13 @@ class FinetuneConfig(object):
self
.
_strategy
=
strategy
self
.
_strategy
=
strategy
self
.
_enable_memory_optim
=
enable_memory_optim
self
.
_enable_memory_optim
=
enable_memory_optim
if
checkpoint_dir
is
None
:
if
checkpoint_dir
is
None
:
self
.
_checkpoint_dir
=
"hub_cpkt_"
+
md5
(
str
(
time
.
time
()))[
0
:
20
]
now
=
int
(
time
.
time
())
time_str
=
time
.
strftime
(
"%Y%m%d%H%M%S"
,
time
.
localtime
(
now
))
self
.
_checkpoint_dir
=
"ckpt_"
+
time_str
else
:
else
:
self
.
_checkpoint_dir
=
checkpoint_dir
self
.
_checkpoint_dir
=
checkpoint_dir
logger
.
info
(
"Checkpoint dir: {}"
.
format
(
self
.
_checkpoint_dir
))
@
property
@
property
def
log_interval
(
self
):
def
log_interval
(
self
):
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录