Commit ed3d2276 authored by W wuzewu

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleHub into develop

@@ -40,54 +40,52 @@ args = parser.parse_args()
 # yapf: enable.
 if __name__ == '__main__':
-    # Select a finetune strategy
-    strategy = hub.BERTFinetuneStrategy(
-        weight_decay=args.weight_decay,
-        learning_rate=args.learning_rate,
-        warmup_strategy="linear_warmup_decay",
-    )
-    # Setup running config for PaddleHub Finetune API
-    config = hub.RunConfig(
-        eval_interval=100,
-        use_cuda=True,
-        num_epoch=args.num_epoch,
-        batch_size=args.batch_size,
-        checkpoint_dir=args.checkpoint_dir,
-        strategy=strategy)
-    # loading PaddleHub ERNIE pretrained model
+    # Step1: load the PaddleHub ERNIE pretrained model
     module = hub.Module(name="ernie")
+    inputs, outputs, program = module.context(
+        trainable=True, max_seq_len=args.max_seq_len)
-    # Sentence classification dataset reader
+    # Step2: download the dataset and use ClassifyReader to read it
+    dataset = hub.dataset.NLPCC_DBQA()
     reader = hub.reader.ClassifyReader(
-        dataset=hub.dataset.NLPCC_DBQA(),  # download NLPCC_DBQA dataset
+        dataset=dataset,
         vocab_path=module.get_vocab_path(),
         max_seq_len=args.max_seq_len)
     num_labels = len(reader.get_labels())
-    input_dict, output_dict, program = module.context(
-        sign_name="tokens", trainable=True, max_seq_len=args.max_seq_len)
+    # Step3: construct the transfer learning network
     with fluid.program_guard(program):
         label = fluid.layers.data(name="label", shape=[1], dtype='int64')
         # Use "pooled_output" for classification tasks on an entire sentence.
-        # Use "sequence_outputs" for token-level output.
-        pooled_output = output_dict["pooled_output"]
+        # Use "sequence_output" for token-level output.
+        pooled_output = outputs["pooled_output"]
         # Setup feed list for data feeder
         # Must feed all the tensors the ERNIE module needs
         feed_list = [
-            input_dict["input_ids"].name, input_dict["position_ids"].name,
-            input_dict["segment_ids"].name, input_dict["input_mask"].name,
-            label.name
+            inputs["input_ids"].name, inputs["position_ids"].name,
+            inputs["segment_ids"].name, inputs["input_mask"].name, label.name
         ]
         # Define a classification finetune task by PaddleHub's API
         cls_task = hub.create_text_classification_task(
             pooled_output, label, num_classes=num_labels)
+        # Step4: select a finetune strategy, set up the config, and finetune
+        strategy = hub.BERTFinetuneStrategy(
+            weight_decay=args.weight_decay,
+            learning_rate=args.learning_rate,
+            warmup_strategy="linear_warmup_decay",
+        )
+        # Setup running config for PaddleHub Finetune API
+        config = hub.RunConfig(
+            use_cuda=True,
+            num_epoch=args.num_epoch,
+            batch_size=args.batch_size,
+            checkpoint_dir=args.checkpoint_dir,
+            strategy=strategy)
         # Finetune and evaluate by PaddleHub's API
         # will finish training, evaluation, testing, save model automatically
         hub.finetune_and_eval(
...
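Below the truncation, the demo hands everything to the Finetune API. A minimal sketch of how the pieces above fit together, assuming the era's finetune_and_eval accepts the task, reader, feed list, and config by keyword (the keyword names are an assumption, not part of this diff):

# Sketch only: keyword names are assumed, not taken verbatim from the commit.
hub.finetune_and_eval(
    task=cls_task,        # classification head built in Step3
    data_reader=reader,   # ClassifyReader from Step2
    feed_list=feed_list,  # every tensor the ERNIE module needs, plus the label
    config=config)        # strategy and run settings from Step4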
@@ -40,54 +40,52 @@ args = parser.parse_args()
 # yapf: enable.
 if __name__ == '__main__':
-    # Select a finetune strategy
-    strategy = hub.BERTFinetuneStrategy(
-        weight_decay=args.weight_decay,
-        learning_rate=args.learning_rate,
-        warmup_strategy="linear_warmup_decay",
-    )
-    # Setup running config for PaddleHub Finetune API
-    config = hub.RunConfig(
-        eval_interval=100,
-        use_cuda=True,
-        num_epoch=args.num_epoch,
-        batch_size=args.batch_size,
-        checkpoint_dir=args.checkpoint_dir,
-        strategy=strategy)
-    # loading PaddleHub ERNIE pretrained model
+    # Step1: load the PaddleHub ERNIE pretrained model
     module = hub.Module(name="ernie")
+    inputs, outputs, program = module.context(
+        trainable=True, max_seq_len=args.max_seq_len)
-    # Sentence classification dataset reader
+    # Step2: download the dataset and use ClassifyReader to read it
+    dataset = hub.dataset.LCQMC()
     reader = hub.reader.ClassifyReader(
-        dataset=hub.dataset.LCQMC(),  # download LCQMC dataset
+        dataset=dataset,
        vocab_path=module.get_vocab_path(),
         max_seq_len=args.max_seq_len)
     num_labels = len(reader.get_labels())
-    input_dict, output_dict, program = module.context(
-        sign_name="tokens", trainable=True, max_seq_len=args.max_seq_len)
+    # Step3: construct the transfer learning network
     with fluid.program_guard(program):
         label = fluid.layers.data(name="label", shape=[1], dtype='int64')
         # Use "pooled_output" for classification tasks on an entire sentence.
-        # Use "sequence_outputs" for token-level output.
-        pooled_output = output_dict["pooled_output"]
+        # Use "sequence_output" for token-level output.
+        pooled_output = outputs["pooled_output"]
         # Setup feed list for data feeder
         # Must feed all the tensors the ERNIE module needs
         feed_list = [
-            input_dict["input_ids"].name, input_dict["position_ids"].name,
-            input_dict["segment_ids"].name, input_dict["input_mask"].name,
-            label.name
+            inputs["input_ids"].name, inputs["position_ids"].name,
+            inputs["segment_ids"].name, inputs["input_mask"].name, label.name
         ]
         # Define a classification finetune task by PaddleHub's API
         cls_task = hub.create_text_classification_task(
             pooled_output, label, num_classes=num_labels)
+        # Step4: select a finetune strategy, set up the config, and finetune
+        strategy = hub.BERTFinetuneStrategy(
+            weight_decay=args.weight_decay,
+            learning_rate=args.learning_rate,
+            warmup_strategy="linear_warmup_decay",
+        )
+        # Setup running config for PaddleHub Finetune API
+        config = hub.RunConfig(
+            use_cuda=True,
+            num_epoch=args.num_epoch,
+            batch_size=args.batch_size,
+            checkpoint_dir=args.checkpoint_dir,
+            strategy=strategy)
         # Finetune and evaluate by PaddleHub's API
         # will finish training, evaluation, testing, save model automatically
         hub.finetune_and_eval(
...
@@ -40,54 +40,52 @@ args = parser.parse_args()
 # yapf: enable.
 if __name__ == '__main__':
-    # Select a finetune strategy
-    strategy = hub.BERTFinetuneStrategy(
-        weight_decay=args.weight_decay,
-        learning_rate=args.learning_rate,
-        warmup_strategy="linear_warmup_decay",
-    )
-    # Setup running config for PaddleHub Finetune API
-    config = hub.RunConfig(
-        eval_interval=100,
-        use_cuda=True,
-        num_epoch=args.num_epoch,
-        batch_size=args.batch_size,
-        checkpoint_dir=args.checkpoint_dir,
-        strategy=strategy)
-    # loading PaddleHub ERNIE pretrained model
+    # Step1: load the PaddleHub ERNIE pretrained model
     module = hub.Module(name="ernie")
+    inputs, outputs, program = module.context(
+        trainable=True, max_seq_len=args.max_seq_len)
-    # Sentence classification dataset reader
+    # Step2: download the dataset and use ClassifyReader to read it
+    dataset = hub.dataset.ChnSentiCorp()
     reader = hub.reader.ClassifyReader(
-        dataset=hub.dataset.ChnSentiCorp(),  # download chnsenticorp dataset
+        dataset=dataset,
         vocab_path=module.get_vocab_path(),
         max_seq_len=args.max_seq_len)
     num_labels = len(reader.get_labels())
-    input_dict, output_dict, program = module.context(
-        sign_name="tokens", trainable=True, max_seq_len=args.max_seq_len)
+    # Step3: construct the transfer learning network
     with fluid.program_guard(program):
         label = fluid.layers.data(name="label", shape=[1], dtype='int64')
         # Use "pooled_output" for classification tasks on an entire sentence.
-        # Use "sequence_outputs" for token-level output.
-        pooled_output = output_dict["pooled_output"]
+        # Use "sequence_output" for token-level output.
+        pooled_output = outputs["pooled_output"]
         # Setup feed list for data feeder
         # Must feed all the tensors the ERNIE module needs
         feed_list = [
-            input_dict["input_ids"].name, input_dict["position_ids"].name,
-            input_dict["segment_ids"].name, input_dict["input_mask"].name,
-            label.name
+            inputs["input_ids"].name, inputs["position_ids"].name,
+            inputs["segment_ids"].name, inputs["input_mask"].name, label.name
         ]
         # Define a classification finetune task by PaddleHub's API
         cls_task = hub.create_text_classification_task(
             pooled_output, label, num_classes=num_labels)
+        # Step4: select a finetune strategy, set up the config, and finetune
+        strategy = hub.BERTFinetuneStrategy(
+            weight_decay=args.weight_decay,
+            learning_rate=args.learning_rate,
+            warmup_strategy="linear_warmup_decay",
+        )
+        # Setup running config for PaddleHub Finetune API
+        config = hub.RunConfig(
+            use_cuda=True,
+            num_epoch=args.num_epoch,
+            batch_size=args.batch_size,
+            checkpoint_dir=args.checkpoint_dir,
+            strategy=strategy)
         # Finetune and evaluate by PaddleHub's API
         # will finish training, evaluation, testing, save model automatically
         hub.finetune_and_eval(
...
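The three hunks above apply the same Step1–Step4 restructuring to three demos; only the Step2 dataset line differs. As a side-by-side observation (not code from the commit), assuming each dataset class is constructed with no arguments:

dataset = hub.dataset.NLPCC_DBQA()      # DBQA question-answer matching
# dataset = hub.dataset.LCQMC()         # large-scale Chinese question matching
# dataset = hub.dataset.ChnSentiCorp()  # Chinese sentiment classification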
 export CUDA_VISIBLE_DEVICES=0
-CKPT_DIR="./ckpt"
-python -u finetune_with_hub.py \
+CKPT_DIR="./ckpt_sequence_labeling"
+python -u sequence_labeling.py \
     --batch_size 16 \
     --weight_decay 0.01 \
     --checkpoint_dir $CKPT_DIR \
...
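The @@ -40,… headers in the demo hunks start right after args = parser.parse_args(), so the argument parser itself is elided from this page. A hypothetical reconstruction of what such a parser plausibly contains, inferred only from the args.* references in the demos and the flags in the script above (names are grounded in those references; defaults are assumptions):

import argparse

# Hypothetical reconstruction of the elided parser; defaults are assumed.
parser = argparse.ArgumentParser(description="PaddleHub ERNIE finetune demo")
parser.add_argument("--num_epoch", type=int, default=3, help="number of finetune epochs")
parser.add_argument("--batch_size", type=int, default=16, help="training batch size")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="peak learning rate")
parser.add_argument("--weight_decay", type=float, default=0.01, help="weight decay factor")
parser.add_argument("--max_seq_len", type=int, default=128, help="maximum sequence length")
parser.add_argument("--checkpoint_dir", type=str, default="./ckpt", help="checkpoint directory")
args = parser.parse_args()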
@@ -40,35 +40,21 @@ args = parser.parse_args()
 # yapf: enable.
 if __name__ == '__main__':
-    # Select a finetune strategy
-    strategy = hub.BERTFinetuneStrategy(
-        weight_decay=args.weight_decay,
-        learning_rate=args.learning_rate,
-        warmup_strategy="linear_warmup_decay",
-    )
-    # Setup running config for PaddleHub Finetune API
-    config = hub.RunConfig(
-        eval_interval=100,
-        use_cuda=True,
-        num_epoch=args.num_epoch,
-        batch_size=args.batch_size,
-        strategy=strategy)
-    # loading PaddleHub ERNIE pretrained model
+    # Step1: load the PaddleHub ERNIE pretrained model
     module = hub.Module(name="ernie")
+    inputs, outputs, program = module.context(
+        trainable=True, max_seq_len=args.max_seq_len)
-    # Sequence Label dataset reader
+    # Step2: download the dataset and use SequenceLabelReader to read it
+    dataset = hub.dataset.MSRA_NER()
     reader = hub.reader.SequenceLabelReader(
-        dataset=hub.dataset.MSRA_NER(),
+        dataset=dataset,
         vocab_path=module.get_vocab_path(),
         max_seq_len=args.max_seq_len)
     num_labels = len(reader.get_labels())
-    input_dict, output_dict, program = module.context(
-        sign_name="tokens", trainable=True, max_seq_len=args.max_seq_len)
+    # Step3: construct the transfer learning network
     with fluid.program_guard(program):
         label = fluid.layers.data(
             name="label", shape=[args.max_seq_len, 1], dtype='int64')
@@ -76,14 +62,15 @@ if __name__ == '__main__':
         # Use "pooled_output" for classification tasks on an entire sentence.
         # Use "sequence_output" for token-level output.
-        sequence_output = output_dict["sequence_output"]
+        sequence_output = outputs["sequence_output"]
         # Setup feed list for data feeder
         # Must feed all the tensors the ERNIE module needs
+        # Compared to the classification task, we also add the seq_len tensor to the feed list
         feed_list = [
-            input_dict["input_ids"].name, input_dict["position_ids"].name,
-            input_dict["segment_ids"].name, input_dict["input_mask"].name,
-            label.name, seq_len
+            inputs["input_ids"].name, inputs["position_ids"].name,
+            inputs["segment_ids"].name, inputs["input_mask"].name, label.name,
+            seq_len
         ]
         # Define a sequence labeling finetune task by PaddleHub's API
         seq_label_task = hub.create_seq_labeling_task(
@@ -92,6 +79,19 @@ if __name__ == '__main__':
             seq_len=seq_len,
             num_classes=num_labels)
+        # Select a finetune strategy
+        strategy = hub.BERTFinetuneStrategy(
+            weight_decay=args.weight_decay,
+            learning_rate=args.learning_rate,
+            warmup_strategy="linear_warmup_decay",
+        )
+        # Setup running config for PaddleHub Finetune API
+        config = hub.RunConfig(
+            use_cuda=True,
+            num_epoch=args.num_epoch,
+            batch_size=args.batch_size,
+            strategy=strategy)
         # Finetune and evaluate model by PaddleHub's API
         # will finish training, evaluation, testing, save model automatically
         hub.finetune_and_eval(
...
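The middle of the create_seq_labeling_task call falls between the last two hunks, so only seq_len and num_classes are visible here. A sketch of the full call under the assumption that the task also takes the token-level feature and the labels (those two keyword names are guesses, not part of the diff):

# Sketch: "feature" and "labels" are assumed keyword names; only seq_len and
# num_classes appear verbatim in the hunk above.
seq_label_task = hub.create_seq_labeling_task(
    feature=sequence_output,  # token-level ERNIE output from Step3
    labels=label,             # [max_seq_len, 1] int64 label tensor
    seq_len=seq_len,          # unpadded length of each example
    num_classes=num_labels)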
@@ -23,12 +23,12 @@ class BaseProcessor(object):
     def preprocess(self, sign_name, data_dict):
         raise NotImplementedError(
-            "BaseProcessor' preprocess should not be call!")
+            "BaseProcessor's preprocess should not be called!")

     def postprocess(self, sign_name, data_out, data_info, **kwargs):
         raise NotImplementedError(
-            "BaseProcessor' postprocess should not be call!")
+            "BaseProcessor's postprocess should not be called!")

     def data_format(self, sign_name):
         raise NotImplementedError(
-            "BaseProcessor' data_format should not be call!")
+            "BaseProcessor's data_format should not be called!")
@@ -119,7 +119,7 @@ class Module(object):
         if processor:
             if not issubclass(processor, BaseProcessor):
                 raise TypeError(
-                    "processor shoule be an instance of paddlehub.BaseProcessor"
+                    "Processor should be a subclass of paddlehub.BaseProcessor"
                 )
         if assets:
             self.assets = utils.to_list(assets)
@@ -129,10 +129,10 @@ class Module(object):
             self._generate_module_info(module_info)
             self._init_with_signature(signatures=signatures)
         else:
-            raise ValueError("Error! Module initialized parameter is empty")
+            raise ValueError("Module initialization parameters are empty")

     def _init_with_name(self, name):
-        logger.info("Try installing module %s" % name)
+        logger.info("Installing %s module" % name)
         result, tips, module_dir = default_module_manager.install_module(
             module_name=name)
         if not result:
...
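_init_with_name is the path taken when a module is constructed by name, which is exactly what the demos above do; the constructor's else branch fires when neither a name nor signatures are supplied. A usage sketch (the directory-based keyword is an assumption, not confirmed by this diff):

import paddlehub as hub

# By name: triggers default_module_manager.install_module(...) and the
# "Installing ernie module" log line from _init_with_name above.
module = hub.Module(name="ernie")

# From a local directory (keyword name assumed); skips the install step.
# module = hub.Module(module_dir="./my_module")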