diff --git a/demo/ernie-classification/ernie_tiny_demo.py b/demo/ernie-classification/ernie_tiny_demo.py
index 792d141af605cd3e69ffa90cb36d8f6694c3ff64..bf0d95d084891d8243b34feda431d6eacc773fe7 100644
--- a/demo/ernie-classification/ernie_tiny_demo.py
+++ b/demo/ernie-classification/ernie_tiny_demo.py
@@ -1,35 +1,39 @@
 import paddle.fluid as fluid
 import paddlehub as hub
 
+# Step1
 module = hub.Module(name="ernie")
 inputs, outputs, program = module.context(trainable=True, max_seq_len=128)
 
+# Step2
 reader = hub.reader.ClassifyReader(
     dataset=hub.dataset.ChnSentiCorp(),
     vocab_path=module.get_vocab_path(),
     max_seq_len=128)
 
+# Step3
 with fluid.program_guard(program):
     label = fluid.layers.data(name="label", shape=[1], dtype='int64')
     pooled_output = outputs["pooled_output"]
-    feed_list = [
-        inputs["input_ids"].name, inputs["position_ids"].name,
-        inputs["segment_ids"].name, inputs["input_mask"].name, label.name
-    ]
-
     cls_task = hub.create_text_classification_task(
-        pooled_output, label, num_classes=reader.get_num_labels())
+        feature=pooled_output, label=label, num_classes=reader.get_num_labels())
+
+# Step4
+strategy = hub.BERTFinetuneStrategy(
+    learning_rate=5e-5,
+    warmup_proportion=0.1,
+    warmup_strategy="linear_warmup_decay",
+    weight_decay=0.01)
 
-    strategy = hub.BERTFinetuneStrategy(
-        weight_decay=0.01,
-        learning_rate=5e-5,
-        warmup_strategy="linear_warmup_decay",
-    )
+config = hub.RunConfig(
+    use_cuda=True, num_epoch=3, batch_size=32, strategy=strategy)
 
-    config = hub.RunConfig(
-        use_cuda=True, num_epoch=3, batch_size=32, strategy=strategy)
+feed_list = [
+    inputs["input_ids"].name, inputs["position_ids"].name,
+    inputs["segment_ids"].name, inputs["input_mask"].name, label.name
+]
 
-    hub.finetune_and_eval(
-        task=cls_task, data_reader=reader, feed_list=feed_list, config=config)
+hub.finetune_and_eval(
+    task=cls_task, data_reader=reader, feed_list=feed_list, config=config)
 
 