diff --git a/demo/ernie-classification/README.md b/demo/ernie-classification/README.md
index 45321ae546c208332fa72aa74c3353d862180553..32cf3b5c642af1281f685da17d343ea9f21fd51e 100644
--- a/demo/ernie-classification/README.md
+++ b/demo/ernie-classification/README.md
@@ -95,7 +95,7 @@ with fluid.program_guard(program):  # NOTE: 必须使用fluid.program_guard接口
 
 ### Step4:选择优化策略并开始Finetune
 ```python
-strategy = hub.BERTFinetuneStrategy(
+strategy = hub.AdamWeightDecayStrategy(
     weight_decay=0.01,
     learning_rate=5e-5,
     warmup_strategy="linear_warmup_decay",
diff --git a/demo/ernie-classification/ernie_tiny_demo.py b/demo/ernie-classification/ernie_tiny_demo.py
index bf0d95d084891d8243b34feda431d6eacc773fe7..a8c24a10d10e712a15db0f39b97675b5410fa90e 100644
--- a/demo/ernie-classification/ernie_tiny_demo.py
+++ b/demo/ernie-classification/ernie_tiny_demo.py
@@ -21,7 +21,7 @@ with fluid.program_guard(program):
         feature=pooled_output, label=label, num_classes=reader.get_num_labels())
 
 # Step4
-strategy = hub.BERTFinetuneStrategy(
+strategy = hub.AdamWeightDecayStrategy(
     learning_rate=5e-5,
     warmup_proportion=0.1,
     warmup_strategy="linear_warmup_decay",
diff --git a/demo/ernie-classification/question_answering.py b/demo/ernie-classification/question_answering.py
index 3429b488860427bc74df8ea6adf7bed97c30dcb9..65406c45fcefb716dabef5c807e366567e5e730f 100644
--- a/demo/ernie-classification/question_answering.py
+++ b/demo/ernie-classification/question_answering.py
@@ -62,7 +62,7 @@ if __name__ == '__main__':
         pooled_output, label, num_classes=num_labels)
 
     # Step4: Select finetune strategy, setup config and finetune
-    strategy = hub.BERTFinetuneStrategy(
+    strategy = hub.AdamWeightDecayStrategy(
         weight_decay=args.weight_decay,
         learning_rate=args.learning_rate,
         warmup_strategy="linear_warmup_decay",
diff --git a/demo/ernie-classification/question_matching.py b/demo/ernie-classification/question_matching.py
index af2a0f6492c4ea169fec7906e49231d26d65acc2..351922c19c79c32458055213d2e95699d5cbb351 100644
--- a/demo/ernie-classification/question_matching.py
+++ b/demo/ernie-classification/question_matching.py
@@ -62,7 +62,7 @@ if __name__ == '__main__':
         pooled_output, label, num_classes=num_labels)
 
     # Step4: Select finetune strategy, setup config and finetune
-    strategy = hub.BERTFinetuneStrategy(
+    strategy = hub.AdamWeightDecayStrategy(
         weight_decay=args.weight_decay,
         learning_rate=args.learning_rate,
         warmup_strategy="linear_warmup_decay",
diff --git a/demo/ernie-classification/sentiment_cls.py b/demo/ernie-classification/sentiment_cls.py
index ec6b1b4be08d271effaa871c9d9a93e15192deab..fddc8c3ab8cdda3a8abcd4461dff047bff8122d3 100644
--- a/demo/ernie-classification/sentiment_cls.py
+++ b/demo/ernie-classification/sentiment_cls.py
@@ -61,7 +61,7 @@ if __name__ == '__main__':
         pooled_output, label, num_classes=reader.get_num_labels())
 
     # Step4: Select finetune strategy, setup config and finetune
-    strategy = hub.BERTFinetuneStrategy(
+    strategy = hub.AdamWeightDecayStrategy(
         weight_decay=args.weight_decay,
         learning_rate=args.learning_rate,
         warmup_strategy="linear_warmup_decay",
diff --git a/demo/ernie-seq-labeling/sequence_labeling.py b/demo/ernie-seq-labeling/sequence_labeling.py
index 702801149e9cf082eb8da2019fa4dc6ecd57d91e..cb99b3b9cc8e17ddd610b5d3c3f7f8e7abc3bb08 100644
--- a/demo/ernie-seq-labeling/sequence_labeling.py
+++ b/demo/ernie-seq-labeling/sequence_labeling.py
@@ -69,7 +69,7 @@ if __name__ == '__main__':
         num_classes=num_labels)
 
     # Select a finetune strategy
-    strategy = hub.BERTFinetuneStrategy(
+    strategy = hub.AdamWeightDecayStrategy(
         weight_decay=args.weight_decay,
         learning_rate=args.learning_rate,
         warmup_strategy="linear_warmup_decay",
diff --git a/paddlehub/__init__.py b/paddlehub/__init__.py
index e9e5b5347a8b91ad0d799567ae33fc4f0f4eb360..183820bf65545a587132cc3ecfe352dad0dbefbc 100644
--- a/paddlehub/__init__.py
+++ b/paddlehub/__init__.py
@@ -39,5 +39,5 @@ from .finetune.task import create_text_classification_task
 from .finetune.task import create_img_classification_task
 from .finetune.finetune import finetune_and_eval
 from .finetune.config import RunConfig
-from .finetune.strategy import BERTFinetuneStrategy
+from .finetune.strategy import AdamWeightDecayStrategy
 from .finetune.strategy import DefaultStrategy
diff --git a/paddlehub/finetune/finetune.py b/paddlehub/finetune/finetune.py
index 204270ac19d8c9d13ce0fd86149ac03c4f8a666f..f5ab2cd3aad18478f31261972837bf4587740c6f 100644
--- a/paddlehub/finetune/finetune.py
+++ b/paddlehub/finetune/finetune.py
@@ -25,7 +25,7 @@ import numpy as np
 from visualdl import LogWriter
 
 from paddlehub.common.logger import logger
-from paddlehub.finetune.strategy import BERTFinetuneStrategy, DefaultStrategy
+from paddlehub.finetune.strategy import AdamWeightDecayStrategy, DefaultStrategy
 from paddlehub.finetune.checkpoint import load_checkpoint, save_checkpoint
 from paddlehub.finetune.evaluate import evaluate_cls_task, evaluate_seq_labeling_task
 import paddlehub as hub
@@ -74,7 +74,7 @@ def _finetune_seq_label_task(task,
     data_feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
 
     # Select strategy
-    if isinstance(config.strategy, hub.BERTFinetuneStrategy):
+    if isinstance(config.strategy, hub.AdamWeightDecayStrategy):
         scheduled_lr = config.strategy.execute(loss, main_program, data_reader,
                                                config)
     elif isinstance(config.strategy, hub.DefaultStrategy):
@@ -173,7 +173,7 @@ def _finetune_cls_task(task, data_reader, feed_list, config=None,
     data_feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
 
     # select strategy
-    if isinstance(config.strategy, hub.BERTFinetuneStrategy):
+    if isinstance(config.strategy, hub.AdamWeightDecayStrategy):
         scheduled_lr = config.strategy.execute(loss, main_program, data_reader,
                                                config)
     elif isinstance(config.strategy, hub.DefaultStrategy):
diff --git a/paddlehub/finetune/strategy.py b/paddlehub/finetune/strategy.py
index 093ae49915ebe2bdc9e5cfe1ecb00a9b98e08d28..f65288100d0cc9b304cb7424fbf32e1cd4625203 100644
--- a/paddlehub/finetune/strategy.py
+++ b/paddlehub/finetune/strategy.py
@@ -61,7 +61,7 @@ class DefaultStrategy(object):
         return "DefaultStrategy"
 
 
-class BERTFinetuneStrategy(DefaultStrategy):
+class AdamWeightDecayStrategy(DefaultStrategy):
     def __init__(self,
                  learning_rate=1e-4,
                  warmup_strategy="linear_warmup_decay",
@@ -114,7 +114,7 @@ class BERTFinetuneStrategy(DefaultStrategy):
 
     # TODO complete __str__()
    def __str__(self):
-        return "BERTFintuneStrategy"
+        return "AdamWeightDecayStrategy"
 
 
 class DefaultFinetuneStrategy(DefaultStrategy):
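
For reference, user code picks up the rename as follows. This is a minimal sketch based on the demo scripts touched by this patch; the keyword arguments and values are the ones shown in the README hunk above, and the `hub.RunConfig(..., strategy=strategy)` wiring mentioned in the comment follows the existing Step4 demo flow rather than anything changed by this diff.

```python
import paddlehub as hub

# Renamed class (formerly hub.BERTFinetuneStrategy); the constructor
# arguments are unchanged by this patch.
strategy = hub.AdamWeightDecayStrategy(
    weight_decay=0.01,
    learning_rate=5e-5,
    warmup_proportion=0.1,
    warmup_strategy="linear_warmup_decay")

# As before, the strategy is then attached to the run configuration,
# e.g. hub.RunConfig(..., strategy=strategy), and consumed by
# finetune_and_eval, which dispatches on isinstance(config.strategy, ...).
```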