From e1d33b797c378ab30fdc5e163a1a53adc1ea48d8 Mon Sep 17 00:00:00 2001 From: Zeyu Chen Date: Sun, 14 Apr 2019 17:56:52 +0800 Subject: [PATCH] update finetune task api name for simplification --- demo/ernie-classification/ernie_tiny_demo.py | 6 +----- demo/ernie-classification/run_sentiment_cls.sh | 1 + demo/ernie-classification/sentiment_cls.py | 5 ++++- demo/image-classification/retrain.py | 14 +++----------- paddlehub/__init__.py | 6 +++--- paddlehub/finetune/finetune.py | 8 ++++---- paddlehub/finetune/task.py | 14 ++++---------- 7 files changed, 20 insertions(+), 34 deletions(-) diff --git a/demo/ernie-classification/ernie_tiny_demo.py b/demo/ernie-classification/ernie_tiny_demo.py index e8cf88f8..ecb0ce98 100644 --- a/demo/ernie-classification/ernie_tiny_demo.py +++ b/demo/ernie-classification/ernie_tiny_demo.py @@ -20,11 +20,7 @@ with fluid.program_guard(program): feature=pooled_output, label=label, num_classes=dataset.num_labels) # Step4 -strategy = hub.AdamWeightDecayStrategy( - learning_rate=5e-5, - warmup_proportion=0.1, - warmup_strategy="linear_warmup_decay", - weight_decay=0.01) +strategy = hub.AdamWeightDecayStrategy(learning_rate=5e-5, weight_decay=0.01) config = hub.RunConfig( use_cuda=True, num_epoch=3, batch_size=32, strategy=strategy) diff --git a/demo/ernie-classification/run_sentiment_cls.sh b/demo/ernie-classification/run_sentiment_cls.sh index 246a4fa2..b4231fa6 100644 --- a/demo/ernie-classification/run_sentiment_cls.sh +++ b/demo/ernie-classification/run_sentiment_cls.sh @@ -3,6 +3,7 @@ export CUDA_VISIBLE_DEVICES=5 CKPT_DIR="./ckpt_sentiment_cls" python -u sentiment_cls.py \ --batch_size 32 \ + --use_gpu=False \ --weight_decay 0.01 \ --checkpoint_dir $CKPT_DIR \ --num_epoch 3 \ diff --git a/demo/ernie-classification/sentiment_cls.py b/demo/ernie-classification/sentiment_cls.py index 00c5e02e..4ad02db3 100644 --- a/demo/ernie-classification/sentiment_cls.py +++ b/demo/ernie-classification/sentiment_cls.py @@ -14,6 +14,7 @@ """Finetuning on
classification task """ import argparse +import ast import paddle.fluid as fluid import paddlehub as hub @@ -21,6 +22,7 @@ import paddlehub as hub # yapf: disable parser = argparse.ArgumentParser(__doc__) parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.") +parser.add_argument("--use_gpu", type=ast.literal_eval, default=False, help="Whether use GPU for finetuning, input should be True or False") parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.") parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.") parser.add_argument("--data_dir", type=str, default=None, help="Path to training data.") @@ -70,8 +72,9 @@ if __name__ == '__main__': ) # Setup runing config for PaddleHub Finetune API + print(args.use_gpu) config = hub.RunConfig( - use_cuda=True, + use_cuda=args.use_gpu, num_epoch=args.num_epoch, batch_size=args.batch_size, checkpoint_dir=args.checkpoint_dir, diff --git a/demo/image-classification/retrain.py b/demo/image-classification/retrain.py index 52a0ea19..a5315b2e 100644 --- a/demo/image-classification/retrain.py +++ b/demo/image-classification/retrain.py @@ -1,13 +1,9 @@ -import paddle import paddle.fluid as fluid - import paddlehub as hub - -def train(): +if __name__ == "__main__": resnet_module = hub.Module(module_dir="ResNet50.hub_module") - input_dict, output_dict, program = resnet_module.context( - sign_name="feature_map", trainable=True) + input_dict, output_dict, program = resnet_module.context(trainable=True) dataset = hub.dataset.Flowers() data_reader = hub.reader.ImageClassificationReader( image_width=resnet_module.get_excepted_image_width(), @@ -29,11 +25,7 @@ def train(): feed_list = [img.name, label.name] - task = hub.create_img_classification_task( + task = hub.create_img_cls_task( feature=feature_map, label=label, num_classes=dataset.num_labels) hub.finetune_and_eval( task, 
feed_list=feed_list, data_reader=data_reader, config=config) - - -if __name__ == "__main__": - train() diff --git a/paddlehub/__init__.py b/paddlehub/__init__.py index 183820bf..dc4593ac 100644 --- a/paddlehub/__init__.py +++ b/paddlehub/__init__.py @@ -34,9 +34,9 @@ from .module.manager import default_module_manager from .io.type import DataType from .finetune.task import Task -from .finetune.task import create_seq_labeling_task -from .finetune.task import create_text_classification_task -from .finetune.task import create_img_classification_task +from .finetune.task import create_seq_label_task +from .finetune.task import create_text_cls_task +from .finetune.task import create_img_cls_task from .finetune.finetune import finetune_and_eval from .finetune.config import RunConfig from .finetune.strategy import AdamWeightDecayStrategy diff --git a/paddlehub/finetune/finetune.py b/paddlehub/finetune/finetune.py index f5ab2cd3..5a5bda64 100644 --- a/paddlehub/finetune/finetune.py +++ b/paddlehub/finetune/finetune.py @@ -45,10 +45,10 @@ def _do_memory_optimization(task, config): time_used = time.time() - optimize_time_begin logger.info("Memory optimization done! 
Time elapsed %f sec" % time_used) - lower_mem, upper_mem, unit = fluid.contrib.memory_usage( - program=fluid.default_main_program(), batch_size=config.batch_size) - logger.info("Theoretical memory usage in training: %.2f - %.2f %s" % - (lower_mem, upper_mem, unit)), + # lower_mem, upper_mem, unit = fluid.contrib.memory_usage( + # program=task.main_program(), batch_size=config.batch_size) + # logger.info("Theoretical memory usage in training: %.2f - %.2f %s" % + # (lower_mem, upper_mem, unit)), def _finetune_seq_label_task(task, diff --git a/paddlehub/finetune/task.py b/paddlehub/finetune/task.py index c23df06d..b3770cb5 100644 --- a/paddlehub/finetune/task.py +++ b/paddlehub/finetune/task.py @@ -62,10 +62,7 @@ class Task(object): return metric_variable_names -def create_text_classification_task(feature, - label, - num_classes, - hidden_units=None): +def create_text_cls_task(feature, label, num_classes, hidden_units=None): """ Append a multi-layer perceptron classifier for binary classification base on input feature @@ -108,10 +105,7 @@ def create_text_classification_task(feature, return task -def create_img_classification_task(feature, - label, - num_classes, - hidden_units=None): +def create_img_cls_task(feature, label, num_classes, hidden_units=None): """ Create the transfer learning task for image classification. Args: @@ -153,13 +147,13 @@ def create_img_classification_task(feature, "num_example": num_example } - task = Task("text_classification", graph_var_dict, + task = Task("image_classification", graph_var_dict, fluid.default_main_program(), fluid.default_startup_program()) return task -def create_seq_labeling_task(feature, labels, seq_len, num_classes): +def create_seq_label_task(feature, labels, seq_len, num_classes): logits = fluid.layers.fc( input=feature, size=num_classes, -- GitLab