Commit e1d33b79 authored by Zeyu Chen

update finetune task api name for simplification

Parent a3dbba0d
...
@@ -20,11 +20,7 @@ with fluid.program_guard(program):
         feature=pooled_output, label=label, num_classes=dataset.num_labels)
 
     # Step4
-    strategy = hub.AdamWeightDecayStrategy(
-        learning_rate=5e-5,
-        warmup_proportion=0.1,
-        warmup_strategy="linear_warmup_decay",
-        weight_decay=0.01)
+    strategy = hub.AdamWeightDecayStrategy(learning_rate=5e-5, weight_decay=0.01)
 
     config = hub.RunConfig(
         use_cuda=True, num_epoch=3, batch_size=32, strategy=strategy)
...
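The hunk above collapses the strategy construction to the two non-default values. A minimal before/after sketch, assuming the dropped warmup arguments simply fall back to the strategy's built-in defaults (not verified against the constructor's signature):

    import paddlehub as hub

    # Before this commit: warmup settings spelled out explicitly.
    strategy = hub.AdamWeightDecayStrategy(
        learning_rate=5e-5,
        warmup_proportion=0.1,
        warmup_strategy="linear_warmup_decay",
        weight_decay=0.01)

    # After this commit: only the values that differ from the defaults
    # (assumption: the defaults match the removed arguments).
    strategy = hub.AdamWeightDecayStrategy(learning_rate=5e-5, weight_decay=0.01)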
...
@@ -3,6 +3,7 @@ export CUDA_VISIBLE_DEVICES=5
 CKPT_DIR="./ckpt_sentiment_cls"
 python -u sentiment_cls.py \
     --batch_size 32 \
+    --use_gpu=False \
     --weight_decay 0.01 \
     --checkpoint_dir $CKPT_DIR \
     --num_epoch 3 \
...
...
@@ -14,6 +14,7 @@
 """Finetuning on classification task """
 import argparse
+import ast
 
 import paddle.fluid as fluid
 import paddlehub as hub
@@ -21,6 +22,7 @@ import paddlehub as hub
 # yapf: disable
 parser = argparse.ArgumentParser(__doc__)
 parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
+parser.add_argument("--use_gpu", type=ast.literal_eval, default=False, help="Whether use GPU for finetuning, input should be True or False")
 parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
 parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
 parser.add_argument("--data_dir", type=str, default=None, help="Path to training data.")
...
@@ -70,8 +72,9 @@ if __name__ == '__main__':
     )
 
     # Setup runing config for PaddleHub Finetune API
+    print(args.use_gpu)
     config = hub.RunConfig(
-        use_cuda=True,
+        use_cuda=args.use_gpu,
         num_epoch=args.num_epoch,
         batch_size=args.batch_size,
         checkpoint_dir=args.checkpoint_dir,
...
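A sketch of how the flag flows end to end once parsed, using the parser and imports from the hunks above (the --batch_size and --checkpoint_dir arguments are assumed to be defined in the elided part of the parser, with defaults that allow parsing to succeed):

    # Hypothetical invocation: simulate passing `--use_gpu True` on the command line.
    args = parser.parse_args(["--use_gpu", "True"])

    config = hub.RunConfig(
        use_cuda=args.use_gpu,   # a real boolean from ast.literal_eval, not a string
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        checkpoint_dir=args.checkpoint_dir)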
-import paddle
 import paddle.fluid as fluid
 import paddlehub as hub
 
-def train():
+if __name__ == "__main__":
     resnet_module = hub.Module(module_dir="ResNet50.hub_module")
-    input_dict, output_dict, program = resnet_module.context(
-        sign_name="feature_map", trainable=True)
+    input_dict, output_dict, program = resnet_module.context(trainable=True)
     dataset = hub.dataset.Flowers()
     data_reader = hub.reader.ImageClassificationReader(
         image_width=resnet_module.get_excepted_image_width(),
...
@@ -29,11 +25,7 @@ def train():
     feed_list = [img.name, label.name]
 
-    task = hub.create_img_classification_task(
+    task = hub.create_img_cls_task(
         feature=feature_map, label=label, num_classes=dataset.num_labels)
 
     hub.finetune_and_eval(
         task, feed_list=feed_list, data_reader=data_reader, config=config)
-
-if __name__ == "__main__":
-    train()
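Besides renaming the task constructor, this hunk drops the explicit sign_name when requesting the module context. Side by side, with the assumption that the module's default signature exposes the same feature map:

    # Before: feature map requested by explicit signature name.
    input_dict, output_dict, program = resnet_module.context(
        sign_name="feature_map", trainable=True)

    # After: the default signature is used (assumption: it yields the
    # same feature-map outputs as the removed "feature_map" signature).
    input_dict, output_dict, program = resnet_module.context(trainable=True)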
...
@@ -34,9 +34,9 @@ from .module.manager import default_module_manager
 from .io.type import DataType
 from .finetune.task import Task
-from .finetune.task import create_seq_labeling_task
-from .finetune.task import create_text_classification_task
-from .finetune.task import create_img_classification_task
+from .finetune.task import create_seq_label_task
+from .finetune.task import create_text_cls_task
+from .finetune.task import create_img_cls_task
 from .finetune.finetune import finetune_and_eval
 from .finetune.config import RunConfig
 from .finetune.strategy import AdamWeightDecayStrategy
...
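For downstream code the renames are one-for-one. A hypothetical migration shim (names taken directly from the import lines above) for callers that still use the old identifiers:

    import paddlehub as hub

    # Old name -> new name, per this commit's __init__ changes.
    create_text_classification_task = hub.create_text_cls_task
    create_img_classification_task = hub.create_img_cls_task
    create_seq_labeling_task = hub.create_seq_label_task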
...
@@ -45,10 +45,10 @@ def _do_memory_optimization(task, config):
     time_used = time.time() - optimize_time_begin
     logger.info("Memory optimization done! Time elapsed %f sec" % time_used)
 
-    lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
-        program=fluid.default_main_program(), batch_size=config.batch_size)
-    logger.info("Theoretical memory usage in training: %.2f - %.2f %s" %
-                (lower_mem, upper_mem, unit)),
+    # lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
+    #     program=task.main_program(), batch_size=config.batch_size)
+    # logger.info("Theoretical memory usage in training: %.2f - %.2f %s" %
+    #             (lower_mem, upper_mem, unit)),
 
 def _finetune_seq_label_task(task,
...
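Note the commented-out code is not identical to what it replaces: it now targets the task's own program rather than the global default. If the estimate is needed again, it can be restored inside _do_memory_optimization, where fluid, logger, task, and config are already in scope (a sketch assuming task.main_program() returns the fluid.Program being trained; the stray trailing comma from the original is dropped):

    # Restored form of the commented-out estimate above.
    lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
        program=task.main_program(), batch_size=config.batch_size)
    logger.info("Theoretical memory usage in training: %.2f - %.2f %s" %
                (lower_mem, upper_mem, unit))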
...
@@ -62,10 +62,7 @@ class Task(object):
         return metric_variable_names
 
-def create_text_classification_task(feature,
-                                    label,
-                                    num_classes,
-                                    hidden_units=None):
+def create_text_cls_task(feature, label, num_classes, hidden_units=None):
     """
     Append a multi-layer perceptron classifier for binary classification base
     on input feature
...
@@ -108,10 +105,7 @@ def create_text_classification_task(feature,
     return task
 
-def create_img_classification_task(feature,
-                                   label,
-                                   num_classes,
-                                   hidden_units=None):
+def create_img_cls_task(feature, label, num_classes, hidden_units=None):
     """
     Create the transfer learning task for image classification.
 
     Args:
...
@@ -153,13 +147,13 @@ def create_img_classification_task(feature,
         "num_example": num_example
     }
 
-    task = Task("text_classification", graph_var_dict,
+    task = Task("image_classification", graph_var_dict,
                 fluid.default_main_program(), fluid.default_startup_program())
 
     return task
 
-def create_seq_labeling_task(feature, labels, seq_len, num_classes):
+def create_seq_label_task(feature, labels, seq_len, num_classes):
     logits = fluid.layers.fc(
         input=feature,
         size=num_classes,
...
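The last hunk also fixes a mislabeled task type: create_img_cls_task previously constructed its Task as "text_classification". A minimal sketch of calling the renamed constructor with the signature visible above; the feature and label variables are hypothetical placeholders, not part of the commit:

    import paddle.fluid as fluid
    import paddlehub as hub

    # Hypothetical inputs standing in for a module's pooled output and a data layer.
    feature = fluid.layers.data(name="feature", shape=[768], dtype="float32")
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")

    # Renamed constructor; hidden_units keeps its default of None.
    task = hub.create_text_cls_task(feature=feature, label=label, num_classes=2)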