Commit e1d33b79 authored by Zeyu Chen

update finetune task api name for simplification

Parent a3dbba0d
@@ -20,11 +20,7 @@ with fluid.program_guard(program):
         feature=pooled_output, label=label, num_classes=dataset.num_labels)
 
     # Step4
-    strategy = hub.AdamWeightDecayStrategy(
-        learning_rate=5e-5,
-        warmup_proportion=0.1,
-        warmup_strategy="linear_warmup_decay",
-        weight_decay=0.01)
+    strategy = hub.AdamWeightDecayStrategy(learning_rate=5e-5, weight_decay=0.01)
 
     config = hub.RunConfig(
         use_cuda=True, num_epoch=3, batch_size=32, strategy=strategy)
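The simplified call drops the explicit warmup arguments. A minimal sketch of the new usage, under the assumption that AdamWeightDecayStrategy's built-in defaults cover the dropped warmup settings:

import paddlehub as hub

# New-style strategy: only the tuned values are spelled out; warmup
# behavior is left to the strategy's defaults (assumption: the defaults
# match the previously explicit linear warmup settings).
strategy = hub.AdamWeightDecayStrategy(learning_rate=5e-5, weight_decay=0.01)

config = hub.RunConfig(
    use_cuda=True, num_epoch=3, batch_size=32, strategy=strategy)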
@@ -3,6 +3,7 @@ export CUDA_VISIBLE_DEVICES=5
 CKPT_DIR="./ckpt_sentiment_cls"
 python -u sentiment_cls.py \
             --batch_size 32 \
+            --use_gpu=False \
             --weight_decay 0.01 \
             --checkpoint_dir $CKPT_DIR \
             --num_epoch 3 \
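Note that the demo script now runs on CPU by default; passing --use_gpu=True trains on the device selected by CUDA_VISIBLE_DEVICES above.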
@@ -14,6 +14,7 @@
 """Finetuning on classification task """
 import argparse
+import ast
 
 import paddle.fluid as fluid
 import paddlehub as hub
@@ -21,6 +22,7 @@ import paddlehub as hub
 # yapf: disable
 parser = argparse.ArgumentParser(__doc__)
 parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
+parser.add_argument("--use_gpu", type=ast.literal_eval, default=False, help="Whether use GPU for finetuning, input should be True or False")
 parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
 parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
 parser.add_argument("--data_dir", type=str, default=None, help="Path to training data.")
@@ -70,8 +72,9 @@ if __name__ == '__main__':
     )
 
     # Setup runing config for PaddleHub Finetune API
+    print(args.use_gpu)
     config = hub.RunConfig(
-        use_cuda=True,
+        use_cuda=args.use_gpu,
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        checkpoint_dir=args.checkpoint_dir,
 import paddle
 import paddle.fluid as fluid
 import paddlehub as hub
 
-def train():
+if __name__ == "__main__":
     resnet_module = hub.Module(module_dir="ResNet50.hub_module")
-    input_dict, output_dict, program = resnet_module.context(
-        sign_name="feature_map", trainable=True)
+    input_dict, output_dict, program = resnet_module.context(trainable=True)
     dataset = hub.dataset.Flowers()
     data_reader = hub.reader.ImageClassificationReader(
         image_width=resnet_module.get_excepted_image_width(),
@@ -29,11 +25,7 @@ def train():
     feed_list = [img.name, label.name]
 
-    task = hub.create_img_classification_task(
+    task = hub.create_img_cls_task(
         feature=feature_map, label=label, num_classes=dataset.num_labels)
 
     hub.finetune_and_eval(
         task, feed_list=feed_list, data_reader=data_reader, config=config)
-
-if __name__ == "__main__":
-    train()
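Pieced together from the hunks above, the image classification demo now reads roughly as below. Everything between the reader and feed_list is collapsed in the diff, so the lines marked as assumed (variable extraction, the label layer, and the RunConfig values) are reconstructions, not the file's actual contents:

import paddle.fluid as fluid
import paddlehub as hub

if __name__ == "__main__":
    resnet_module = hub.Module(module_dir="ResNet50.hub_module")
    input_dict, output_dict, program = resnet_module.context(trainable=True)

    dataset = hub.dataset.Flowers()
    data_reader = hub.reader.ImageClassificationReader(
        image_width=resnet_module.get_excepted_image_width(),
        dataset=dataset)  # assumed: remaining reader arguments are not shown in the diff

    with fluid.program_guard(program):
        # assumed dict keys; the diff collapses how img/feature_map are obtained
        img = input_dict["image"]
        feature_map = output_dict["feature_map"]
        label = fluid.layers.data(name="label", dtype="int64", shape=[1])  # assumed

        feed_list = [img.name, label.name]
        task = hub.create_img_cls_task(
            feature=feature_map, label=label, num_classes=dataset.num_labels)

        # assumed values; the demo's actual RunConfig is outside this diff
        config = hub.RunConfig(use_cuda=True, num_epoch=10, batch_size=32)

        hub.finetune_and_eval(
            task, feed_list=feed_list, data_reader=data_reader, config=config)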
@@ -34,9 +34,9 @@ from .module.manager import default_module_manager
 from .io.type import DataType
 
 from .finetune.task import Task
-from .finetune.task import create_seq_labeling_task
-from .finetune.task import create_text_classification_task
-from .finetune.task import create_img_classification_task
+from .finetune.task import create_seq_label_task
+from .finetune.task import create_text_cls_task
+from .finetune.task import create_img_cls_task
 from .finetune.finetune import finetune_and_eval
 from .finetune.config import RunConfig
 from .finetune.strategy import AdamWeightDecayStrategy
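The net effect on the public hub namespace is a three-way rename, with no compatibility aliases left behind in this diff:

    create_text_classification_task  ->  create_text_cls_task
    create_img_classification_task   ->  create_img_cls_task
    create_seq_labeling_task         ->  create_seq_label_task

Task, finetune_and_eval, RunConfig, and AdamWeightDecayStrategy keep their names, so only callers of the three factory functions need updating.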
@@ -45,10 +45,10 @@ def _do_memory_optimization(task, config):
         time_used = time.time() - optimize_time_begin
         logger.info("Memory optimization done! Time elapsed %f sec" % time_used)
 
-    lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
-        program=fluid.default_main_program(), batch_size=config.batch_size)
-    logger.info("Theoretical memory usage in training: %.2f - %.2f %s" %
-                (lower_mem, upper_mem, unit)),
+    # lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
+    #     program=task.main_program(), batch_size=config.batch_size)
+    # logger.info("Theoretical memory usage in training: %.2f - %.2f %s" %
+    #             (lower_mem, upper_mem, unit)),
 
 
 def _finetune_seq_label_task(task,
@@ -62,10 +62,7 @@ class Task(object):
         return metric_variable_names
 
 
-def create_text_classification_task(feature,
-                                    label,
-                                    num_classes,
-                                    hidden_units=None):
+def create_text_cls_task(feature, label, num_classes, hidden_units=None):
     """
     Append a multi-layer perceptron classifier for binary classification base
     on input feature
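The rename keeps the signature, including the optional hidden_units argument. A hedged call-site sketch, where pooled_output, label, and dataset are hypothetical stand-ins for the upstream setup shown in the first hunk:

# hidden_units presumably lists the sizes of extra fully connected
# layers appended before the classifier, per the MLP docstring above.
task = hub.create_text_cls_task(
    feature=pooled_output,
    label=label,
    num_classes=dataset.num_labels,
    hidden_units=[128, 64])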
@@ -108,10 +105,7 @@ def create_text_classification_task(feature,
     return task
 
 
-def create_img_classification_task(feature,
-                                   label,
-                                   num_classes,
-                                   hidden_units=None):
+def create_img_cls_task(feature, label, num_classes, hidden_units=None):
     """
     Create the transfer learning task for image classification.
     Args:
@@ -153,13 +147,13 @@ def create_img_classification_task(feature,
         "num_example": num_example
     }
 
-    task = Task("text_classification", graph_var_dict,
+    task = Task("image_classification", graph_var_dict,
                 fluid.default_main_program(), fluid.default_startup_program())
 
     return task
 
 
-def create_seq_labeling_task(feature, labels, seq_len, num_classes):
+def create_seq_label_task(feature, labels, seq_len, num_classes):
     logits = fluid.layers.fc(
         input=feature,
         size=num_classes,
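Note that the hunk above also fixes a copy-paste bug: the image classification factory previously registered its Task under the name "text_classification". For completeness, a hedged sketch of the renamed sequence labeling factory; all variable names here are hypothetical, since the surrounding demo is not part of this diff:

# sequence_output: token-level features from a pretrained module (hypothetical)
# labels, seq_len: variables fed by the data reader (hypothetical)
seq_label_task = hub.create_seq_label_task(
    feature=sequence_output,
    labels=labels,
    seq_len=seq_len,
    num_classes=dataset.num_labels)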