From f9faa71c942823ae45c5a23ec983443dd2e4d85a Mon Sep 17 00:00:00 2001 From: wuzewu Date: Wed, 3 Apr 2019 16:53:39 +0800 Subject: [PATCH] update demo and add README --- demo/image-classification/README.md | 35 ++++++++++++++++ demo/image-classification/create_module.sh | 23 +++++++++-- demo/image-classification/finetune.sh | 6 +-- demo/lac/README.md | 27 +++++++++++++ demo/lac/create_module.sh | 19 ++++++++- demo/lac/infer.sh | 2 +- demo/lac/infer_by_code.py | 46 ++++++++++++++++++++++ demo/senta/README.md | 27 +++++++++++++ demo/senta/create_module.sh | 19 ++++++++- demo/senta/infer_by_code.py | 46 ++++++++++++++++++++++ demo/ssd/README.md | 27 +++++++++++++ demo/ssd/create_module.sh | 19 ++++++++- demo/ssd/infer_by_code.py | 44 +++++++++++++++++++++ 13 files changed, 328 insertions(+), 12 deletions(-) create mode 100644 demo/image-classification/README.md create mode 100644 demo/lac/infer_by_code.py create mode 100644 demo/senta/README.md create mode 100644 demo/senta/infer_by_code.py create mode 100644 demo/ssd/README.md create mode 100644 demo/ssd/infer_by_code.py diff --git a/demo/image-classification/README.md b/demo/image-classification/README.md new file mode 100644 index 00000000..276c9ca7 --- /dev/null +++ b/demo/image-classification/README.md @@ -0,0 +1,35 @@ +## 关于图像分类 +https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/image_classification + +## 创建Module +本目录包含了创建一个基于ImageNet 2012数据集预训练的图像分类模型(ResNet/MobileNet)的Module的脚本。 +通过以下脚本来一键创建一个ResNet50 Module +```shell +sh create_module.sh +``` +NOTE: +* 如果进行下面示例的脚本或者代码,请确保执行上述脚本 +* 关于创建Module的API和细节,请查看`create_module.py` + +## 使用Module预测 +该Module创建完成后,可以通过命令行或者python API两种方式进行预测 +### 命令行方式 +`infer.sh`给出了使用命令行调用Module预测的示例脚本 +通过以下命令试验下效果 +```shell +sh infer.sh +``` +### 通过python API +`infer_by_code.py`给出了使用python API调用Module预测的示例代码 +通过以下命令试验下效果 +```shell +python infer_by_code.py +``` + +## 对预训练模型进行Finetune +通过以下命令进行Finetune +```shell +sh finetune.sh +``` + 
+更多关于Finetune的资料,请查看[基于PaddleHub的迁移学习](https://github.com/PaddlePaddle/PaddleHub/blob/develop/docs/transfer_learning_turtorial.md) diff --git a/demo/image-classification/create_module.sh b/demo/image-classification/create_module.sh index f94b886a..04443000 100644 --- a/demo/image-classification/create_module.sh +++ b/demo/image-classification/create_module.sh @@ -2,9 +2,6 @@ set -o nounset set -o errexit -script_path=$(cd `dirname $0`; pwd) -cd $script_path - model_name="ResNet50" while getopts "m:" options @@ -18,4 +15,24 @@ do esac done +script_path=$(cd `dirname $0`; pwd) +module_path=hub_module_${model_name} + +if [ -d $script_path/$module_path ] +then + echo "$module_path already existed!" + exit 0 +fi + +cd $script_path/resources/ + +if [ ! -d ${model_name}_pretrained ] +then + sh download.sh $model_name +fi + +cd $script_path/ + python create_module.py --pretrained_model=resources/${model_name}_pretrained --model ${model_name} + +echo "Successfully create $module_path" diff --git a/demo/image-classification/finetune.sh b/demo/image-classification/finetune.sh index e075ceed..9c9783f3 100644 --- a/demo/image-classification/finetune.sh +++ b/demo/image-classification/finetune.sh @@ -4,10 +4,6 @@ set -o errexit script_path=$(cd `dirname $0`; pwd) cd $script_path -hub_module_path=hub_module_ResNet50 -cd resources -sh download.sh ResNet50 -cd .. 
-sh create_module.sh +sh create_module.sh python retrain.py diff --git a/demo/lac/README.md b/demo/lac/README.md index e69de29b..57154126 100644 --- a/demo/lac/README.md +++ b/demo/lac/README.md @@ -0,0 +1,27 @@ +## 关于LAC +https://github.com/baidu/lac + +## 创建Module +本目录包含了创建一个基于LAC预训练模型的Module的脚本。 +通过以下脚本来一键创建一个LAC Module +```shell +sh create_module.sh +``` +NOTE: +* 如果进行下面示例的脚本或者代码,请确保执行上述脚本 +* 关于创建Module的API和细节,请查看`create_module.py` + +## 使用Module预测 +该Module创建完成后,可以通过命令行或者python API两种方式进行预测 +### 命令行方式 +`infer.sh`给出了使用命令行调用Module预测的示例脚本 +通过以下命令试验下效果 +```shell +sh infer.sh +``` +### 通过python API +`infer_by_code.py`给出了使用python API调用Module预测的示例代码 +通过以下命令试验下效果 +```shell +python infer_by_code.py +``` diff --git a/demo/lac/create_module.sh b/demo/lac/create_module.sh index 70bd274c..ecad3685 100644 --- a/demo/lac/create_module.sh +++ b/demo/lac/create_module.sh @@ -3,6 +3,23 @@ set -o nounset set -o errexit script_path=$(cd `dirname $0`; pwd) -cd $script_path +module_path=hub_module_lac + +if [ -d $script_path/$module_path ] +then + echo "$module_path already existed!" + exit 0 +fi + +cd $script_path/resources/ + +if [ ! 
-d senta_model ] +then + sh download.sh +fi + +cd $script_path/ python create_module.py + +echo "Successfully create $module_path" diff --git a/demo/lac/infer.sh b/demo/lac/infer.sh index 981a4229..6416781a 100644 --- a/demo/lac/infer.sh +++ b/demo/lac/infer.sh @@ -1 +1 @@ -python ../../paddle_hub/commands/hub.py run hub_module_lac/ --signature lexical_analysis --config resources/test/test.yml --dataset resources/test/test.csv +python ../../paddle_hub/commands/hub.py run hub_module_lac/ --signature lexical_analysis --config resources/test/test.yml --input_file resources/test/test.csv diff --git a/demo/lac/infer_by_code.py b/demo/lac/infer_by_code.py new file mode 100644 index 00000000..a1542860 --- /dev/null +++ b/demo/lac/infer_by_code.py @@ -0,0 +1,46 @@ +import os +import paddle_hub as hub + + +def infer_with_input_text(): + # get lac module + lac = hub.Module(module_dir="hub_module_lac") + + test_text = ["今天是个好日子", "天气预报说今天要下雨", "下一班地铁马上就要到了"] + + # get the input keys for signature 'lexical_analysis' + data_format = lac.processor.data_format(sign_name='lexical_analysis') + key = list(data_format.keys())[0] + + # set input dict + input_dict = {key: test_text} + + # execute predict and print the result + results = lac.lexical_analysis(data=input_dict) + for index, result in enumerate(results): + hub.logger.info( + "sentence %d segmented result: %s" % (index + 1, result['word'])) + + +def infer_with_input_file(): + # get lac module + lac = hub.Module(module_dir="hub_module_lac") + + # get the input keys for signature 'lexical_analysis' + data_format = lac.processor.data_format(sign_name='lexical_analysis') + key = list(data_format.keys())[0] + + # parse input file + test_csv = os.path.join("resources", "test", "test.csv") + test_text = hub.io.reader.csv_reader.read(test_csv)["TEXT_INPUT"] + + # set input dict + input_dict = {key: test_text} + results = lac.lexical_analysis(data=input_dict) + for index, result in enumerate(results): + hub.logger.info( + "sentence 
%d segmented result: %s" % (index + 1, result['word'])) + + +if __name__ == "__main__": + infer_with_input_text() diff --git a/demo/senta/README.md b/demo/senta/README.md new file mode 100644 index 00000000..33dfbd13 --- /dev/null +++ b/demo/senta/README.md @@ -0,0 +1,27 @@ +## 关于senta +https://github.com/baidu/Senta + +## 创建Module +本目录包含了创建一个基于senta预训练模型的Module的脚本。 +通过以下脚本来一键创建一个senta Module +```shell +sh create_module.sh +``` +NOTE: +* 如果进行下面示例的脚本或者代码,请确保执行上述脚本 +* 关于创建Module的API和细节,请查看`create_module.py` + +## 使用Module预测 +该Module创建完成后,可以通过命令行或者python API两种方式进行预测 +### 命令行方式 +`infer.sh`给出了使用命令行调用Module预测的示例脚本 +通过以下命令试验下效果 +```shell +sh infer.sh +``` +### 通过python API +`infer_by_code.py`给出了使用python API调用Module预测的示例代码 +通过以下命令试验下效果 +```shell +python infer_by_code.py +``` diff --git a/demo/senta/create_module.sh b/demo/senta/create_module.sh index 70bd274c..09e51e21 100644 --- a/demo/senta/create_module.sh +++ b/demo/senta/create_module.sh @@ -3,6 +3,23 @@ set -o nounset set -o errexit script_path=$(cd `dirname $0`; pwd) -cd $script_path +module_path=hub_module_senta + +if [ -d $script_path/$module_path ] +then + echo "$module_path already existed!" + exit 0 +fi + +cd $script_path/resources/ + +if [ ! 
-d senta_model ] +then + sh download.sh +fi + +cd $script_path/ python create_module.py + +echo "Successfully create $module_path" diff --git a/demo/senta/infer_by_code.py b/demo/senta/infer_by_code.py new file mode 100644 index 00000000..d814e4fb --- /dev/null +++ b/demo/senta/infer_by_code.py @@ -0,0 +1,46 @@ +import os +import paddle_hub as hub + + +def infer_with_input_text(): + # get senta module + senta = hub.Module(module_dir="hub_module_senta") + + test_text = ["这家餐厅很好吃", "这部电影真的很差劲"] + + # get the input keys for signature 'sentiment_classify' + data_format = senta.processor.data_format(sign_name='sentiment_classify') + key = list(data_format.keys())[0] + + # set input dict + input_dict = {key: test_text} + + # execute predict and print the result + results = senta.sentiment_classify(data=input_dict) + for index, result in enumerate(results): + hub.logger.info("sentence %d segmented result: %s" % + (index + 1, result['sentiment_key'])) + + +def infer_with_input_file(): + # get senta module + senta = hub.Module(module_dir="hub_module_senta") + + # get the input keys for signature 'sentiment_classify' + data_format = senta.processor.data_format(sign_name='sentiment_classify') + key = list(data_format.keys())[0] + + # parse input file + test_csv = os.path.join("resources", "test", "test.csv") + test_text = hub.io.reader.csv_reader.read(test_csv)["TEXT_INPUT"] + + # set input dict + input_dict = {key: test_text} + results = senta.sentiment_classify(data=input_dict) + for index, result in enumerate(results): + hub.logger.info("sentence %d segmented result: %s" % + (index + 1, result['sentiment_key'])) + + +if __name__ == "__main__": + infer_with_input_text() diff --git a/demo/ssd/README.md b/demo/ssd/README.md new file mode 100644 index 00000000..c4dbfd0e --- /dev/null +++ b/demo/ssd/README.md @@ -0,0 +1,27 @@ +## 关于SSD +https://github.com/PaddlePaddle/models/tree/develop/fluid/PaddleCV/object_detection + +## 创建Module +本目录包含了创建一个基于PASCAL 
VOC数据集预训练的SSD模型的Module的脚本。 +通过以下脚本来一键创建一个SSD Module +```shell +sh create_module.sh +``` +NOTE: +* 如果进行下面示例的脚本或者代码,请确保执行上述脚本 +* 关于创建Module的API和细节,请查看`create_module.py` + +## 使用Module预测 +该Module创建完成后,可以通过命令行或者python API两种方式进行预测 +### 命令行方式 +`infer.sh`给出了使用命令行调用Module预测的示例脚本 +通过以下命令试验下效果 +```shell +sh infer.sh +``` +### 通过python API +`infer_by_code.py`给出了使用python API调用Module预测的示例代码 +通过以下命令试验下效果 +```shell +python infer_by_code.py +``` diff --git a/demo/ssd/create_module.sh b/demo/ssd/create_module.sh index 70bd274c..13c9d573 100644 --- a/demo/ssd/create_module.sh +++ b/demo/ssd/create_module.sh @@ -3,6 +3,23 @@ set -o nounset set -o errexit script_path=$(cd `dirname $0`; pwd) -cd $script_path +module_path=hub_module_ssd + +if [ -d $script_path/$module_path ] +then + echo "$module_path already existed!" + exit 0 +fi + +cd $script_path/resources/ + +if [ ! -d ssd_mobilenet_v1_pascalvoc ] +then + sh download.sh +fi + +cd $script_path/ python create_module.py + +echo "Successfully create $module_path" diff --git a/demo/ssd/infer_by_code.py b/demo/ssd/infer_by_code.py new file mode 100644 index 00000000..063e7a87 --- /dev/null +++ b/demo/ssd/infer_by_code.py @@ -0,0 +1,44 @@ +import os +import paddle_hub as hub + + +def infer_with_input_text(): + # get ssd module + ssd = hub.Module(module_dir="hub_module_ssd") + + test_img_path = os.path.join("resources", "test", "test_img_bird.jpg") + + # get the input keys for signature 'object_detection' + data_format = ssd.processor.data_format(sign_name='object_detection') + key = list(data_format.keys())[0] + + # set input dict + input_dict = {key: [test_img_path]} + + # execute predict and print the result + results = ssd.object_detection(data=input_dict) + for result in results: + hub.logger.info(result) + + +def infer_with_input_file(): + # get ssd module + ssd = hub.Module(module_dir="hub_module_ssd") + + # get the input keys for signature 'object_detection' + data_format = ssd.processor.data_format(sign_name='object_detection') + 
key = list(data_format.keys())[0] + + # parse input file + test_csv = os.path.join("resources", "test", "test.csv") + test_images = hub.io.reader.csv_reader.read(test_csv)["IMAGE_PATH"] + + # set input dict + input_dict = {key: test_images} + results = ssd.object_detection(data=input_dict) + for result in results: + hub.logger.info(result) + + +if __name__ == "__main__": + infer_with_input_file() -- GitLab