From 2317a5dc6067258642e002ef79ed9eb0f286cdbc Mon Sep 17 00:00:00 2001
From: David Lin
Date: Tue, 3 Mar 2020 15:10:14 +0800
Subject: [PATCH] Make use of unittest to complete unit testing. (#413)

* Make use of unittest to complete unit testing.

Delete 4 .py scripts and 2 shell scripts.
---
 scripts/test_all_case.sh                     | 29 --------
 scripts/test_all_module.sh                   | 16 -----
 tests/modules/test_lac.py                    | 21 ------
 tests/modules/test_senta.py                  | 14 ----
 tests/modules/test_simnet.py                 | 18 -----
 tests/modules/test_ssd.py                    | 16 -----
 .../{modules => }/resources/test_img_cat.jpg | Bin
 tests/test_module.py                         | 63 ++++++++++++++++++
 8 files changed, 63 insertions(+), 114 deletions(-)
 delete mode 100755 scripts/test_all_case.sh
 delete mode 100755 scripts/test_all_module.sh
 delete mode 100644 tests/modules/test_lac.py
 delete mode 100644 tests/modules/test_senta.py
 delete mode 100644 tests/modules/test_simnet.py
 delete mode 100644 tests/modules/test_ssd.py
 rename tests/{modules => }/resources/test_img_cat.jpg (100%)
 create mode 100644 tests/test_module.py

diff --git a/scripts/test_all_case.sh b/scripts/test_all_case.sh
deleted file mode 100755
index db74b35d..00000000
--- a/scripts/test_all_case.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-set -o errexit
-
-function usage() {
-    echo "usage: sh $0 {test_case_list_file}"
-}
-
-if [ $# -lt 1 ]
-then
-    usage
-    exit 1
-fi
-
-listfile=$1
-base_path=$(cd `dirname $0`/..; pwd)
-test_case_path=${base_path}/tests
-export PYTHONPATH=$base_path:$PYTHONPATH
-
-# install the require package
-cd ${base_path}
-pip install -r requirements.txt
-
-# run all case list in the {listfile}
-cd -
-for test_file in `cat $listfile | grep -v ^#`
-do
-    echo "run test case ${test_file}"
-    python ${test_case_path}/${test_file}.py
-done
diff --git a/scripts/test_all_module.sh b/scripts/test_all_module.sh
deleted file mode 100755
index ee41692e..00000000
--- a/scripts/test_all_module.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-set -o errexit
-
-base_path=$(cd `dirname $0`/..; pwd)
-test_module_path=${base_path}/tests/modules
-
-# install the require package
-cd ${base_path}
-
-# run all case list in the {listfile}
-cd -
-for test_file in `ls $test_module_path | grep test`
-do
-    echo "run module ${test_file}"
-    python $test_module_path/$test_file
-done
diff --git a/tests/modules/test_lac.py b/tests/modules/test_lac.py
deleted file mode 100644
index da48208b..00000000
--- a/tests/modules/test_lac.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#coding:utf-8
-import paddlehub as hub
-import six
-import json
-
-# Load LAC Module
-lac = hub.Module(name="lac")
-test_text = ["今天是个好日子", "天气预报说今天要下雨", "下一班地铁马上就要到了"]
-
-# Set input dict
-inputs = {"text": test_text}
-
-# execute predict and print the result
-results = lac.lexical_analysis(data=inputs)
-for result in results:
-    if six.PY2:
-        print(json.dumps(result['word'], encoding="utf8", ensure_ascii=False))
-        print(json.dumps(result['tag'], encoding="utf8", ensure_ascii=False))
-    else:
-        print(result['word'])
-        print(result['tag'])
diff --git a/tests/modules/test_senta.py b/tests/modules/test_senta.py
deleted file mode 100644
index c86c5c25..00000000
--- a/tests/modules/test_senta.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#coding:utf-8
-import paddlehub as hub
-
-senta = hub.Module(name="senta_bilstm")
-test_text = ["这家餐厅很好吃", "这部电影真的很差劲"]
-input_dict = {"text": test_text}
-results = senta.sentiment_classify(data=input_dict)
-
-for result in results:
-    print(result['text'])
-    print(result['sentiment_label'])
-    print(result['sentiment_key'])
-    print(result['positive_probs'])
-    print(result['negative_probs'])
diff --git a/tests/modules/test_simnet.py b/tests/modules/test_simnet.py
deleted file mode 100644
index b6547a78..00000000
--- a/tests/modules/test_simnet.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#coding:utf-8
-import paddlehub as hub
-
-simnet_bow = hub.Module(name="simnet_bow")
-test_text_1 = ["这道题太难了", "这道题太难了", "这道题太难了"]
-test_text_2 = ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
-
-inputs = {"text_1": test_text_1, "text_2": test_text_2}
-results = simnet_bow.similarity(data=inputs)
-
-max_score = -1
-result_text = ""
-for result in results:
-    if result['similarity'] > max_score:
-        max_score = result['similarity']
-        result_text = result['text_2']
-
-print("The most matching with the %s is %s" % (test_text_1[0], result_text))
diff --git a/tests/modules/test_ssd.py b/tests/modules/test_ssd.py
deleted file mode 100644
index 449a128a..00000000
--- a/tests/modules/test_ssd.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import paddlehub as hub
-import os
-
-ssd = hub.Module(name="ssd_mobilenet_v1_pascal")
-
-base_dir = os.path.dirname(__file__)
-test_img_path = os.path.join(base_dir, "resources", "test_img_cat.jpg")
-
-# set input dict
-input_dict = {"image": [test_img_path]}
-
-# execute predict and print the result
-results = ssd.object_detection(data=input_dict)
-for result in results:
-    print(result['path'])
-    print(result['data'])
diff --git a/tests/modules/resources/test_img_cat.jpg b/tests/resources/test_img_cat.jpg
similarity index 100%
rename from tests/modules/resources/test_img_cat.jpg
rename to tests/resources/test_img_cat.jpg
diff --git a/tests/test_module.py b/tests/test_module.py
new file mode 100644
index 00000000..c2aa65ae
--- /dev/null
+++ b/tests/test_module.py
@@ -0,0 +1,63 @@
+# coding=utf-8
+import os
+import unittest
+import paddlehub as hub
+
+
+class TestHubModule(unittest.TestCase):
+    def test_lac(self):
+        lac = hub.Module(name="lac")
+        test_text = ["今天是个好日子", "天气预报说今天要下雨", "下一班地铁马上就要到了"]
+        inputs = {"text": test_text}
+        results = lac.lexical_analysis(data=inputs)
+        self.assertEqual(results[0]['word'], ['今天', '是', '个', '好日子'])
+        self.assertEqual(results[0]['tag'], ['TIME', 'v', 'q', 'n'])
+        self.assertEqual(results[1]['word'], ['天气预报', '说', '今天', '要', '下雨'])
+        self.assertEqual(results[1]['tag'], ['n', 'v', 'TIME', 'v', 'v'])
+        self.assertEqual(results[2]['word'],
+                         ['下', '一班', '地铁', '马上', '就要', '到', '了'])
+        self.assertEqual(results[2]['tag'],
+                         ['f', 'm', 'n', 'd', 'v', 'v', 'xc'])
+
+    def test_senta(self):
+        senta = hub.Module(name="senta_bilstm")
+        test_text = ["这家餐厅很好吃", "这部电影真的很差劲"]
+        input_dict = {"text": test_text}
+        results = senta.sentiment_classify(data=input_dict)
+        self.assertEqual(results[0]['sentiment_label'], 1)
+        self.assertEqual(results[0]['sentiment_key'], 'positive')
+        self.assertEqual(results[1]['sentiment_label'], 0)
+        self.assertEqual(results[1]['sentiment_key'], 'negative')
+        for result in results:
+            print(result['text'])
+            print(result['positive_probs'])
+            print(result['negative_probs'])
+
+    def test_simnet(self):
+        simnet_bow = hub.Module(name="simnet_bow")
+        test_text_1 = ["这道题太难了", "这道题太难了", "这道题太难了"]
+        test_text_2 = ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
+        inputs = {"text_1": test_text_1, "text_2": test_text_2}
+        results = simnet_bow.similarity(data=inputs)
+        max_score = -1
+        result_text = ""
+        for result in results:
+            if result['similarity'] > max_score:
+                max_score = result['similarity']
+                result_text = result['text_2']
+        print("The most matching with the %s is %s" % (test_text_1[0],
+                                                       result_text))
+
+    def test_ssd(self):
+        ssd = hub.Module(name="ssd_mobilenet_v1_pascal")
+        test_img_path = os.path.join(
+            os.path.dirname(__file__), "resources", "test_img_cat.jpg")
+        input_dict = {"image": [test_img_path]}
+        results = ssd.object_detection(data=input_dict)
+        for result in results:
+            print(result['path'])
+            print(result['data'])
+
+
+if __name__ == "__main__":
+    unittest.main()
--
GitLab
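
A brief usage note (not part of the patch itself; it assumes paddlehub and the project requirements are installed and the commands are issued from the repository root): the consolidated suite added as tests/test_module.py can be run directly, thanks to the unittest.main() guard at the bottom of the file, and standard unittest discovery should also pick it up:

    python tests/test_module.py
    python -m unittest discover -s tests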