diff --git a/demo/serving/module_serving/GAN_stgan_celeba/README.md b/demo/serving/module_serving/GAN_stgan_celeba/README.md
index e8a2af1fd0cf37eeee577f8a85cdb81d7de23082..efa6c49807b987da81aab28a4d6c480a4e1b7438 100644
--- a/demo/serving/module_serving/GAN_stgan_celeba/README.md
+++ b/demo/serving/module_serving/GAN_stgan_celeba/README.md
@@ -18,6 +18,14 @@ Loading stgan_celeba successful.
 This completes the deployment of an image generation serving API; the default port is 8866.
 
 ## Step 2: Test the online image generation API
+First, specify the encoding and import the required packages:
+```python
+# coding: utf8
+import requests
+import json
+import base64
+import os
+```
 The sample image we use for testing is:

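For reference, the steps this README walks through can be condensed into one runnable client. This is a minimal sketch, not the shipped demo: the local service address, the sample image path, and the exact form-field names for the face description and target style are assumptions drawn from the surrounding text.

```python
# coding: utf8
# Minimal stgan_celeba client sketch. Assumptions: the service runs locally on
# port 8866, ../img/man.png exists, and the form fields "info"/"style" match
# the module's documented parameters (treat both names as hypothetical).
import base64
import os

import requests

if __name__ == "__main__":
    file_list = ["../img/man.png"]
    files = [("image", open(path, "rb")) for path in file_list]
    data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}
    url = "http://127.0.0.1:8866/predict/image/stgan_celeba"
    r = requests.post(url=url, data=data, files=files)

    # The service returns a stringified Python literal under "results",
    # so the demo decodes it with eval().
    results = eval(r.json()["results"])
    if not os.path.exists("stgan_output"):
        os.mkdir("stgan_output")
    for item in results:
        output_path = os.path.join("stgan_output", item["path"].split("/")[-1])
        with open(output_path, "wb") as fp:
            fp.write(base64.b64decode(item["base64"].split(",")[-1]))
```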
@@ -36,7 +44,6 @@ info is the image description; given the sample image, info should be "Male,Black_Hair,Eyeg
 image is the style to generate; we choose "Bald" as the style of the generated image.
 
-The code is as follows:
 ```python
 >>> # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
 >>> file_list = ["../img/man.png"]
@@ -52,7 +59,14 @@ image is the style to generate; we choose "Bald" as the style of the generated
 >>> url = "http://127.0.0.1:8866/predict/image/stgan_celeba"
 >>> r = requests.post(url=url, data=data, files=files)
 ```
-The stgan_celeba results include the generated image in base64 encoding, which can be converted back into an image file. The code is as follows:
+The stgan_celeba results include the generated image in base64 encoding, which can be converted back into an image file.
+
+We create a dedicated folder for storing the result images:
+```python
+>>> if not os.path.exists("stgan_output"):
+        os.mkdir("stgan_output")
+```
+Then we save the images. The code is as follows:
 ```python
 >>> for item in results:
 ...     with open(output_path, "wb") as fp:
diff --git a/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py b/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py
index 1a7505dd69866308bee30529c20daa212e26e98a..4e20281520c06ec1639474389f86795058a3dd95 100644
--- a/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py
+++ b/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py
@@ -13,12 +13,11 @@ if __name__ == "__main__":
     # Target the stgan_celeba image generation method and send a POST request
     url = "http://127.0.0.1:8866/predict/image/stgan_celeba"
     r = requests.post(url=url, data=data, files=files)
-    print(r.text)
-    results = eval(r.json()["results"])
 
     # Save the generated images to the stgan_output folder and print the model output
     if not os.path.exists("stgan_output"):
         os.mkdir("stgan_output")
+    results = eval(r.json()["results"])
     for item in results:
         output_path = os.path.join("stgan_output", item["path"].split("/")[-1])
         with open(output_path, "wb") as fp:
diff --git a/demo/serving/module_serving/README.md b/demo/serving/module_serving/README.md
index 1991881485d7110c7d8a657f9038ce2f605d4456..66ebb05b9d691e1dd462ae5189eca0641464b35f 100644
--- a/demo/serving/module_serving/README.md
+++ b/demo/serving/module_serving/README.md
@@ -42,7 +42,7 @@ PaddleHub Serving is PaddleHub's one-click model serving deployment tool, which
   This example shows how to use deeplabv3p_xception65_humanseg to deploy an image segmentation service and run online prediction, obtaining the recognition results and the segmented images.
 
-* [Chinese sentiment analysis - based on simnet_bow](../module_serving/semantic_model_simnet_bow)
+* [Chinese sentiment analysis - based on senta_lstm](../module_serving/sentiment_analysis_senta_lstm)
 
   This example shows how to use senta_lstm to deploy a Chinese text sentiment analysis service and run online prediction, obtaining the sentiment analysis results for the texts.
diff --git a/demo/serving/module_serving/classification_vgg11_imagenet/README.md b/demo/serving/module_serving/classification_vgg11_imagenet/README.md
index 5d035c496a9e45310f62736c36976c140b77136d..e82efb98226a63a89037db8e33c41b21530664f6 100644
--- a/demo/serving/module_serving/classification_vgg11_imagenet/README.md
+++ b/demo/serving/module_serving/classification_vgg11_imagenet/README.md
@@ -18,6 +18,12 @@ Loading vgg11_imagenet successful.
 This completes the deployment of an image classification serving API; the default port is 8866.
 
 ## Step 2: Test the online image classification API
+First, import the required packages:
+```python
+>>> import requests
+>>> import json
+```
+
 The sample images we use for testing are:

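Likewise, the classification walkthrough below condenses into the following sketch (assumptions: a local service on port 8866, and the two image paths are illustrative):

```python
# coding: utf8
# Minimal vgg11_imagenet classification client sketch.
# Assumes a local service on port 8866; the image paths are illustrative.
import json

import requests

if __name__ == "__main__":
    file_list = ["../img/cat.jpg", "../img/flower.jpg"]
    files = [("image", open(path, "rb")) for path in file_list]
    url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
    r = requests.post(url=url, files=files)

    # "results" arrives as a stringified Python literal, hence eval()
    # (mirroring the demo script in this patch).
    results = eval(r.json()["results"])
    print(json.dumps(results, indent=4, ensure_ascii=False))
```

The eval() call mirrors the shipped demos; a stricter client could use ast.literal_eval instead.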
@@ -49,11 +55,20 @@ files = [("image", file_1), ("image", file_2)]
 ```
 vgg11_imagenet returns the image classification results together with their probabilities; let's print what the API returns:
 ```python
-
->>> print(json.dumps(r.json(), indent=4, ensure_ascii=False))
-{
-    "results": "[[{'Egyptian cat': 0.540287435054779}], [{'daisy': 0.9976677298545837}]]"
-}
+>>> results = eval(r.json()["results"])
+>>> print(json.dumps(results, indent=4, ensure_ascii=False))
+[
+    [
+        {
+            "Egyptian cat": 0.540287435054779
+        }
+    ],
+    [
+        {
+            "daisy": 0.9976677298545837
+        }
+    ]
+]
 ```
 With that, we have completed the serving deployment and testing of image classification.
diff --git a/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py b/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py
index 6cd7e36e34631943d8ba84e7e8f50c5b45999c92..091f95175143f06457218fcb414c905dd9c42159 100644
--- a/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py
+++ b/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py
@@ -12,5 +12,7 @@ if __name__ == "__main__":
     url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
     r = requests.post(url=url, files=files)
 
+    results = eval(r.json()["results"])
+
     # Print the prediction results
-    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
+    print(json.dumps(results, indent=4, ensure_ascii=False))
diff --git a/demo/serving/module_serving/lexical_analysis_lac/README.md b/demo/serving/module_serving/lexical_analysis_lac/README.md
index 9181ba0a9e8a6548b4e9a3fdf827bbd5fc57eb38..b91346dae42ecaee1d9b0008f7a4c611b3bb4ff2 100644
--- a/demo/serving/module_serving/lexical_analysis_lac/README.md
+++ b/demo/serving/module_serving/lexical_analysis_lac/README.md
@@ -20,7 +20,12 @@ Loading lac successful.
 ## Step 2: Test the online lexical analysis API
 ### Without a custom dictionary
 Once the service is deployed, we can test it. The texts used for testing are `今天是个好日子` and `天气预报说今天要下雨`.
-
+First, specify the encoding and import the required packages:
+```python
+>>> # coding: utf8
+>>> import requests
+>>> import json
+```
 The data to prepare has the format:
 ```python
 {"text": [text_1, text_2, ...]}
 ```
@@ -46,38 +51,26 @@ Loading lac successful.
 # Print the prediction results
 >>> print(json.dumps(r.json(), indent=4, ensure_ascii=False))
 {
+    "msg": "",
     "results": [
         {
             "tag": [
-                "TIME",
-                "v",
-                "q",
-                "n"
+                "TIME", "v", "q", "n"
             ],
             "word": [
-                "今天",
-                "是",
-                "个",
-                "好日子"
+                "今天", "是", "个", "好日子"
             ]
         },
         {
             "tag": [
-                "n",
-                "v",
-                "TIME",
-                "v",
-                "v"
+                "n", "v", "TIME", "v", "v"
             ],
             "word": [
-                "天气预报",
-                "说",
-                "今天",
-                "要",
-                "下雨"
+                "天气预报", "说", "今天", "要", "下雨"
             ]
         }
-    ]
+    ],
+    "status": "0"
 }
 ```
 With that, we have completed the serving deployment and testing of lexical analysis.
@@ -99,3 +92,58 @@ Loading lac successful.
 ```
 The complete test code is in [lac_with_dict_serving_demo.py](lac_with_dict_serving_demo.py).
+
+### How clients request newer module versions
+For some newer module versions, the client request format has changed: it now mirrors the local prediction API more closely, to lower the learning curve.
+Taking lac (2.1.0) as an example, a request made in the way shown above will return:
+```python
+{
+    "Warnning": "This usage is out of date, please use 'application/json' as content-type to post to /predict/lac. See 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.6/docs/tutorial/serving.md' for more details."
+}
+```
+For lac (2.1.0), the request is made as follows:
+```python
+# coding: utf8
+import requests
+import json
+
+if __name__ == "__main__":
+    # Specify the texts to predict on and build the list [text_1, text_2, ... ]
+    text = ["今天是个好日子", "天气预报说今天要下雨"]
+    # Pass the texts to the prediction method by keyword; here the key is "texts"
+    # The local equivalent is lac.analysis_lexical(text=[text1, text2])
+    data = {"texts": text, "batch_size": 1}
+    # Target the lac prediction method and send a POST request
+    url = "http://127.0.0.1:8866/predict/lac"
+    # Set the POST request headers to application/json
+    headers = {"Content-Type": "application/json"}
+
+    r = requests.post(url=url, headers=headers, data=json.dumps(data))
+
+    # Print the prediction results
+    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
+```
+Parsing of the results is the same as in the previous approach; the output looks like this:
+```python
+{
+    "results": [
+        {
+            "tag": [
+                "TIME", "v", "q", "n"
+            ],
+            "word": [
+                "今天", "是", "个", "好日子"
+            ]
+        },
+        {
+            "tag": [
+                "n", "v", "TIME", "v", "v"
+            ],
+            "word": [
+                "天气预报", "说", "今天", "要", "下雨"
+            ]
+        }
+    ]
+}
+```
+For the full details and code of this demo, see [LAC Serving_2.1.0](lac_2.1.0_serving_demo.py).
diff --git a/demo/serving/module_serving/lexical_analysis_lac/lac_2.1.0_serving_demo.py b/demo/serving/module_serving/lexical_analysis_lac/lac_2.1.0_serving_demo.py
index 02571ba6e8deed28a7d834f2660c79094a1a1906..05e40f8a63a68a8909da4a15018dc2fad353ee4d 100644
--- a/demo/serving/module_serving/lexical_analysis_lac/lac_2.1.0_serving_demo.py
+++ b/demo/serving/module_serving/lexical_analysis_lac/lac_2.1.0_serving_demo.py
@@ -4,11 +4,11 @@ import json
 
 if __name__ == "__main__":
     # Specify the texts to predict on and build the dict {"text": [text_1, text_2, ... ]}
-    text = {"text": ["今天是个好日子", "天气预报说今天要下雨"]}
+    text = ["今天是个好日子", "天气预报说今天要下雨"]
     # Pass the texts to the prediction method by keyword; here the key is "data"
-    # The local equivalent is lac.analysis_lexical(data=text)
-    data = {"data": text}
-    # Target the lac prediction method and send a POST request
+    # The local equivalent is lac.analysis_lexical(data=text, batch_size=1)
+    data = {"texts": text, "batch_size": 1}
+    # Target the lac prediction method and send a POST request; the content-type should be JSON
     url = "http://127.0.0.1:8866/predict/lac"
     # Set the POST request headers to application/json
     headers = {"Content-Type": "application/json"}
diff --git a/demo/serving/module_serving/lexical_analysis_lac/lac_serving_demo.py b/demo/serving/module_serving/lexical_analysis_lac/lac_serving_demo.py
index 58d696bc3142966264735ee2f02a66a1ae22839c..c81dce2a4c4e427f611cfdd4e975de20cc591f38 100644
--- a/demo/serving/module_serving/lexical_analysis_lac/lac_serving_demo.py
+++ b/demo/serving/module_serving/lexical_analysis_lac/lac_serving_demo.py
@@ -7,7 +7,7 @@ if __name__ == "__main__":
     text_list = ["今天是个好日子", "天气预报说今天要下雨"]
     text = {"text": text_list}
     # Target the lac prediction method and send a POST request
-    url = "http://127.0.0.1:8866/predict/text/lac"
+    url = "http://0.0.0.0:8866/predict/text/lac"
     r = requests.post(url=url, data=text)
 
     # Print the prediction results
diff --git a/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/README.md b/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/README.md
index 3e93cc87c07b895cc6d5fc40343314f6109211f0..f5c552b213f435bdf88d4cb2a5353d7aceb00db8 100644
--- a/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/README.md
+++ b/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/README.md
@@ -24,6 +24,13 @@ Loading pyramidbox_lite_server_mask successful.
 This completes the deployment of a mask detection serving API; the default port is 8866.
 
 ## Step 2: Test the online mask detection API
+First, specify the encoding and import the required packages:
+```python
+>>> import requests
+>>> import json
+>>> import base64
+>>> import os
+```
 The sample images we use for testing are:

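The mask-detection request and decoding steps shown below combine into one script; this sketch mirrors pyramidbox_lite_server_mask_file_serving_demo.py from this patch, with the sample image paths inferred from the README's example output:

```python
# coding: utf8
# Mask detection client sketch, mirroring the demo script in this patch.
# Assumes a local service on port 8866; the image paths are inferred from
# the example output (family_mask.jpg, woman_mask.jpg).
import base64
import os

import requests

if __name__ == "__main__":
    file_list = ["../img/family_mask.jpg", "../img/woman_mask.jpg"]
    files = [("image", open(path, "rb")) for path in file_list]
    url = "http://127.0.0.1:8866/predict/image/pyramidbox_lite_server_mask"
    # "visual_result": "True" asks the service to also return annotated images
    r = requests.post(url=url, files=files, data={"visual_result": "True"})

    results = eval(r.json()["results"])
    if not os.path.exists("output"):
        os.mkdir("output")
    for item in results:
        # item["path"] carries the original file name; item["base64"] the image
        with open(os.path.join("output", item["path"]), "wb") as fp:
            fp.write(base64.b64decode(item["base64"].split(",")[-1]))
```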
@@ -56,7 +63,7 @@ files = [("image", file_1), ("image", file_2)]
 ```python
 >>> # Target the pyramidbox_lite_server_mask detection method and send a POST request
 >>> url = "http://127.0.0.1:8866/predict/image/pyramidbox_lite_server_mask"
->>> r = requests.post(url=url, files=files)
+>>> r = requests.post(url=url, files=files, data={"visual_result": "True"})
 ```
 We can print what the API returns:
 ```python
@@ -67,63 +74,79 @@ files = [("image", file_1), ("image", file_2)]
     "data": [
         {
             "label": "MASK",
-            "left": 455.5180733203888,
-            "right": 658.8289226293564,
-            "top": 186.38022020459175,
-            "bottom": 442.67284870147705,
-            "confidence": 0.92117363
+            "left": 938.8167103528976,
+            "right": 1126.8890985250473,
+            "top": 335.8177453279495,
+            "bottom": 586.0342741012573,
+            "confidence": 0.9775171
         },
         {
-            "label": "MASK",
-            "left": 938.9076416492462,
-            "right": 1121.0804233551025,
-            "top": 326.9856423139572,
-            "bottom": 586.0468536615372,
-            "confidence": 0.997152
+            "label": "NO MASK",
+            "left": 1166.563014626503,
+            "right": 1331.2186390161514,
+            "top": 298.1251895427704,
+            "bottom": 496.373051404953,
+            "confidence": 0.6512484
         },
         {
-            "label": "NO MASK",
-            "left": 1166.189564704895,
-            "right": 1325.6211009025574,
-            "top": 295.55220007896423,
-            "bottom": 496.9406336545944,
-            "confidence": 0.9346678
+            "label": "MASK",
+            "left": 458.2292696237564,
+            "right": 664.9880893230438,
+            "top": 179.45007160305977,
+            "bottom": 446.70506715774536,
+            "confidence": 0.98069304
         }
     ],
-    "path": "",
+    "path": "family_mask.jpg",
     "id": 1
 },
 {
     "data": [
         {
             "label": "MASK",
-            "left": 1346.7342281341553,
-            "right": 1593.7974529266357,
-            "top": 239.36296990513802,
-            "bottom": 574.6375751495361,
-            "confidence": 0.95378655
+            "left": 1340.4194090366364,
+            "right": 1595.8429119586945,
+            "top": 251.97067219018936,
+            "bottom": 584.6931987404823,
+            "confidence": 0.9681898
         },
         {
             "label": "MASK",
-            "left": 840.5126552581787,
-            "right": 1083.8391423225403,
-            "top": 417.5169044137001,
-            "bottom": 733.8856244087219,
-            "confidence": 0.85434145
+            "left": 839.8990581035614,
+            "right": 1084.293223142624,
+            "top": 446.8751857280731,
+            "bottom": 758.4936121702194,
+            "confidence": 0.9673422
+        },
+        {
+            "label": "NO MASK",
+            "left": 1145.4194769859314,
+            "right": 1253.0083780288696,
+            "top": 128.66552621126175,
+            "bottom": 283.0486469864845,
+            "confidence": 0.97426504
         }
     ],
-    "path": "",
+    "path": "woman_mask.jpg",
     "id": 2
 }
 ]
 ```
 The results show that the face positions and the mask-wearing confidence in the request images were accurately identified.
 
-The pyramidbox_lite_server_mask results also include base64-encoded images with the detection boxes drawn on, which can be converted back into image files. The code is as follows:
+The pyramidbox_lite_server_mask results also include base64-encoded images with the detection boxes drawn on, which can be converted back into image files.
+
+We create a folder for saving the result images:
 ```python
+>>> if not os.path.exists("output"):
+>>>     os.mkdir("output")
+```
+Then we decode and save the image data. The code is as follows:
+```python
+>>> results = eval(r.json()["results"])
 >>> for item in results:
-...     with open(output_path, "wb") as fp:
-...         fp.write(base64.b64decode(item["base64"].split(',')[-1]))
+>>>     with open(output_path, "wb") as fp:
+>>>         fp.write(base64.b64decode(item["base64"].split(',')[-1]))
 ```
 Check the specified output folder and you will see the generated images, as shown below:
diff --git a/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/pyramidbox_lite_server_mask_file_serving_demo.py b/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/pyramidbox_lite_server_mask_file_serving_demo.py
index 9922cd7f3f4dead93b67c986d8f58ebbb6d8989a..0096d2a83b13364d9e3d3ca2f8c7a9370c481c9f 100644
--- a/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/pyramidbox_lite_server_mask_file_serving_demo.py
+++ b/demo/serving/module_serving/object_detection_pyramidbox_lite_server_mask/pyramidbox_lite_server_mask_file_serving_demo.py
@@ -13,12 +13,13 @@ if __name__ == "__main__":
 
     # Target the pyramidbox_lite_server_mask detection method and send a POST request
     url = "http://127.0.0.1:8866/predict/image/pyramidbox_lite_server_mask"
-    r = requests.post(url=url, files=files)
-
-    results = eval(r.json()["results"])
+    r = requests.post(url=url, files=files, data={"visual_result": "True"})
 
+    # Create the folder for saving images
     if not os.path.exists("output"):
         os.mkdir("output")
+
+    results = eval(r.json()["results"])
     for item in results:
         with open(os.path.join("output", item["path"]), "wb") as fp:
             fp.write(base64.b64decode(item["base64"].split(',')[-1]))
diff --git a/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/README.md b/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/README.md
index 68d8a7bbe481f98f4cf6d8a12e7d8c08aa363498..7893fa7b97f94ec5362034866440c263893afc29 100644
--- a/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/README.md
+++ b/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/README.md
@@ -18,6 +18,13 @@ Loading yolov3_darknet53_coco2017 successful.
 This completes the deployment of an object detection serving API; the default port is 8866.
 
 ## Step 2: Test the online object detection API
+First, import the required packages:
+```python
+>>> import requests
+>>> import json
+>>> import base64
+>>> import os
+```
 The sample images we use for testing are:

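The object detection flow below can likewise be sketched as a single client, mirroring yolov3_darknet53_coco2017_serving_demo.py from this patch (the sample image path is a hypothetical placeholder):

```python
# coding: utf8
# Object detection client sketch, mirroring the yolov3 demo script in this
# patch. Assumes a local service on port 8866; the image path is hypothetical.
import base64
import os

import requests

if __name__ == "__main__":
    file_list = ["../img/dog.jpg"]  # hypothetical sample image
    files = [("image", open(path, "rb")) for path in file_list]
    url = "http://127.0.0.1:8866/predict/image/yolov3_darknet53_coco2017"
    r = requests.post(url=url, files=files)

    if not os.path.exists("output"):
        os.mkdir("output")

    results = eval(r.json()["results"])
    for item in results:
        # Save each annotated image under its original file name
        with open(os.path.join("output", item["path"]), "wb") as fp:
            fp.write(base64.b64decode(item["base64"].split(",")[-1]))
```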
@@ -95,8 +102,15 @@ files = [("image", file_1), ("image", file_2)]
 ```
 The results show that the request images were accurately recognized.
 
-The yolov3_darknet53_coco2017 results also include base64-encoded images with the detection boxes drawn on, which can be converted back into image files. The code is as follows:
+The yolov3_darknet53_coco2017 results also include base64-encoded images with the detection boxes drawn on, which can be converted back into image files.
+We create a folder for saving the result images:
 ```python
+>>> if not os.path.exists("output"):
+>>>     os.mkdir("output")
+```
+Then we decode and save the image data. The code is as follows:
+```python
+>>> results = eval(r.json()["results"])
 >>> for item in results:
 ...     with open(output_path, "wb") as fp:
 ...         fp.write(base64.b64decode(item["base64"].split(',')[-1]))
diff --git a/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py b/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py
index a653cbc1ea3aedbb8484a0f0f39d13a8c0aa8a78..cf7fad91f5af96002fd5c2d89bf8acf4404acfe8 100644
--- a/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py
+++ b/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py
@@ -14,11 +14,11 @@ if __name__ == "__main__":
     url = "http://127.0.0.1:8866/predict/image/yolov3_darknet53_coco2017"
     r = requests.post(url=url, files=files)
 
-    results = eval(r.json()["results"])
-
     # Save the detection images to the output folder and print the model output
     if not os.path.exists("output"):
         os.mkdir("output")
+
+    results = eval(r.json()["results"])
     for item in results:
         with open(os.path.join("output", item["path"]), "wb") as fp:
             fp.write(base64.b64decode(item["base64"].split(',')[-1]))
diff --git a/demo/serving/module_serving/semantic_model_simnet_bow/README.md b/demo/serving/module_serving/semantic_model_simnet_bow/README.md
index c55111c84f531731e312cf7c97173192744061d3..636bdd00265d3d278ac3daf272c91f1b6cd8a2ef 100644
--- a/demo/serving/module_serving/semantic_model_simnet_bow/README.md
+++ b/demo/serving/module_serving/semantic_model_simnet_bow/README.md
@@ -20,6 +20,12 @@ Loading lac successful.
 ## Step 2: Test the online semantic model API
 Once the service is deployed, we can test it. The text pairs used for testing are `[这道题太难了:这道题是上一年的考题], [这道题太难了:这道题不简单], [这道题太难了:这道题很有意思]`.
 
+First, specify the encoding and import the required packages:
+```python
+>>> # coding: utf8
+>>> import requests
+>>> import json
+```
 The data to prepare has the format:
 ```python
 {"text_1": [text_a1, text_a2, ... ], "text_2": [text_b1, text_b2, ... ]}
 ```
@@ -48,6 +54,7 @@ Loading lac successful.
 # Print the prediction results
 >>> print(json.dumps(r.json(), indent=4, ensure_ascii=False))
 {
+    "msg": "",
     "results": [
         {
             "similarity": 0.8445,
@@ -64,7 +71,8 @@ Loading lac successful.
             "text_1": "这道题太难了",
             "text_2": "这道题很有意思"
         }
-    ]
+    ],
+    "status": "0"
 }
 ```
 With that, we have completed the serving deployment and testing of the semantic model simnet_bow.
diff --git a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/README.md b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/README.md
index 21294670b59cfd15fa4d761c932e2e23d5ecdd62..1b5a2aabffb04a2729295c20d83ab55d912baeef 100644
--- a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/README.md
+++ b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/README.md
@@ -18,6 +18,14 @@ Loading deeplabv3p_xception65_humanseg successful.
 This completes the deployment of an image segmentation serving API; the default port is 8866.
 
 ## Step 2: Test the online image segmentation API
+First, specify the encoding and import the required packages:
+```python
+>>> # coding: utf8
+>>> import requests
+>>> import json
+>>> import base64
+>>> import os
+```
 The sample images we use for testing are:

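The segmentation walkthrough below likewise condenses into one client, mirroring deeplabv3p_xception65_humanseg_serving_demo.py from this patch (the image path follows the README's girl.jpg example):

```python
# coding: utf8
# Portrait segmentation client sketch, mirroring the humanseg demo script
# in this patch. Assumes a local service on port 8866.
import base64
import os

import requests

if __name__ == "__main__":
    file_list = ["../img/girl.jpg"]
    files = [("image", open(path, "rb")) for path in file_list]
    url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"
    r = requests.post(url=url, files=files)

    if not os.path.exists("output"):
        os.mkdir("output")

    results = eval(r.json()["results"])
    for item in results:
        # item["processed"] is a path like "humanseg_output/girl.png"
        out = os.path.join("output", item["processed"].split("/")[-1])
        with open(out, "wb") as fp:
            fp.write(base64.b64decode(item["base64"].split(",")[-1]))
```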
@@ -51,15 +59,21 @@ files = [("image", file_1), ("image", file_2)]
 >>> results = eval(r.json()["results"])
 >>> print(json.dumps(results, indent=4, ensure_ascii=False))
 [
-     {
-         "origin": "girl.jpg",
-         "processed": "humanseg_output/girl.png"
-     }
+    {
+        "origin": "girl.jpg",
+        "processed": "humanseg_output/girl.png"
+    }
 ]
 ```
-
-The deeplabv3p_xception65_humanseg results also include the base64-encoded segmented portrait images, which can be converted back into image files. The code is as follows:
+The deeplabv3p_xception65_humanseg results also include the base64-encoded segmented portrait images, which can be converted back into image files.
+We create a folder for storing the result images:
 ```python
+>>> if not os.path.exists("output"):
+>>>     os.mkdir("output")
+```
+Then we decode and save the image data. The code is as follows:
+```python
+>>> results = eval(r.json()["results"])
 >>> for item in results:
 ...     with open(output_path, "wb") as fp:
 ...         fp.write(base64.b64decode(item["base64"].split(',')[-1]))
diff --git a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py
index 96b201de4eaac6c6e931517291b98ab9960d171b..77b80754d1e3e9164250483adfa432a6b6595fb7 100644
--- a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py
+++ b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py
@@ -12,11 +12,11 @@ if __name__ == "__main__":
     url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"
     r = requests.post(url=url, files=files)
 
-    results = eval(r.json()["results"])
-
     # Save the segmented images to the output folder and print the model output
     if not os.path.exists("output"):
         os.mkdir("output")
+
+    results = eval(r.json()["results"])
     for item in results:
         with open(
             os.path.join("output", item["processed"].split("/")[-1]),
diff --git a/demo/serving/module_serving/sentiment_analysis_senta_lstm/README.md b/demo/serving/module_serving/sentiment_analysis_senta_lstm/README.md
index ceb1634a06080514789ae37c979cd8ab742fc737..62c9c5c2f13c4a880ef2065b69a455ebec93b003 100644
--- a/demo/serving/module_serving/sentiment_analysis_senta_lstm/README.md
+++ b/demo/serving/module_serving/sentiment_analysis_senta_lstm/README.md
@@ -19,9 +19,16 @@ Loading senta_lstm successful.
 This completes the deployment of a sentiment analysis serving API; the default port is 8866.
 
 ## Step 2: Test the online sentiment analysis API
-Once the service is deployed, we can test it. The texts used for testing are `我不爱吃甜食` and `我喜欢躺在床上看电影`.
+Once the service is deployed, we can test it.
 
-The data to prepare has the format:
+First, specify the encoding and import the required packages:
+```python
+>>> # coding: utf8
+>>> import requests
+>>> import json
+```
+
+The texts used for testing are `我不爱吃甜食` and `我喜欢躺在床上看电影`; the data to prepare has the format:
 ```python
 {"text": [text_1, text_2, ...]}
 ```
@@ -37,47 +44,33 @@ Loading senta_lstm successful.
 ## Step 3: Get and verify the results
 Next, send a request to the sentiment analysis API and get the results. The code is as follows:
 ```python
-# Target the lac prediction method and send a POST request
+# Target the senta_lstm prediction method and send a POST request
 >>> url = "http://127.0.0.1:8866/predict/text/senta_lstm"
 >>> r = requests.post(url=url, data=text)
 ```
-The `LAC` model returns the segmentation result for each text; let's print what the API returns:
+Let's print what the API returns:
 ```python
 # Print the prediction results
 >>> print(json.dumps(r.json(), indent=4, ensure_ascii=False))
 {
+    "msg": "",
     "results": [
         {
-            "tag": [
-                "TIME",
-                "v",
-                "q",
-                "n"
-            ],
-            "word": [
-                "今天",
-                "是",
-                "个",
-                "好日子"
-            ]
+            "negative_probs": 0.7079,
+            "positive_probs": 0.2921,
+            "sentiment_key": "negative",
+            "sentiment_label": 0,
+            "text": "我不爱吃甜食"
         },
         {
-            "tag": [
-                "n",
-                "v",
-                "TIME",
-                "v",
-                "v"
-            ],
-            "word": [
-                "天气预报",
-                "说",
-                "今天",
-                "要",
-                "下雨"
-            ]
+            "negative_probs": 0.0149,
+            "positive_probs": 0.9851,
+            "sentiment_key": "positive",
+            "sentiment_label": 1,
+            "text": "我喜欢躺在床上看电影"
         }
-    ]
+    ],
+    "status": "0"
 }
 ```
 With that, we have completed the serving deployment and testing of sentiment analysis.
diff --git a/demo/serving/module_serving/text_censorship_porn_detection_lstm/README.md b/demo/serving/module_serving/text_censorship_porn_detection_lstm/README.md
index acc4d144b79c2195c084d1c0cbcb81e6259dd5c6..ea0a3135fe3cd301be6e129b3a2af8071c929f8b 100644
--- a/demo/serving/module_serving/text_censorship_porn_detection_lstm/README.md
+++ b/demo/serving/module_serving/text_censorship_porn_detection_lstm/README.md
@@ -19,9 +19,16 @@ Loading porn_detection_lstm successful.
 This completes the deployment of a text censorship serving API; the default port is 8866.
 
 ## Step 2: Test the online text censorship API
-Once the service is deployed, we can test it. The texts used for testing are `黄片下载` and `中国黄页`.
+Once the service is deployed, we can test it.
 
-The data to prepare has the format:
+First, specify the encoding and import the required packages:
+```python
+>>> # coding: utf8
+>>> import requests
+>>> import json
+```
+
+The texts used for testing are `黄片下载` and `中国黄页`; the data to prepare has the format:
 ```python
 {"text": [text_1, text_2, ...]}
 ```
@@ -45,6 +52,7 @@ Loading porn_detection_lstm successful.
 # Print the prediction results
 >>> print(json.dumps(r.json(), indent=4, ensure_ascii=False))
 {
+    "msg": "",
     "results": [
         {
             "not_porn_probs": 0.0121,
@@ -60,7 +68,8 @@ Loading porn_detection_lstm successful.
             "porn_probs": 0.0046,
             "text": "中国黄页"
         }
-    ]
+    ],
+    "status": "0"
 }
 ```
 As you can see, the prediction results for both texts were correctly returned.
diff --git a/docs/tutorial/cmdintro.md b/docs/tutorial/cmdintro.md
index e307febc4cecc27781a431e28a97f441274ac5a0..0fedbb4c4ad55d4a1d8df3ac7c6483f206477ef4 100644
--- a/docs/tutorial/cmdintro.md
+++ b/docs/tutorial/cmdintro.md
@@ -79,14 +79,14 @@ PaddleHub generates some cached data during use; by default this data
 
 ## `config`
 
-Used to view and change paddlehub settings, including the server address and the log level
+Used to view and change paddlehub settings, including the server address and the log level:
 
 `Examples`
 
 * `hub config`: show the current paddlehub settings
 
 * `hub config reset`: restore the paddlehub settings to their defaults
 
-* `hub config server==[address]`: set the current server address to [address]
+* `hub config server==[address]`: set the current paddlehub-server address to [address]; the paddlehub client fetches model information from this address
 
 * `hub config log==[level]`: set the current log level to [level]; valid values are critical, error, warning, info, debug, and nolog, in decreasing order of priority from left to right, where nolog means no log output
 
diff --git a/docs/tutorial/serving.md b/docs/tutorial/serving.md
index a5ad0935e863f19832ec615a35a71c0b688fc7e4..14e505b7b53b0d1aa226be5abfe1ca6a465d1843 100644
--- a/docs/tutorial/serving.md
+++ b/docs/tutorial/serving.md
@@ -31,7 +31,7 @@ $ hub serving start --modules [Module1==Version1, Module2==Version2, ...] \
 |--use_gpu|Use the GPU for prediction; requires paddlepaddle-gpu|
 |--use_multiprocess|Whether to enable concurrent mode; defaults to single-process mode, and multi-core CPU machines are recommended to use it
*`Windows only supports single-process mode`*|
 |--workers|Number of concurrent workers in concurrent mode; defaults to `2*cpu_count-1`, where `cpu_count` is the number of CPU cores|
-
+**NOTE:** --use_gpu cannot be used together with --use_multiprocess.
 #### Starting with a config file
 Start command
 ```shell
@@ -249,7 +249,7 @@ $ PaddleHub Serving will stop.
   This example shows how to use deeplabv3p_xception65_humanseg to deploy an image segmentation service and run online prediction, obtaining the recognition results and the segmented images.
 
-* [Chinese sentiment analysis - based on simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese sentiment analysis - based on senta_lstm](../../demo/serving/module_serving/sentiment_analysis_senta_lstm)
 
   This example shows how to use senta_lstm to deploy a Chinese text sentiment analysis service and run online prediction, obtaining the sentiment analysis results for the texts.
@@ -269,10 +269,10 @@ import json
 
 if __name__ == "__main__":
     # Specify the texts to predict on and build the dict {"text": [text_1, text_2, ... ]}
-    text = {"text": ["今天是个好日子", "天气预报说今天要下雨"]}
+    text = ["今天是个好日子", "天气预报说今天要下雨"]
     # Pass the texts to the prediction method by keyword; here the key is "data"
-    # The local equivalent is lac.analysis_lexical(data=text)
-    data = {"data": text}
+    # The local equivalent is lac.analysis_lexical(texts=[text1, text2])
+    data = {"texts": text, "batch_size": 2}
     # Target the lac prediction method and send a POST request
     url = "http://127.0.0.1:8866/predict/lac"
     # Set the POST request headers to application/json
@@ -283,29 +283,7 @@ if __name__ == "__main__":
     # Print the prediction results
     print(json.dumps(r.json(), indent=4, ensure_ascii=False))
 ```
-Parsing of the results is the same as in the previous approach; the output looks like this:
-```python
-{
-    "results": [
-        {
-            "tag": [
-                "TIME", "v", "q", "n"
-            ],
-            "word": [
-                "今天", "是", "个", "好日子"
-            ]
-        },
-        {
-            "tag": [
-                "n", "v", "TIME", "v", "v"
-            ],
-            "word": [
-                "天气预报", "说", "今天", "要", "下雨"
-            ]
-        }
-    ]
-}
-```
+
 For the full details and code of this demo, see [LAC Serving_2.1.0](../../demo/serving/module_serving/lexical_analysis_lac/lac_2.1.0_serving_demo.py).
 
 ## Bert Service
diff --git a/paddlehub/commands/serving.py b/paddlehub/commands/serving.py
index 5841277ac46252bc96ff945ce40f925c683664bb..c7c5b868df1b39a1f673c1a67608a28d7a932035 100644
--- a/paddlehub/commands/serving.py
+++ b/paddlehub/commands/serving.py
@@ -313,7 +313,7 @@ class ServingCommand(BaseCommand):
             with open(self.args.config, "r") as fp:
                 self.args.config = json.load(fp)
                 self.modules_info = self.args.config["modules_info"]
-                if isinstance(self.module_info, list):
+                if isinstance(self.modules_info, list):
                     raise RuntimeError(
                         "This configuration method is outdated, see 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.6/docs/tutorial/serving.md' for more details."
                    )
@@ -334,9 +334,7 @@ class ServingCommand(BaseCommand):
                         "init_args": {
                             "version": version
                         },
-                        "predict_args": {
-                            "use_gpu": self.args.use_gpu
-                        }
+                        "predict_args": {}
                     }
                 })
@@ -370,7 +368,7 @@ class ServingCommand(BaseCommand):
         if self.args.use_multiprocess is True:
             self.start_app_with_args(self.args.workers)
         else:
-            self.start_app_with_args(1)
+            self.start_single_app_with_args()
 
     @staticmethod
     def show_help():
diff --git a/paddlehub/serving/app_single.py b/paddlehub/serving/app_single.py
index d01fe4ab5e22d962decc5d3e8bd3ef11076c460c..f9e308ef16b2f5f0b5024a449691ec964b78e4d8 100644
--- a/paddlehub/serving/app_single.py
+++ b/paddlehub/serving/app_single.py
@@ -23,6 +23,10 @@ import logging
 import glob
 
 
+def gen_result(status, msg, data):
+    return {"status": status, "msg": msg, "results": data}
+
+
 def predict_v2(module_info, input):
     serving_method_name = module_info["method_name"]
     serving_method = getattr(module_info["module"], serving_method_name)
@@ -32,8 +36,13 @@ def predict_v2(module_info, input):
     for item in serving_method.__code__.co_varnames:
         if item in module_info.keys():
             predict_args.update({item: module_info[item]})
-    output = serving_method(**predict_args)
-    return {"results": output}
+    try:
+        output = serving_method(**predict_args)
+    except Exception as err:
+        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
+        print(curr, " - ", err)
+        return gen_result("-1", "Please check data format!", "")
+    return gen_result("0", "", output)
 
 
 def predict_v2_advanced(module_info, input):
@@ -45,16 +54,21 @@ def predict_v2_advanced(module_info, input):
     for item in serving_method.__code__.co_varnames:
         if item in module_info.keys():
             predict_args.update({item: module_info[item]})
-
-    output = serving_method(**predict_args)
-    return {"results": output}
+    try:
+        output = serving_method(**predict_args)
+    except Exception as err:
+        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
+        print(curr, " - ", err)
+        return gen_result("-1", "Please check data format!", "")
+    return gen_result("0", "", output)
 
 
 def predict_nlp(module_info, input_text, req_id, extra=None):
     method_name = module_info["method_name"]
     predict_method = getattr(module_info["module"], method_name)
-    predict_args = {"data": input_text}
+    predict_args = module_info["predict_args"].copy()
+    predict_args.update({"data": input_text})
     if isinstance(predict_method, functools.partial):
         predict_method = predict_method.func
         predict_args.update({"sign_name": method_name})
@@ -71,20 +85,22 @@ def predict_nlp(module_info, input_text, req_id, extra=None):
     except Exception as err:
         curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
         print(curr, " - ", err)
-        return {"results": "Please check data format!"}
+        return gen_result("-1", "Please check data format!", "")
     finally:
         user_dict = extra.get("user_dict", [])
         for item in user_dict:
             if os.path.exists(item):
                 os.remove(item)
-    return {"results": res}
+    return gen_result("0", "", res)
 
 
 def predict_classification(module_info, input_img, id, extra={}):
     method_name = module_info["method_name"]
     module = module_info["module"]
     predict_method = getattr(module, method_name)
-    predict_args = {"data": {"image": input_img}}
+
+    predict_args = module_info["predict_args"].copy()
+    predict_args.update({"data": {"image": input_img}})
     if isinstance(predict_method, functools.partial):
         predict_method = predict_method.func
         predict_args.update({"sign_name": method_name})
@@ -96,19 +112,21 @@
     except Exception as err:
         curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
         print(curr, " - ", err)
-        return {"result": "Please check data format!"}
+        return gen_result("-1", "Please check data format!", "")
     finally:
         for item in input_img:
             if os.path.exists(item):
                 os.remove(item)
-    return results
+    return gen_result("0", "", str(results))
 
 
 def predict_gan(module_info, input_img, id, extra={}):
     method_name = module_info["method_name"]
     module = module_info["module"]
     predict_method = getattr(module, method_name)
-    predict_args = {"data": {"image": input_img}}
+
+    predict_args = module_info["predict_args"].copy()
+    predict_args.update({"data": {"image": input_img}})
     predict_args["data"].update(extra)
     if isinstance(predict_method, functools.partial):
         predict_method = predict_method.func
@@ -122,7 +140,7 @@ def predict_gan(module_info, input_img, id, extra={}):
     except Exception as err:
         curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
         print(curr, " - ", err)
-        return {"result": "Please check data format!"}
+        return gen_result("-1", "Please check data format!", "")
     finally:
         base64_list = []
         results_pack = []
@@ -141,10 +159,10 @@ def predict_gan(module_info, input_img, id, extra={}):
             results_pack.append(results[index])
             os.remove(item)
             os.remove(output_file)
-    return results_pack
+    return gen_result("0", "", str(results_pack))
 
 
-def predict_mask(module_info, input_img, id, extra=None, r_img=True):
+def predict_mask(module_info, input_img, id, extra=None, r_img=False):
     output_folder = "detection_result"
     method_name = module_info["method_name"]
     module = module_info["module"]
@@ -156,8 +174,10 @@ def predict_mask(module_info, input_img, id, extra=None, r_img=False):
         data.update(input_img)
     if extra is not None:
         data.update(extra)
-        r_img = True if "r_img" in extra.keys() else False
-    predict_args = {"data": data}
+        r_img = True if "visual_result" in extra.keys() else False
+
+    predict_args = module_info["predict_args"].copy()
+    predict_args.update({"data": data})
     if isinstance(predict_method, functools.partial):
         predict_method = predict_method.func
         predict_args.update({"sign_name": method_name})
@@ -170,7 +190,7 @@ def predict_mask(module_info, input_img, id, extra=None, r_img=False):
     except Exception as err:
         curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
         print(curr, " - ", err)
-        return {"result": "Please check data format!"}
+        return gen_result("-1", "Please check data format!", "")
     finally:
         base64_list = []
         results_pack = []
@@ -212,7 +232,7 @@ def predict_mask(module_info, input_img, id, extra=None, r_img=False):
         else:
             results_pack = results
 
-    return results_pack
+    return gen_result("0", "", str(results_pack))
 
 
 def predict_object_detection(module_info, input_img, id, extra={}):
@@ -220,7 +240,10 @@
     method_name = module_info["method_name"]
     module = module_info["module"]
     predict_method = getattr(module, method_name)
-    predict_args = {"data": {"image": input_img}}
+
+    predict_args = module_info["predict_args"].copy()
+    predict_args.update({"data": {"image": input_img}})
+
     if isinstance(predict_method, functools.partial):
         predict_method = predict_method.func
         predict_args.update({"sign_name": method_name})
@@ -232,7 +255,7 @@ def predict_object_detection(module_info, input_img, id, extra={}):
     except Exception as err:
         curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
         print(curr, " - ", err)
-        return {"result": "Please check data format!"}
+        return gen_result("-1", "Please check data format!", "")
"Please check data format!", "") finally: base64_list = [] results_pack = [] @@ -250,14 +273,17 @@ def predict_object_detection(module_info, input_img, id, extra={}): results_pack.append(results[index]) os.remove(item) os.remove(os.path.join(output_folder, item)) - return results_pack + return gen_result("0", "", str(results_pack)) def predict_semantic_segmentation(module_info, input_img, id, extra={}): method_name = module_info["method_name"] module = module_info["module"] predict_method = getattr(module, method_name) - predict_args = {"data": {"image": input_img}} + + predict_args = module_info["predict_args"].copy() + predict_args.update({"data": {"image": input_img}}) + if isinstance(predict_method, functools.partial): predict_method = predict_method.func predict_args.update({"sign_name": method_name}) @@ -269,7 +295,7 @@ def predict_semantic_segmentation(module_info, input_img, id, extra={}): except Exception as err: curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) print(curr, " - ", err) - return {"result": "Please check data format!"} + return gen_result("-1", "Please check data format!", "") finally: base64_list = [] results_pack = [] @@ -291,7 +317,7 @@ def predict_semantic_segmentation(module_info, input_img, id, extra={}): os.remove(item) if output_file_path != "": os.remove(output_file_path) - return results_pack + return gen_result("0", "", str(results_pack)) def create_app(init_flag=False, configs=None): @@ -342,10 +368,10 @@ def create_app(init_flag=False, configs=None): "use 'application/json' as " "content-type to post to " "/predict/%s. See " - "'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.5/docs/tutorial/serving.md' for more details." + "'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.6/docs/tutorial/serving.md' for more details." % (module_name) }) - return results + return gen_result("-1", results, "") req_id = request.data.get("id") img_base64 = request.form.getlist("image") extra_info = {} @@ -364,7 +390,7 @@ def create_app(init_flag=False, configs=None): for item in img_base64: ext = item.split(";")[0].split("/")[-1] if ext not in ["jpeg", "jpg", "png"]: - return {"result": "Unrecognized file type"} + return gen_result("-1", "Unrecognized file type", "") filename = req_id + "_" \ + utils.md5(str(time.time()) + item[0:20]) \ + "." 
@@ -379,9 +405,7 @@ def create_app(init_flag=False, configs=None):
                 file_name = req_id + "_" + item.filename
                 item.save(file_name)
                 file_name_list.append(file_name)
-        # module = default_module_manager.get_module(module_name)
-        # predict_func_name = cv_module_info.get_module_info(module_name)[
-        #     "method_name"]
+
         module = module_info["module"]
         predict_func_name = cv_module_info.cv_module_method.get(module_name, "")
         if predict_func_name != "":
@@ -394,8 +418,8 @@ def create_app(init_flag=False, configs=None):
         if extra_info == {}:
             extra_info = None
         results = predict_func(module_info, file_name_list, req_id, extra_info)
-        r = {"results": str(results)}
-        return r
+
+        return results
 
     @app_instance.route("/predict/text/<module_name>", methods=["POST"])
     def predict_text(module_name):
@@ -403,18 +427,9 @@ def create_app(init_flag=False, configs=None):
             return {"error": "Module {} is not available.".format(module_name)}
         module_info = nlp_module_info.get_module_info(module_name)
         if module_info["code_version"] == "v2":
-            results = {}
-            # results = predict_v2(module_info, inputs)
-            results.update({
-                "Warnning":
-                "This usage is out of date, please "
-                "use 'application/json' as "
-                "content-type to post to "
-                "/predict/%s. See "
-                "'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.5/docs/tutorial/serving.md' for more details."
-                % (module_name)
-            })
-            return results
+            results = "This usage is out of date, please use 'application/json' as content-type to post to /predict/%s. See 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.5/docs/tutorial/serving.md' for more details." % (
+                module_name)
+            return gen_result("-1", results, "")
         req_id = request.data.get("id")
         inputs = {}
         for item in list(request.form.keys()):
@@ -441,8 +456,14 @@ def create_app(init_flag=False, configs=None):
         elif module_name in cv_module_info.cv_modules:
             module_info = cv_module_info.get_module_info(module_name)
         else:
-            return {"Error": "Module {} is not available.".format(module_name)}
+            msg = "Module {} is not available.".format(module_name)
+            return gen_result("-1", msg, "")
         inputs = request.json
+        if inputs is None:
+            results = "This usage is out of date, please use 'application/json' as content-type to post to /predict/%s. See 'https://github.com/PaddlePaddle/PaddleHub/blob/release/v1.5/docs/tutorial/serving.md' for more details." % (
+                module_name)
+            return gen_result("-1", results, "")
+
         results = predict_v2_advanced(module_info, inputs)
         return results
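With gen_result in place, every handler above returns a uniform {"status", "msg", "results"} envelope: status "0" on success, "-1" plus an explanatory msg on failure. A client-side sketch of unwrapping that envelope is shown below; the helper name post_and_unwrap is ours, while the endpoint and payload follow the lac 2.1.0 demo in this patch.

```python
# coding: utf8
# Sketch of client-side handling for the unified response envelope
# produced by gen_result(status, msg, data) above.
import json

import requests

def post_and_unwrap(url, headers, payload):
    """POST a JSON payload and unwrap the {"status", "msg", "results"} envelope."""
    r = requests.post(url=url, headers=headers, data=json.dumps(payload))
    body = r.json()
    # gen_result uses status "0" for success and "-1" with a message on failure
    if body.get("status") != "0":
        raise RuntimeError("prediction failed: %s" % body.get("msg"))
    return body["results"]

if __name__ == "__main__":
    data = {"texts": ["今天是个好日子"], "batch_size": 1}
    results = post_and_unwrap(
        "http://127.0.0.1:8866/predict/lac",
        {"Content-Type": "application/json"}, data)
    print(json.dumps(results, indent=4, ensure_ascii=False))
```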