Commit 427e3f20 authored by: 走神的阿圆

update serving add no-file

Parent a2effc2a
@@ -6,7 +6,7 @@ import os
 if __name__ == "__main__":
     # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/man.png"]
+    file_list = ["../../../../docs/imgs/man.png"]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Specify the corresponding info and style for each image
     data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}
...
@@ -4,7 +4,9 @@ import json
 if __name__ == "__main__":
     # Specify the images to predict and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/cat.jpg", "../img/flower.jpg"]
+    file_list = [
+        "../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/flower.jpg"
+    ]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Use vgg11_imagenet as the prediction method and send a POST request
     url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
...
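For context, the part of these client demos that the diff truncates ("...") sends the POST request and decodes the returned JSON. The following is a minimal sketch, not part of this commit, assuming the `{"results": ...}` payload that the serving app later in this diff constructs; the print formatting is illustrative:

```python
# Hypothetical completion of the classification client above; not part of this diff.
import json

import requests

file_list = ["../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/flower.jpg"]
files = [("image", (open(item, "rb"))) for item in file_list]
url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"

r = requests.post(url=url, files=files)   # send the images as multipart form data
results = eval(r.json()["results"])       # the server returns str(results) under the "results" key
print(json.dumps(results, indent=4, ensure_ascii=False))
```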
@@ -6,7 +6,9 @@ import os
 if __name__ == "__main__":
     # Specify the images to detect and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/cat.jpg", "../img/dog.jpg"]
+    file_list = [
+        "../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/dog.jpg"
+    ]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Use yolov3_coco2017 as the detection method and send a POST request
     url = "http://127.0.0.1:8866/predict/image/yolov3_darknet53_coco2017"
...
@@ -6,7 +6,7 @@ import os
 if __name__ == "__main__":
     # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/girl.jpg"]
+    file_list = ["../../../../docs/imgs/girl.jpg"]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Use deeplabv3p_xception65_humanseg as the image segmentation method and send a POST request
     url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"
...
@@ -9,7 +9,7 @@
 <div align="center">
-<img src="../docs/imgs/bs.png" aligh="center" width="100%" alt="BS流程图" />
+<img src="../imgs/bs.png" aligh="center" width="100%" alt="BS流程图" />
 </div>
@@ -203,7 +203,7 @@ result = bc.get_result(input_text=input_text)
 ```python
 [[0.9993321895599361, 0.9994612336158751, 0.9999646544456481, 0.732795298099517, -0.34387934207916204, ... ]]
 ```
-See the [example](../demo/serving/bert_service/bert_service_client.py) for the client demo code.
+See the [example](../../demo/serving/bert_service/bert_service_client.py) for the client demo code.
 Run it with the following command:
 ```shell
 $ python bert_service_client.py
...
@@ -81,7 +81,7 @@ http://0.0.0.0:8866/predict/<CATEGORY\>/\<MODULE>
 <p align="center">
-<img src="../docs/imgs/web_demo.png" width="60%" />
+<img src="../imgs/web_demo.png" width="60%" />
 </p>
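The `/predict/<CATEGORY>/<MODULE>` pattern in the hunk header above can be illustrated with a small sketch. The base address and the helper name are illustrative only and not part of this commit:

```python
# Illustrative only: compose a PaddleHub Serving predict URL from category and module name.
BASE_URL = "http://127.0.0.1:8866"  # assumed local deployment, as in the demo clients


def predict_url(category, module):
    # e.g. predict_url("image", "vgg11_imagenet")
    #   -> "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
    return "{}/predict/{}/{}".format(BASE_URL, category, module)
```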
@@ -117,7 +117,7 @@ $ hub serving start -c serving_config.json
 <p align="center">
-<img src="../docs/imgs/start_serving_lac.png" width="100%" />
+<img src="../imgs/start_serving_lac.png" width="100%" />
 </p>
@@ -171,41 +171,41 @@ if __name__ == "__main__":
     }
 ```
-For the details and code of this demo, see [LAC Serving](../demo/serving/module_serving/lexical_analysis_lac). Some other one-click service deployment demos are shown below.
+For the details and code of this demo, see [LAC Serving](../../demo/serving/module_serving/lexical_analysis_lac). Some other one-click service deployment demos are shown below.
 ## Demo: one-click deployment of other models
 For more examples of one-click PaddleHub Serving deployment, see the following demos:
-* [Image classification - based on vgg11_imagenet](../demo/serving/module_serving/classification_vgg11_imagenet)
+* [Image classification - based on vgg11_imagenet](../../demo/serving/module_serving/classification_vgg11_imagenet)
 &emsp;&emsp;This example deploys vgg11_imagenet as a service and runs online prediction to obtain image classification results.
-* [Image generation - based on stgan_celeba](../demo/serving/module_serving/GAN_stgan_celeba)
+* [Image generation - based on stgan_celeba](../../demo/serving/module_serving/GAN_stgan_celeba)
 &emsp;&emsp;This example deploys stgan_celeba as a service and runs online prediction to generate images in a specified style.
-* [Text censorship - based on porn_detection_lstm](../demo/serving/module_serving/text_censorship_porn_detection_lstm)
+* [Text censorship - based on porn_detection_lstm](../../demo/serving/module_serving/text_censorship_porn_detection_lstm)
 &emsp;&emsp;This example deploys porn_detection_lstm as a service and runs online prediction to determine whether Chinese text contains pornographic content, along with a confidence score.
-* [Chinese lexical analysis - based on lac](../demo/serving/module_serving/lexical_analysis_lac)
+* [Chinese lexical analysis - based on lac](../../demo/serving/module_serving/lexical_analysis_lac)
 &emsp;&emsp;This example deploys lac as a Chinese word segmentation service and runs online prediction to obtain segmentation results, which can be adjusted through a user-defined dictionary.
-* [Object detection - based on yolov3_darknet53_coco2017](../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
+* [Object detection - based on yolov3_darknet53_coco2017](../../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
 &emsp;&emsp;This example deploys yolov3_darknet53_coco2017 as an object detection service and runs online prediction to obtain detection results and images with bounding boxes drawn.
-* [Chinese semantic analysis - based on simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese semantic analysis - based on simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
 &emsp;&emsp;This example deploys simnet_bow as a service and runs online prediction to obtain the similarity between Chinese texts.
-* [Image segmentation - based on deeplabv3p_xception65_humanseg](../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
+* [Image segmentation - based on deeplabv3p_xception65_humanseg](../../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
 &emsp;&emsp;This example deploys deeplabv3p_xception65_humanseg as an image segmentation service and runs online prediction to obtain recognition results and segmented images.
-* [Chinese sentiment analysis - based on simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese sentiment analysis - based on simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
 &emsp;&emsp;This example deploys senta_lstm as a Chinese sentiment analysis service and runs online prediction to obtain sentiment analysis results.
...
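For a concrete sense of how a deployed text module such as lac is queried, here is a minimal client sketch. The `text` form field, the `text` category in the URL, and the sample sentence follow the lexical_analysis_lac demo referenced above; they are assumptions, not part of this commit:

```python
# Minimal sketch of a LAC serving client; field name, port and category are assumptions.
import json

import requests

if __name__ == "__main__":
    text_list = ["今天是个好日子"]
    data = {"text": text_list}                  # text modules take form data rather than files
    url = "http://127.0.0.1:8866/predict/text/lac"
    r = requests.post(url=url, data=data)
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```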
@@ -103,6 +103,8 @@ class ServingCommand(BaseCommand):
         self.parser.add_argument("--config", "-c", nargs="?")
         self.parser.add_argument("--port", "-p", nargs="?", default=8866)
         self.parser.add_argument("--gpu", "-i", nargs="?", default=0)
+        self.parser.add_argument(
+            "--use_singleprocess", action="store_true", default=False)

     def dump_pid_file(self):
         pid = os.getpid()
@@ -336,12 +338,13 @@ class ServingCommand(BaseCommand):
     def start_serving(self):
         config_file = self.args.config
+        single_mode = self.args.use_singleprocess
         if config_file is not None:
             if os.path.exists(config_file):
                 with open(config_file, "r") as fp:
                     configs = json.load(fp)
                     use_multiprocess = configs.get("use_multiprocess", False)
-                    if platform.system() == "Windows":
+                    if single_mode is True or platform.system() == "Windows":
                         print(
                             "Warning: Windows cannot use multiprocess working "
                             "mode, PaddleHub Serving will switch to single process mode"
@@ -357,7 +360,7 @@ class ServingCommand(BaseCommand):
             else:
                 print("config_file ", config_file, "not exists.")
         else:
-            if platform.system() == "Windows":
+            if single_mode is True or platform.system() == "Windows":
                 print(
                     "Warning: Windows cannot use multiprocess working "
                     "mode, PaddleHub Serving will switch to single process mode"
...
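The new `--use_singleprocess` flag forces single-process serving even on platforms that would otherwise use multiprocess mode. A hedged usage example follows; the `-m` module flag mirrors the existing `hub serving start` CLI and does not appear in this diff, and the module name is illustrative:

```shell
# Force single-process mode regardless of platform (module name is illustrative).
$ hub serving start -m lac -p 8866 --use_singleprocess
```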
@@ -22,9 +22,12 @@ import os
 import multiprocessing
 import hashlib
 import platform
+import base64

 import paddle.fluid as fluid
 import six
+import numpy as np
+import cv2

 from paddlehub.module import module_desc_pb2
 from paddlehub.common.logger import logger
@@ -51,6 +54,42 @@ def version_compare(version1, version2):
     return len(version1) > len(version2)

+def base64s_to_cvmats(base64s):
+    for index, value in enumerate(base64s):
+        value = bytes(value, encoding="utf8")
+        value = base64.b64decode(value)
+        value = np.fromstring(value, np.uint8)
+        value = cv2.imdecode(value, 1)
+        base64s[index] = value
+    return base64s
+
+
+def handle_mask_results(results):
+    result = []
+    if len(results) <= 0:
+        return results
+    _id = results[0]["id"]
+    _item = {
+        "data": [],
+        "path": results[0].get("path", ""),
+        "id": results[0]["id"]
+    }
+    for item in results:
+        if item["id"] == _id:
+            _item["data"].append(item["data"])
+        else:
+            result.append(_item)
+            _id = _id + 1
+            _item = {
+                "data": [item["data"]],
+                "path": item.get("path", ""),
+                "id": item.get("id", _id)
+            }
+    result.append(_item)
+    return result
+
+
 def get_platform():
     return platform.platform()
...
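To illustrate what the new `base64s_to_cvmats` helper expects, here is a hedged usage sketch. The image path is a placeholder, and the import path assumes the helper lives in `paddlehub.common.utils`, as the surrounding functions in this file suggest:

```python
# Hypothetical usage of the new base64s_to_cvmats helper; "example.jpg" is a placeholder.
import base64

from paddlehub.common import utils

with open("example.jpg", "rb") as fp:
    b64_str = base64.b64encode(fp.read()).decode("utf8")

# The helper converts a list of base64 strings into OpenCV images (numpy arrays) in place.
mats = utils.base64s_to_cvmats([b64_str])
print(mats[0].shape)  # e.g. (height, width, 3)
```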
@@ -18,6 +18,7 @@ import time
 import os
 import base64
 import logging
+import shutil

 cv_module_method = {
     "vgg19_imagenet": "predict_classification",
@@ -47,7 +48,9 @@ cv_module_method = {
     "faster_rcnn_coco2017": "predict_object_detection",
     "cyclegan_cityscapes": "predict_gan",
     "deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
-    "ace2p": "predict_semantic_segmentation"
+    "ace2p": "predict_semantic_segmentation",
+    "pyramidbox_lite_server_mask": "predict_mask",
+    "pyramidbox_lite_mobile_mask": "predict_mask"
 }
@@ -132,6 +135,59 @@ def predict_gan(module, input_img, id, batch_size, extra={}):
     return results_pack

+def predict_mask(module, input_img, id, batch_size, extra=None, r_img=False):
+    output_folder = "detection_result"
+    global use_gpu
+    method_name = module.desc.attr.map.data['default_signature'].s
+    predict_method = getattr(module, method_name)
+    try:
+        data = {}
+        if input_img is not None:
+            input_img = {"image": input_img}
+            data.update(input_img)
+        if extra is not None:
+            data.update(extra)
+            r_img = True if "r_img" in extra.keys() else False
+        results = predict_method(
+            data=data, use_gpu=use_gpu, batch_size=batch_size)
+        results = utils.handle_mask_results(results)
+    except Exception as err:
+        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
+        print(curr, " - ", err)
+        return {"result": "Please check data format!"}
+    finally:
+        base64_list = []
+        results_pack = []
+        if input_img is not None:
+            if r_img is False:
+                shutil.rmtree(output_folder)
+                for index in range(len(results)):
+                    results[index]["path"] = ""
+                results_pack = results
+            else:
+                input_img = input_img.get("image", [])
+                for index in range(len(input_img)):
+                    item = input_img[index]
+                    with open(os.path.join(output_folder, item), "rb") as fp:
+                        b_head = "data:image/" + item.split(".")[-1] + ";base64"
+                        b_body = base64.b64encode(fp.read())
+                        b_body = str(b_body).replace("b'", "").replace("'", "")
+                        b_img = b_head + "," + b_body
+                        base64_list.append(b_img)
+                        results[index]["path"] = results[index]["path"].replace(
+                            id + "_", "") if results[index]["path"] != "" \
+                            else ""
+                        results[index].update({"base64": b_img})
+                        results_pack.append(results[index])
+                    os.remove(item)
+                    os.remove(os.path.join(output_folder, item))
+        else:
+            results_pack = results
+        return results_pack
+
+
 def predict_object_detection(module, input_img, id, batch_size, extra={}):
     output_folder = "detection_result"
     global use_gpu
@@ -253,6 +309,14 @@ def create_app(init_flag=False, configs=None):
         extra_info = {}
         for item in list(request.form.keys()):
             extra_info.update({item: request.form.getlist(item)})
+
+        for key in extra_info.keys():
+            if isinstance(extra_info[key], list):
+                extra_info[key] = utils.base64s_to_cvmats(
+                    eval(extra_info[key][0])["b64s"]) if isinstance(
+                        extra_info[key][0], str
+                    ) and "b64s" in extra_info[key][0] else extra_info[key]
+
         file_name_list = []
         if img_base64 != []:
             for item in img_base64:
@@ -260,7 +324,7 @@
                 if ext not in ["jpeg", "jpg", "png"]:
                     return {"result": "Unrecognized file type"}
                 filename = req_id + "_" \
-                           + utils.md5(str(time.time())+item[0:20]) \
+                           + utils.md5(str(time.time()) + item[0:20]) \
                            + "." \
                            + ext
                 img_data = base64.b64decode(item.split(',')[-1])
@@ -281,6 +345,10 @@
         module_type = module.type.split("/")[-1].replace("-", "_").lower()
         predict_func = eval("predict_" + module_type)
         batch_size = batch_size_dict.get(module_name, 1)
+        if file_name_list == []:
+            file_name_list = None
+        if extra_info == {}:
+            extra_info = None
         results = predict_func(module, file_name_list, req_id, batch_size,
                                extra_info)
         r = {"results": str(results)}
...
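Putting the pieces of this commit together, a client can now pass images to the mask-detection modules either as uploaded files or as base64 strings wrapped in a form field whose value contains a `"b64s"` list, which `base64s_to_cvmats` decodes server side. The sketch below is an illustration only, not part of the commit: the `"image"` field name, the endpoint path, the placeholder file name, and the use of `str()` to serialize the payload are all inferred from the handling code above.

```python
# Hypothetical client for the new mask endpoint; field name, file name and endpoint
# are assumptions inferred from the server-side handling added in this commit.
import base64

import requests

with open("test_mask_detection.jpg", "rb") as fp:  # placeholder image path
    b64_str = base64.b64encode(fp.read()).decode("utf8")

# The server eval()s the form value and looks for a "b64s" key, so send a dict literal string.
data = {"image": str({"b64s": [b64_str]})}
url = "http://127.0.0.1:8866/predict/image/pyramidbox_lite_server_mask"
r = requests.post(url=url, data=data)
print(r.json()["results"])
```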