diff --git a/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py b/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py
index f345ed272f781c5f11c81ea65cc95e18a10f9859..1a7505dd69866308bee30529c20daa212e26e98a 100644
--- a/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py
+++ b/demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py
@@ -6,7 +6,7 @@ import os
if __name__ == "__main__":
    # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
- file_list = ["../img/man.png"]
+ file_list = ["../../../../docs/imgs/man.png"]
files = [("image", (open(item, "rb"))) for item in file_list]
    # Specify the corresponding info and style for each image
data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}
diff --git a/demo/serving/module_serving/GAN_stgan_celeba/stgan_output/Bald_man.png b/demo/serving/module_serving/GAN_stgan_celeba/stgan_output/Bald_man.png
index e5b17a741668a5e1a3d1df7a4fb519b447a36e31..56ef052d9f5d7cf123136989babc582402bb5f21 100644
Binary files a/demo/serving/module_serving/GAN_stgan_celeba/stgan_output/Bald_man.png and b/demo/serving/module_serving/GAN_stgan_celeba/stgan_output/Bald_man.png differ
diff --git a/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py b/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py
index 2927bfb16097acd2fe1087643570d89a121d0707..6cd7e36e34631943d8ba84e7e8f50c5b45999c92 100644
--- a/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py
+++ b/demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py
@@ -4,7 +4,9 @@ import json
if __name__ == "__main__":
    # Specify the images to classify and build the list [("image", img_1), ("image", img_2), ... ]
- file_list = ["../img/cat.jpg", "../img/flower.jpg"]
+ file_list = [
+ "../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/flower.jpg"
+ ]
files = [("image", (open(item, "rb"))) for item in file_list]
    # Use vgg11_imagenet as the prediction method and send a POST request
url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
diff --git a/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py b/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py
index 8b85f223ada1116ddefa8000fadb42b55eb02369..a653cbc1ea3aedbb8484a0f0f39d13a8c0aa8a78 100644
--- a/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py
+++ b/demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py
@@ -6,7 +6,9 @@ import os
if __name__ == "__main__":
    # Specify the images to detect and build the list [("image", img_1), ("image", img_2), ... ]
- file_list = ["../img/cat.jpg", "../img/dog.jpg"]
+ file_list = [
+ "../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/dog.jpg"
+ ]
files = [("image", (open(item, "rb"))) for item in file_list]
    # Use yolov3_darknet53_coco2017 as the detection method and send a POST request
url = "http://127.0.0.1:8866/predict/image/yolov3_darknet53_coco2017"
diff --git a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py
index 1c08c318865ef1f953be9b85a417f954468af739..96b201de4eaac6c6e931517291b98ab9960d171b 100644
--- a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py
+++ b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py
@@ -6,7 +6,7 @@ import os
if __name__ == "__main__":
    # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
- file_list = ["../img/girl.jpg"]
+ file_list = ["../../../../docs/imgs/girl.jpg"]
files = [("image", (open(item, "rb"))) for item in file_list]
    # Use deeplabv3p_xception65_humanseg as the segmentation method and send a POST request
url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"
diff --git a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/output/girl.png b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/output/girl.png
index 667e9e70e5e8f0cf84cc5db7728eada13bf14607..9059ede00e10a410446bce9b5324531dbf5a515a 100644
Binary files a/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/output/girl.png and b/demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/output/girl.png differ
diff --git a/docs/tutorial/bert_service.md b/docs/tutorial/bert_service.md
index 7c3e314ed0ec2bdf0a66c7120978f66e8ca69946..3f96e155dab73d69d3fed97f891653f24db6a585 100644
--- a/docs/tutorial/bert_service.md
+++ b/docs/tutorial/bert_service.md
@@ -9,7 +9,7 @@
-

+
@@ -203,7 +203,7 @@ result = bc.get_result(input_text=input_text)
```python
[[0.9993321895599361, 0.9994612336158751, 0.9999646544456481, 0.732795298099517, -0.34387934207916204, ... ]]
```
-For the client demo code, see the [example](../demo/serving/bert_service/bert_service_client.py).
+For the client demo code, see the [example](../../demo/serving/bert_service/bert_service_client.py).
Run it with the following command:
```shell
$ python bert_service_client.py
diff --git a/docs/tutorial/serving.md b/docs/tutorial/serving.md
index c754a1b7a2737458b84326c347a5295520d76afe..932400984ef4504b77ad3f2967397ab197d20820 100644
--- a/docs/tutorial/serving.md
+++ b/docs/tutorial/serving.md
@@ -81,7 +81,7 @@ http://0.0.0.0:8866/predict//\
-
+
@@ -117,7 +117,7 @@ $ hub serving start -c serving_config.json
-
+
@@ -171,41 +171,41 @@ if __name__ == "__main__":
}
```
-For the details and code of this demo, see [LAC Serving](../demo/serving/module_serving/lexical_analysis_lac). Some other one-step serving deployment demos are shown below.
+For the details and code of this demo, see [LAC Serving](../../demo/serving/module_serving/lexical_analysis_lac). Some other one-step serving deployment demos are shown below.
## Demo: One-Step Serving Deployment for Other Models
For more PaddleHub Serving one-step deployment examples, see the demos below:
-* [Image classification with vgg11_imagenet](../demo/serving/module_serving/classification_vgg11_imagenet)
+* [Image classification with vgg11_imagenet](../../demo/serving/module_serving/classification_vgg11_imagenet)
This example shows how to deploy vgg11_imagenet as a service and run online prediction to obtain image classification results.
-* [Image generation with stgan_celeba](../demo/serving/module_serving/GAN_stgan_celeba)
+* [Image generation with stgan_celeba](../../demo/serving/module_serving/GAN_stgan_celeba)
This example shows how to deploy stgan_celeba as a service and run online prediction to generate images in a specified style.
-* [Text censorship with porn_detection_lstm](../demo/serving/module_serving/text_censorship_porn_detection_lstm)
+* [Text censorship with porn_detection_lstm](../../demo/serving/module_serving/text_censorship_porn_detection_lstm)
This example shows how to deploy porn_detection_lstm as a service and run online prediction to detect pornographic or otherwise sensitive Chinese text, returning whether the text is sensitive along with a confidence score.
-* [Chinese lexical analysis with lac](../demo/serving/module_serving/lexical_analysis_lac)
+* [Chinese lexical analysis with lac](../../demo/serving/module_serving/lexical_analysis_lac)
This example shows how to deploy lac as a service and run online prediction to obtain Chinese word segmentation results, with support for a user-defined dictionary to adjust the segmentation.
-* [Object detection with yolov3_darknet53_coco2017](../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
+* [Object detection with yolov3_darknet53_coco2017](../../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
This example shows how to deploy yolov3_darknet53_coco2017 as a service and run online prediction to obtain detection results and images with the detected bounding boxes drawn on them.
-* [Chinese semantic analysis with simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese semantic analysis with simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
This example shows how to deploy simnet_bow as a service and run online prediction to measure the similarity between Chinese texts.
-* [Image segmentation with deeplabv3p_xception65_humanseg](../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
+* [Image segmentation with deeplabv3p_xception65_humanseg](../../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
This example shows how to deploy deeplabv3p_xception65_humanseg as a service and run online prediction to obtain segmentation results and the segmented images.
-* [Chinese sentiment analysis with simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese sentiment analysis with simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
This example shows how to deploy senta_lstm as a service and run online prediction to obtain sentiment analysis results for Chinese text.
diff --git a/paddlehub/commands/serving.py b/paddlehub/commands/serving.py
index 16b22d2b04459e124f9d3b31616124d699272df5..c4a1043f350f45bc39818e4f857bcc9f982718ff 100644
--- a/paddlehub/commands/serving.py
+++ b/paddlehub/commands/serving.py
@@ -103,6 +103,8 @@ class ServingCommand(BaseCommand):
self.parser.add_argument("--config", "-c", nargs="?")
self.parser.add_argument("--port", "-p", nargs="?", default=8866)
self.parser.add_argument("--gpu", "-i", nargs="?", default=0)
+ self.parser.add_argument(
+ "--use_singleprocess", action="store_true", default=False)
def dump_pid_file(self):
pid = os.getpid()
@@ -336,12 +338,13 @@ class ServingCommand(BaseCommand):
def start_serving(self):
config_file = self.args.config
+ single_mode = self.args.use_singleprocess
if config_file is not None:
if os.path.exists(config_file):
with open(config_file, "r") as fp:
configs = json.load(fp)
use_multiprocess = configs.get("use_multiprocess", False)
- if platform.system() == "Windows":
+ if single_mode is True or platform.system() == "Windows":
print(
"Warning: Windows cannot use multiprocess working "
"mode, PaddleHub Serving will switch to single process mode"
@@ -357,7 +360,7 @@ class ServingCommand(BaseCommand):
else:
print("config_file ", config_file, "not exists.")
else:
- if platform.system() == "Windows":
+ if single_mode is True or platform.system() == "Windows":
print(
"Warning: Windows cannot use multiprocess working "
"mode, PaddleHub Serving will switch to single process mode"
diff --git a/paddlehub/common/utils.py b/paddlehub/common/utils.py
index 45c02a427b078537872ad5b3113a3965f917fbd2..7bd4e74a001ff68aac4d792404bbb04878b3b2f6 100644
--- a/paddlehub/common/utils.py
+++ b/paddlehub/common/utils.py
@@ -22,9 +22,12 @@ import os
import multiprocessing
import hashlib
import platform
+import base64
import paddle.fluid as fluid
import six
+import numpy as np
+import cv2
from paddlehub.module import module_desc_pb2
from paddlehub.common.logger import logger
@@ -51,6 +54,42 @@ def version_compare(version1, version2):
return len(version1) > len(version2)
+def base64s_to_cvmats(base64s):
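+    # Decode a list of base64-encoded image strings into OpenCV BGR ndarrays,
+    # replacing each entry of the input list in place.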
+ for index, value in enumerate(base64s):
+ value = bytes(value, encoding="utf8")
+ value = base64.b64decode(value)
+        value = np.frombuffer(value, np.uint8)
+ value = cv2.imdecode(value, 1)
+
+ base64s[index] = value
+ return base64s
+
+
+def handle_mask_results(results):
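+    # Group per-face mask detection results by image id so that each input
+    # image maps to one entry whose "data" field collects all of its faces.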
+ result = []
+ if len(results) <= 0:
+ return results
+ _id = results[0]["id"]
+ _item = {
+ "data": [],
+ "path": results[0].get("path", ""),
+ "id": results[0]["id"]
+ }
+ for item in results:
+ if item["id"] == _id:
+ _item["data"].append(item["data"])
+ else:
+ result.append(_item)
+ _id = _id + 1
+ _item = {
+ "data": [item["data"]],
+ "path": item.get("path", ""),
+ "id": item.get("id", _id)
+ }
+ result.append(_item)
+ return result
+
+
def get_platform():
return platform.platform()
diff --git a/paddlehub/serving/app_single.py b/paddlehub/serving/app_single.py
index 96fd56f6cd5fcf45a22239c690e309b74b87608f..88d504869113bdc063c75ac3fe10a0eec142463e 100644
--- a/paddlehub/serving/app_single.py
+++ b/paddlehub/serving/app_single.py
@@ -18,6 +18,7 @@ import time
import os
import base64
import logging
+import shutil
cv_module_method = {
"vgg19_imagenet": "predict_classification",
@@ -47,7 +48,9 @@ cv_module_method = {
"faster_rcnn_coco2017": "predict_object_detection",
"cyclegan_cityscapes": "predict_gan",
"deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
- "ace2p": "predict_semantic_segmentation"
+ "ace2p": "predict_semantic_segmentation",
+ "pyramidbox_lite_server_mask": "predict_mask",
+ "pyramidbox_lite_mobile_mask": "predict_mask"
}
@@ -132,6 +135,59 @@ def predict_gan(module, input_img, id, batch_size, extra={}):
return results_pack
+def predict_mask(module, input_img, id, batch_size, extra=None, r_img=False):
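+    # Serving handler for the face mask detection modules: run the module's
+    # default signature, group the raw results per image, and, when the client
+    # passes "r_img", return the rendered result images as base64 strings.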
+ output_folder = "detection_result"
+ global use_gpu
+ method_name = module.desc.attr.map.data['default_signature'].s
+ predict_method = getattr(module, method_name)
+    # Make sure "results" exists even if prediction fails, since the finally
+    # block below iterates over it.
+    results = []
+    try:
+ data = {}
+ if input_img is not None:
+ input_img = {"image": input_img}
+ data.update(input_img)
+ if extra is not None:
+ data.update(extra)
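+            # An "r_img" key in the extra form fields asks the server to return
+            # the rendered result images in the response.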
+            r_img = "r_img" in extra
+ results = predict_method(
+ data=data, use_gpu=use_gpu, batch_size=batch_size)
+ results = utils.handle_mask_results(results)
+ except Exception as err:
+ curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
+ print(curr, " - ", err)
+ return {"result": "Please check data format!"}
+ finally:
+ base64_list = []
+ results_pack = []
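+        # Build the response from the grouped results; without "r_img" the
+        # rendered images are discarded and only the raw results are returned.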
+ if input_img is not None:
+ if r_img is False:
+ shutil.rmtree(output_folder)
+ for index in range(len(results)):
+ results[index]["path"] = ""
+ results_pack = results
+ else:
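+                # Re-encode every rendered result image as a base64 data URL and
+                # remove the temporary input and output files afterwards.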
+ input_img = input_img.get("image", [])
+ for index in range(len(input_img)):
+ item = input_img[index]
+ with open(os.path.join(output_folder, item), "rb") as fp:
+ b_head = "data:image/" + item.split(".")[-1] + ";base64"
+ b_body = base64.b64encode(fp.read())
+                        b_body = b_body.decode("utf8")
+ b_img = b_head + "," + b_body
+ base64_list.append(b_img)
+ results[index]["path"] = results[index]["path"].replace(
+ id + "_", "") if results[index]["path"] != "" \
+ else ""
+
+ results[index].update({"base64": b_img})
+ results_pack.append(results[index])
+ os.remove(item)
+ os.remove(os.path.join(output_folder, item))
+ else:
+ results_pack = results
+
+ return results_pack
+
+
def predict_object_detection(module, input_img, id, batch_size, extra={}):
output_folder = "detection_result"
global use_gpu
@@ -253,6 +309,14 @@ def create_app(init_flag=False, configs=None):
extra_info = {}
for item in list(request.form.keys()):
extra_info.update({item: request.form.getlist(item)})
+
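+        # A form field may carry a serialized {"b64s": [...]} payload of
+        # base64-encoded images; decode those into OpenCV mats before predicting.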
+ for key in extra_info.keys():
+ if isinstance(extra_info[key], list):
+ extra_info[key] = utils.base64s_to_cvmats(
+ eval(extra_info[key][0])["b64s"]) if isinstance(
+ extra_info[key][0], str
+ ) and "b64s" in extra_info[key][0] else extra_info[key]
+
file_name_list = []
if img_base64 != []:
for item in img_base64:
@@ -260,7 +324,7 @@ def create_app(init_flag=False, configs=None):
if ext not in ["jpeg", "jpg", "png"]:
return {"result": "Unrecognized file type"}
filename = req_id + "_" \
- + utils.md5(str(time.time())+item[0:20]) \
+ + utils.md5(str(time.time()) + item[0:20]) \
+ "." \
+ ext
img_data = base64.b64decode(item.split(',')[-1])
@@ -281,6 +345,10 @@ def create_app(init_flag=False, configs=None):
module_type = module.type.split("/")[-1].replace("-", "_").lower()
predict_func = eval("predict_" + module_type)
batch_size = batch_size_dict.get(module_name, 1)
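+        # Normalize empty inputs to None so the predict functions can tell
+        # "nothing supplied" apart from an empty list or dict.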
+ if file_name_list == []:
+ file_name_list = None
+ if extra_info == {}:
+ extra_info = None
results = predict_func(module, file_name_list, req_id, batch_size,
extra_info)
r = {"results": str(results)}