Commit ab419cc1 authored by shenyuhan

1. Change the --start option to a start sub-command. 2. Add module-to-method dicts. 3. Add use_multiprocess to the config file.
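This commit replaces the --start flag with a positional sub-command, so the launch invocation changes roughly as follows (the module name and port here are illustrative, not taken from the diff):

    # before: hub serving --start -m lac -p 8866
    # after:  hub serving start -m lac -p 8866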

@@ -19,6 +19,7 @@ from __future__ import print_function
 import argparse
 import os
+import platform
 import socket
 import json
 import paddlehub as hub
@@ -39,9 +40,9 @@ class ServingCommand(BaseCommand):
             usage='%(prog)s',
             add_help=True)
         self.parser.add_argument("command")
+        self.parser.add_argument("sub_command")
         self.sub_parse = self.parser.add_mutually_exclusive_group(
             required=False)
-        self.sub_parse.add_argument("--start", action="store_true")
         self.parser.add_argument(
             "--use_gpu", action="store_true", default=False)
         self.parser.add_argument(
@@ -51,7 +52,7 @@ class ServingCommand(BaseCommand):
         self.parser.add_argument("--port", "-p", nargs="+", default=[8866])

     @staticmethod
-    def port_is_open(ip, port):
+    def is_port_occupied(ip, port):
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         try:
             s.connect((ip, int(port)))
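For reference, the renamed helper treats a successful TCP connect as evidence that something is already listening. A minimal standalone sketch of that pattern, assuming the visible signature (the explicit cleanup is an addition for tidiness, not verbatim from the diff):

    import socket

    def is_port_occupied(ip, port):
        # A successful connect means another process already owns the port.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ip, int(port)))
            return True
        except socket.error:
            return False
        finally:
            s.close()

    print(is_port_occupied("127.0.0.1", 8866))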
@@ -93,19 +94,28 @@ class ServingCommand(BaseCommand):
     @staticmethod
     def start_serving(args):
-        if args.use_multiprocess is True:
-            from paddlehub.serving import app
-        else:
-            from paddlehub.serving import app_single as app
         config_file = args.config
         if config_file is not None:
             config_file = config_file[0]
             if os.path.exists(config_file):
                 with open(config_file, "r") as fp:
                     configs = json.load(fp)
+                    use_multiprocess = configs.get("use_multiprocess", False)
+                    if use_multiprocess is True:
+                        if platform.system() == "Windows":
+                            print(
+                                "Warning: Windows cannot use multiprocess working "
+                                "mode, Hub-Serving will switch to single process mode"
+                            )
+                            from paddlehub.serving import app_single as app
+                        else:
+                            from paddlehub.serving import app
+                    else:
+                        from paddlehub.serving import app_single as app
                     use_gpu = configs.get("use_gpu", False)
                     port = configs.get("port", 8866)
-                    if ServingCommand.port_is_open("127.0.0.1", port) is True:
+                    if ServingCommand.is_port_occupied("127.0.0.1",
+                                                       port) is True:
                         print("Port %s is occupied, please change it." % (port))
                         return False
                     configs = configs.get("modules_info")
@@ -120,11 +130,21 @@ class ServingCommand(BaseCommand):
             else:
                 print("config_file ", config_file, "not exists.")
         else:
+            if args.use_multiprocess is True:
+                if platform.system() == "Windows":
+                    print(
+                        "Warning: Windows cannot use multiprocess working "
+                        "mode, Hub-Serving will switch to single process mode")
+                    from paddlehub.serving import app_single as app
+                else:
+                    from paddlehub.serving import app
+            else:
+                from paddlehub.serving import app_single as app
             module = args.modules
             if module is not None:
                 use_gpu = args.use_gpu
                 port = args.port[0]
-                if ServingCommand.port_is_open("127.0.0.1", port) is True:
+                if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
                     print("Port %s is occupied, please change it." % (port))
                     return False
                 module_info = ServingCommand.preinstall_modules(module)
@@ -142,9 +162,10 @@ class ServingCommand(BaseCommand):
     def show_help():
         str = "serving <option>\n"
         str += "\tManage PaddleHub-Serving.\n"
-        str += "option:\n"
-        str += "--start\n"
+        str += "sub command:\n"
+        str += "start\n"
         str += "\tStart PaddleHub-Serving if specifies this parameter.\n"
+        str += "option:\n"
         str += "--modules/-m [module1==version, module2==version...]\n"
         str += "\tPre-install modules via this parameter list.\n"
         str += "--port/-p XXXX\n"
@@ -163,7 +184,7 @@ class ServingCommand(BaseCommand):
             print("Please refer to the instructions below.")
             ServingCommand.show_help()
             return False
-        if args.start is True:
+        if args.sub_command == "start":
            ServingCommand.start_serving(args)
        else:
            ServingCommand.show_help()
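The run logic now dispatches on the positional sub_command instead of a boolean --start. A minimal runnable sketch of that argparse pattern, using only arguments visible in this diff:

    import argparse

    parser = argparse.ArgumentParser(prog="hub", usage="%(prog)s", add_help=True)
    parser.add_argument("command")      # e.g. "serving"
    parser.add_argument("sub_command")  # e.g. "start", replacing the old --start
    parser.add_argument("--use_gpu", action="store_true", default=False)
    parser.add_argument("--port", "-p", nargs="+", default=[8866])

    args = parser.parse_args(["serving", "start", "--port", "8867"])
    if args.sub_command == "start":
        print("would start serving on port %s" % args.port[0])
    else:
        print("would show help")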
......
@@ -22,6 +22,48 @@ import os
 import base64
 import logging

+nlp_module_method = {
+    "lac": "predict_lexical_analysis",
+    "simnet_bow": "predict_sentiment_analysis",
+    "lm_lstm": "predict_pretrained_model",
+    "senta_lstm": "predict_pretrained_model",
+    "senta_gru": "predict_pretrained_model",
+    "senta_cnn": "predict_pretrained_model",
+    "senta_bow": "predict_pretrained_model",
+    "senta_bilstm": "predict_pretrained_model",
+    "emotion_detection_textcnn": "predict_pretrained_model"
+}
+
+cv_module_method = {
+    "vgg19_imagenet": "predict_classification",
+    "vgg16_imagenet": "predict_classification",
+    "vgg13_imagenet": "predict_classification",
+    "vgg11_imagenet": "predict_classification",
+    "shufflenet_v2_imagenet": "predict_classification",
+    "se_resnext50_32x4d_imagenet": "predict_classification",
+    "se_resnext101_32x4d_imagenet": "predict_classification",
+    "resnet_v2_50_imagenet": "predict_classification",
+    "resnet_v2_34_imagenet": "predict_classification",
+    "resnet_v2_18_imagenet": "predict_classification",
+    "resnet_v2_152_imagenet": "predict_classification",
+    "resnet_v2_101_imagenet": "predict_classification",
+    "pnasnet_imagenet": "predict_classification",
+    "nasnet_imagenet": "predict_classification",
+    "mobilenet_v2_imagenet": "predict_classification",
+    "googlenet_imagenet": "predict_classification",
+    "alexnet_imagenet": "predict_classification",
+    "yolov3_coco2017": "predict_object_detection",
+    "ultra_light_fast_generic_face_detector_1mb_640":
+    "predict_object_detection",
+    "ultra_light_fast_generic_face_detector_1mb_320":
+    "predict_object_detection",
+    "ssd_mobilenet_v1_pascal": "predict_object_detection",
+    "pyramidbox_face_detection": "predict_object_detection",
+    "faster_rcnn_coco2017": "predict_object_detection",
+    "cyclegan_cityscapes": "predict_gan",
+    "deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
+    "ace2p": "predict_semantic_segmentation"
+}

 def predict_sentiment_analysis(module, input_text, extra=None):
     global use_gpu
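The dicts above map a module name to the name of its predict handler, which app_single resolves with eval. A toy sketch of that lookup, with a stand-in handler body:

    nlp_module_method = {"lac": "predict_lexical_analysis"}

    def predict_lexical_analysis(module, input_text, extra=None):
        return "lexical analysis of %s" % input_text  # stand-in handler

    predict_func_name = nlp_module_method.get("lac", "")
    if predict_func_name != "":
        predict_func = eval(predict_func_name)  # same eval-based dispatch as the diff
    print(predict_func(None, ["some text"]))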
@@ -221,7 +263,7 @@ def create_app():
         global use_gpu
         img_base64 = request.form.getlist("image")
         file_name_list = []
-        if img_base64 != "":
+        if img_base64 != []:
             for item in img_base64:
                 ext = item.split(";")[0].split("/")[-1]
                 if ext not in ["jpeg", "jpg", "png"]:
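This emptiness check matters because Flask's getlist() always returns a list, so the old comparison with "" was always true; comparing with [] actually detects a request with no images:

    img_base64 = []          # what getlist("image") yields with no "image" field
    print(img_base64 != "")  # True  -> the old check never caught the empty case
    print(img_base64 != [])  # False -> the new check does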
@@ -241,8 +283,12 @@ def create_app():
                 item.save(file_name)
                 file_name_list.append(file_name)
             module = ImageModelService.get_module(module_name)
-            module_type = module.type.split("/")[-1].replace("-", "_").lower()
-            predict_func = eval("predict_" + module_type)
+            predict_func_name = cv_module_method.get(module_name, "")
+            if predict_func_name != "":
+                predict_func = eval(predict_func_name)
+            else:
+                module_type = module.type.split("/")[-1].replace("-", "_").lower()
+                predict_func = eval("predict_" + module_type)
             results = predict_func(module, file_name_list)
             r = {"results": str(results)}
             return r
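When a module is missing from the dict, the fallback still derives the handler name from the module's type string. A one-line illustration with an assumed type value:

    module_type = "CV/object-detection".split("/")[-1].replace("-", "_").lower()
    print("predict_" + module_type)  # -> predict_object_detection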
@@ -259,8 +305,12 @@ def create_app():
         data = request.form.getlist("text")
         file = request.files.getlist("user_dict")
         module = TextModelService.get_module(module_name)
-        module_type = module.type.split("/")[-1].replace("-", "_").lower()
-        predict_func = eval("predict_" + module_type)
+        predict_func_name = nlp_module_method.get(module_name, "")
+        if predict_func_name != "":
+            predict_func = eval(predict_func_name)
+        else:
+            module_type = module.type.split("/")[-1].replace("-", "_").lower()
+            predict_func = eval("predict_" + module_type)
         file_list = []
         for item in file:
             file_path = req_id + "_" + item.filename
......
@@ -26,5 +26,6 @@
         }
     ],
     "use_gpu": false,
-    "port": 8866
+    "port": 8866,
+    "use_multiprocess": false
 }
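Put together, a config file after this change would look roughly like the following; use_gpu, port, use_multiprocess, and the modules_info key are confirmed by the diff, while the module entry shown is illustrative:

    {
        "modules_info": [
            {
                "module": "lac",
                "version": "1.0.0"
            }
        ],
        "use_gpu": false,
        "port": 8866,
        "use_multiprocess": false
    }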