Commit ab419cc1 authored by: S shenyuhan

1. Change --start to the start sub-command. 2. Add module-to-method dispatch dicts. 3. Add use_multiprocess to the config file.
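
For example, launching serving changes from a flag to a sub-command (the module name and port below are placeholders, not part of this commit):

    hub serving --start -m lac -p 8866    # before: --start flag
    hub serving start -m lac -p 8866      # after: start sub-command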

@@ -19,6 +19,7 @@ from __future__ import print_function
 import argparse
 import os
 import platform
+import socket
 import json
 import paddlehub as hub
@@ -39,9 +40,9 @@ class ServingCommand(BaseCommand):
             usage='%(prog)s',
             add_help=True)
         self.parser.add_argument("command")
+        self.parser.add_argument("sub_command")
-        self.sub_parse = self.parser.add_mutually_exclusive_group(
-            required=False)
-        self.sub_parse.add_argument("--start", action="store_true")
         self.parser.add_argument(
             "--use_gpu", action="store_true", default=False)
         self.parser.add_argument(
@@ -51,7 +52,7 @@ class ServingCommand(BaseCommand):
         self.parser.add_argument("--port", "-p", nargs="+", default=[8866])

     @staticmethod
-    def port_is_open(ip, port):
+    def is_port_occupied(ip, port):
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         try:
             s.connect((ip, int(port)))
@@ -93,19 +94,28 @@ class ServingCommand(BaseCommand):
     @staticmethod
     def start_serving(args):
-        if args.use_multiprocess is True:
-            from paddlehub.serving import app
-        else:
-            from paddlehub.serving import app_single as app
         config_file = args.config
         if config_file is not None:
             config_file = config_file[0]
             if os.path.exists(config_file):
                 with open(config_file, "r") as fp:
                     configs = json.load(fp)
+                    use_multiprocess = configs.get("use_multiprocess", False)
+                    if use_multiprocess is True:
+                        if platform.system() == "Windows":
+                            print(
+                                "Warning: Windows cannot use multiprocess working "
+                                "mode, Hub-Serving will switch to single process mode"
+                            )
+                            from paddlehub.serving import app_single as app
+                        else:
+                            from paddlehub.serving import app
+                    else:
+                        from paddlehub.serving import app_single as app
                     use_gpu = configs.get("use_gpu", False)
                     port = configs.get("port", 8866)
-                    if ServingCommand.port_is_open("127.0.0.1", port) is True:
+                    if ServingCommand.is_port_occupied("127.0.0.1",
+                                                       port) is True:
                         print("Port %s is occupied, please change it." % (port))
                         return False
                     configs = configs.get("modules_info")
@@ -120,11 +130,21 @@ class ServingCommand(BaseCommand):
             else:
                 print("config_file ", config_file, "does not exist.")
         else:
+            if args.use_multiprocess is True:
+                if platform.system() == "Windows":
+                    print(
+                        "Warning: Windows cannot use multiprocess working "
+                        "mode, Hub-Serving will switch to single process mode")
+                    from paddlehub.serving import app_single as app
+                else:
+                    from paddlehub.serving import app
+            else:
+                from paddlehub.serving import app_single as app
             module = args.modules
             if module is not None:
                 use_gpu = args.use_gpu
                 port = args.port[0]
-                if ServingCommand.port_is_open("127.0.0.1", port) is True:
+                if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
                     print("Port %s is occupied, please change it." % (port))
                     return False
                 module_info = ServingCommand.preinstall_modules(module)
@@ -142,9 +162,10 @@ class ServingCommand(BaseCommand):
     def show_help():
         str = "serving <option>\n"
         str += "\tManage PaddleHub-Serving.\n"
-        str += "option:\n"
-        str += "--start\n"
+        str += "sub command:\n"
+        str += "start\n"
         str += "\tStart PaddleHub-Serving if this parameter is specified.\n"
+        str += "option:\n"
         str += "--modules/-m [module1==version, module2==version...]\n"
         str += "\tPre-install modules via this parameter list.\n"
         str += "--port/-p XXXX\n"
@@ -163,7 +184,7 @@ class ServingCommand(BaseCommand):
             print("Please refer to the instructions below.")
             ServingCommand.show_help()
             return False
-        if args.start is True:
+        if args.sub_command == "start":
             ServingCommand.start_serving(args)
         else:
             ServingCommand.show_help()
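The hunk above truncates the renamed port probe. A minimal sketch of the complete helper, assuming the usual connect-and-report pattern (the shutdown call and the except branch are assumptions, not shown in the diff):

    import socket

    def is_port_occupied(ip, port):
        # Probe with a TCP connect; success means something is listening.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ip, int(port)))
            s.shutdown(socket.SHUT_RDWR)  # assumed: close the probe cleanly
            return True
        except socket.error:
            return False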
@@ -22,6 +22,48 @@ import os
 import base64
 import logging

+nlp_module_method = {
+    "lac": "predict_lexical_analysis",
+    "simnet_bow": "predict_sentiment_analysis",
+    "lm_lstm": "predict_pretrained_model",
+    "senta_lstm": "predict_pretrained_model",
+    "senta_gru": "predict_pretrained_model",
+    "senta_cnn": "predict_pretrained_model",
+    "senta_bow": "predict_pretrained_model",
+    "senta_bilstm": "predict_pretrained_model",
+    "emotion_detection_textcnn": "predict_pretrained_model"
+}
+
+cv_module_method = {
+    "vgg19_imagenet": "predict_classification",
+    "vgg16_imagenet": "predict_classification",
+    "vgg13_imagenet": "predict_classification",
+    "vgg11_imagenet": "predict_classification",
+    "shufflenet_v2_imagenet": "predict_classification",
+    "se_resnext50_32x4d_imagenet": "predict_classification",
+    "se_resnext101_32x4d_imagenet": "predict_classification",
+    "resnet_v2_50_imagenet": "predict_classification",
+    "resnet_v2_34_imagenet": "predict_classification",
+    "resnet_v2_18_imagenet": "predict_classification",
+    "resnet_v2_152_imagenet": "predict_classification",
+    "resnet_v2_101_imagenet": "predict_classification",
+    "pnasnet_imagenet": "predict_classification",
+    "nasnet_imagenet": "predict_classification",
+    "mobilenet_v2_imagenet": "predict_classification",
+    "googlenet_imagenet": "predict_classification",
+    "alexnet_imagenet": "predict_classification",
+    "yolov3_coco2017": "predict_object_detection",
+    "ultra_light_fast_generic_face_detector_1mb_640":
+    "predict_object_detection",
+    "ultra_light_fast_generic_face_detector_1mb_320":
+    "predict_object_detection",
+    "ssd_mobilenet_v1_pascal": "predict_object_detection",
+    "pyramidbox_face_detection": "predict_object_detection",
+    "faster_rcnn_coco2017": "predict_object_detection",
+    "cyclegan_cityscapes": "predict_gan",
+    "deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
+    "ace2p": "predict_semantic_segmentation"
+}

 def predict_sentiment_analysis(module, input_text, extra=None):
     global use_gpu
@@ -221,7 +263,7 @@ def create_app():
         global use_gpu
         img_base64 = request.form.getlist("image")
         file_name_list = []
-        if img_base64 != "":
+        if img_base64 != []:
             for item in img_base64:
                 ext = item.split(";")[0].split("/")[-1]
                 if ext not in ["jpeg", "jpg", "png"]:
@@ -241,8 +283,12 @@ def create_app():
                 item.save(file_name)
                 file_name_list.append(file_name)
         module = ImageModelService.get_module(module_name)
-        module_type = module.type.split("/")[-1].replace("-", "_").lower()
-        predict_func = eval("predict_" + module_type)
+        predict_func_name = cv_module_method.get(module_name, "")
+        if predict_func_name != "":
+            predict_func = eval(predict_func_name)
+        else:
+            module_type = module.type.split("/")[-1].replace("-", "_").lower()
+            predict_func = eval("predict_" + module_type)
         results = predict_func(module, file_name_list)
         r = {"results": str(results)}
         return r
@@ -259,8 +305,12 @@ def create_app():
         data = request.form.getlist("text")
         file = request.files.getlist("user_dict")
         module = TextModelService.get_module(module_name)
-        module_type = module.type.split("/")[-1].replace("-", "_").lower()
-        predict_func = eval("predict_" + module_type)
+        predict_func_name = nlp_module_method.get(module_name, "")
+        if predict_func_name != "":
+            predict_func = eval(predict_func_name)
+        else:
+            module_type = module.type.split("/")[-1].replace("-", "_").lower()
+            predict_func = eval("predict_" + module_type)
         file_list = []
         for item in file:
             file_path = req_id + "_" + item.filename
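Condensed for illustration, both routes above now resolve their handler the same way; resolve_predict_func is a hypothetical helper, not a function in this commit:

    def resolve_predict_func(module_name, module, method_map):
        # Prefer the explicit module-to-method mapping added above.
        predict_func_name = method_map.get(module_name, "")
        if predict_func_name != "":
            return eval(predict_func_name)
        # Otherwise derive the handler from the module's declared type,
        # e.g. "CV/object-detection" -> predict_object_detection.
        module_type = module.type.split("/")[-1].replace("-", "_").lower()
        return eval("predict_" + module_type)

The image route would pass cv_module_method and the text route nlp_module_method.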
@@ -26,5 +26,6 @@
         }
     ],
     "use_gpu": false,
-    "port": 8866
+    "port": 8866,
+    "use_multiprocess": false
 }
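
A minimal sketch of how start_serving consumes the new key, condensed from the hunks above (the config path is a placeholder):

    import json
    import platform

    with open("config.json", "r") as fp:  # placeholder path
        configs = json.load(fp)

    # New key; missing in older config files, so default to single process.
    use_multiprocess = configs.get("use_multiprocess", False)
    if use_multiprocess and platform.system() != "Windows":
        from paddlehub.serving import app  # multiprocess mode
    else:
        from paddlehub.serving import app_single as app  # single-process fallback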