Commit c223d381 authored by 走神的阿圆

update serving v2

Parent fcf87035
......@@ -27,6 +27,8 @@ from paddlehub.commands.base_command import BaseCommand, ENTRY
from paddlehub.serving import app_single as app
from paddlehub.common.dir import CONF_HOME
from paddlehub.common.hub_server import CacheUpdater
from paddlehub.serving.model_service.base_model_service import cv_module_info
from paddlehub.serving.model_service.base_model_service import nlp_module_info
import multiprocessing
import time
import signal
......@@ -105,6 +107,11 @@ class ServingCommand(BaseCommand):
self.parser.add_argument("--gpu", "-i", nargs="?", default=0)
self.parser.add_argument(
"--use_singleprocess", action="store_true", default=False)
self.parser.add_argument(
"--modules_info", "-mi", default={}, type=json.loads)
self.parser.add_argument(
"--workers", "-w", nargs="?", default=number_of_workers())
self.modules_info = {}
def dump_pid_file(self):
pid = os.getpid()
......@@ -184,76 +191,59 @@ class ServingCommand(BaseCommand):
except:
return False
@staticmethod
def preinstall_modules(modules):
configs = []
module_exist = {}
if modules is not None:
for module in modules:
module_name = module if "==" not in module else \
module.split("==")[0]
module_version = None if "==" not in module else \
module.split("==")[1]
if module_exist.get(module_name, "") != "":
print(module_name, "==", module_exist.get(module_name),
" will be ignored cause new version is specified.")
configs.pop()
module_exist.update({module_name: module_version})
try:
CacheUpdater(
"hub_serving_start",
module=module_name,
version=module_version).start()
m = hub.Module(name=module_name, version=module_version)
method_name = m.desc.attr.map.data['default_signature'].s
if method_name == "":
raise RuntimeError("{} cannot be use for "
"predicting".format(module_name))
configs.append({
"module": module_name,
"version": m.version,
"category": str(m.type).split("/")[0].upper()
})
except Exception as err:
print(err, ", start PaddleHub Serving unsuccessfully.")
exit(1)
return configs
def preinstall_modules(self):
for key, value in self.modules_info.items():
init_args = value["init_args"]
CacheUpdater(
"hub_serving_start",
module=key,
version=init_args.get("version", "0.0.0")).start()
if "dir" not in init_args:
init_args.update({"name": key})
m = hub.Module(**init_args)
method_name = m.serving_func_name
if method_name is None:
raise RuntimeError(
"{} cannot be used for predicting.".format(key))
category = str(m.type).split("/")[0].upper()
self.modules_info[key].update({
"method_name": method_name,
"code_version": m.code_version,
"version": m.version,
"category": category,
"module": m,
"name": m.name
})
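For reference, a hypothetical example (field values assumed, not taken from this commit) of what one modules_info entry looks like after preinstall_modules has enriched it in place:

# Illustrative only -- modules_info["lac"] after preinstall_modules():
# {
#     "init_args": {"version": "2.1.0", "name": "lac"},
#     "predict_args": {"use_gpu": False},
#     "method_name": "lexical_analysis",   # assumed serving function name
#     "code_version": "v2",
#     "version": "2.1.0",
#     "category": "NLP",
#     "module": <hub.Module instance>,
#     "name": "lac"
# }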
def start_app_with_file(self):
port = self.args.config.get("port", 8866)
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
print("Port %s is occupied, please change it." % port)
return False
modules = configs.get("modules_info")
module = [str(i["module"]) + "==" + str(i["version"]) for i in modules]
module_info = ServingCommand.preinstall_modules(module)
for index in range(len(module_info)):
modules[index].update(module_info[index])
self.modules_info = self.args.config.get("modules_info")
self.preinstall_modules()
options = {
"bind": "0.0.0.0:%s" % port,
"workers": workers,
"workers": self.args.workers,
"pid": "./pid.txt"
}
configs["modules_info"] = modules
self.dump_pid_file()
StandaloneApplication(
app.create_app(init_flag=False, configs=self.modules_info),
options).run()
def start_single_app_with_file(self):
port = self.args.config.get("port", 8866)
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
print("Port %s is occupied, please change it." % port)
return False
configs = configs.get("modules_info")
module = [str(i["module"]) + "==" + str(i["version"]) for i in configs]
module_info = ServingCommand.preinstall_modules(module)
for index in range(len(module_info)):
configs[index].update(module_info[index])
self.modules_info = self.args.config.get("modules_info")
self.preinstall_modules()
self.dump_pid_file()
app.run(configs=self.modules_info, port=port)
@staticmethod
def start_multi_app_with_file(configs):
......@@ -270,23 +260,15 @@ class ServingCommand(BaseCommand):
def start_app_with_args(self, workers):
module = self.args.modules
if module is not None:
port = self.args.port
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
print("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
options = {"bind": "0.0.0.0:%s" % port, "workers": workers}
configs = {"use_gpu": use_gpu, "modules_info": module_info}
self.dump_pid_file()
StandaloneApplication(
app.create_app(init_flag=False, configs=self.modules_info),
options).run()
else:
print("Lack of necessary parameters!")
......@@ -294,41 +276,27 @@ class ServingCommand(BaseCommand):
def start_single_app_with_args(self):
module = self.args.modules
if module is not None:
port = self.args.port
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
print("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
self.dump_pid_file()
app.run(configs=self.modules_info, port=port)
else:
print("Lack of necessary parameters!")
def start_multi_app_with_args(self):
module = self.args.modules
if module is not None:
port = self.args.port
workers = number_of_workers()
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
print("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
options = {"bind": "0.0.0.0:%s" % port, "workers": workers}
configs = {"use_gpu": use_gpu, "modules_info": module_info}
configs = {"modules_info": self.module_info}
StandaloneApplication(
app.create_app(init_flag=False, configs=configs),
options).run()
......@@ -336,31 +304,51 @@ class ServingCommand(BaseCommand):
else:
print("Lack of necessary parameters!")
def link_module_info(self):
if self.args.config:
if os.path.exists(self.args.config):
with open(self.args.config, "r") as fp:
self.args.config = json.load(fp)
self.modules_info = self.args.config["modules_info"]
else:
raise RuntimeError("{} does not exist.".format(self.args.config))
else:
for item in self.args.modules:
version = None
if "==" in item:
module = item.split("==")[0]
version = item.split("==")[1]
else:
module = item
self.modules_info.update({
module: {
"init_args": {
"version": version
},
"predict_args": {
"use_gpu": self.args.use_gpu
}
}
})
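For reference, a short sketch (module name and flag values assumed, not from this commit) of the structure link_module_info builds when modules are passed on the command line:

# Hypothetical result of parsing `hub serving start -m lac==2.1.0`:
modules_info = {
    "lac": {
        "init_args": {
            "version": "2.1.0"  # None when no "==version" suffix is given
        },
        "predict_args": {
            "use_gpu": False  # mirrors the --use_gpu flag
        }
    }
}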
def start_serving(self):
single_mode = self.args.use_singleprocess
if self.args.config is not None:
self.args.workers = self.args.config.get("workers",
number_of_workers())
use_multiprocess = self.args.config.get("use_multiprocess", False)
if use_multiprocess is False:
self.start_single_app_with_file()
elif platform.system() == "Windows":
print(
"Warning: Windows does not support multiprocess mode; "
"PaddleHub Serving will switch to single-process mode.")
self.start_single_app_with_file()
else:
self.start_app_with_file()
else:
if single_mode is True:
self.start_single_app_with_args()
......@@ -372,7 +360,7 @@ class ServingCommand(BaseCommand):
self.start_single_app_with_args()
else:
if self.args.use_multiprocess is True:
self.start_app_with_args(number_of_workers())
self.start_app_with_args(self.args.workers)
else:
self.start_app_with_args(1)
......@@ -393,10 +381,10 @@ class ServingCommand(BaseCommand):
str += "\tPre-install modules via the parameter list.\n"
str += "--port/-p XXXX\n"
str += "\tUse port XXXX for serving.\n"
str += "--use_gpu\n"
str += "\tUse gpu for predicting if you specify the parameter.\n"
str += "--use_multiprocess\n"
str += "\tChoose multoprocess mode, cannot be use on Windows.\n"
str += "--modules_info\n"
str += "\tSet module config in PaddleHub Serving."
str += "--config/-c file_path\n"
str += "\tUse configs in file to start PaddleHub Serving. "
str += "Other parameters will be ignored if you specify the parameter.\n"
......@@ -422,6 +410,7 @@ class ServingCommand(BaseCommand):
except:
ServingCommand.show_help()
return False
self.link_module_info()
if self.args.sub_command == "start":
if self.args.bert_service == "bert_service":
ServingCommand.start_bert_serving(self.args)
......
......@@ -65,10 +65,14 @@ def base64s_to_cvmats(base64s):
return base64s
def handle_mask_results(results, data_len):
result = []
if len(results) <= 0 and data_len != 0:
return [{
"data": "No face.",
"id": i,
"path": ""
} for i in range(1, data_len + 1)]
_id = results[0]["id"]
_item = {
"data": [],
......@@ -87,6 +91,15 @@ def handle_mask_results(results):
"id": item.get("id", _id)
}
result.append(_item)
for index in range(1, data_len + 1):
if index > len(result):
result.append({"data": "No face.", "id": index, "path": ""})
elif result[index - 1]["id"] != index:
result.insert(index - 1, {
"data": "No face.",
"id": index,
"path": ""
})
return result
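A short worked example of the padding above, assuming three input images where faces were detected only in images 1 and 3:

# Hypothetical call; input and output shapes assumed:
# handle_mask_results([{..., "id": 1}, {..., "id": 3}], data_len=3)
# -> [{..., "id": 1},
#     {"data": "No face.", "id": 2, "path": ""},  # inserted for the gap
#     {..., "id": 3}]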
......
......@@ -134,6 +134,19 @@ def runnable(func):
return _wrapper
_module_serving_func = {}
def serving(func):
mod = func.__module__ + "." + inspect.stack()[1][3]
_module_serving_func[mod] = func.__name__
def _wrapper(*args, **kwargs):
return func(*args, **kwargs)
return _wrapper
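A minimal sketch (class and method names assumed, not part of this commit) of how a module marks its serving entry point with the decorator above; the registration key combines the defining module path with the enclosing class name taken from the call stack:

class TextModule(Module):  # hypothetical v2 module
    @serving
    def predict(self, data):
        # Registered in _module_serving_func, so serving_func_name
        # on instances of this class resolves to "predict".
        return [len(item) for item in data]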
class Module(object):
_record = {}
......@@ -184,6 +197,7 @@ class Module(object):
self._run_func = getattr(self, _run_func_name)
else:
self._run_func = None
self._serving_func_name = _module_serving_func.get(mod, None)
self._code_version = "v2"
self._directory = directory
self.module_desc_path = os.path.join(self.directory, MODULE_DESC_PBNAME)
......@@ -292,6 +306,10 @@ class Module(object):
def is_runnable(self):
return self._run_func is not None
@property
def serving_func_name(self):
return self._serving_func_name
def _initialize(self):
pass
......@@ -353,6 +371,11 @@ class ModuleV1(Module):
self._restore_parameter(self.program)
self._recover_variable_info(self.program)
@property
def serving_func_name(self):
serving_func_name = self.desc.attr.map.data['default_signature'].s
return serving_func_name if serving_func_name != "" else None
def _dump_processor(self):
import inspect
pymodule = inspect.getmodule(self.processor)
......@@ -576,6 +599,10 @@ class ModuleV1(Module):
def is_runnable(self):
return self.default_signature is not None
@property
def code_version(self):
return self._code_version
def context(self,
sign_name=None,
for_test=False,
......
......@@ -12,62 +12,47 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from flask import Flask, request, render_template
from paddlehub.serving.model_service.model_manage import default_module_manager
from paddlehub.serving.model_service.base_model_service import cv_module_info
from paddlehub.serving.model_service.base_model_service import nlp_module_info
from paddlehub.common import utils
import functools
import time
import os
import base64
import logging
import shutil
cv_module_method = {
"vgg19_imagenet": "predict_classification",
"vgg16_imagenet": "predict_classification",
"vgg13_imagenet": "predict_classification",
"vgg11_imagenet": "predict_classification",
"shufflenet_v2_imagenet": "predict_classification",
"se_resnext50_32x4d_imagenet": "predict_classification",
"se_resnext101_32x4d_imagenet": "predict_classification",
"resnet_v2_50_imagenet": "predict_classification",
"resnet_v2_34_imagenet": "predict_classification",
"resnet_v2_18_imagenet": "predict_classification",
"resnet_v2_152_imagenet": "predict_classification",
"resnet_v2_101_imagenet": "predict_classification",
"pnasnet_imagenet": "predict_classification",
"nasnet_imagenet": "predict_classification",
"mobilenet_v2_imagenet": "predict_classification",
"googlenet_imagenet": "predict_classification",
"alexnet_imagenet": "predict_classification",
"yolov3_coco2017": "predict_object_detection",
"ultra_light_fast_generic_face_detector_1mb_640":
"predict_object_detection",
"ultra_light_fast_generic_face_detector_1mb_320":
"predict_object_detection",
"ssd_mobilenet_v1_pascal": "predict_object_detection",
"pyramidbox_face_detection": "predict_object_detection",
"faster_rcnn_coco2017": "predict_object_detection",
"cyclegan_cityscapes": "predict_gan",
"deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
"ace2p": "predict_semantic_segmentation",
"pyramidbox_lite_server_mask": "predict_mask",
"pyramidbox_lite_mobile_mask": "predict_mask"
}
import glob
def predict_v2(module_info, input_data):
serving_method_name = module_info["method_name"]
serving_method = getattr(module_info["module"], serving_method_name)
predict_args = module_info["predict_args"]
predict_args.update({"data": input})
for item in serving_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
output = serving_method(**predict_args)
return {"results": output}
def predict_nlp(module_info, input_text, req_id, extra=None):
method_name = module_info["method_name"]
predict_method = getattr(module_info["module"], method_name)
predict_args = {"data": input_text}
if isinstance(predict_method, functools.partial):
predict_method = predict_method.func
predict_args.update({"sign_name": method_name})
for item in predict_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
if module_info["name"] == "lac" and extra.get("user_dict", []) != []:
predict_args.update({"user_dict": extra.get("user_dict", [])[0]})
try:
res = predict_method(**predict_args)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
......@@ -80,35 +65,45 @@ def predict_nlp(module, input_text, req_id, batch_size, extra=None):
return {"results": res}
def predict_classification(module_info, input_img, id, extra={}):
method_name = module_info["method_name"]
module = module_info["module"]
predict_method = getattr(module, method_name)
predict_args = {"data": {"image": input_img}}
if isinstance(predict_method, functools.partial):
predict_method = predict_method.func
predict_args.update({"sign_name": method_name})
for item in predict_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
results = predict_method(**predict_args)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
finally:
for item in input_img["image"]:
for item in input_img:
if os.path.exists(item):
os.remove(item)
return results
def predict_gan(module_info, input_img, id, extra={}):
method_name = module_info["method_name"]
module = module_info["module"]
predict_method = getattr(module, method_name)
predict_args = {"data": {"image": input_img}}
predict_args["data"].update(extra)
if isinstance(predict_method, functools.partial):
predict_method = predict_method.func
predict_args.update({"sign_name": method_name})
for item in predict_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
try:
results = predict_method(**predict_args)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
......@@ -116,7 +111,6 @@ def predict_gan(module, input_img, id, batch_size, extra={}):
finally:
base64_list = []
results_pack = []
input_img = input_img.get("image", [])
for index in range(len(input_img)):
item = input_img[index]
output_file = results[index].split(" ")[-1]
......@@ -135,22 +129,29 @@ def predict_gan(module, input_img, id, batch_size, extra={}):
return results_pack
def predict_mask(module_info, input_img, id, extra=None, r_img=True):
output_folder = "detection_result"
method_name = module_info["method_name"]
module = module_info["module"]
predict_method = getattr(module, method_name)
data_len = len(input_img) if input_img is not None else 0
data = {}
if input_img is not None:
input_img = {"image": input_img}
data.update(input_img)
if extra is not None:
data.update(extra)
r_img = True if "r_img" in extra.keys() else False
predict_args = {"data": data}
if isinstance(predict_method, functools.partial):
predict_method = predict_method.func
predict_args.update({"sign_name": method_name})
for item in predict_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
try:
results = predict_method(**predict_args)
results = utils.handle_mask_results(results, data_len)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
......@@ -160,43 +161,59 @@ def predict_mask(module, input_img, id, batch_size, extra=None, r_img=False):
results_pack = []
if input_img is not None:
if r_img is False:
for index in range(len(results)):
results[index]["path"] = ""
results_pack = results
str_id = id + "*"
files_deleted = glob.glob(str_id)
for path in files_deleted:
if os.path.exists(path):
os.remove(path)
else:
input_img = input_img.get("image", [])
for index in range(len(input_img)):
item = input_img[index]
file_path = os.path.join(output_folder, item)
if not os.path.exists(file_path):
results_pack.append(results[index])
os.remove(item)
else:
with open(file_path, "rb") as fp:
b_head = "data:image/" + item.split(".")[-1] + ";base64"
b_body = base64.b64encode(fp.read())
b_body = str(b_body).replace("b'", "").replace("'", "")
b_img = b_head + "," + b_body
base64_list.append(b_img)
results[index]["path"] = results[index]["path"].replace(
id + "_", "") if results[index]["path"] != "" else ""
results[index].update({"base64": b_img})
results_pack.append(results[index])
os.remove(item)
os.remove(os.path.join(output_folder, item))
else:
results_pack = results
return results_pack
def predict_object_detection(module_info, input_img, id, extra={}):
output_folder = "detection_result"
method_name = module_info["method_name"]
module = module_info["module"]
predict_method = getattr(module, method_name)
predict_args = {"data": {"image": input_img}}
if isinstance(predict_method, functools.partial):
predict_method = predict_method.func
predict_args.update({"sign_name": method_name})
for item in predict_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
results = predict_method(**predict_args)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
......@@ -204,7 +221,6 @@ def predict_object_detection(module, input_img, id, batch_size, extra={}):
finally:
base64_list = []
results_pack = []
input_img = input_img.get("image", [])
for index in range(len(input_img)):
item = input_img[index]
with open(os.path.join(output_folder, item), "rb") as fp:
......@@ -222,15 +238,19 @@ def predict_object_detection(module, input_img, id, batch_size, extra={}):
return results_pack
def predict_semantic_segmentation(module_info, input_img, id, extra={}):
method_name = module_info["method_name"]
module = module_info["module"]
predict_method = getattr(module, method_name)
predict_args = {"data": {"image": input_img}}
if isinstance(predict_method, functools.partial):
predict_method = predict_method.func
predict_args.update({"sign_name": method_name})
for item in predict_method.__code__.co_varnames:
if item in module_info.keys():
predict_args.update({item: module_info[item]})
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
results = predict_method(**predict_args)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
......@@ -238,11 +258,8 @@ def predict_semantic_segmentation(module, input_img, id, batch_size, extra={}):
finally:
base64_list = []
results_pack = []
input_img = input_img.get("image", [])
for index in range(len(input_img)):
# special
item = input_img[index]
output_file_path = ""
with open(results[index]["processed"], "rb") as fp:
b_head = "data:image/png;base64"
b_body = base64.b64encode(fp.read())
......@@ -266,13 +283,11 @@ def create_app(init_flag=False, configs=None):
if init_flag is False:
if configs is None:
raise RuntimeError("Lack of necessary configs.")
config_with_file(configs)
app_instance = Flask(__name__)
app_instance.config["JSON_AS_ASCII"] = False
logging.basicConfig()
gunicorn_logger = logging.getLogger('gunicorn.error')
app_instance.logger.handlers = gunicorn_logger.handlers
app_instance.logger.setLevel(gunicorn_logger.level)
......@@ -287,24 +302,22 @@ def create_app(init_flag=False, configs=None):
@app_instance.route("/get/modules", methods=["GET", "POST"])
def get_modules_info():
module_info = {}
if len(nlp_module_info.nlp_modules) > 0:
module_info.update({"nlp_module": [{"Choose...": "Choose..."}]})
for item in nlp_module_info.nlp_modules:
module_info["nlp_module"].append({item: item})
if len(cv_module_info.cv_modules) > 0:
module_info.update({"cv_module": [{"Choose...": "Choose..."}]})
for item in cv_module_info.cv_modules:
module_info["cv_module"].append({item: item})
return {"module_info": module_info}
@app_instance.route("/predict/image/<module_name>", methods=["POST"])
def predict_image(module_name):
if request.path.split("/")[-1] not in cv_module:
if request.path.split("/")[-1] not in cv_module_info.modules_info:
return {"error": "Module {} is not available.".format(module_name)}
req_id = request.data.get("id")
img_base64 = request.form.getlist("image")
extra_info = {}
for item in list(request.form.keys()):
......@@ -337,26 +350,28 @@ def create_app(init_flag=False, configs=None):
file_name = req_id + "_" + item.filename
item.save(file_name)
file_name_list.append(file_name)
# module = default_module_manager.get_module(module_name)
# predict_func_name = cv_module_info.get_module_info(module_name)[
# "method_name"]
module_info = cv_module_info.get_module_info(module_name)
module = module_info["module"]
predict_func_name = cv_module_info.cv_module_method.get(module_name, "")
if predict_func_name != "":
predict_func = eval(predict_func_name)
else:
module_type = module.type.split("/")[-1].replace("-", "_").lower()
predict_func = eval("predict_" + module_type)
if file_name_list == []:
file_name_list = None
if extra_info == {}:
extra_info = None
results = predict_func(module_info, file_name_list, req_id, extra_info)
r = {"results": str(results)}
return r
@app_instance.route("/predict/text/<module_name>", methods=["POST"])
def predict_text(module_name):
if request.path.split("/")[-1] not in nlp_module:
if request.path.split("/")[-1] not in nlp_module_info.nlp_modules:
return {"error": "Module {} is not available.".format(module_name)}
req_id = request.data.get("id")
inputs = {}
......@@ -369,37 +384,31 @@ def create_app(init_flag=False, configs=None):
file_name = req_id + "_" + file.filename
files[file_key].append(file_name)
file.save(file_name)
module_info = nlp_module_info.get_module_info(module_name)
if module_info["code_version"] == "v2":
results = predict_v2(module_info, inputs)
else:
results = predict_nlp(
module_info=module_info,
input_text=inputs,
req_id=req_id,
extra=files)
return results
return app_instance
def config_with_file(configs):
for key, value in configs.items():
if "CV" == value["category"]:
cv_module_info.add_module(key, {key: value})
elif "NLP" == value["category"]:
nlp_module_info.add_module(key, {key: value})
print(key, "==", value["version"])
def run(configs=None, port=8866):
if configs is not None:
config_with_file(configs)
else:
......
......@@ -16,6 +16,92 @@ import six
import abc
class BaseModuleInfo(object):
def __init__(self):
self._modules_info = {}
self._modules = []
def set_modules_info(self, modules_info):
# dict of modules info.
self._modules_info = modules_info
# list of module names.
self._modules = list(self._modules_info.keys())
def get_module_info(self, module_name):
return self._modules_info[module_name]
def add_module(self, module_name, module_info):
self._modules_info.update(module_info)
self._modules.append(module_name)
def get_module(self, module_name):
return self.get_module_info(module_name).get("module", None)
@property
def modules_info(self):
return self._modules_info
class CVModuleInfo(BaseModuleInfo):
def __init__(self):
self.cv_module_method = {
"vgg19_imagenet": "predict_classification",
"vgg16_imagenet": "predict_classification",
"vgg13_imagenet": "predict_classification",
"vgg11_imagenet": "predict_classification",
"shufflenet_v2_imagenet": "predict_classification",
"se_resnext50_32x4d_imagenet": "predict_classification",
"se_resnext101_32x4d_imagenet": "predict_classification",
"resnet_v2_50_imagenet": "predict_classification",
"resnet_v2_34_imagenet": "predict_classification",
"resnet_v2_18_imagenet": "predict_classification",
"resnet_v2_152_imagenet": "predict_classification",
"resnet_v2_101_imagenet": "predict_classification",
"pnasnet_imagenet": "predict_classification",
"nasnet_imagenet": "predict_classification",
"mobilenet_v2_imagenet": "predict_classification",
"googlenet_imagenet": "predict_classification",
"alexnet_imagenet": "predict_classification",
"yolov3_coco2017": "predict_object_detection",
"ultra_light_fast_generic_face_detector_1mb_640":
"predict_object_detection",
"ultra_light_fast_generic_face_detector_1mb_320":
"predict_object_detection",
"ssd_mobilenet_v1_pascal": "predict_object_detection",
"pyramidbox_face_detection": "predict_object_detection",
"faster_rcnn_coco2017": "predict_object_detection",
"cyclegan_cityscapes": "predict_gan",
"deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
"ace2p": "predict_semantic_segmentation",
"pyramidbox_lite_server_mask": "predict_mask",
"pyramidbox_lite_mobile_mask": "predict_mask"
}
super(CVModuleInfo, self).__init__()
@property
def cv_modules(self):
return self._modules
def add_module(self, module_name, module_info):
if "CV" == module_info[module_name].get("category", ""):
self._modules_info.update(module_info)
self._modules.append(module_name)
class NLPModuleInfo(BaseModuleInfo):
def __init__(self):
super(NLPModuleInfo, self).__init__()
@property
def nlp_modules(self):
return self._modules
def add_module(self, module_name, module_info):
if "NLP" == module_info[module_name].get("category", ""):
self._modules_info.update(module_info)
self._modules.append(module_name)
class BaseModelService(object):
def _initialize(self):
pass
......@@ -31,3 +117,7 @@ class BaseModelService(object):
@abc.abstractmethod
def _post_processing(self, data):
pass
cv_module_info = CVModuleInfo()
nlp_module_info = NLPModuleInfo()
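For illustration, registering and looking up a module through these singletons might look like the following sketch (module name and fields assumed):

# Hypothetical registration; add_module only accepts matching categories.
info = {"yolov3_darknet53_coco2017": {"category": "CV", "version": "1.0.0"}}
cv_module_info.add_module("yolov3_darknet53_coco2017", info)
print(cv_module_info.cv_modules)
# ['yolov3_darknet53_coco2017']
print(cv_module_info.get_module_info("yolov3_darknet53_coco2017")["version"])
# '1.0.0'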
{
"modules_info": [
{
"module": "lac",
"version": "1.0.0",
"batch_size": 200
},
{
"module": "senta_lstm",
"version": "1.0.0",
"batch_size": 1
},
{
"module": "yolov3_darknet53_coco2017",
"version": "1.0.0",
"batch_size": 1
},
{
"module": "faster_rcnn_coco2017",
"version": "1.0.0",
"batch_size": 1
}
],
"use_gpu": false,
"port": 8866,
"use_multiprocess": true,
"workers": 3
}
{
"modules_info": {
"yolov3_darknet53_coco2017": {
"init_args": {
"version": "1.0.0"
},
"predict_args": {
"batch_size": 1,
"use_gpu": false
}
},
"lac-v2": {
"init_args": {
"version": "2.1.0",
"user_dict": "./dict.txt"
},
"predict_args": {
"batch_size": 1,
"use_gpu": false
}
}
},
"port": 8866,
"use_multiprocess": false,
"workers": 2
}
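Assuming the service is started with this config via `hub serving start -c config.json`, a client request against the /predict/text/<module_name> route above might look like this sketch (payload layout assumed):

import requests

# "lac-v2" is the module key from the config above.
r = requests.post("http://127.0.0.1:8866/predict/text/lac-v2",
                  data={"text": ["今天是个好日子"]})
print(r.json())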