Commit 3ed9bd13 authored by zhangxuefei

Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleHub into develop

......@@ -18,13 +18,12 @@ from __future__ import division
from __future__ import print_function
import argparse
import subprocess
import shlex
import os
import platform
import socket
import json
import paddlehub as hub
from paddlehub.commands.base_command import BaseCommand, ENTRY
from paddlehub.serving import app
class ServingCommand(BaseCommand):
......@@ -41,33 +40,56 @@ class ServingCommand(BaseCommand):
usage='%(prog)s',
add_help=True)
self.parser.add_argument("command")
self.parser.add_argument("sub_command")
self.sub_parse = self.parser.add_mutually_exclusive_group(
required=False)
self.sub_parse.add_argument("--start", action="store_true")
self.parser.add_argument(
"--use_gpu", action="store_true", default=False)
self.parser.add_argument(
"--use_multiprocess", action="store_true", default=False)
self.parser.add_argument("--modules", "-m", nargs="+")
self.parser.add_argument("--config", "-c", nargs="+")
self.parser.add_argument("--port", "-p", nargs="+", default=[8888])
self.parser.add_argument("--port", "-p", nargs="+", default=[8866])
@staticmethod
def is_port_occupied(ip, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False
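# Editor's note (sketch, not part of the commit): the probe above can be
# called directly; host and port below are illustrative.
#     if ServingCommand.is_port_occupied("127.0.0.1", 8866):
#         print("Port 8866 is occupied, please change it.")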
@staticmethod
def preinstall_modules(modules):
configs = []
module_exist = {}
if modules is not None:
for module in modules:
module_name = module if "==" not in module else \
module.split("==")[0]
module_version = None if "==" not in module else \
module.split("==")[1]
if module_exist.get(module_name, "") != "":
print(module_name, "==", module_exist.get(module_name),
" will be ignored because a new version is specified.")
# drop the earlier entry for this module, not just the last appended one
configs = [c for c in configs if c["module"] != module_name]
module_exist.update({module_name: module_version})
try:
m = hub.Module(name=module_name, version=module_version)
method_name = m.desc.attr.map.data['default_signature'].s
if method_name == "":
raise RuntimeError("{} cannot be use for "
"predicting".format(module_name))
configs.append({
"module": module_name,
"version": m.version,
"category": str(m.type).split("/")[0].upper()
})
except Exception as err:
-pass
+print(err, ", failed to start Hub-Serving.")
+exit(1)
return configs
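# Editor's note (illustrative): for a request such as "lac==1.0.0", the list
# returned above would look like
#     [{"module": "lac", "version": "1.0.0", "category": "NLP"}]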
@staticmethod
......@@ -78,8 +100,24 @@ class ServingCommand(BaseCommand):
if os.path.exists(config_file):
with open(config_file, "r") as fp:
configs = json.load(fp)
use_multiprocess = configs.get("use_multiprocess", False)
if use_multiprocess is True:
if platform.system() == "Windows":
print(
"Warning: Windows cannot use multiprocess working "
"mode, Hub-Serving will switch to single process mode"
)
from paddlehub.serving import app_single as app
else:
from paddlehub.serving import app
else:
from paddlehub.serving import app_single as app
use_gpu = configs.get("use_gpu", False)
port = configs.get("port", 8888)
port = configs.get("port", 8866)
if ServingCommand.is_port_occupied("127.0.0.1",
port) is True:
print("Port %s is occupied, please change it." % (port))
return False
configs = configs.get("modules_info")
module = [
str(i["module"]) + "==" + str(i["version"])
......@@ -92,10 +130,23 @@ class ServingCommand(BaseCommand):
else:
print("config_file ", config_file, "not exists.")
else:
if args.use_multiprocess is True:
if platform.system() == "Windows":
print(
"Warning: Windows cannot use multiprocess working "
"mode, Hub-Serving will switch to single process mode")
from paddlehub.serving import app_single as app
else:
from paddlehub.serving import app
else:
from paddlehub.serving import app_single as app
module = args.modules
if module is not None:
use_gpu = args.use_gpu
port = args.port[0]
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
print("Port %s is occupied, please change it." % (port))
return False
module_info = ServingCommand.preinstall_modules(module)
[
item.update({
......@@ -111,9 +162,10 @@ class ServingCommand(BaseCommand):
def show_help():
str = "serving <option>\n"
str += "\tManage PaddleHub-Serving.\n"
str += "option:\n"
str += "--start\n"
str += "sub command:\n"
str += "start\n"
str += "\tStart PaddleHub-Serving if specifies this parameter.\n"
str += "option:\n"
str += "--modules/-m [module1==version, module2==version...]\n"
str += "\tPre-install modules via this parameter list.\n"
str += "--port/-p XXXX\n"
......@@ -126,8 +178,13 @@ class ServingCommand(BaseCommand):
print(str)
def execute(self, argv):
try:
args = self.parser.parse_args()
except:
print("Please refer to the instructions below.")
ServingCommand.show_help()
return False
-if args.start is True:
+if args.sub_command == "start":
ServingCommand.start_serving(args)
else:
ServingCommand.show_help()
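# Editor's note: with the new positional sub_command, a typical launch of the
# reworked CLI would be (assumed invocation, flags taken from the parser above):
#     $ hub serving start -m lac senta_lstm -p 8866 --use_multiprocess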
......
......@@ -155,7 +155,8 @@ class Module(object):
module_name=name, module_version=version, extra=extra)
if not result:
logger.error(tips)
-exit(1)
+raise RuntimeError(tips)
else:
logger.info(tips)
self._init_with_module_file(module_dir[0])
......@@ -165,7 +166,8 @@ class Module(object):
url, save_path=".")
if not result:
logger.error(tips)
-exit(1)
+raise RuntimeError(tips)
else:
self._init_with_module_file(module_dir)
def _dump_processor(self):
......
......@@ -185,24 +185,51 @@ def create_app():
@app_instance.before_request
def before_request():
request.data = {"id": str(time.time())}
request.data = {"id": utils.md5(request.remote_addr + str(time.time()))}
print(request.remote_addr)
pass
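# Editor's note: utils.md5 comes from paddlehub.common.utils; a minimal
# stand-in with the assumed behavior (hex digest of a string) would be:
#     import hashlib
#     def md5(text):
#         return hashlib.md5(text.encode("utf-8")).hexdigest()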
@app_instance.route("/get/modules", methods=["GET", "POST"])
def get_modules_info():
global nlp_module, cv_module
module_info = {}
if len(nlp_module) > 0:
module_info.update({"nlp_module": [{"Choose...": "Choose..."}]})
for item in nlp_module:
module_info["nlp_module"].append({item: item})
if len(cv_module) > 0:
module_info.update({"cv_module": [{"Choose...": "Choose..."}]})
for item in cv_module:
module_info["cv_module"].append({item: item})
module_info.update({"Choose...": [{"请先选择分类": "Choose..."}]})
return {"module_info": module_info}
@app_instance.route("/predict/image/<module_name>", methods=["POST"])
def predict_image(module_name):
global results_dict
req_id = request.data.get("id")
img_base64 = request.form.get("input_img", "")
received_file_name = request.form.get("input_file", "")
ext = received_file_name.split(".")[-1]
if ext == "":
img_base64 = request.form.get("image", "")
if img_base64 != "":
img_base64 = request.form.get("image", "")
ext = img_base64.split(";")[0].split("/")[-1]
if ext not in ["jpeg", "jpg", "png"]:
return {"result": "Unrecognized file type"}
score = time.time()
filename = utils.md5(str(time.time()) + str(img_base64)) + "." + ext
base64_head = img_base64.split(',')[0]
img_data = base64.b64decode(img_base64.split(',')[-1])
with open(filename, "wb") as fp:
fp.write(img_data)
else:
file = request.files["image"]
filename = file.filename
ext = file.filename.split(".")[-1]
if ext not in ["jpeg", "jpg", "png"]:
return {"result": "Unrecognized file type"}
base64_head = "data:image/" + ext + ";base64"
filename = utils.md5(filename) + '.' + ext
file.save(filename)
score = time.time()
file_list = [filename]
if queues_dict[module_name].qsize(
) + 1 > queues_dict[module_name].get_attribute("maxsize"):
......@@ -211,9 +238,14 @@ def create_app():
data_num = len(file_list)
results = []
result_len = 0
start_time = time.time()
while result_len != data_num:
result_len = len(results_dict.get(req_id, []))
if time.time() - start_time > time_out:
results_dict.pop(req_id, None)
return {"result": "Request time out."}
results = results_dict.get(req_id)
results_dict.pop(req_id, None)
results = [i[1] for i in sorted(results, key=lambda k: k[0])]
filename = results[0].get("path")
ext = filename.split(".")[-1]
......@@ -225,7 +257,7 @@ def create_app():
os.remove(filename)
os.remove(output_file)
results = {
"border":
"desc":
str(results[0]["data"]),
"output_img":
base64_head + "," + str(output_img_base64).replace(
......@@ -244,7 +276,7 @@ def create_app():
def predict_text(module_name):
global results_dict, queues_dict
req_id = request.data.get("id")
data_list = request.form.get("input_text")
data_list = request.form.get("text")
score = time.time()
data_list = data_list.splitlines()
data_temp = []
......@@ -261,14 +293,17 @@ def create_app():
if data_num + queues_dict[module_name].qsize(
) > queues_dict[module_name].get_attribute("maxsize"):
return {"result": "Too many visitors now, please come back later."}
start = time.time()
data_2_item(data_list, req_id, score, module_name)
results = []
result_len = 0
start_time = time.time()
while result_len != data_num:
result_len = len(results_dict.get(req_id, []))
if time.time() - start_time > time_out:
results_dict.pop(req_id, None)
return {"result": "Request time out."}
results = results_dict.get(req_id)
results_dict.pop(req_id, None)
results = [i[1] for i in sorted(results, key=lambda k: k[0])]
return {"result": results}
......@@ -302,8 +337,9 @@ def config_with_file(configs):
queue_name_list.append(item["module"])
-def run(is_use_gpu=False, configs=None, port=8888):
-global use_gpu
+def run(is_use_gpu=False, configs=None, port=8866, timeout=60):
+global use_gpu, time_out
+time_out = timeout
use_gpu = is_use_gpu
if configs is not None:
config_with_file(configs)
......
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, request, render_template
from paddlehub.serving.model_service.text_model_service import TextModelService
from paddlehub.serving.model_service.image_model_service import ImageModelService
from paddlehub.common import utils
# from model_service.text_model_service import TextModelService
# from model_service.image_model_service import ImageModelService
import time
import os
import base64
import logging
nlp_module_method = {
"lac": "predict_lexical_analysis",
"simnet_bow": "predict_sentiment_analysis",
"lm_lstm": "predict_pretrained_model",
"senta_lstm": "predict_pretrained_model",
"senta_gru": "predict_pretrained_model",
"senta_cnn": "predict_pretrained_model",
"senta_bow": "predict_pretrained_model",
"senta_bilstm": "predict_pretrained_model",
"emotion_detection_textcnn": "predict_pretrained_model"
}
cv_module_method = {
"vgg19_imagenet": "predict_classification",
"vgg16_imagenet": "predict_classification",
"vgg13_imagenet": "predict_classification",
"vgg11_imagenet": "predict_classification",
"shufflenet_v2_imagenet": "predict_classification",
"se_resnext50_32x4d_imagenet": "predict_classification",
"se_resnext101_32x4d_imagenet": "predict_classification",
"resnet_v2_50_imagenet": "predict_classification",
"resnet_v2_34_imagenet": "predict_classification",
"resnet_v2_18_imagenet": "predict_classification",
"resnet_v2_152_imagenet": "predict_classification",
"resnet_v2_101_imagenet": "predict_classification",
"pnasnet_imagenet": "predict_classification",
"nasnet_imagenet": "predict_classification",
"mobilenet_v2_imagenet": "predict_classification",
"googlenet_imagenet": "predict_classification",
"alexnet_imagenet": "predict_classification",
"yolov3_coco2017": "predict_object_detection",
"ultra_light_fast_generic_face_detector_1mb_640":
"predict_object_detection",
"ultra_light_fast_generic_face_detector_1mb_320":
"predict_object_detection",
"ssd_mobilenet_v1_pascal": "predict_object_detection",
"pyramidbox_face_detection": "predict_object_detection",
"faster_rcnn_coco2017": "predict_object_detection",
"cyclegan_cityscapes": "predict_gan",
"deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
"ace2p": "predict_semantic_segmentation"
}
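# Editor's note: the two tables above drive handler dispatch in the routes
# below; a minimal sketch of that lookup (module name is illustrative):
#     func_name = cv_module_method.get("yolov3_coco2017", "")
#     predict_func = eval(func_name)  # resolves to predict_object_detection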
def predict_sentiment_analysis(module, input_text, batch_size, extra=None):
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
try:
data = input_text[0]
data.update(input_text[1])
results = predict_method(
data=data, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
return results
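# Editor's note: the simnet_bow branch of the text route below builds
# input_text as a two-item list that is merged here, i.e. (illustrative):
#     input_text = [{"text_1": ["sentence a"]}, {"text_2": ["sentence b"]}]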
def predict_pretrained_model(module, input_text, batch_size, extra=None):
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
try:
data = {"text": input_text}
results = predict_method(
data=data, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
return results
def predict_lexical_analysis(module, input_text, batch_size, extra=[]):
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
data = {"text": input_text}
try:
if extra == []:
results = predict_method(
data=data, use_gpu=use_gpu, batch_size=batch_size)
else:
user_dict = extra[0]
results = predict_method(
data=data,
user_dict=user_dict,
use_gpu=use_gpu,
batch_size=batch_size)
for path in extra:
os.remove(path)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
return results
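# Editor's note (hypothetical call): lexical analysis with a user dictionary;
# the dict file is passed via extra and removed after prediction:
#     predict_lexical_analysis(module, ["今天天气很好"], 1, extra=["req_id_user.dict"])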
def predict_classification(module, input_img, batch_size):
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
return results
def predict_gan(module, input_img, id, batch_size, extra={}):
# special
output_folder = module.name.split("_")[0] + "_" + "output"
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
base64_list = []
results_pack = []
input_img = input_img.get("image", [])
for index in range(len(input_img)):
# special
item = input_img[index]
with open(os.path.join(output_folder, item), "rb") as fp:
# special
b_head = "data:image/" + item.split(".")[-1] + ";base64"
b_body = base64.b64encode(fp.read())
b_body = str(b_body).replace("b'", "").replace("'", "")
b_img = b_head + "," + b_body
base64_list.append(b_img)
results[index] = results[index].replace(id + "_", "")
results[index] = {"path": results[index]}
results[index].update({"base64": b_img})
results_pack.append(results[index])
os.remove(item)
os.remove(os.path.join(output_folder, item))
return results_pack
def predict_object_detection(module, input_img, id, batch_size):
output_folder = "output"
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
base64_list = []
results_pack = []
input_img = input_img.get("image", [])
for index in range(len(input_img)):
item = input_img[index]
with open(os.path.join(output_folder, item), "rb") as fp:
b_head = "data:image/" + item.split(".")[-1] + ";base64"
b_body = base64.b64encode(fp.read())
b_body = str(b_body).replace("b'", "").replace("'", "")
b_img = b_head + "," + b_body
base64_list.append(b_img)
results[index]["path"] = results[index]["path"].replace(
id + "_", "")
results[index].update({"base64": b_img})
results_pack.append(results[index])
os.remove(item)
os.remove(os.path.join(output_folder, item))
return results_pack
def predict_semantic_segmentation(module, input_img, id, batch_size):
# special
output_folder = module.name.split("_")[-1] + "_" + "output"
global use_gpu
method_name = module.desc.attr.map.data['default_signature'].s
predict_method = getattr(module, method_name)
try:
input_img = {"image": input_img}
results = predict_method(
data=input_img, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(curr, " - ", err)
return {"result": "Please check data format!"}
base64_list = []
results_pack = []
input_img = input_img.get("image", [])
for index in range(len(input_img)):
# special
item = input_img[index]
with open(results[index]["processed"], "rb") as fp:
# special
b_head = "data:image/png;base64"
b_body = base64.b64encode(fp.read())
b_body = str(b_body).replace("b'", "").replace("'", "")
b_img = b_head + "," + b_body
base64_list.append(b_img)
results[index]["origin"] = results[index]["origin"].replace(
id + "_", "")
results[index]["processed"] = results[index]["processed"].replace(
id + "_", "")
results[index].update({"base64": b_img})
results_pack.append(results[index])
os.remove(item)
os.remove(results[index]["processed"])
return results_pack
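# Editor's note (illustrative): for deeplabv3p_xception65_humanseg each entry
# in results_pack would look roughly like
#     {"origin": "test.jpg", "processed": "humanseg_output/test.png",
#      "base64": "data:image/png;base64,..."}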
def create_app():
app_instance = Flask(__name__)
app_instance.config["JSON_AS_ASCII"] = False
gunicorn_logger = logging.getLogger('gunicorn.error')
app_instance.logger.handlers = gunicorn_logger.handlers
app_instance.logger.setLevel(gunicorn_logger.level)
@app_instance.route("/", methods=["GET", "POST"])
def index():
return render_template("main.html")
@app_instance.before_request
def before_request():
request.data = {"id": utils.md5(request.remote_addr + str(time.time()))}
pass
@app_instance.route("/get/modules", methods=["GET", "POST"])
def get_modules_info():
global nlp_module, cv_module
module_info = {}
if len(nlp_module) > 0:
module_info.update({"nlp_module": [{"Choose...": "Choose..."}]})
for item in nlp_module:
module_info["nlp_module"].append({item: item})
if len(cv_module) > 0:
module_info.update({"cv_module": [{"Choose...": "Choose..."}]})
for item in cv_module:
module_info["cv_module"].append({item: item})
module_info.update({"Choose...": [{"请先选择分类": "Choose..."}]})
return {"module_info": module_info}
@app_instance.route("/predict/image/<module_name>", methods=["POST"])
def predict_image(module_name):
req_id = request.data.get("id")
global use_gpu, batch_size_dict
img_base64 = request.form.getlist("image")
file_name_list = []
if img_base64 != []:
for item in img_base64:
ext = item.split(";")[0].split("/")[-1]
if ext not in ["jpeg", "jpg", "png"]:
return {"result": "Unrecognized file type"}
filename = req_id + "_" \
+ utils.md5(str(time.time())+item[0:20]) \
+ "." \
+ ext
img_data = base64.b64decode(item.split(',')[-1])
file_name_list.append(filename)
with open(filename, "wb") as fp:
fp.write(img_data)
else:
file = request.files.getlist("image")
for item in file:
file_name = req_id + "_" + item.filename
item.save(file_name)
file_name_list.append(file_name)
module = ImageModelService.get_module(module_name)
predict_func_name = cv_module_method.get(module_name, "")
if predict_func_name != "":
predict_func = eval(predict_func_name)
else:
module_type = module.type.split("/")[-1].replace("-", "_").lower()
predict_func = eval("predict_" + module_type)
batch_size = batch_size_dict.get(module_name, 1)
results = predict_func(module, file_name_list, req_id, batch_size)
r = {"results": str(results)}
return r
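# Editor's note: a hedged client sketch for this route (module name, file and
# port are examples, assuming a local server):
#     import requests
#     r = requests.post("http://127.0.0.1:8866/predict/image/yolov3_coco2017",
#                       files=[("image", open("test.jpg", "rb"))])
#     print(r.json())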
@app_instance.route("/predict/text/<module_name>", methods=["POST"])
def predict_text(module_name):
req_id = request.data.get("id")
global use_gpu
if module_name == "simnet_bow":
text_1 = request.form.getlist("text_1")
text_2 = request.form.getlist("text_2")
data = [{"text_1": text_1}, {"text_2": text_2}]
else:
data = request.form.getlist("text")
file = request.files.getlist("user_dict")
module = TextModelService.get_module(module_name)
predict_func_name = nlp_module_method.get(module_name, "")
if predict_func_name != "":
predict_func = eval(predict_func_name)
else:
module_type = module.type.split("/")[-1].replace("-", "_").lower()
predict_func = eval("predict_" + module_type)
file_list = []
for item in file:
file_path = req_id + "_" + item.filename
file_list.append(file_path)
item.save(file_path)
batch_size = batch_size_dict.get(module_name, 1)
results = predict_func(module, data, batch_size, file_list)
return {"results": results}
return app_instance
def config_with_file(configs):
global nlp_module, cv_module, batch_size_dict
nlp_module = []
cv_module = []
batch_size_dict = {}
for item in configs:
print(item)
if item["category"] == "CV":
cv_module.append(item["module"])
elif item["category"] == "NLP":
nlp_module.append(item["module"])
batch_size_dict.update({item["module"]: item["batch_size"]})
def run(is_use_gpu=False, configs=None, port=8866, timeout=60):
global use_gpu, time_out
time_out = timeout
use_gpu = is_use_gpu
if configs is not None:
config_with_file(configs)
else:
print("Start failed cause of missing configuration.")
return
my_app = create_app()
my_app.run(host="0.0.0.0", port=port, debug=False)
print("PaddleHub-Serving has been stopped.")
if __name__ == "__main__":
configs = [{
'category': 'NLP',
u'queue_size': 20,
u'version': u'1.0.0',
u'module': 'lac',
u'batch_size': 20
},
{
'category': 'NLP',
u'queue_size': 20,
u'version': u'1.0.0',
u'module': 'senta_lstm',
u'batch_size': 20
},
{
'category': 'CV',
u'queue_size': 20,
u'version': u'1.0.0',
u'module': 'yolov3_coco2017',
u'batch_size': 20
},
{
'category': 'CV',
u'queue_size': 20,
u'version': u'1.0.0',
u'module': 'faster_rcnn_coco2017',
u'batch_size': 20
}]
run(is_use_gpu=False, configs=configs)
......@@ -16,7 +16,8 @@
<html lang="en">
<head>
<meta charset="UTF-8">
-<title>Title</title>
+<title>Hub-Serving</title>
<link rel="shortcut icon" href="https://paddlepaddle-org-cn.bj.bcebos.com/paddle-site-front/favicon.ico"/>
<link href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.4.1.min.js" integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script>
......@@ -35,8 +36,6 @@
<select class="custom-select" id="inputGroupSelect01"
onchange="select_category(this.options[this.options.selectedIndex].value)">
<option selected>Choose...</option>
<option value="nlp_module">nlp</option>
<option value="cv_module">cv</option>
</select>
</td>
<td style="width: 6%"></td>
......@@ -61,12 +60,31 @@
</div>
</form>
<script>
-module_info = {
-"nlp_module":[{"Choose...":"Choose..."},{"lac":"lac"},
-{"senta_lstm":"senta_lstm"}],
-"cv_module":[{"Choose...":"Choose..."},{"yolov3":"yolov3_coco2017"},{"faster_rcnn":"faster_rcnn_coco2017"}],
-"Choose...":[{"Please select a category first":"Choose..."}]
-};
var module_info = {};
$.ajax({
type: "POST",
url: "/get/modules",
data: "",
dataType: "json",
async: false,
success: function (res) {
module_info = res.module_info;
console.log(res);
console.log("mo=", module_info);
if (module_info.hasOwnProperty("nlp_module"))
{
s = document.getElementById("inputGroupSelect01");
s.options.add(new Option("nlp", "nlp_module"));
}
if (module_info.hasOwnProperty("cv_module"))
{
s = document.getElementById("inputGroupSelect01");
s.options.add(new Option("cv", "cv_module"));
}
}
});
function get_module_option(module_category) {
options = module_info[module_category];
html = "";
......@@ -241,7 +259,7 @@
},
success: function(data) {
data = data["result"];
document.getElementById("result_text").value = data["border"];
document.getElementById("result_text").value = data["desc"];
document.getElementById("result_img").src = data["output_img"];
}
});
......
......@@ -26,5 +26,6 @@
}
],
"use_gpu": false,
"port": 8888
"port": 8866,
"use_multiprocess": false
}
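(Editor's note: the updated template above is consumed by the new sub-command;
an illustrative launch, assuming a local copy of the file:
    hub serving start --config serving_config.json)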
......@@ -59,6 +59,11 @@ setup(
]
},
include_package_data=True,
data_files=[('paddlehub/serving/templates', [
'paddlehub/serving/templates/serving_config.json',
'paddlehub/serving/templates/main.html'
])],
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
......