Commit 1fda45dd authored by 走神的阿圆, committed by wuzewu

Serving (#195)

* add hub-serving
Parent d5a4a7f5
@@ -26,3 +26,4 @@ from . import clear
from . import config
from . import hub
from . import autofinetune
from . import serving
# coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import json
import paddlehub as hub
from paddlehub.commands.base_command import BaseCommand, ENTRY
from paddlehub.serving import app
class ServingCommand(BaseCommand):
name = "serving"
module_list = []
def __init__(self, name):
super(ServingCommand, self).__init__(name)
self.show_in_help = True
self.description = "Start a service for online prediction with PaddleHub."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__,
prog='%s %s [COMMAND]' % (ENTRY, name),
usage='%(prog)s',
add_help=True)
self.parser.add_argument("command")
self.sub_parse = self.parser.add_mutually_exclusive_group(
required=False)
self.sub_parse.add_argument("--start", action="store_true")
self.parser.add_argument(
"--use_gpu", action="store_true", default=False)
self.parser.add_argument("--modules", "-m", nargs="+")
self.parser.add_argument("--config", "-c", nargs="+")
self.parser.add_argument("--port", "-p", nargs="+", default=[8888])
@staticmethod
def preinstall_modules(modules):
configs = []
if modules is not None:
for module in modules:
module_name = module if "==" not in module else \
module.split("==")[0]
module_version = None if "==" not in module else \
module.split("==")[1]
try:
m = hub.Module(name=module_name, version=module_version)
configs.append({
"module": module_name,
"version": m.version,
"category": str(m.type).split("/")[0].upper()
})
except Exception:
# Skip modules that fail to install or load.
pass
return configs
@staticmethod
def start_serving(args):
config_file = args.config
if config_file is not None:
config_file = config_file[0]
if os.path.exists(config_file):
with open(config_file, "r") as fp:
configs = json.load(fp)
use_gpu = configs.get("use_gpu", False)
port = configs.get("port", 8888)
configs = configs.get("modules_info")
module = [
str(i["module"]) + "==" + str(i["version"])
for i in configs
]
module_info = ServingCommand.preinstall_modules(module)
for index in range(len(module_info)):
configs[index].update(module_info[index])
app.run(use_gpu, configs=configs, port=port)
else:
print("config_file ", config_file, "not exists.")
else:
module = args.modules
if module is not None:
use_gpu = args.use_gpu
port = args.port[0]
module_info = ServingCommand.preinstall_modules(module)
for item in module_info:
item.update({"batch_size": 1, "queue_size": 20})
app.run(use_gpu, configs=module_info, port=port)
else:
print("Lack of necessary parameters!")
@staticmethod
def show_help():
help_str = "serving <option>\n"
help_str += "\tManage PaddleHub-Serving.\n"
help_str += "option:\n"
help_str += "--start\n"
help_str += "\tStart PaddleHub-Serving if this parameter is specified.\n"
help_str += "--modules/-m [module1==version, module2==version...]\n"
help_str += "\tPre-install modules via this parameter list.\n"
help_str += "--port/-p XXXX\n"
help_str += "\tUse port XXXX for serving.\n"
help_str += "--use_gpu\n"
help_str += "\tUse GPU for prediction if this parameter is specified.\n"
help_str += "--config/-c file_path\n"
help_str += "\tUse a config file to start PaddleHub-Serving.\n"
help_str += "\tOther parameters will be ignored if this parameter is specified.\n"
print(help_str)
def execute(self, argv):
args = self.parser.parse_args()
if args.start is True:
ServingCommand.start_serving(args)
else:
ServingCommand.show_help()
command = ServingCommand.instance()
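A hypothetical invocation of the command defined above, assuming the CLI entry point is `hub` (the flags come from the argparse definitions; the module names and port are illustrative):

    hub serving --start --modules lac==1.0.0 senta_lstm==1.0.0 --port 8866
    hub serving --start --config serving_config.json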
# coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, request, render_template
from paddlehub.serving.model_service.text_model_service import TextModelService
from paddlehub.serving.model_service.image_model_service import ImageModelService
from paddlehub.common import utils
import time
import os
import base64
import logging
import cv2
import multiprocessing as mp
from multiprocessing.managers import BaseManager
import random
import six
if six.PY2:
from Queue import PriorityQueue
if six.PY3:
from queue import PriorityQueue
class MyPriorityQueue(PriorityQueue):
# Expose queue attributes (e.g. maxsize) through the manager proxy.
def get_attribute(self, name):
return getattr(self, name)
class Manager(BaseManager):
pass
Manager.register("get_priorityQueue", MyPriorityQueue)
def choose_module_category(input_data, module_name, batch_size=1):
global nlp_module, cv_module
if module_name in nlp_module:
predict_nlp(input_data, module_name, batch_size)
elif module_name in cv_module:
predict_cv(input_data, module_name, batch_size)
def predict_nlp(input_data, module_name, batch_size=1):
global use_gpu
real_input_data = []
for index in range(len(input_data)):
real_input_data.append(input_data[index][3])
module = TextModelService.get_module(module_name)
method_name = module.desc.attr.map.data['default_signature'].s
if method_name != "":
predict_method = getattr(module, method_name)
try:
real_input_data = {"text": real_input_data}
results = predict_method(
data=real_input_data, use_gpu=use_gpu, batch_size=batch_size)
except Exception as err:
return {"result": "Please check data format!"}
else:
results = "Module {} can't be use for predicting.".format(module_name)
try:
result_data = []
for index in range(len(input_data)):
result_data.append(list(input_data[index]))
result_data[-1][3] = results[index]
except Exception as err:
print("Transform error!")
for index in range(len(result_data)):
if results_dict.get(result_data[index][2]) is None:
results_dict[result_data[index][2]] = [[
result_data[index][1], result_data[index][3]
]]
else:
temp_list = results_dict[result_data[index][2]]
temp_list.append([result_data[index][1], result_data[index][3]])
results_dict[result_data[index][2]] = temp_list
return {"result": results_dict}
def predict_cv(input_data, module_name, batch_size=1):
global use_gpu
filename_list = []
for index in range(len(input_data)):
filename_list.append(input_data[index][3])
# Note: the decoded image is discarded; only the file path list is passed on.
cv2.imread(input_data[index][3])
input_images = {"image": filename_list}
module = ImageModelService.get_module(module_name)
method_name = module.desc.attr.map.data['default_signature'].s
if method_name != "":
predict_method = getattr(module, method_name)
try:
results = predict_method(
data={"image": filename_list},
use_gpu=use_gpu,
batch_size=batch_size)
except Exception as err:
return {"result": "Please check data format!"}
else:
results = "Module {} can't be use for predicting.".format(module_name)
try:
result_data = []
for index in range(len(input_data)):
result_data.append(list(input_data[index]))
result_data[-1][3] = results[index]
except Exception as err:
print("Transform error!")
for index in range(len(result_data)):
if results_dict.get(result_data[index][2]) is None:
results_dict[result_data[index][2]] = [[
result_data[index][1], result_data[index][3]
]]
else:
temp_list = results_dict[result_data[index][2]]
temp_list.append([result_data[index][1], result_data[index][3]])
results_dict[result_data[index][2]] = temp_list
return {"result": results}
def worker():
global batch_size_list, name_list, queue_name_list, cv_module
# Start from a random queue so the worker processes do not all drain the
# same module's queue first.
latest_num = random.randrange(0, len(queue_name_list))
while True:
time.sleep(0.01)
for index in range(len(queue_name_list)):
# Drain the current queue batch by batch, then rotate to the next one.
while not queues_dict[queue_name_list[latest_num]].empty():
input_data = []
lock.acquire()
try:
batch = queues_dict[
queue_name_list[latest_num]].get_attribute("maxsize")
for index2 in range(batch):
if queues_dict[queue_name_list[latest_num]].empty():
break
input_data.append(
queues_dict[queue_name_list[latest_num]].get())
finally:
lock.release()
if len(input_data) != 0:
choose_module_category(input_data,
queue_name_list[latest_num],
batch_size_list[latest_num])
latest_num = (latest_num + 1) % len(queue_name_list)
def init_pool(l):
global lock
lock = l
def create_app():
app_instance = Flask(__name__)
app_instance.config["JSON_AS_ASCII"] = False
gunicorn_logger = logging.getLogger('gunicorn.error')
app_instance.logger.handlers = gunicorn_logger.handlers
app_instance.logger.setLevel(gunicorn_logger.level)
global queues_dict
lock = mp.Lock()
pool = mp.Pool(
processes=(mp.cpu_count() - 1),
initializer=init_pool,
initargs=(lock, ))
for i in range(mp.cpu_count() - 1):
pool.apply_async(worker)
@app_instance.route("/", methods=["GET", "POST"])
def index():
return render_template("main.html")
@app_instance.before_request
def before_request():
request.data = {"id": str(time.time())}
pass
@app_instance.route("/predict/image/<module_name>", methods=["POST"])
def predict_image(module_name):
global results_dict
req_id = request.data.get("id")
img_base64 = request.form.get("input_img", "")
received_file_name = request.form.get("input_file", "")
ext = received_file_name.split(".")[-1]
if ext == "":
return {"result": "Unrecognized file type"}
score = time.time()
filename = utils.md5(str(time.time()) + str(img_base64)) + "." + ext
base64_head = img_base64.split(',')[0]
img_data = base64.b64decode(img_base64.split(',')[-1])
with open(filename, "wb") as fp:
fp.write(img_data)
file_list = [filename]
if queues_dict[module_name].qsize(
) + 1 > queues_dict[module_name].get_attribute("maxsize"):
return {"result": "Too many visitors now, please come back later."}
data_2_item(file_list, req_id, score, module_name)
data_num = len(file_list)
results = []
result_len = 0
while result_len != data_num:
result_len = len(results_dict.get(req_id, []))
results = results_dict.get(req_id)
results = [i[1] for i in sorted(results, key=lambda k: k[0])]
filename = results[0].get("path")
ext = filename.split(".")[-1]
if filename is not None:
output_file = os.path.join("./output", filename)
if output_file is not None and os.path.exists(output_file):
with open(output_file, "rb") as fp:
output_img_base64 = base64.b64encode(fp.read())
os.remove(filename)
os.remove(output_file)
results = {
"border":
str(results[0]["data"]),
"output_img":
base64_head + "," + str(output_img_base64).replace(
"b'", "").replace("'", "")
}
return {"result": results}
return {"result": str(results)}
def data_2_item(data_list, req_id, score, module_name):
global queues_dict
for index in range(len(data_list)):
queues_dict[module_name].put((score, index, req_id,
data_list[index]))
@app_instance.route("/predict/text/<module_name>", methods=["POST"])
def predict_text(module_name):
global results_dict, queues_dict
req_id = request.data.get("id")
data_list = request.form.get("input_text")
score = time.time()
data_list = data_list.splitlines()
data_temp = []
for index in range(len(data_list)):
data_list[index] = data_list[index].strip()
if data_list[index] != "":
data_temp.append(data_list[index])
data_list = data_temp
if not isinstance(data_list, list):
data_list = [data_list]
data_num = len(data_list)
if data_num > queues_dict[module_name].get_attribute("maxsize"):
return {"result": ["Too much data, please reduce the data."]}
if data_num + queues_dict[module_name].qsize(
) > queues_dict[module_name].get_attribute("maxsize"):
return {"result": "Too many visitors now, please come back later."}
start = time.time()
data_2_item(data_list, req_id, score, module_name)
results = []
result_len = 0
while result_len != data_num:
result_len = len(results_dict.get(req_id, []))
results = results_dict.get(req_id)
results = [i[1] for i in sorted(results, key=lambda k: k[0])]
return {"result": results}
return app_instance
def config_with_file(configs):
global m
global nlp_module, cv_module, queues_list, batch_size_list, name_list, \
queues_dict, queue_name_list, results_dict
m = Manager()
m.start()
nlp_module = []
cv_module = []
queues_list = []
batch_size_list = []
name_list = []
queues_dict = {}
queue_name_list = []
results_dict = mp.Manager().dict()
for item in configs:
print(item)
if item["category"] == "CV":
cv_module.append(item["module"])
elif item["category"] == "NLP":
nlp_module.append(item["module"])
queues_list.append(m.get_priorityQueue(maxsize=item["queue_size"]))
batch_size_list.append(item["batch_size"])
name_list.append(item["module"])
queues_dict.update({item["module"]: queues_list[-1]})
queue_name_list.append(item["module"])
def run(is_use_gpu=False, configs=None, port=8888):
global use_gpu
use_gpu = is_use_gpu
if configs is not None:
config_with_file(configs)
else:
print("Start failed cause of missing configuration.")
return
my_app = create_app()
my_app.run(host="0.0.0.0", port=port, debug=False)
if __name__ == "__main__":
configs = [{
'category': 'NLP',
'queue_size': 20,
'version': '1.0.0',
'module': 'lac',
'batch_size': 20
},
{
'category': 'NLP',
'queue_size': 20,
'version': '1.0.0',
'module': 'senta_lstm',
'batch_size': 20
},
{
'category': 'CV',
'queue_size': 20,
'version': '1.0.0',
'module': 'yolov3_coco2017',
'batch_size': 20
},
{
'category': 'CV',
'queue_size': 20,
'version': '1.0.0',
'module': 'faster_rcnn_coco2017',
'batch_size': 20
}]
run(is_use_gpu=False, configs=configs)
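A minimal client sketch against a running instance (assumptions: the server listens on 127.0.0.1:8888 as configured above, the `lac` module is loaded, and the `requests` package is installed; the sample text is illustrative; the image endpoint at /predict/image/<module_name> works analogously with form fields input_img and input_file):

import requests

# The text endpoint splits the form field "input_text" on newlines,
# one prediction per non-empty line.
text = "今天是个好日子\n天气不错"
resp = requests.post(
    "http://127.0.0.1:8888/predict/text/lac",
    data={"input_text": text})
print(resp.json())  # expected shape: {"result": [...]}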
#!/usr/bin/env python
# coding: utf-8
"""
configuration for gunicorn
"""
import multiprocessing
bind = '0.0.0.0:8888'
backlog = 2048
workers = multiprocessing.cpu_count()
threads = 2
worker_class = 'sync'
worker_connections = 1000
timeout = 30
keepalive = 2
daemon = False
loglevel = 'info'
errorlog = '-'
accesslog = '-'
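A hedged usage note for this file (the launch command below is an assumption, not part of the commit):

# gunicorn loads this module via its -c flag, e.g.:
#   gunicorn -c gunicorn_config.py "<wsgi_app>"
# Caveat: create_app() in paddlehub.serving.app reads module queues that
# config_with_file() must populate first, so serving through gunicorn needs
# an entry module that performs that setup before exposing the Flask app.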
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import abc
# Declare the base class as abstract so subclasses must implement the
# processing hooks below.
@six.add_metaclass(abc.ABCMeta)
class BaseModelService(object):
def _initialize(self):
pass
@abc.abstractmethod
def _pre_processing(self, data):
pass
@abc.abstractmethod
def _inference(self, data):
pass
@abc.abstractmethod
def _post_processing(self, data):
pass
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddlehub as hub
class ImageModelService(object):
@classmethod
def instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
return cls._instance
@classmethod
def get_module(cls, name):
module = hub.Module(name=name)
return module
def _initialize(self):
pass
def _pre_processing(self):
pass
def _inference(self):
pass
def _post_processing(self):
pass
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddlehub as hub
class TextModelService(object):
@classmethod
def instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
return cls._instance
@classmethod
def get_module(cls, name):
module = hub.Module(name=name)
return module
def _initialize(self):
pass
def _pre_processing(self):
pass
def _inference(self):
pass
def _post_processing(self):
pass
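A minimal usage sketch for the service classes above (assuming the `lac` module can be fetched by hub.Module):

from paddlehub.serving.model_service.text_model_service import TextModelService

# get_module is a thin wrapper around hub.Module; each call builds a new
# module instance, so callers should cache the result if reuse matters.
lac = TextModelService.get_module("lac")
print(lac.version)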
{# coding: utf-8 #}
{# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. #}
{# Licensed under the Apache License, Version 2.0 (the "License"); #}
{# you may not use this file except in compliance with the License. #}
{# You may obtain a copy of the License at #}
{# http://www.apache.org/licenses/LICENSE-2.0 #}
{# #}
{# Unless required by applicable law or agreed to in writing, software #}
{# distributed under the License is distributed on an "AS IS" BASIS, #}
{# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #}
{# See the License for the specific language governing permissions and #}
{# limitations under the License. #}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
<link href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.4.1.min.js" integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script>
</head>
<body>
<div class="input-group mb-3">
<table class="table table-striped table-dark table-hover">
<tr>
<td style="width: 17%"></td>
<td style="width: 5%" valign="top">
<label class="input-group-text" for="inputGroupSelect01" style="color: white;
background-color: rgba(0,0,0,0); border: 0px; font-size: 20px">分类
</label>
</td>
<td style="width: 25%">
<select class="custom-select" id="inputGroupSelect01"
onchange="select_category(this.options[this.options.selectedIndex].value)">
<option selected>Choose...</option>
<option value="nlp_module">nlp</option>
<option value="cv_module">cv</option>
</select>
</td>
<td style="width: 6%"></td>
<td style="width: 5%">
<label class="input-group-text" for="inputGroupSelect02" style="color: white;
background-color: rgba(0,0,0,0); border: 0px; font-size: 20px">模型
</label>
</td>
<td style="width: 25%">
<select class="custom-select" id="inputGroupSelect02"
onchange="select_module(this.options[this.options.selectedIndex].value)">
<option selected value="Choose...">请先选择分类...</option>
</select>
</td>
<td style="width: 17%"></td>
</tr>
</table>
</div>
<form id="main_form">
<div class="input-group mb-3" id="display_div">
</div>
</form>
<script>
module_info = {
"nlp_module":[{"Choose...":"Choose..."},{"lac":"lac"},
{"senta_lstm":"senta_lstm"}],
"cv_module":[{"Choose...":"Choose..."},{"yolov3":"yolov3_coco2017"},{"faster_rcnn":"faster_rcnn_coco2017"}],
"Choose...":[{"请先选择分类":"Choose..."}]
};
function get_module_option(module_category) {
options = module_info[module_category];
html = "";
for (var i=0, len=options.length; i<len; i++){
option_name = Object.keys(options[i])[0];
option_value = options[i][option_name];
html = html
+ "<option value='"
+ option_value
+ "'>"
+ option_name
+ "</option>";
}
return html;
}
function get_single_text_html() {
html = ""
+ '<p style="width: 5%"></p>'
+ '<p>请输入文字或者上传一个文本文件</p>'
+ '<p style="width: 2%"></p>'
+ '<input type="file" id="file" onchange="handle_text(this.files)"/>'
+
'<table class="table table-striped table-dark table-hover" style="width: 95%"; align="center">'
+ '<tr>'
+ '<td>'
+ '<textarea cols="100" rows="30"'
+ 'class="form-control"'
+ 'id="file_text"'
+ 'onblur="blur_input_text()"'
+ 'onfocus="focus_input_text()"'
+ 'name="input_text">'
+ '在此键入文本或上传文本文件'
+ '</textarea>'
+ '</td>'
+ '<td>'
+
'<input type="button" value="Go!" onclick="sub_text()" class="btn-circle"></input>'
+ '</td>'
+ '<td>'
+ '<textarea cols="100" rows="30"'
+ 'class="form-control"'
+ 'id="result_text"'
+ 'name="result_text_name">'
+ '</textarea>'
+ '</td>'
+ '</tr>'
+ '</table>';
return html;
}
function blur_input_text()
{
text = document.getElementById("file_text");
if (text.value == ""){
text.value = "在此键入文本或上传文本文件";
}
}
function focus_input_text(){
text = document.getElementById("file_text");
if (text.value == "在此键入文本或上传文本文件"){
text.value = "";
}
}
function get_single_img_html() {
html = ""
+ '<p>请上传一个图片文件</p>'
+ '<input type="file" id="file" onchange="handle_img(this.files)"/>'
+ '<table class="table table-striped table-dark table-hover">'
+ '<tr>'
+ '<td style="width: 45%">'
+ '<img width="100%" id="file_img" name="input_img" value="kitten.jpg"/>'
+ '</td>'
+ '<td>'
+ '<input type="button" value="Start!" onclick="sub_img()"></input>'
+ '</td>'
+ '<td style="width: 45%"><table>'
+ '<tr><td>'
+ '<img width="100%" id="result_img" name="output_img"/>'
+ '</td></tr>'
+ '<tr><td>'
+ '<textarea cols="100" rows="2"'
+ 'class="form-control"'
+ 'id="result_text"'
+ 'name="result_text_name">'
+ '</textarea>'
+ '</td></tr>'
+ '</table></td>'
+ '</tr>'
+ '</table>';
return html;
}
function select_category(module_category) {
select_module("Choose...");
option_html = get_module_option(module_category);
document.getElementById("inputGroupSelect02").innerHTML = option_html;
}
function select_module(module_name){
if (module_name == "Choose..."){
display_html = ""
}else{
if (document.getElementById("inputGroupSelect01").value == "nlp_module"){
display_html = get_single_text_html();
}else if (document.getElementById("inputGroupSelect01").value == "cv_module"){
display_html = get_single_img_html();
}
}
document.getElementById("display_div").innerHTML = display_html;
}
function handle_text(files) {
if (files.length){
let file = files[0];
let reader = new FileReader();
reader.onload = function(){
document.getElementById('file_text').value = this.result;
};
reader.readAsText(file);
}
}
function handle_img(files) {
if (files.length){
let file = files[0];
let reader = new FileReader();
reader.onload = function(){
document.getElementById('file_img').src = this.result;
};
reader.readAsDataURL(file);
}
}
function get_result_html(results) {
html = "";
for (var i=0, len=results.length; i<len; ++i){
html = html
+ JSON.stringify(results[i])
+ '\r\n'
}
return html;
}
function sub_text() {
var formParam = $("#main_form").serialize();
to_url = "/predict/text/" + document.getElementById("inputGroupSelect02").value;
$.ajax({
cache: true,
type: "POST",
url:to_url,
data:formParam,
async: false,
error: function(request) {
alert("Connection error:"+request.error);
},
success: function(data) {
html = get_result_html(data["result"]);
document.getElementById("result_text").value = html;
}
});
}
function sub_img() {
var formParam = {
"input_img": document.getElementById("file_img").src,
"input_file":document.getElementById("file").value
};
to_url = "/predict/image/" + document.getElementById("inputGroupSelect02").value;
$.ajax({
cache: true,
type: "POST",
url:to_url,
data:formParam,
async: false,
error: function(request) {
alert("Connection error:"+request.error);
},
success: function(data) {
data = data["result"];
document.getElementById("result_text").value = data["border"];
document.getElementById("result_img").src = data["output_img"];
}
});
}
</script>
</body>
</html>
{
"modules_info": [
{
"module": "lac",
"version": "1.0.0",
"batch_size": 200,
"queue_size": 200
},
{
"module": "senta_lstm",
"version": "1.0.0",
"batch_size": 1,
"queue_size": 200
},
{
"module": "yolov3_coco2017",
"version": "1.0.0",
"batch_size": 1,
"queue_size": 10
},
{
"module": "faster_rcnn_coco2017",
"version": "1.0.0",
"batch_size": 1,
"queue_size": 10
}
],
"use_gpu": false,
"port": 8888
}
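A programmatic sketch of how start_serving consumes a config like this one (assumptions: the JSON is saved as serving_config.json, and module downloads happen inside hub.Module via preinstall_modules, which fills in the "category" field that app.run expects):

import json

from paddlehub.commands.serving import ServingCommand
from paddlehub.serving import app

with open("serving_config.json") as fp:
    cfg = json.load(fp)
# Rebuild "name==version" specs, pre-install the modules, and merge the
# resulting category info back into each entry, mirroring start_serving.
modules = [
    "%s==%s" % (item["module"], item["version"])
    for item in cfg["modules_info"]
]
module_info = ServingCommand.preinstall_modules(modules)
for conf, info in zip(cfg["modules_info"], module_info):
    conf.update(info)
app.run(cfg.get("use_gpu", False), configs=cfg["modules_info"],
        port=cfg.get("port", 8888))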
@@ -9,6 +9,7 @@ chardet == 3.0.4
requests
pandas < 0.25.0
flake8
flask
tb-paddle
tb-nightly
cma == 2.7.0
@@ -32,7 +32,7 @@ max_version, mid_version, min_version = python_version()
REQUIRED_PACKAGES = [
'six >= 1.10.0', 'protobuf >= 3.1.0', 'pyyaml', 'Pillow', 'requests',
-    'tb-paddle', 'tb-nightly', 'cma == 2.7.0'
+    'tb-paddle', 'tb-nightly', 'cma == 2.7.0', 'flask'
]
if max_version < 3:
......