diff --git a/paddlehub/serving/app.py b/paddlehub/serving/app.py index da7ac1804c7965b5af5d5613cf0f122309175c87..57397293be1fa8f6114bc0e9b358d0edba837d62 100644 --- a/paddlehub/serving/app.py +++ b/paddlehub/serving/app.py @@ -20,7 +20,6 @@ import time import os import base64 import logging -import cv2 import multiprocessing as mp from multiprocessing.managers import BaseManager import random @@ -93,7 +92,6 @@ def predict_cv(input_data, module_name, batch_size=1): filename_list = [] for index in range(len(input_data)): filename_list.append(input_data[index][3]) - cv2.imread(input_data[index][3]) input_images = {"image": filename_list} module = ImageModelService.get_module(module_name) method_name = module.desc.attr.map.data['default_signature'].s @@ -130,31 +128,35 @@ def predict_cv(input_data, module_name, batch_size=1): def worker(): global batch_size_list, name_list, queue_name_list, cv_module latest_num = random.randrange(0, len(queue_name_list)) - - while True: - time.sleep(0.01) - for index in range(len(queue_name_list)): - while queues_dict[queue_name_list[latest_num]].empty() is not True: - input_data = [] - lock.acquire() - try: - batch = queues_dict[ - queue_name_list[latest_num]].get_attribute("maxsize") - for index2 in range(batch): - if queues_dict[ - queue_name_list[latest_num]].empty() is True: - break - input_data.append( - queues_dict[queue_name_list[latest_num]].get()) - finally: - lock.release() - if len(input_data) != 0: - choose_module_category(input_data, - queue_name_list[latest_num], - batch_size_list[latest_num]) - else: - pass - latest_num = (latest_num + 1) % len(queue_name_list) + try: + while True: + time.sleep(0.01) + for index in range(len(queue_name_list)): + while queues_dict[ + queue_name_list[latest_num]].empty() is not True: + input_data = [] + lock.acquire() + try: + batch = queues_dict[ + queue_name_list[latest_num]].get_attribute( + "maxsize") + for index2 in range(batch): + if queues_dict[queue_name_list[latest_num]].empty( + 
+ ) is True: + break + input_data.append( + queues_dict[queue_name_list[latest_num]].get()) + finally: + lock.release() + if len(input_data) != 0: + choose_module_category(input_data, + queue_name_list[latest_num], + batch_size_list[latest_num]) + else: + pass + latest_num = (latest_num + 1) % len(queue_name_list) + except KeyboardInterrupt: + print("Process %s is end." % (os.getpid())) def init_pool(l): @@ -168,7 +170,7 @@ def create_app(): gunicorn_logger = logging.getLogger('gunicorn.error') app_instance.logger.handlers = gunicorn_logger.handlers app_instance.logger.setLevel(gunicorn_logger.level) - global queues_dict + global queues_dict, pool lock = mp.Lock() pool = mp.Pool( processes=(mp.cpu_count() - 1), @@ -310,6 +312,9 @@ def run(is_use_gpu=False, configs=None, port=8888): return my_app = create_app() my_app.run(host="0.0.0.0", port=port, debug=False) + pool.close() + pool.join() + print("PaddleHub-Serving has been stopped.") if __name__ == "__main__": diff --git a/paddlehub/serving/templates/__init__.py b/paddlehub/serving/templates/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..96c375264b4c03d172c8841f61954fabc03e6f83 --- /dev/null +++ b/paddlehub/serving/templates/__init__.py @@ -0,0 +1,14 @@ +# coding: utf-8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
diff --git a/setup.py b/setup.py index 7663062788dbc2aff2a4e037a5675160ceba275a..c0ca4d7905b70454a22cff4c2df22af1c7a646a6 100644 --- a/setup.py +++ b/setup.py @@ -52,6 +52,13 @@ setup( author_email='paddle-dev@baidu.com', install_requires=REQUIRED_PACKAGES, packages=find_packages(), + package_data={ + 'paddlehub/serving/templates': [ + 'paddlehub/serving/templates/serving_config.json', + 'paddlehub/serving/templates/main.html' + ] + }, + include_package_data=True, # PyPI package information. classifiers=[ 'Development Status :: 4 - Beta', diff --git a/tutorial/autofinetune-nlp.md b/tutorial/autofinetune-nlp.md index bb71c2d2938ae6f9a37f67a4e0450b15d07e01ae..f38d6f382ccccd03ae7024b146725375b3a3881b 100644 --- a/tutorial/autofinetune-nlp.md +++ b/tutorial/autofinetune-nlp.md @@ -140,7 +140,7 @@ if __name__ == '__main__': if is_path_valid(args.saved_params_dir) and os.path.exists(best_model_dir): shutil.copytree(best_model_dir, args.saved_params_dir) shutil.rmtree(config.checkpoint_dir) - + # acc on dev will be used by auto finetune print("AutoFinetuneEval"+"\t"+str(float(eval_avg_score["acc"]))) ```