未验证 提交 dd81c4f5 编写于 作者: 走神的阿圆's avatar 走神的阿圆 提交者: GitHub

fix file logger filename

上级 f6fffd8c
......@@ -28,6 +28,7 @@ from paddlehub.serving import app_compat as app
from paddlehub.env import CONF_HOME
from paddlehub.serving.http_server import run_all, StandaloneApplication
from paddlehub.utils import log
from paddlehub.utils.utils import is_port_occupied
def number_of_workers():
......@@ -124,7 +125,7 @@ class ServingCommand:
log.logger.error("Error. Bert Service only support linux.")
return False
if ServingCommand.is_port_occupied("127.0.0.1", args.port) is True:
if is_port_occupied("127.0.0.1", args.port) is True:
log.logger.error("Port %s is occupied, please change it." % args.port)
return False
......@@ -134,19 +135,6 @@ class ServingCommand:
# CacheUpdater("hub_bert_service", module=args.modules[0], version="0.0.0").start()
bs.run(gpu_index=args.gpu, port=int(args.port))
@staticmethod
def is_port_occupied(ip, port):
    '''
    Check whether a TCP port is occupied.

    Attempts a TCP connection to (ip, port); a successful connect
    means something is listening there.

    Args:
        ip (str): host address to probe, e.g. "127.0.0.1".
        port (int or str): port number; coerced with int().

    Returns:
        bool: True if the connection succeeds (port occupied),
        False otherwise.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        s.shutdown(2)
        return True
    # NOTE(review): bare `except` also swallows non-network errors
    # (even KeyboardInterrupt), and the socket descriptor is never
    # close()d on either path — consider `except OSError` plus a
    # finally: s.close(). This method is removed by this commit in
    # favor of paddlehub.utils.utils.is_port_occupied.
    except:
        return False
def preinstall_modules(self):
'''
Install module by PaddleHub and get info of this module.
......@@ -180,7 +168,7 @@ class ServingCommand:
module = self.args.modules
if module is not None:
port = self.args.port
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
......@@ -196,15 +184,18 @@ class ServingCommand:
'''
if self.modules_info is not None:
front_port = self.args.port
if ServingCommand.is_port_occupied("127.0.0.1", front_port) is True:
if is_port_occupied("127.0.0.1", front_port) is True:
log.logger.error("Port %s is occupied, please change it." % front_port)
return False
back_port = int(front_port) + 1
for index in range(100):
if ServingCommand.is_port_occupied("127.0.0.1", back_port):
if is_port_occupied("127.0.0.1", back_port):
break
else:
back_port = int(back_port) + 1
else:
raise RuntimeError("Port from %s to %s is occupied, please use another port" % int(front_port) + 1,
back_port)
run_all(self.modules_info, self.args.gpu, front_port, back_port)
else:
......@@ -217,7 +208,7 @@ class ServingCommand:
module = self.args.modules
if module is not None:
port = self.args.port
if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
if is_port_occupied("127.0.0.1", port) is True:
log.logger.error("Port %s is occupied, please change it." % port)
return False
self.preinstall_modules()
......
......@@ -15,7 +15,6 @@
import traceback
import time
import logging
from flask import Flask, request
......@@ -24,6 +23,8 @@ from paddlehub.serving.model_service.base_model_service import nlp_module_info
from paddlehub.serving.model_service.base_model_service import v2_module_info
from paddlehub.utils import utils, log
filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d", time.localtime())
def package_result(status: str, msg: str, data: dict):
'''
......@@ -115,10 +116,7 @@ def create_app(init_flag: bool = False, configs: dict = None):
app_instance = Flask(__name__)
app_instance.config["JSON_AS_ASCII"] = False
logging.basicConfig()
gunicorn_logger = logging.getLogger('gunicorn.error')
app_instance.logger.handlers = gunicorn_logger.handlers
app_instance.logger.setLevel(gunicorn_logger.level)
app_instance.logger = log.get_file_logger(filename)
@app_instance.route("/", methods=["GET", "POST"])
def index():
......@@ -183,7 +181,8 @@ def config_with_file(configs: dict):
elif "NLP" == value["category"]:
nlp_module_info.add_module(key, {key: value})
v2_module_info.add_module(key, {key: value})
log.logger.info("%s==%s" % (key, value["version"]))
logger = log.get_file_logger(filename)
logger.info("%s==%s" % (key, value["version"]))
def run(configs: dict = None, port: int = 8866):
......@@ -200,10 +199,11 @@ def run(configs: dict = None, port: int = 8866):
configs = {'lac': {'version': 1.0.0, 'category': nlp}}
run(configs=configs, port=8866)
'''
logger = log.get_file_logger(filename)
if configs is not None:
config_with_file(configs)
else:
log.logger.error("Start failed cause of missing configuration.")
logger.error("Start failed cause of missing configuration.")
return
my_app = create_app(init_flag=True)
my_app.run(host="0.0.0.0", port=port, debug=False, threaded=False)
......
......@@ -17,13 +17,12 @@ import zmq
import time
import os
import json
import platform
import traceback
import subprocess
from paddlehub.utils import log
filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d_%H%M%S", time.localtime())
logger = log.get_file_logger(filename)
from paddlehub.utils.utils import is_port_occupied
class InferenceDevice(object):
......@@ -35,6 +34,8 @@ class InferenceDevice(object):
def __init__(self):
    # ZeroMQ frontend/backend sockets; created later in listen(),
    # so they start out unset.
    self.frontend = None
    self.backend = None
    # One log file per calendar day, e.g. 'HubServing-2021_01_01.log';
    # log.get_file_logger caches loggers by resolved file name.
    filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d", time.localtime())
    self.logger = log.get_file_logger(filename)
def listen(self, frontend_addr: str, backend_addr: str):
'''
......@@ -51,7 +52,7 @@ class InferenceDevice(object):
zmq.device(zmq.QUEUE, self.frontend, self.backend)
except Exception as e:
logger.error(traceback.format_exc())
self.logger.error(traceback.format_exc())
finally:
self.frontend.close()
self.backend.close()
......@@ -95,7 +96,21 @@ class InferenceServer(object):
self.gpus = gpus
def listen(self, port: int):
backend = "ipc://backend.ipc"
start_workers(modules_info=self.modules_info, gpus=self.gpus, backend_addr=backend)
if platform.system() == "Windows":
back_port = int(port) + 1
for index in range(100):
if is_port_occupied("127.0.0.1", back_port):
break
else:
back_port = int(back_port) + 1
else:
raise RuntimeError("Port from %s to %s is occupied, please use another port" % int(port) + 1, back_port)
worker_backend = "tcp://localhost:%s" % back_port
backend = "tcp://*:%s" % back_port
else:
worker_backend = "ipc://backend.ipc"
backend = "ipc://backend.ipc"
start_workers(modules_info=self.modules_info, gpus=self.gpus, backend_addr=worker_backend)
d = InferenceDevice()
d.listen('tcp://*:%s' % port, backend)
......@@ -24,8 +24,7 @@ from paddlehub.serving.device import InferenceServer
from paddlehub.serving.client import InferenceClientProxy
from paddlehub.utils import utils, log
filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d_%H%M%S", time.localtime())
logger = log.get_file_logger(filename)
filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d", time.localtime())
if platform.system() == "Windows":
......@@ -111,6 +110,7 @@ def create_app(client_port: int = 5559, modules_name: list = []):
'''
app_instance = Flask(__name__)
app_instance.config["JSON_AS_ASCII"] = False
app_instance.logger = log.get_file_logger(filename)
pid = os.getpid()
@app_instance.route("/", methods=["GET", "POST"])
......@@ -183,7 +183,7 @@ def run(port: int = 8866, client_port: int = 5559, names: list = [], workers: in
options = {"bind": "0.0.0.0:%s" % port, "workers": workers, "worker_class": "sync"}
StandaloneApplication(create_app(client_port, modules_name=names), options).run()
logger.info("PaddleHub-Serving has been stopped.")
log.logger.info("PaddleHub-Serving has been stopped.")
def run_http_server(port: int = 8866, client_port: int = 5559, names: list = [], workers: int = 1):
......@@ -204,6 +204,7 @@ def run_http_server(port: int = 8866, client_port: int = 5559, names: list = [],
run_http_server(port=8866, client_port='5559', names=['lac'])
'''
names = list(names)
p = multiprocessing.Process(target=run, args=(port, client_port, names, workers))
p.start()
return p.pid
......
......@@ -14,6 +14,7 @@
# limitations under the License.
import zmq
import time
import os
import json
import traceback
......@@ -71,6 +72,10 @@ if __name__ == '__main__':
from paddlehub.serving.http_server import package_result
from paddlehub.utils import log
filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d", time.localtime())
logger = log.get_file_logger(filename)
logger.logger.handlers = logger.logger.handlers[0:1]
modules_pred_info = {}
for module_name, module_info in modules_info.items():
init_args = module_info.get('init_args', {})
......
......@@ -17,6 +17,7 @@ import contextlib
import copy
import functools
import logging
import os
import sys
import time
import threading
......@@ -26,6 +27,9 @@ import colorlog
from colorama import Fore
import paddlehub.config as hubconf
from paddlehub.env import LOG_HOME
loggers = {}
log_config = {
'DEBUG': {
......@@ -488,17 +492,20 @@ def get_file_logger(filename):
logger = get_file_logger('test.log')
logger.logger.info('test_1')
'''
logger = Logger()
old_handlers = logger.logger.handlers
for handler in old_handlers:
logger.logger.removeHandler(handler)
log_name = os.path.join(LOG_HOME, filename)
if log_name in loggers:
return loggers[log_name]
logger = Logger()
logger.logger.handlers = []
format = logging.Formatter('[%(asctime)-15s] [%(levelname)8s] - %(message)s')
sh = logging.FileHandler(filename=filename, mode='a')
sh = logging.FileHandler(filename=log_name, mode='a')
sh.setFormatter(format)
logger.logger.addHandler(sh)
logger.logger.setLevel(logging.INFO)
loggers.update({log_name: logger})
return logger
......
......@@ -21,6 +21,7 @@ import importlib
import math
import os
import requests
import socket
import sys
import time
import tempfile
......@@ -302,3 +303,16 @@ def record_exception(msg: str) -> str:
def get_record_file():
return os.path.join(hubenv.LOG_HOME, time.strftime('%Y%m%d.log'))
def is_port_occupied(ip, port):
    '''
    Check whether a TCP port is occupied.

    Attempts a TCP connection to (ip, port); a successful connect means
    something is already listening on that port.

    Args:
        ip (str): host address to probe, e.g. "127.0.0.1".
        port (int or str): port number; coerced with int().

    Returns:
        bool: True if the connection succeeds (port occupied),
        False otherwise.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((ip, int(port)))
        # Orderly shutdown of both directions before closing.
        s.shutdown(2)
        return True
    except OSError:
        # Connection refused / unreachable / timed out -> port is free.
        # (Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt and programming errors.)
        return False
    finally:
        # Always release the file descriptor; the original leaked it.
        s.close()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册