Commit a6765a47 authored by kinghuin, committed by wuzewu

Colorlog and task/config.__repr__ (#253)

* colorlog

* optimize __repr__
Parent 840fd096
@@ -17,6 +17,7 @@ from __future__ import print_function
from __future__ import division
from __future__ import print_function
import colorlog
import logging
import math
import os
@@ -26,21 +27,40 @@ from paddlehub.common.dir import CONF_HOME
class Logger(object):
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
TRAIN = 21
EVAL = 22
PLACEHOLDER = '%'
NOLOG = "NOLOG"
logging.addLevelName(TRAIN, 'TRAIN')
logging.addLevelName(EVAL, 'EVAL')
def __init__(self, name=None):
if not name:
name = "PaddleHub"
self.logger = logging.getLogger(name)
self.handler = logging.StreamHandler()
self.format = logging.Formatter(
'[%(asctime)-15s] [%(levelname)8s] - %(message)s')
self.handler.setFormatter(self.format)
self.format = colorlog.ColoredFormatter(
'%(log_color)s[%(asctime)-15s] [%(levelname)8s] - %(message)s',
log_colors={
'DEBUG': 'purple',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'bold_red',
'TRAIN': 'cyan',
'EVAL': 'blue',
})
self.handler.setFormatter(self.format)
self.logger.addHandler(self.handler)
self.logLevel = "DEBUG"
self.logger.setLevel(self._get_logging_level())
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
if os.path.exists(os.path.join(CONF_HOME, "config.json")):
with open(os.path.join(CONF_HOME, "config.json"), "r") as fp:
level = json.load(fp).get("log_level", "DEBUG")
@@ -50,9 +70,6 @@ class Logger(object):
def _is_no_log(self):
return self.getLevel() == Logger.NOLOG
def _get_logging_level(self):
return eval("logging.%s" % self.logLevel)
def setLevel(self, logLevel):
self.logLevel = logLevel.upper()
if not self._is_no_log():
@@ -62,7 +79,7 @@ class Logger(object):
def getLevel(self):
return self.logLevel
def __call__(self, type, msg):
def __call__(self, level, msg):
def _get_log_arr(msg, len_limit=30):
ph = Logger.PLACEHOLDER
lrspace = 2
@@ -109,24 +126,29 @@ class Logger(object):
if self._is_no_log():
return
func = eval("self.logger.%s" % type)
for msg in _get_log_arr(msg):
func(msg)
self.logger.log(level, msg)
def debug(self, msg):
self("debug", msg)
self(logger.DEBUG, msg)
def info(self, msg):
self("info", msg)
def error(self, msg):
self("error", msg)
self(logger.INFO, msg)
def warning(self, msg):
self("warning", msg)
self(logger.WARNING, msg)
def error(self, msg):
self(logger.ERROR, msg)
def critical(self, msg):
self("critical", msg)
self(logger.CRITICAL, msg)
def train(self, msg):
self(logger.TRAIN, msg)
def eval(self, msg):
self(logger.EVAL, msg)
logger = Logger()
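For reference, the coloring and the custom TRAIN/EVAL levels introduced above can be reproduced with a few lines of the standard logging module plus colorlog. A minimal, self-contained sketch (assuming only that the colorlog package is installed); the level numbers, format string, and colors mirror the diff:

```python
import logging
import colorlog

# custom levels between INFO (20) and WARNING (30), as in the diff
TRAIN, EVAL = 21, 22
logging.addLevelName(TRAIN, 'TRAIN')
logging.addLevelName(EVAL, 'EVAL')

log = logging.getLogger("PaddleHub")
handler = logging.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter(
    '%(log_color)s[%(asctime)-15s] [%(levelname)8s] - %(message)s',
    log_colors={
        'DEBUG': 'purple', 'INFO': 'green', 'WARNING': 'yellow',
        'ERROR': 'red', 'CRITICAL': 'bold_red',
        'TRAIN': 'cyan', 'EVAL': 'blue',
    }))
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.propagate = False  # keep records away from the root logger

log.log(TRAIN, "step 10 / 100: loss=0.12345")     # printed in cyan
log.log(EVAL, "Evaluation on dev dataset start")  # printed in blue
```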
@@ -109,3 +109,8 @@ class RunConfig(object):
@property
def use_data_parallel(self):
return self._use_data_parallel
def __repr__(self):
return "config with num_epoch=%s, batch_size=%s, use_cuda=%s, checkpoint_dir=%s and %s" % (
self.num_epoch, self.batch_size, self.use_cuda, self.checkpoint_dir,
self.strategy)
@@ -290,11 +290,6 @@ class BasicTask(object):
build_strategy=self.build_strategy)
self.exe.run(self.env.startup_program)
# avoid printing each log record twice, a side effect of paddle-fluid 1.5 configuring the root logger
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
self._build_env_end_event()
@property
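The block removed above stripped handlers from the root logger so that paddle-fluid 1.5, which sets up root logging itself, would not print every record a second time. It is no longer needed because the new Logger sets self.logger.propagate = False. A small hypothetical sketch of the duplication and the fix, with logging.basicConfig standing in for paddle-fluid's root-logger setup:

```python
import logging

# stand-in for paddle-fluid 1.5 attaching a handler to the root logger
logging.basicConfig(format='[root] %(message)s')

log = logging.getLogger("PaddleHub")
log.addHandler(logging.StreamHandler())  # PaddleHub's own handler

log.warning("finetune start")      # printed twice: by our handler and by the root handler
log.propagate = False              # what the new Logger now does
log.warning("finetune finished")   # printed once
```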
@@ -454,7 +449,7 @@ class BasicTask(object):
self.env.score_scalar = {}
def _finetune_start_event(self):
logger.info("PaddleHub finetune start")
logger.train("PaddleHub finetune start")
def _finetune_end_event(self, run_states):
logger.info("PaddleHub finetune finished.")
@@ -466,7 +461,7 @@ class BasicTask(object):
logger.info("PaddleHub predict finished.")
def _eval_start_event(self):
logger.info("Evaluation on {} dataset start".format(self.phase))
logger.eval("Evaluation on {} dataset start".format(self.phase))
def _eval_end_event(self, run_states):
eval_scores, eval_loss, run_speed = self._calculate_metrics(run_states)
@@ -484,7 +479,7 @@ class BasicTask(object):
scalar_value=eval_scores[metric],
global_step=self._envs['train'].current_step)
log_scores += "%s=%.5f " % (metric, eval_scores[metric])
logger.info(
logger.eval(
"[%s dataset evaluation result] loss=%.5f %s[step/sec: %.2f]" %
(self.phase, eval_loss, log_scores, run_speed))
@@ -502,7 +497,7 @@ class BasicTask(object):
self.best_score = main_value
model_saved_dir = os.path.join(self.config.checkpoint_dir,
"best_model")
logger.info("best model saved to %s [best %s=%.5f]" %
logger.eval("best model saved to %s [best %s=%.5f]" %
(model_saved_dir, main_metric, main_value))
save_result = fluid.io.save_persistables(
@@ -523,7 +518,7 @@ class BasicTask(object):
scalar_value=scores[metric],
global_step=self._envs['train'].current_step)
log_scores += "%s=%.5f " % (metric, scores[metric])
logger.info("step %d / %d: loss=%.5f %s[step/sec: %.2f]" %
logger.train("step %d / %d: loss=%.5f %s[step/sec: %.2f]" %
(self.current_step, self.max_train_steps, avg_loss,
log_scores, run_speed))
@@ -760,3 +755,8 @@ class BasicTask(object):
break
return global_run_states
def __repr__(self):
return "Task: %s with metrics_choices: %s, reader: %s, %s" % (
self.__class__.__name__, self.metrics_choices,
self._base_data_reader.__class__.__name__, self.config)
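Together with RunConfig.__repr__ above, this makes a task self-describing: the %s on self.config invokes the config's __repr__, so printing a task also prints its configuration. A minimal sketch of the same composition pattern, using hypothetical stand-in classes rather than the PaddleHub API:

```python
class Config(object):
    def __init__(self, num_epoch=1, batch_size=32):
        self.num_epoch = num_epoch
        self.batch_size = batch_size

    def __repr__(self):
        return "config with num_epoch=%s and batch_size=%s" % (
            self.num_epoch, self.batch_size)


class TextClassifierTask(object):
    def __init__(self, config, metrics_choices=("acc",)):
        self.config = config
        self.metrics_choices = list(metrics_choices)

    def __repr__(self):
        # %s falls back to Config.__repr__ because Config defines no __str__
        return "Task: %s with metrics_choices: %s, %s" % (
            self.__class__.__name__, self.metrics_choices, self.config)


print(TextClassifierTask(Config(num_epoch=3)))
# Task: TextClassifierTask with metrics_choices: ['acc'], config with num_epoch=3 and batch_size=32
```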
@@ -16,3 +16,4 @@ tensorboard >= 1.15
cma == 2.7.0
sentencepiece
nltk
colorlog
@@ -15,3 +15,4 @@ tensorboard >= 1.15
cma == 2.7.0
sentencepiece
nltk
colorlog
@@ -33,7 +33,7 @@ max_version, mid_version, min_version = python_version()
REQUIRED_PACKAGES = [
'six >= 1.10.0', 'protobuf >= 3.6.0', 'pyyaml', 'Pillow', 'requests',
'tb-paddle', 'tensorboard >= 1.15', 'cma == 2.7.0', 'flask >= 1.1.0',
'sentencepiece', 'nltk'
'sentencepiece', 'nltk', 'colorlog'
]
if max_version < 3: