Unverified commit a1ae96b2, authored by Yang Zhang, committed by GitHub

Make logger less noisy (#583)

* Make logger less noisy

* Fix indent
Parent c62e6145
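For context: the change relies on the standard Python logging level hierarchy (DEBUG < INFO < WARNING). A handler configured at the usual INFO level drops DEBUG records, so demoting these calls from `info` to `debug` silences them by default, while the two messages promoted to `warning` stay visible. A minimal sketch of the effect (the message strings here are illustrative, not taken from the diff):

```python
import logging

logging.basicConfig(level=logging.INFO)  # common default for training scripts
logger = logging.getLogger(__name__)

logger.info("Loading parameters from weights/model...")  # still printed
logger.debug("Found dataset/coco")                       # suppressed at INFO
logger.warning("md5 check failed")                       # always surfaced
```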
@@ -70,8 +70,8 @@ class ParallelMap(object):
         self._bufsize = bufsize
         self._use_process = use_process
         if self._use_process and sys.platform == "win32":
-            logger.info("Use multi-thread reader instead of "
-                        "multi-process reader on Windows.")
+            logger.debug("Use multi-thread reader instead of "
+                         "multi-process reader on Windows.")
             self._use_process = False
         if self._use_process and type(memsize) is str:
             assert memsize[-1].lower() in ['g', 'm'], \
@@ -286,8 +286,8 @@ class Reader(object):
             np.random.shuffle(self.indexes)
         if self._mixup_epoch > 0 and len(self.indexes) < 2:
-            logger.info("Disable mixup for dataset samples "
-                        "less than 2 samples")
+            logger.debug("Disable mixup for dataset samples "
+                         "less than 2 samples")
             self._mixup_epoch = -1
         if self._epoch < 0:
@@ -154,5 +154,5 @@ class COCODataSet(DataSet):
             if self.sample_num > 0 and ct >= self.sample_num:
                 break
         assert len(records) > 0, 'not found any coco record in %s' % (anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
@@ -160,7 +160,7 @@ class VOCDataSet(DataSet):
                     break
         assert len(records) > 0, 'not found any voc record in %s' % (
             self.anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
@@ -102,7 +102,7 @@ class WIDERFaceDataSet(DataSet):
             if self.sample_num > 0 and ct >= self.sample_num:
                 break
         assert len(records) > 0, 'not found any widerface in %s' % (anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
@@ -119,7 +119,7 @@ def load_params(exe, prog, path, ignore_params=[]):
         raise ValueError("Model pretrain path {} does not "
                          "exists.".format(path))
-    logger.info('Loading parameters from {}...'.format(path))
+    logger.debug('Loading parameters from {}...'.format(path))
     ignore_set = set()
     state = _load_state(path)
@@ -208,7 +208,7 @@ def load_and_fusebn(exe, prog, path):
         prog (fluid.Program): save weight from which Program object.
         path (string): the path to save model.
     """
-    logger.info('Load model and fuse batch norm if have from {}...'.format(
+    logger.debug('Load model and fuse batch norm if have from {}...'.format(
         path))
     if is_url(path):
@@ -108,8 +108,8 @@ def get_dataset_path(path, annotation, image_dir):
     data_name = os.path.split(path.strip().lower())[-1]
     for name, dataset in DATASETS.items():
         if data_name == name:
-            logger.info("Parse dataset_dir {} as dataset "
-                        "{}".format(path, name))
+            logger.debug("Parse dataset_dir {} as dataset "
+                         "{}".format(path, name))
             if name == 'objects365':
                 raise NotImplementedError(
                     "Dataset {} is not valid for download automatically. "
@@ -146,7 +146,7 @@ def get_dataset_path(path, annotation, image_dir):
 def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
-    logger.info("Create voc file list...")
+    logger.debug("Create voc file list...")
     devkit_dir = osp.join(data_dir, devkit_subdir)
     years = ['2007', '2012']
@@ -155,7 +155,7 @@ def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
     # do not generate label_list.txt here. For default
     # label, see ../data/source/voc.py
     create_list(devkit_dir, years, data_dir)
-    logger.info("Create voc file list finished")
+    logger.debug("Create voc file list finished")

 def map_path(url, root_dir):
@@ -197,7 +197,7 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):
     exist_flag = False
     if osp.exists(fullpath) and check_exist:
         exist_flag = True
-        logger.info("Found {}".format(fullpath))
+        logger.debug("Found {}".format(fullpath))
     else:
         exist_flag = False
         fullname = _download(url, root_dir, md5sum)
@@ -218,7 +218,7 @@ def download_dataset(path, dataset=None):
     dataset_info = DATASETS[dataset][0]
     for info in dataset_info:
         get_path(info[0], path, info[1], False)
-    logger.info("Download dataset {} finished.".format(dataset))
+    logger.debug("Download dataset {} finished.".format(dataset))

 def _dataset_exists(path, annotation, image_dir):
@@ -226,23 +226,23 @@ def _dataset_exists(path, annotation, image_dir):
     Check if user define dataset exists
     """
     if not osp.exists(path):
-        logger.info("Config dataset_dir {} is not exits, "
-                    "dataset config is not valid".format(path))
+        logger.debug("Config dataset_dir {} is not exits, "
+                     "dataset config is not valid".format(path))
         return False
     if annotation:
         annotation_path = osp.join(path, annotation)
         if not osp.isfile(annotation_path):
-            logger.info("Config annotation {} is not a "
-                        "file, dataset config is not "
-                        "valid".format(annotation_path))
+            logger.debug("Config annotation {} is not a "
+                         "file, dataset config is not "
+                         "valid".format(annotation_path))
             return False
     if image_dir:
         image_path = osp.join(path, image_dir)
         if not osp.isdir(image_path):
-            logger.info("Config image_dir {} is not a "
-                        "directory, dataset config is not "
-                        "valid".format(image_path))
+            logger.warning("Config image_dir {} is not a "
+                           "directory, dataset config is not "
+                           "valid".format(image_path))
             return False
     return True
@@ -300,7 +300,7 @@ def _md5check(fullname, md5sum=None):
     if md5sum is None:
         return True
-    logger.info("File {} md5 checking...".format(fullname))
+    logger.debug("File {} md5 checking...".format(fullname))
     md5 = hashlib.md5()
     with open(fullname, 'rb') as f:
         for chunk in iter(lambda: f.read(4096), b""):
@@ -308,8 +308,8 @@ def _md5check(fullname, md5sum=None):
     calc_md5sum = md5.hexdigest()
     if calc_md5sum != md5sum:
-        logger.info("File {} md5 check failed, {}(calc) != "
-                    "{}(base)".format(fullname, calc_md5sum, md5sum))
+        logger.warning("File {} md5 check failed, {}(calc) != "
+                       "{}(base)".format(fullname, calc_md5sum, md5sum))
         return False
     return True
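The demoted messages remain available for troubleshooting by lowering the effective logging level. A hedged sketch, assuming the project's loggers are plain `logging.getLogger(__name__)` instances under the `ppdet` package (the `'ppdet'` logger name is an assumption for illustration):

```python
import logging

logging.basicConfig()  # make sure a handler is attached to the root logger
# Lower the package logger so the demoted debug() calls are emitted again.
logging.getLogger('ppdet').setLevel(logging.DEBUG)
```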