Unverified · Commit a1ae96b2 authored by Yang Zhang, committed by GitHub

Make logger less noisy (#583)

* Make logger less noisy

* Fix indent
Parent c62e6145
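The effect of the change below: routine progress messages are demoted from info to debug (and genuine problems to warning), so they are hidden at the common default level of INFO and only appear when a user opts into DEBUG. A minimal sketch of that behavior using the standard library logging module — the logger name and setup here are illustrative, not taken from the repository:

    import logging

    logging.basicConfig(level=logging.INFO)      # typical default: INFO and above
    logger = logging.getLogger("ppdet.example")  # hypothetical logger name

    logger.debug("128 samples in file annotations.json")  # suppressed at INFO
    logger.warning("md5 check failed")                    # still printed

    # Opt back into the verbose messages while debugging:
    logging.getLogger("ppdet.example").setLevel(logging.DEBUG)
    logger.debug("now visible")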
@@ -70,8 +70,8 @@ class ParallelMap(object):
         self._bufsize = bufsize
         self._use_process = use_process
         if self._use_process and sys.platform == "win32":
-            logger.info("Use multi-thread reader instead of "
-                        "multi-process reader on Windows.")
+            logger.debug("Use multi-thread reader instead of "
+                         "multi-process reader on Windows.")
             self._use_process = False
         if self._use_process and type(memsize) is str:
             assert memsize[-1].lower() in ['g', 'm'], \
@@ -286,8 +286,8 @@ class Reader(object):
             np.random.shuffle(self.indexes)
         if self._mixup_epoch > 0 and len(self.indexes) < 2:
-            logger.info("Disable mixup for dataset samples "
-                        "less than 2 samples")
+            logger.debug("Disable mixup for dataset samples "
+                         "less than 2 samples")
             self._mixup_epoch = -1
         if self._epoch < 0:
@@ -154,5 +154,5 @@ class COCODataSet(DataSet):
             if self.sample_num > 0 and ct >= self.sample_num:
                 break
         assert len(records) > 0, 'not found any coco record in %s' % (anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
@@ -160,7 +160,7 @@ class VOCDataSet(DataSet):
                 break
         assert len(records) > 0, 'not found any voc record in %s' % (
             self.anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
@@ -102,7 +102,7 @@ class WIDERFaceDataSet(DataSet):
             if self.sample_num > 0 and ct >= self.sample_num:
                 break
         assert len(records) > 0, 'not found any widerface in %s' % (anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
@@ -119,7 +119,7 @@ def load_params(exe, prog, path, ignore_params=[]):
         raise ValueError("Model pretrain path {} does not "
                          "exists.".format(path))
-    logger.info('Loading parameters from {}...'.format(path))
+    logger.debug('Loading parameters from {}...'.format(path))
     ignore_set = set()
     state = _load_state(path)
@@ -208,7 +208,7 @@ def load_and_fusebn(exe, prog, path):
         prog (fluid.Program): save weight from which Program object.
         path (string): the path to save model.
     """
-    logger.info('Load model and fuse batch norm if have from {}...'.format(
+    logger.debug('Load model and fuse batch norm if have from {}...'.format(
         path))
     if is_url(path):
@@ -108,8 +108,8 @@ def get_dataset_path(path, annotation, image_dir):
     data_name = os.path.split(path.strip().lower())[-1]
     for name, dataset in DATASETS.items():
         if data_name == name:
-            logger.info("Parse dataset_dir {} as dataset "
-                        "{}".format(path, name))
+            logger.debug("Parse dataset_dir {} as dataset "
+                         "{}".format(path, name))
             if name == 'objects365':
                 raise NotImplementedError(
                     "Dataset {} is not valid for download automatically. "
@@ -146,7 +146,7 @@ def get_dataset_path(path, annotation, image_dir):
 def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
-    logger.info("Create voc file list...")
+    logger.debug("Create voc file list...")
     devkit_dir = osp.join(data_dir, devkit_subdir)
     years = ['2007', '2012']
@@ -155,7 +155,7 @@ def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
     # do not generate label_list.txt here. For default
     # label, see ../data/source/voc.py
     create_list(devkit_dir, years, data_dir)
-    logger.info("Create voc file list finished")
+    logger.debug("Create voc file list finished")

 def map_path(url, root_dir):
@@ -197,7 +197,7 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):
     exist_flag = False
     if osp.exists(fullpath) and check_exist:
         exist_flag = True
-        logger.info("Found {}".format(fullpath))
+        logger.debug("Found {}".format(fullpath))
     else:
         exist_flag = False
         fullname = _download(url, root_dir, md5sum)
@@ -218,7 +218,7 @@ def download_dataset(path, dataset=None):
     dataset_info = DATASETS[dataset][0]
     for info in dataset_info:
         get_path(info[0], path, info[1], False)
-    logger.info("Download dataset {} finished.".format(dataset))
+    logger.debug("Download dataset {} finished.".format(dataset))

 def _dataset_exists(path, annotation, image_dir):
@@ -226,23 +226,23 @@ def _dataset_exists(path, annotation, image_dir):
     Check if user define dataset exists
     """
     if not osp.exists(path):
-        logger.info("Config dataset_dir {} is not exits, "
-                    "dataset config is not valid".format(path))
+        logger.debug("Config dataset_dir {} is not exits, "
+                     "dataset config is not valid".format(path))
         return False
     if annotation:
         annotation_path = osp.join(path, annotation)
         if not osp.isfile(annotation_path):
-            logger.info("Config annotation {} is not a "
-                        "file, dataset config is not "
-                        "valid".format(annotation_path))
+            logger.debug("Config annotation {} is not a "
+                         "file, dataset config is not "
+                         "valid".format(annotation_path))
             return False
     if image_dir:
         image_path = osp.join(path, image_dir)
         if not osp.isdir(image_path):
-            logger.info("Config image_dir {} is not a "
-                        "directory, dataset config is not "
-                        "valid".format(image_path))
+            logger.warning("Config image_dir {} is not a "
+                           "directory, dataset config is not "
+                           "valid".format(image_path))
             return False
     return True
@@ -300,7 +300,7 @@ def _md5check(fullname, md5sum=None):
     if md5sum is None:
         return True
-    logger.info("File {} md5 checking...".format(fullname))
+    logger.debug("File {} md5 checking...".format(fullname))
     md5 = hashlib.md5()
    with open(fullname, 'rb') as f:
         for chunk in iter(lambda: f.read(4096), b""):
@@ -308,8 +308,8 @@ def _md5check(fullname, md5sum=None):
     calc_md5sum = md5.hexdigest()
     if calc_md5sum != md5sum:
-        logger.info("File {} md5 check failed, {}(calc) != "
-                    "{}(base)".format(fullname, calc_md5sum, md5sum))
+        logger.warning("File {} md5 check failed, {}(calc) != "
+                       "{}(base)".format(fullname, calc_md5sum, md5sum))
         return False
     return True