From a1ae96b2dd2b2112736f35dc2811d4a178aa28b8 Mon Sep 17 00:00:00 2001
From: Yang Zhang
Date: Thu, 7 May 2020 22:01:27 +0800
Subject: [PATCH] Make logger less noisy (#583)

* Make logger less noisy

* Fix indent
---
 ppdet/data/parallel_map.py     |  4 ++--
 ppdet/data/reader.py           |  4 ++--
 ppdet/data/source/coco.py      |  2 +-
 ppdet/data/source/voc.py       |  2 +-
 ppdet/data/source/widerface.py |  2 +-
 ppdet/utils/checkpoint.py      |  4 ++--
 ppdet/utils/download.py        | 34 +++++++++++++++++-----------------
 7 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/ppdet/data/parallel_map.py b/ppdet/data/parallel_map.py
index 0577d4c1a..789fda1f2 100644
--- a/ppdet/data/parallel_map.py
+++ b/ppdet/data/parallel_map.py
@@ -70,8 +70,8 @@ class ParallelMap(object):
         self._bufsize = bufsize
         self._use_process = use_process
         if self._use_process and sys.platform == "win32":
-            logger.info("Use multi-thread reader instead of "
-                        "multi-process reader on Windows.")
+            logger.debug("Use multi-thread reader instead of "
+                         "multi-process reader on Windows.")
             self._use_process = False
         if self._use_process and type(memsize) is str:
             assert memsize[-1].lower() in ['g', 'm'], \
diff --git a/ppdet/data/reader.py b/ppdet/data/reader.py
index 335fed317..dd923506f 100644
--- a/ppdet/data/reader.py
+++ b/ppdet/data/reader.py
@@ -286,8 +286,8 @@ class Reader(object):
             np.random.shuffle(self.indexes)
 
         if self._mixup_epoch > 0 and len(self.indexes) < 2:
-            logger.info("Disable mixup for dataset samples "
-                        "less than 2 samples")
+            logger.debug("Disable mixup for dataset samples "
+                         "less than 2 samples")
             self._mixup_epoch = -1
 
         if self._epoch < 0:
diff --git a/ppdet/data/source/coco.py b/ppdet/data/source/coco.py
index f79353036..1d2cbf13a 100644
--- a/ppdet/data/source/coco.py
+++ b/ppdet/data/source/coco.py
@@ -154,5 +154,5 @@ class COCODataSet(DataSet):
             if self.sample_num > 0 and ct >= self.sample_num:
                 break
         assert len(records) > 0, 'not found any coco record in %s' % (anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
diff --git a/ppdet/data/source/voc.py b/ppdet/data/source/voc.py
index 5217491dc..77410d73f 100644
--- a/ppdet/data/source/voc.py
+++ b/ppdet/data/source/voc.py
@@ -160,7 +160,7 @@ class VOCDataSet(DataSet):
                 break
         assert len(records) > 0, 'not found any voc record in %s' % (
             self.anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
 
 
diff --git a/ppdet/data/source/widerface.py b/ppdet/data/source/widerface.py
index bc718a5ca..ddf4b2cb4 100644
--- a/ppdet/data/source/widerface.py
+++ b/ppdet/data/source/widerface.py
@@ -102,7 +102,7 @@ class WIDERFaceDataSet(DataSet):
             if self.sample_num > 0 and ct >= self.sample_num:
                 break
         assert len(records) > 0, 'not found any widerface in %s' % (anno_path)
-        logger.info('{} samples in file {}'.format(ct, anno_path))
+        logger.debug('{} samples in file {}'.format(ct, anno_path))
         self.roidbs, self.cname2cid = records, cname2cid
 
 
diff --git a/ppdet/utils/checkpoint.py b/ppdet/utils/checkpoint.py
index c172c10b5..42fe8194d 100644
--- a/ppdet/utils/checkpoint.py
+++ b/ppdet/utils/checkpoint.py
@@ -119,7 +119,7 @@ def load_params(exe, prog, path, ignore_params=[]):
         raise ValueError("Model pretrain path {} does not "
                          "exists.".format(path))
 
-    logger.info('Loading parameters from {}...'.format(path))
+    logger.debug('Loading parameters from {}...'.format(path))
 
     ignore_set = set()
     state = _load_state(path)
@@ -208,7 +208,7 @@ def load_and_fusebn(exe, prog, path):
         prog (fluid.Program): save weight from which Program object.
         path (string): the path to save model.
     """
-    logger.info('Load model and fuse batch norm if have from {}...'.format(
+    logger.debug('Load model and fuse batch norm if have from {}...'.format(
         path))
 
     if is_url(path):
diff --git a/ppdet/utils/download.py b/ppdet/utils/download.py
index 65a2d89d7..64bdc09fa 100644
--- a/ppdet/utils/download.py
+++ b/ppdet/utils/download.py
@@ -108,8 +108,8 @@ def get_dataset_path(path, annotation, image_dir):
     data_name = os.path.split(path.strip().lower())[-1]
     for name, dataset in DATASETS.items():
         if data_name == name:
-            logger.info("Parse dataset_dir {} as dataset "
-                        "{}".format(path, name))
+            logger.debug("Parse dataset_dir {} as dataset "
+                         "{}".format(path, name))
             if name == 'objects365':
                 raise NotImplementedError(
                     "Dataset {} is not valid for download automatically. "
@@ -146,7 +146,7 @@ def get_dataset_path(path, annotation, image_dir):
 
 
 def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
-    logger.info("Create voc file list...")
+    logger.debug("Create voc file list...")
     devkit_dir = osp.join(data_dir, devkit_subdir)
     years = ['2007', '2012']
 
@@ -155,7 +155,7 @@ def create_voc_list(data_dir, devkit_subdir='VOCdevkit'):
     # do not generate label_list.txt here. For default
     # label, see ../data/source/voc.py
     create_list(devkit_dir, years, data_dir)
-    logger.info("Create voc file list finished")
+    logger.debug("Create voc file list finished")
 
 
 def map_path(url, root_dir):
@@ -197,7 +197,7 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):
     exist_flag = False
     if osp.exists(fullpath) and check_exist:
         exist_flag = True
-        logger.info("Found {}".format(fullpath))
+        logger.debug("Found {}".format(fullpath))
     else:
         exist_flag = False
         fullname = _download(url, root_dir, md5sum)
@@ -218,7 +218,7 @@ def download_dataset(path, dataset=None):
     dataset_info = DATASETS[dataset][0]
     for info in dataset_info:
         get_path(info[0], path, info[1], False)
-    logger.info("Download dataset {} finished.".format(dataset))
+    logger.debug("Download dataset {} finished.".format(dataset))
 
 
 def _dataset_exists(path, annotation, image_dir):
@@ -226,23 +226,23 @@ def _dataset_exists(path, annotation, image_dir):
     Check if user define dataset exists
     """
     if not osp.exists(path):
-        logger.info("Config dataset_dir {} is not exits, "
-                    "dataset config is not valid".format(path))
+        logger.debug("Config dataset_dir {} is not exits, "
+                     "dataset config is not valid".format(path))
         return False
 
     if annotation:
         annotation_path = osp.join(path, annotation)
         if not osp.isfile(annotation_path):
-            logger.info("Config annotation {} is not a "
-                        "file, dataset config is not "
-                        "valid".format(annotation_path))
+            logger.debug("Config annotation {} is not a "
+                         "file, dataset config is not "
+                         "valid".format(annotation_path))
             return False
 
     if image_dir:
         image_path = osp.join(path, image_dir)
         if not osp.isdir(image_path):
-            logger.info("Config image_dir {} is not a "
-                        "directory, dataset config is not "
-                        "valid".format(image_path))
+            logger.warning("Config image_dir {} is not a "
+                           "directory, dataset config is not "
+                           "valid".format(image_path))
             return False
     return True
@@ -300,7 +300,7 @@ def _md5check(fullname, md5sum=None):
     if md5sum is None:
         return True
 
-    logger.info("File {} md5 checking...".format(fullname))
+    logger.debug("File {} md5 checking...".format(fullname))
     md5 = hashlib.md5()
     with open(fullname, 'rb') as f:
         for chunk in iter(lambda: f.read(4096), b""):
@@ -308,8 +308,8 @@ def _md5check(fullname, md5sum=None):
     calc_md5sum = md5.hexdigest()
 
     if calc_md5sum != md5sum:
-        logger.info("File {} md5 check failed, {}(calc) != "
-                    "{}(base)".format(fullname, calc_md5sum, md5sum))
+        logger.warning("File {} md5 check failed, {}(calc) != "
+                       "{}(base)".format(fullname, calc_md5sum, md5sum))
         return False
 
     return True
-- 
GitLab
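
Usage note: after this patch, dataset sample counts, checkpoint loading, and
download bookkeeping are logged at DEBUG, while genuine problems (a missing
image_dir, an md5 mismatch) are raised to WARNING, so a default INFO-level
configuration stays quiet unless something is wrong. To surface the downgraded
messages again, raise the verbosity of the standard `logging` module that these
files use. A minimal sketch, assuming the module loggers are created with
`logging.getLogger(__name__)` under the `ppdet` package; the `basicConfig`
call site and format string below are illustrative, not part of this patch:

    import logging

    # Let DEBUG-and-above records reach the root handler; with the usual
    # INFO default, the messages downgraded to logger.debug(...) above
    # would stay hidden.
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s %(name)s %(levelname)s: %(message)s")

    # To be more selective, keep the root logger at INFO and raise only
    # the data-pipeline loggers touched by this patch:
    # logging.getLogger("ppdet.data").setLevel(logging.DEBUG)

Setting the level on the "ppdet.data" parent logger lowers the effective level
of every child (ppdet.data.reader, ppdet.data.source.coco, ...), since child
loggers inherit from their dotted-name ancestors when their own level is unset.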