From d77c236f9b23db31871670285453c86733b17d81 Mon Sep 17 00:00:00 2001
From: Manuel Garcia <31109774+Mandroide@users.noreply.github.com>
Date: Wed, 7 Jul 2021 22:01:10 -0400
Subject: [PATCH] Replace deprecated methods 'warn' and 'getargspec' (#3627)

---
 dataset/dota_coco/dota_to_coco.py                 |  2 +-
 ppdet/core/config/schema.py                       |  4 ++--
 ppdet/core/config/yaml_helpers.py                 |  2 +-
 ppdet/data/reader.py                              | 16 ++++++++--------
 ppdet/data/source/category.py                     |  2 +-
 ppdet/data/source/mot.py                          |  8 ++++----
 ppdet/data/source/voc.py                          | 11 ++++++-----
 ppdet/data/source/widerface.py                    |  6 +++---
 ppdet/data/transform/operators.py                 |  6 +++---
 ppdet/engine/callbacks.py                         |  2 +-
 ppdet/engine/tracker.py                           |  2 +-
 ppdet/engine/trainer.py                           |  2 +-
 static/ppdet/core/config/schema.py                |  4 ++--
 static/ppdet/core/config/yaml_helpers.py          |  2 +-
 static/ppdet/core/workspace.py                    |  4 ++--
 static/ppdet/data/parallel_map.py                 |  8 ++++----
 static/ppdet/data/reader.py                       |  5 +++--
 static/ppdet/data/shared_queue/queue.py           |  4 ++--
 static/ppdet/data/shared_queue/sharedmemory.py    |  4 ++--
 static/ppdet/data/source/coco.py                  | 16 ++++++++--------
 static/ppdet/data/source/voc.py                   | 11 ++++++-----
 static/ppdet/data/source/widerface.py             |  6 +++---
 static/ppdet/data/transform/autoaugment_utils.py  | 12 ++++++------
 static/ppdet/data/transform/operators.py          |  4 ++--
 static/ppdet/modeling/losses/yolo_loss.py         |  2 +-
 static/ppdet/utils/download.py                    |  2 +-
 static/slim/sensitive/sensitive.py                |  2 +-
 static/tools/anchor_cluster.py                    |  6 +++---
 tools/anchor_cluster.py                           |  6 +++---
 29 files changed, 82 insertions(+), 79 deletions(-)

diff --git a/dataset/dota_coco/dota_to_coco.py b/dataset/dota_coco/dota_to_coco.py
index 7c192a9dc..0905df4e0 100644
--- a/dataset/dota_coco/dota_to_coco.py
+++ b/dataset/dota_coco/dota_to_coco.py
@@ -93,7 +93,7 @@ def dota_2_coco(image_dir,
         # annotations
         anno_txt_path = osp.join(txt_dir, osp.splitext(basename)[0] + '.txt')
         if not osp.exists(anno_txt_path):
-            logger.warn('path of {} not exists'.format(anno_txt_path))
+            logger.warning('path of {} not exists'.format(anno_txt_path))
 
         for line in open(anno_txt_path):
             line = line.strip()
diff --git a/ppdet/core/config/schema.py b/ppdet/core/config/schema.py
index 0d2b0dabf..2e41b5c34 100644
--- a/ppdet/core/config/schema.py
+++ b/ppdet/core/config/schema.py
@@ -185,12 +185,12 @@ def extract_schema(cls):
         annotations = argspec.annotations
         has_kwargs = argspec.varkw is not None
     else:
-        argspec = inspect.getargspec(ctor)
+        argspec = inspect.getfullargspec(ctor)
         # python 2 type hinting workaround, see pep-3107
         # however, since `typeguard` does not support python 2, type checking
         # is still python 3 only for now
         annotations = getattr(ctor, '__annotations__', {})
-        has_kwargs = argspec.keywords is not None
+        has_kwargs = argspec.varkw is not None
 
     names = [arg for arg in argspec.args if arg != 'self']
     defaults = argspec.defaults
diff --git a/ppdet/core/config/yaml_helpers.py b/ppdet/core/config/yaml_helpers.py
index 1545b6be7..181cfe6fc 100644
--- a/ppdet/core/config/yaml_helpers.py
+++ b/ppdet/core/config/yaml_helpers.py
@@ -52,7 +52,7 @@ def _make_python_representer(cls):
     if hasattr(inspect, 'getfullargspec'):
         argspec = inspect.getfullargspec(cls)
     else:
-        argspec = inspect.getargspec(cls.__init__)
+        argspec = inspect.getfullargspec(cls.__init__)
     argnames = [arg for arg in argspec.args if arg != 'self']
 
     def python_representer(dumper, obj):
diff --git a/ppdet/data/reader.py b/ppdet/data/reader.py
index e87b604d4..253b9047c 100644
--- a/ppdet/data/reader.py
+++ b/ppdet/data/reader.py
@@ -56,9 +56,9 @@ class Compose(object):
                 data = f(data)
             except Exception as e:
                 stack_info = traceback.format_exc()
-                logger.warn("fail to map sample transform [{}] "
-                            "with error: {} and stack:\n{}".format(
-                                f, e, str(stack_info)))
+                logger.warning("fail to map sample transform [{}] "
+                               "with error: {} and stack:\n{}".format(
+                                   f, e, str(stack_info)))
                 raise e
 
         return data
@@ -75,9 +75,9 @@ class BatchCompose(Compose):
                 data = f(data)
             except Exception as e:
                 stack_info = traceback.format_exc()
-                logger.warn("fail to map batch transform [{}] "
-                            "with error: {} and stack:\n{}".format(
-                                f, e, str(stack_info)))
+                logger.warning("fail to map batch transform [{}] "
+                               "with error: {} and stack:\n{}".format(
+                                   f, e, str(stack_info)))
                 raise e
 
         # remove keys which is not needed by model
@@ -185,8 +185,8 @@ class BaseDataLoader(object):
         if use_shared_memory:
             shm_size = _get_shared_memory_size_in_M()
             if shm_size is not None and shm_size < 1024.:
-                logger.warn("Shared memory size is less than 1G, "
-                            "disable shared_memory in DataLoader")
+                logger.warning("Shared memory size is less than 1G, "
+                               "disable shared_memory in DataLoader")
                 use_shared_memory = False
 
         self.dataloader = DataLoader(
diff --git a/ppdet/data/source/category.py b/ppdet/data/source/category.py
index ee5b71352..9e612c21b 100644
--- a/ppdet/data/source/category.py
+++ b/ppdet/data/source/category.py
@@ -77,7 +77,7 @@ def get_categories(metric_type, anno_file=None, arch=None):
 
     elif metric_type.lower() == 'oid':
         if anno_file and os.path.isfile(anno_file):
-            logger.warn("only default categories support for OID19")
+            logger.warning("only default categories support for OID19")
         return _oid19_category()
 
     elif metric_type.lower() == 'widerface':
diff --git a/ppdet/data/source/mot.py b/ppdet/data/source/mot.py
index 9e34241b4..d1becd2fb 100644
--- a/ppdet/data/source/mot.py
+++ b/ppdet/data/source/mot.py
@@ -175,12 +175,12 @@ class MOTDataSet(DetDataset):
             lbl_file = self.label_files[data_name][img_index - start_index]
 
             if not os.path.exists(img_file):
-                logger.warn('Illegal image file: {}, and it will be ignored'.
-                            format(img_file))
+                logger.warning('Illegal image file: {}, and it will be ignored'.
+                               format(img_file))
                 continue
             if not os.path.isfile(lbl_file):
-                logger.warn('Illegal label file: {}, and it will be ignored'.
-                            format(lbl_file))
+                logger.warning('Illegal label file: {}, and it will be ignored'.
+                               format(lbl_file))
                 continue
 
             labels = np.loadtxt(lbl_file, dtype=np.float32).reshape(-1, 6)
diff --git a/ppdet/data/source/voc.py b/ppdet/data/source/voc.py
index 56b746c14..1272626d7 100644
--- a/ppdet/data/source/voc.py
+++ b/ppdet/data/source/voc.py
@@ -89,13 +89,14 @@ class VOCDataSet(DetDataset):
                 img_file, xml_file = [os.path.join(image_dir, x) \
                         for x in line.strip().split()[:2]]
                 if not os.path.exists(img_file):
-                    logger.warn(
+                    logger.warning(
                         'Illegal image file: {}, and it will be ignored'.format(
                             img_file))
                     continue
                 if not os.path.isfile(xml_file):
-                    logger.warn('Illegal xml file: {}, and it will be ignored'.
-                                format(xml_file))
+                    logger.warning(
+                        'Illegal xml file: {}, and it will be ignored'.format(
+                            xml_file))
                     continue
                 tree = ET.parse(xml_file)
                 if tree.find('id') is None:
@@ -107,7 +108,7 @@ class VOCDataSet(DetDataset):
                 im_w = float(tree.find('size').find('width').text)
                 im_h = float(tree.find('size').find('height').text)
                 if im_w < 0 or im_h < 0:
-                    logger.warn(
+                    logger.warning(
                         'Illegal width: {} or height: {} in annotation, '
                         'and {} will be ignored'.format(im_w, im_h, xml_file))
                     continue
@@ -137,7 +138,7 @@ class VOCDataSet(DetDataset):
                         gt_score.append([1.])
                         difficult.append([_difficult])
                     else:
-                        logger.warn(
+                        logger.warning(
                             'Found an invalid bbox in annotations: xml_file: {}'
                             ', x1: {}, y1: {}, x2: {}, y2: {}.'.format(
                                 xml_file, x1, y1, x2, y2))
diff --git a/ppdet/data/source/widerface.py b/ppdet/data/source/widerface.py
index b1813b0e0..a17c2aaf8 100644
--- a/ppdet/data/source/widerface.py
+++ b/ppdet/data/source/widerface.py
@@ -139,9 +139,9 @@ class WIDERFaceDataSet(DetDataset):
                 h = float(split_str[3])
                 # Filter out wrong labels
                 if w < 0 or h < 0:
-                    logger.warn('Illegal box with w: {}, h: {} in '
-                                'img: {}, and it will be ignored'.format(
-                                    w, h, file_dict[num_class][0]))
+                    logger.warning('Illegal box with w: {}, h: {} in '
+                                   'img: {}, and it will be ignored'.format(
+                                       w, h, file_dict[num_class][0]))
                     continue
                 xmin = max(0, xmin)
                 ymin = max(0, ymin)
diff --git a/ppdet/data/transform/operators.py b/ppdet/data/transform/operators.py
index b729cb77a..583761204 100644
--- a/ppdet/data/transform/operators.py
+++ b/ppdet/data/transform/operators.py
@@ -131,7 +131,7 @@ class Decode(BaseOperator):
         if 'h' not in sample:
             sample['h'] = im.shape[0]
         elif sample['h'] != im.shape[0]:
-            logger.warn(
+            logger.warning(
                 "The actual image height: {} is not equal to the "
                 "height: {} in annotation, and update sample['h'] by actual "
                 "image height.".format(im.shape[0], sample['h']))
@@ -139,7 +139,7 @@ class Decode(BaseOperator):
         if 'w' not in sample:
             sample['w'] = im.shape[1]
         elif sample['w'] != im.shape[1]:
-            logger.warn(
+            logger.warning(
                 "The actual image width: {} is not equal to the "
                 "width: {} in annotation, and update sample['w'] by actual "
                 "image width.".format(im.shape[1], sample['w']))
@@ -726,7 +726,7 @@ class Resize(BaseOperator):
         # apply rbox
         if 'gt_rbox2poly' in sample:
             if np.array(sample['gt_rbox2poly']).shape[1] != 8:
-                logger.warn(
+                logger.warning(
                     "gt_rbox2poly's length shoule be 8, but actually is {}".
                     format(len(sample['gt_rbox2poly'])))
                 sample['gt_rbox2poly'] = self.apply_bbox(sample['gt_rbox2poly'],
diff --git a/ppdet/engine/callbacks.py b/ppdet/engine/callbacks.py
index 2249056b8..b623e1988 100644
--- a/ppdet/engine/callbacks.py
+++ b/ppdet/engine/callbacks.py
@@ -175,7 +175,7 @@ class Checkpointer(Callback):
                     else:
                         key = 'mask'
                     if key not in map_res:
-                        logger.warn("Evaluation results empty, this may be due to " \
+                        logger.warning("Evaluation results empty, this may be due to " \
                                     "training iterations being too few or not " \
                                     "loading the correct weights.")
                         return
diff --git a/ppdet/engine/tracker.py b/ppdet/engine/tracker.py
index 4f8a0808b..8e84f71a1 100644
--- a/ppdet/engine/tracker.py
+++ b/ppdet/engine/tracker.py
@@ -75,7 +75,7 @@ class Tracker(object):
         if self.cfg.metric == 'MOT':
             self._metrics = [MOTMetric(), ]
         else:
-            logger.warn("Metric not support for metric type {}".format(
+            logger.warning("Metric not support for metric type {}".format(
                 self.cfg.metric))
             self._metrics = []
 
diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index 4ce3d2635..e1a9672f4 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -246,7 +246,7 @@ class Trainer(object):
         elif self.cfg.metric == 'MOTDet':
             self._metrics = [JDEDetMetric(), ]
         else:
-            logger.warn("Metric not support for metric type {}".format(
+            logger.warning("Metric not support for metric type {}".format(
                 self.cfg.metric))
             self._metrics = []
 
diff --git a/static/ppdet/core/config/schema.py b/static/ppdet/core/config/schema.py
index 0d2b0dabf..2e41b5c34 100644
--- a/static/ppdet/core/config/schema.py
+++ b/static/ppdet/core/config/schema.py
@@ -185,12 +185,12 @@ def extract_schema(cls):
         annotations = argspec.annotations
         has_kwargs = argspec.varkw is not None
     else:
-        argspec = inspect.getargspec(ctor)
+        argspec = inspect.getfullargspec(ctor)
         # python 2 type hinting workaround, see pep-3107
        # however, since `typeguard` does not support python 2, type checking
         # is still python 3 only for now
         annotations = getattr(ctor, '__annotations__', {})
-        has_kwargs = argspec.keywords is not None
+        has_kwargs = argspec.varkw is not None
 
     names = [arg for arg in argspec.args if arg != 'self']
     defaults = argspec.defaults
diff --git a/static/ppdet/core/config/yaml_helpers.py b/static/ppdet/core/config/yaml_helpers.py
index 1545b6be7..181cfe6fc 100644
--- a/static/ppdet/core/config/yaml_helpers.py
+++ b/static/ppdet/core/config/yaml_helpers.py
@@ -52,7 +52,7 @@ def _make_python_representer(cls):
     if hasattr(inspect, 'getfullargspec'):
         argspec = inspect.getfullargspec(cls)
     else:
-        argspec = inspect.getargspec(cls.__init__)
+        argspec = inspect.getfullargspec(cls.__init__)
     argnames = [arg for arg in argspec.args if arg != 'self']
 
     def python_representer(dumper, obj):
diff --git a/static/ppdet/core/workspace.py b/static/ppdet/core/workspace.py
index 93e52eea2..2b41dadfe 100644
--- a/static/ppdet/core/workspace.py
+++ b/static/ppdet/core/workspace.py
@@ -166,8 +166,8 @@ def make_partial(cls):
     if not hasattr(op_module, op_name):
         import logging
         logger = logging.getLogger(__name__)
-        logger.warn('{} OP not found, maybe a newer version of paddle '
-                    'is required.'.format(cls.__op__))
+        logger.warning('{} OP not found, maybe a newer version of paddle '
+                       'is required.'.format(cls.__op__))
         return cls
 
     op = getattr(op_module, op_name)
diff --git a/static/ppdet/data/parallel_map.py b/static/ppdet/data/parallel_map.py
index 789fda1f2..17ea51b43 100644
--- a/static/ppdet/data/parallel_map.py
+++ b/static/ppdet/data/parallel_map.py
@@ -211,10 +211,10 @@ class ParallelMap(object):
             else:
                 errmsg = "consumer[{}] exit abnormally".format(w.ident)
 
-            logger.warn(errmsg)
+            logger.warning(errmsg)
 
         if abnormal_num > 0:
-            logger.warn("{} consumers have exited abnormally!!!" \
+            logger.warning("{} consumers have exited abnormally!!!" \
                 .format(abnormal_num))
         return abnormal_num == 0
 
@@ -239,7 +239,7 @@ class ParallelMap(object):
 
             if isinstance(sample, EndSignal):
                 self._consumer_endsig[sample.id] = sample
-                logger.warn("recv endsignal from outq with errmsg[{}]" \
+                logger.warning("recv endsignal from outq with errmsg[{}]" \
                     .format(sample.errmsg))
 
                 if len(self._consumer_endsig.keys()) < len(self._consumers):
@@ -268,7 +268,7 @@ class ParallelMap(object):
                 " for some consumers exited abnormally before!!!"
 
         if not self.drained():
-            logger.warn("reset before epoch[{}] finishes".format(
+            logger.warning("reset before epoch[{}] finishes".format(
                 self._epoch))
             self._produced = self._produced - self._consumed
         else:
diff --git a/static/ppdet/data/reader.py b/static/ppdet/data/reader.py
index d19653078..02dbf1091 100644
--- a/static/ppdet/data/reader.py
+++ b/static/ppdet/data/reader.py
@@ -46,8 +46,9 @@ class Compose(object):
                 data = f(data, ctx)
             except Exception as e:
                 stack_info = traceback.format_exc()
-                logger.warn("fail to map op [{}] with error: {} and stack:\n{}".
-                            format(f, e, str(stack_info)))
+                logger.warning(
+                    "fail to map op [{}] with error: {} and stack:\n{}".format(
+                        f, e, str(stack_info)))
                 raise e
 
         return data
diff --git a/static/ppdet/data/shared_queue/queue.py b/static/ppdet/data/shared_queue/queue.py
index 8f0ba8ab4..52a40d741 100644
--- a/static/ppdet/data/shared_queue/queue.py
+++ b/static/ppdet/data/shared_queue/queue.py
@@ -75,7 +75,7 @@ class SharedQueue(Queue):
             stack_info = traceback.format_exc()
             err_msg = 'failed to put a element to SharedQueue '\
                 'with stack info[%s]' % (stack_info)
-            logger.warn(err_msg)
+            logger.warning(err_msg)
 
             if buff is not None:
                 buff.free()
@@ -95,7 +95,7 @@ class SharedQueue(Queue):
             stack_info = traceback.format_exc()
             err_msg = 'failed to get element from SharedQueue '\
                 'with stack info[%s]' % (stack_info)
-            logger.warn(err_msg)
+            logger.warning(err_msg)
             raise e
         finally:
             if buff is not None:
diff --git a/static/ppdet/data/shared_queue/sharedmemory.py b/static/ppdet/data/shared_queue/sharedmemory.py
index 8b1d3ab40..1630c7b39 100644
--- a/static/ppdet/data/shared_queue/sharedmemory.py
+++ b/static/ppdet/data/shared_queue/sharedmemory.py
@@ -233,7 +233,7 @@ class PageAllocator(object):
         fname = fname + '.' + str(uuid.uuid4())[:6]
         with open(fname, 'wb') as f:
             f.write(pickle.dumps(info, -1))
-        logger.warn('dump alloc info to file[%s]' % (fname))
+        logger.warning('dump alloc info to file[%s]' % (fname))
 
     def _reset(self):
         alloc_page_pos = self._header_pages
@@ -460,7 +460,7 @@ class SharedMemoryMgr(object):
             if start is None:
                 time.sleep(0.1)
                 if ct % 100 == 0:
-                    logger.warn('not enough space for reason[%s]' % (errmsg))
+                    logger.warning('not enough space for reason[%s]' % (errmsg))
 
                 ct += 1
             else:
diff --git a/static/ppdet/data/source/coco.py b/static/ppdet/data/source/coco.py
index 4c25875b3..f5322cf9d 100644
--- a/static/ppdet/data/source/coco.py
+++ b/static/ppdet/data/source/coco.py
@@ -97,8 +97,8 @@ class COCODataSet(DataSet):
 
         if 'annotations' not in coco.dataset:
             self.load_image_only = True
-            logger.warn('Annotation file: {} does not contains ground truth '
-                        'and load image information only.'.format(anno_path))
+            logger.warning('Annotation file: {} does not contains ground truth '
+                           'and load image information only.'.format(anno_path))
 
         for img_id in img_ids:
             img_anno = coco.loadImgs([img_id])[0]
@@ -109,14 +109,14 @@ class COCODataSet(DataSet):
             im_path = os.path.join(image_dir,
                                    im_fname) if image_dir else im_fname
             if not os.path.exists(im_path):
-                logger.warn('Illegal image file: {}, and it will be '
-                            'ignored'.format(im_path))
+                logger.warning('Illegal image file: {}, and it will be '
+                               'ignored'.format(im_path))
                 continue
 
             if im_w < 0 or im_h < 0:
-                logger.warn('Illegal width: {} or height: {} in annotation, '
-                            'and im_id: {} will be ignored'.format(im_w, im_h,
-                                                                   img_id))
+                logger.warning('Illegal width: {} or height: {} in annotation, '
+                               'and im_id: {} will be ignored'.format(
+                                   im_w, im_h, img_id))
                 continue
 
             coco_rec = {
@@ -141,7 +141,7 @@ class COCODataSet(DataSet):
                     inst['clean_bbox'] = [x1, y1, x2, y2]
                     bboxes.append(inst)
                 else:
-                    logger.warn(
+                    logger.warning(
                         'Found an invalid bbox in annotations: im_id: {}, '
                         'x1: {}, y1: {}, x2: {}, y2: {}.'.format(
                             img_id, x1, y1, x2, y2))
diff --git a/static/ppdet/data/source/voc.py b/static/ppdet/data/source/voc.py
index 84c5990c3..8caae3878 100644
--- a/static/ppdet/data/source/voc.py
+++ b/static/ppdet/data/source/voc.py
@@ -111,13 +111,14 @@ class VOCDataSet(DataSet):
                 img_file, xml_file = [os.path.join(image_dir, x) \
                         for x in line.strip().split()[:2]]
                 if not os.path.exists(img_file):
-                    logger.warn(
+                    logger.warning(
                         'Illegal image file: {}, and it will be ignored'.format(
                             img_file))
                     continue
                 if not os.path.isfile(xml_file):
-                    logger.warn('Illegal xml file: {}, and it will be ignored'.
-                                format(xml_file))
+                    logger.warning(
+                        'Illegal xml file: {}, and it will be ignored'.format(
+                            xml_file))
                     continue
                 tree = ET.parse(xml_file)
                 if tree.find('id') is None:
@@ -129,7 +130,7 @@ class VOCDataSet(DataSet):
                 im_w = float(tree.find('size').find('width').text)
                 im_h = float(tree.find('size').find('height').text)
                 if im_w < 0 or im_h < 0:
-                    logger.warn(
+                    logger.warning(
                         'Illegal width: {} or height: {} in annotation, '
                         'and {} will be ignored'.format(im_w, im_h, xml_file))
                     continue
@@ -156,7 +157,7 @@ class VOCDataSet(DataSet):
                         is_crowd.append([0])
                         difficult.append([_difficult])
                     else:
-                        logger.warn(
+                        logger.warning(
                             'Found an invalid bbox in annotations: xml_file: {}'
                             ', x1: {}, y1: {}, x2: {}, y2: {}.'.format(
                                 xml_file, x1, y1, x2, y2))
diff --git a/static/ppdet/data/source/widerface.py b/static/ppdet/data/source/widerface.py
index 75da05234..ada33617f 100644
--- a/static/ppdet/data/source/widerface.py
+++ b/static/ppdet/data/source/widerface.py
@@ -133,9 +133,9 @@ class WIDERFaceDataSet(DataSet):
                 h = float(split_str[3])
                 # Filter out wrong labels
                 if w < 0 or h < 0:
-                    logger.warn('Illegal box with w: {}, h: {} in '
-                                'img: {}, and it will be ignored'.format(
-                                    w, h, file_dict[num_class][0]))
+                    logger.warning('Illegal box with w: {}, h: {} in '
+                                   'img: {}, and it will be ignored'.format(
+                                       w, h, file_dict[num_class][0]))
                     continue
                 xmin = max(0, xmin)
                 ymin = max(0, ymin)
diff --git a/static/ppdet/data/transform/autoaugment_utils.py b/static/ppdet/data/transform/autoaugment_utils.py
index 0cd8a04ee..78e3bb36b 100644
--- a/static/ppdet/data/transform/autoaugment_utils.py
+++ b/static/ppdet/data/transform/autoaugment_utils.py
@@ -1453,19 +1453,19 @@ def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
     # Check to see if prob is passed into function. This is used for operations
     # where we alter bboxes independently.
     # pytype:disable=wrong-arg-types
-    if 'prob' in inspect.getargspec(func)[0]:
+    if 'prob' in inspect.getfullargspec(func)[0]:
         args = tuple([prob] + list(args))
     # pytype:enable=wrong-arg-types
 
     # Add in replace arg if it is required for the function that is being called.
-    if 'replace' in inspect.getargspec(func)[0]:
+    if 'replace' in inspect.getfullargspec(func)[0]:
         # Make sure replace is the final argument
-        assert 'replace' == inspect.getargspec(func)[0][-1]
+        assert 'replace' == inspect.getfullargspec(func)[0][-1]
         args = tuple(list(args) + [replace_value])
 
     # Add bboxes as the second positional argument for the function if it does
     # not already exist.
-    if 'bboxes' not in inspect.getargspec(func)[0]:
+    if 'bboxes' not in inspect.getfullargspec(func)[0]:
         func = bbox_wrapper(func)
     return (func, prob, args)
 
@@ -1473,11 +1473,11 @@
 def _apply_func_with_prob(func, image, args, prob, bboxes):
     """Apply `func` to image w/ `args` as input with probability `prob`."""
     assert isinstance(args, tuple)
-    assert 'bboxes' == inspect.getargspec(func)[0][1]
+    assert 'bboxes' == inspect.getfullargspec(func)[0][1]
 
     # If prob is a function argument, then this randomness is being handled
     # inside the function, so make sure it is always called.
-    if 'prob' in inspect.getargspec(func)[0]:
+    if 'prob' in inspect.getfullargspec(func)[0]:
         prob = 1.0
 
     # Apply the function with probability `prob`.
diff --git a/static/ppdet/data/transform/operators.py b/static/ppdet/data/transform/operators.py
index d51e68d17..62a9efc38 100644
--- a/static/ppdet/data/transform/operators.py
+++ b/static/ppdet/data/transform/operators.py
@@ -125,7 +125,7 @@ class DecodeImage(BaseOperator):
         if 'h' not in sample:
             sample['h'] = im.shape[0]
         elif sample['h'] != im.shape[0]:
-            logger.warn(
+            logger.warning(
                 "The actual image height: {} is not equal to the "
                 "height: {} in annotation, and update sample['h'] by actual "
                 "image height.".format(im.shape[0], sample['h']))
@@ -133,7 +133,7 @@ class DecodeImage(BaseOperator):
         if 'w' not in sample:
             sample['w'] = im.shape[1]
         elif sample['w'] != im.shape[1]:
-            logger.warn(
+            logger.warning(
                 "The actual image width: {} is not equal to the "
                 "width: {} in annotation, and update sample['w'] by actual "
                 "image width.".format(im.shape[1], sample['w']))
diff --git a/static/ppdet/modeling/losses/yolo_loss.py b/static/ppdet/modeling/losses/yolo_loss.py
index 553e63322..98f6ec37d 100644
--- a/static/ppdet/modeling/losses/yolo_loss.py
+++ b/static/ppdet/modeling/losses/yolo_loss.py
@@ -67,7 +67,7 @@ class YOLOv3Loss(object):
         self.match_score = match_score
 
         if batch_size != -1:
-            logger.warn(
+            logger.warning(
                 "config YOLOv3Loss.batch_size is deprecated, "
                 "training batch size should be set by TrainReader.batch_size")
 
diff --git a/static/ppdet/utils/download.py b/static/ppdet/utils/download.py
index 2c53406e8..d7e367a05 100644
--- a/static/ppdet/utils/download.py
+++ b/static/ppdet/utils/download.py
@@ -321,7 +321,7 @@ def _download(url, path, md5sum=None):
             shutil.move(tmp_fullname, fullname)
             return fullname
         else:
-            logger.warn(
+            logger.warning(
                 "Download from url imcomplete, try downloading again...")
             os.remove(tmp_fullname)
             continue
diff --git a/static/slim/sensitive/sensitive.py b/static/slim/sensitive/sensitive.py
index 022c7b454..a43506ebe 100644
--- a/static/slim/sensitive/sensitive.py
+++ b/static/slim/sensitive/sensitive.py
@@ -111,7 +111,7 @@ def main():
     if cfg.weights:
         checkpoint.load_params(exe, eval_prog, cfg.weights)
     else:
-        logger.warn("Please set cfg.weights to load trained model.")
+        logger.warning("Please set cfg.weights to load trained model.")
 
     # whether output bbox is normalized in model output layer
     is_bbox_normalized = False
diff --git a/static/tools/anchor_cluster.py b/static/tools/anchor_cluster.py
index 67ad2d9cd..e5fb7147b 100644
--- a/static/tools/anchor_cluster.py
+++ b/static/tools/anchor_cluster.py
@@ -265,9 +265,9 @@ class YOLOv5AnchorCluster(BaseAnchorCluster):
         wh0 = self.whs
         i = (wh0 < 3.0).any(1).sum()
         if i:
-            logger.warn('Extremely small objects found. %d of %d'
-                        'labels are < 3 pixels in width or height' %
-                        (i, len(wh0)))
+            logger.warning('Extremely small objects found. %d of %d'
+                           'labels are < 3 pixels in width or height' %
+                           (i, len(wh0)))
         wh = wh0[(wh0 >= 2.0).any(1)]
 
         logger.info('Running kmeans for %g anchors on %g points...' %
diff --git a/tools/anchor_cluster.py b/tools/anchor_cluster.py
index 0b339bb36..87c7e9370 100644
--- a/tools/anchor_cluster.py
+++ b/tools/anchor_cluster.py
@@ -252,9 +252,9 @@ class YOLOv5AnchorCluster(BaseAnchorCluster):
         wh0 = self.whs
         i = (wh0 < 3.0).any(1).sum()
         if i:
-            logger.warn('Extremely small objects found. %d of %d'
-                        'labels are < 3 pixels in width or height' %
-                        (i, len(wh0)))
+            logger.warning('Extremely small objects found. %d of %d'
+                           'labels are < 3 pixels in width or height' %
+                           (i, len(wh0)))
         wh = wh0[(wh0 >= 2.0).any(1)]
 
         logger.info('Running kmeans for %g anchors on %g points...' %
-- 
GitLab
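
Below is a minimal, self-contained sketch (not part of the patch above) of the two standard-library migrations this commit applies throughout the repository: the deprecated logging alias Logger.warn is replaced by Logger.warning, and inspect.getargspec (removed in Python 3.11) is replaced by inspect.getfullargspec, whose result exposes a varkw field instead of keywords. The function and variable names in the sketch are illustrative only.

    # sketch.py -- illustrative only; uses nothing beyond the standard library
    import inspect
    import logging

    logger = logging.getLogger(__name__)


    def describe(func):
        # Old: argspec = inspect.getargspec(func)   (deprecated, removed in Python 3.11)
        argspec = inspect.getfullargspec(func)
        # Old: has_kwargs = argspec.keywords is not None
        has_kwargs = argspec.varkw is not None
        if has_kwargs:
            # Old: logger.warn(...)   (deprecated alias of warning)
            logger.warning('%s accepts **kwargs', func.__name__)
        return argspec.args


    if __name__ == '__main__':
        # Prints ['a', 'b'] and logs a warning because the lambda takes **kw.
        print(describe(lambda a, b=1, **kw: None))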