From cb1deae4fc146c38b30ee2a5d00f2ff060401ca0 Mon Sep 17 00:00:00 2001 From: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com> Date: Sat, 6 Nov 2021 13:02:23 +0800 Subject: [PATCH] add reference of some code and remove some code (#4468) --- docs/tutorials/PrepareDataSet.md | 4 +- ppdet/data/transform/op_helper.py | 59 ----------- ppdet/data/transform/operators.py | 9 +- ppdet/ext_op/rbox_iou_op.cc | 27 +++--- ppdet/ext_op/rbox_iou_op.cu | 27 +++--- ppdet/ext_op/rbox_iou_op.h | 27 +++--- ppdet/modeling/heads/s2anet_head.py | 9 +- ppdet/modeling/necks/yolo_fpn.py | 2 +- static/configs/yolov4/README.md | 4 +- static/docs/tutorials/Custom_DataSet.md | 4 +- static/tools/anchor_cluster.py | 122 +---------------------- tools/anchor_cluster.py | 124 +----------------------- 12 files changed, 73 insertions(+), 345 deletions(-) diff --git a/docs/tutorials/PrepareDataSet.md b/docs/tutorials/PrepareDataSet.md index 5dae1b23a..ce829db69 100644 --- a/docs/tutorials/PrepareDataSet.md +++ b/docs/tutorials/PrepareDataSet.md @@ -436,7 +436,5 @@ python tools/anchor_cluster.py -c configs/ppyolo/ppyolo.yml -n 9 -s 608 -m v2 -i | -c/--config | 模型的配置文件 | 无默认值 | 必须指定 | | -n/--n | 聚类的簇数 | 9 | Anchor的数目 | | -s/--size | 图片的输入尺寸 | None | 若指定,则使用指定的尺寸,如果不指定, 则尝试从配置文件中读取图片尺寸 | -| -m/--method | 使用的Anchor聚类方法 | v2 | 目前只支持yolov2/v5的聚类算法 | +| -m/--method | 使用的Anchor聚类方法 | v2 | 目前只支持yolov2的聚类算法 | | -i/--iters | kmeans聚类算法的迭代次数 | 1000 | kmeans算法收敛或者达到迭代次数后终止 | -| -gi/--gen_iters | 遗传算法的迭代次数 | 1000 | 该参数只用于yolov5的Anchor聚类算法 | -| -t/--thresh| Anchor尺度的阈值 | 0.25 | 该参数只用于yolov5的Anchor聚类算法 | diff --git a/ppdet/data/transform/op_helper.py b/ppdet/data/transform/op_helper.py index e2167831c..6c400306d 100644 --- a/ppdet/data/transform/op_helper.py +++ b/ppdet/data/transform/op_helper.py @@ -464,65 +464,6 @@ def gaussian2D(shape, sigma_x=1, sigma_y=1): return h -def transform_bbox(sample, - M, - w, - h, - area_thr=0.25, - wh_thr=2, - ar_thr=20, - perspective=False): - """ - transfrom bbox according to tranformation matrix M, - refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py - """ - bbox = sample['gt_bbox'] - label = sample['gt_class'] - # rotate bbox - n = len(bbox) - xy = np.ones((n * 4, 3), dtype=np.float32) - xy[:, :2] = bbox[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) - # xy = xy @ M.T - xy = np.matmul(xy, M.T) - if perspective: - xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) - else: - xy = xy[:, :2].reshape(n, 8) - # get new bboxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - bbox = np.concatenate( - (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - # clip boxes - mask = filter_bbox(bbox, w, h, area_thr) - sample['gt_bbox'] = bbox[mask] - sample['gt_class'] = sample['gt_class'][mask] - if 'is_crowd' in sample: - sample['is_crowd'] = sample['is_crowd'][mask] - if 'difficult' in sample: - sample['difficult'] = sample['difficult'][mask] - return sample - - -def filter_bbox(bbox, w, h, area_thr=0.25, wh_thr=2, ar_thr=20): - """ - filter bbox, refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py - """ - # clip boxes - area1 = (bbox[:, 2:4] - bbox[:, 0:2]).prod(1) - bbox[:, [0, 2]] = bbox[:, [0, 2]].clip(0, w) - bbox[:, [1, 3]] = bbox[:, [1, 3]].clip(0, h) - # compute - area2 = (bbox[:, 2:4] - bbox[:, 0:2]).prod(1) - area_ratio = area2 / (area1 + 1e-16) - wh = bbox[:, 2:4] - bbox[:, 0:2] - ar_ratio = np.maximum(wh[:, 1] / (wh[:, 0] + 1e-16), - wh[:, 0] / (wh[:, 1] + 1e-16)) - mask = (area_ratio > area_thr) & ( - (wh > wh_thr).all(1)) & (ar_ratio < 
ar_thr) - return mask - - def draw_umich_gaussian(heatmap, center, radius, k=1): """ draw_umich_gaussian, refer to https://github.com/xingyizhou/CenterNet/blob/master/src/lib/utils/image.py#L126 diff --git a/ppdet/data/transform/operators.py b/ppdet/data/transform/operators.py index a5ea6fb8f..5cc14a44d 100644 --- a/ppdet/data/transform/operators.py +++ b/ppdet/data/transform/operators.py @@ -48,7 +48,7 @@ from .op_helper import (satisfy_sample_constraint, filter_and_process, generate_sample_bbox, clip_bbox, data_anchor_sampling, satisfy_sample_constraint_coverage, crop_image_sampling, generate_sample_bbox_square, bbox_area_sampling, - is_poly, transform_bbox, get_border) + is_poly, get_border) from ppdet.utils.logger import setup_logger from ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform @@ -2476,6 +2476,9 @@ class RandomSelect(BaseOperator): """ Randomly choose a transformation between transforms1 and transforms2, and the probability of choosing transforms1 is p. + + The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py + """ def __init__(self, transforms1, transforms2, p=0.5): @@ -2833,6 +2836,10 @@ class WarpAffine(BaseOperator): shift=0.1): """WarpAffine Warp affine the image + + The code is based on https://github.com/xingyizhou/CenterNet/blob/master/src/lib/datasets/sample/ctdet.py + + """ super(WarpAffine, self).__init__() self.keep_res = keep_res diff --git a/ppdet/ext_op/rbox_iou_op.cc b/ppdet/ext_op/rbox_iou_op.cc index 83f6559f5..ce27e8e91 100644 --- a/ppdet/ext_op/rbox_iou_op.cc +++ b/ppdet/ext_op/rbox_iou_op.cc @@ -1,15 +1,18 @@ -/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated #include "rbox_iou_op.h" #include "paddle/extension.h" diff --git a/ppdet/ext_op/rbox_iou_op.cu b/ppdet/ext_op/rbox_iou_op.cu index 8a9778dc7..72cac2389 100644 --- a/ppdet/ext_op/rbox_iou_op.cu +++ b/ppdet/ext_op/rbox_iou_op.cu @@ -1,15 +1,18 @@ -/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated #include "rbox_iou_op.h" #include "paddle/extension.h" diff --git a/ppdet/ext_op/rbox_iou_op.h b/ppdet/ext_op/rbox_iou_op.h index ca574a76f..77fb62e39 100644 --- a/ppdet/ext_op/rbox_iou_op.h +++ b/ppdet/ext_op/rbox_iou_op.h @@ -1,15 +1,18 @@ -/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated #pragma once diff --git a/ppdet/modeling/heads/s2anet_head.py b/ppdet/modeling/heads/s2anet_head.py index 2db15d1de..cb62002d5 100644 --- a/ppdet/modeling/heads/s2anet_head.py +++ b/ppdet/modeling/heads/s2anet_head.py @@ -11,6 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# +# The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py + import paddle from paddle import ParamAttr import paddle.nn as nn @@ -625,7 +628,8 @@ class S2ANetHead(nn.Layer): fam_bbox_total = self.gwd_loss(fam_bbox_decode, bbox_gt_bboxes_level) fam_bbox_total = fam_bbox_total * feat_bbox_weights - fam_bbox_total = paddle.sum(fam_bbox_total) / num_total_samples + fam_bbox_total = paddle.sum( + fam_bbox_total) / num_total_samples fam_bbox_losses.append(fam_bbox_total) st_idx += feat_anchor_num @@ -739,7 +743,8 @@ class S2ANetHead(nn.Layer): odm_bbox_total = self.gwd_loss(odm_bbox_decode, bbox_gt_bboxes_level) odm_bbox_total = odm_bbox_total * feat_bbox_weights - odm_bbox_total = paddle.sum(odm_bbox_total) / num_total_samples + odm_bbox_total = paddle.sum( + odm_bbox_total) / num_total_samples odm_bbox_losses.append(odm_bbox_total) st_idx += feat_anchor_num diff --git a/ppdet/modeling/necks/yolo_fpn.py b/ppdet/modeling/necks/yolo_fpn.py index b68a72657..4af0348d2 100644 --- a/ppdet/modeling/necks/yolo_fpn.py +++ b/ppdet/modeling/necks/yolo_fpn.py @@ -180,7 +180,7 @@ class CoordConv(nn.Layer): name='', data_format='NCHW'): """ - CoordConv layer + CoordConv layer, see https://arxiv.org/abs/1807.03247 Args: ch_in (int): input channel diff --git a/static/configs/yolov4/README.md b/static/configs/yolov4/README.md index d99ce88dd..55e8a050d 100644 --- a/static/configs/yolov4/README.md +++ b/static/configs/yolov4/README.md @@ -31,10 +31,8 @@ python tools/anchor_cluster.py -c ${config} -m ${method} -s ${size} | -c/--config | 模型的配置文件 | 无默认值 | 必须指定 | | -n/--n | 聚类的簇数 | 9 | Anchor的数目 | | -s/--size | 图片的输入尺寸 | None | 若指定,则使用指定的尺寸,如果不指定, 则尝试从配置文件中读取图片尺寸 | -| -m/--method | 使用的Anchor聚类方法 | v2 | 目前只支持yolov2/v5的聚类算法 | +| -m/--method | 使用的Anchor聚类方法 | v2 | 目前只支持yolov2的聚类算法 | | -i/--iters | kmeans聚类算法的迭代次数 | 1000 | kmeans算法收敛或者达到迭代次数后终止 | -| -gi/--gen_iters | 遗传算法的迭代次数 | 1000 | 该参数只用于yolov5的Anchor聚类算法 | -| -t/--thresh| Anchor尺度的阈值 | 0.25 | 该参数只用于yolov5的Anchor聚类算法 | ## 模型库 下表中展示了当前支持的网络结构。 diff --git a/static/docs/tutorials/Custom_DataSet.md b/static/docs/tutorials/Custom_DataSet.md index a14cca69f..89ba932c0 100644 --- a/static/docs/tutorials/Custom_DataSet.md +++ b/static/docs/tutorials/Custom_DataSet.md @@ -139,10 +139,8 @@ python tools/anchor_cluster.py -c configs/ppyolo/ppyolo.yml -n 9 -s 608 -m v2 -i | -c/--config | 模型的配置文件 | 无默认值 | 必须指定 | | -n/--n | 聚类的簇数 | 9 | Anchor的数目 | | -s/--size | 图片的输入尺寸 | None | 若指定,则使用指定的尺寸,如果不指定, 则尝试从配置文件中读取图片尺寸 | -| -m/--method | 使用的Anchor聚类方法 | v2 | 目前只支持yolov2/v5的聚类算法 | +| -m/--method | 使用的Anchor聚类方法 | v2 | 目前只支持yolov2的聚类算法 | | -i/--iters | kmeans聚类算法的迭代次数 | 1000 | kmeans算法收敛或者达到迭代次数后终止 | -| -gi/--gen_iters | 遗传算法的迭代次数 | 1000 | 该参数只用于yolov5的Anchor聚类算法 | -| -t/--thresh| Anchor尺度的阈值 | 0.25 | 该参数只用于yolov5的Anchor聚类算法 | ## 4.修改参数配置 diff --git a/static/tools/anchor_cluster.py b/static/tools/anchor_cluster.py index 76b707a88..425e3003d 100644 --- a/static/tools/anchor_cluster.py +++ b/static/tools/anchor_cluster.py @@ -126,8 +126,7 @@ class YOLOv2AnchorCluster(BaseAnchorCluster): """ YOLOv2 Anchor Cluster - Reference: - https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py + The code is based on https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py Args: n (int): number of clusters @@ -196,103 +195,6 @@ class YOLOv2AnchorCluster(BaseAnchorCluster): return centers -class YOLOv5AnchorCluster(BaseAnchorCluster): - def __init__(self, - n, - dataset, - size, - cache_path, - cache, - iters=300, 
- gen_iters=1000, - thresh=0.25, - verbose=True): - super(YOLOv5AnchorCluster, self).__init__( - n, cache_path, cache, verbose=verbose) - """ - YOLOv5 Anchor Cluster - - Reference: - https://github.com/ultralytics/yolov5/blob/master/utils/general.py - - Args: - n (int): number of clusters - dataset (DataSet): DataSet instance, VOC or COCO - size (list): [w, h] - cache_path (str): cache directory path - cache (bool): whether using cache - iters (int): iters of kmeans algorithm - gen_iters (int): iters of genetic algorithm - threshold (float): anchor scale threshold - verbose (bool): whether print results - """ - self.dataset = dataset - self.size = size - self.iters = iters - self.gen_iters = gen_iters - self.thresh = thresh - - def print_result(self, centers): - whs = self.whs - centers = centers[np.argsort(centers.prod(1))] - x, best = self.metric(whs, centers) - bpr, aat = ( - best > self.thresh).mean(), (x > self.thresh).mean() * self.n - logger.info( - 'thresh=%.2f: %.4f best possible recall, %.2f anchors past thr' % - (self.thresh, bpr, aat)) - logger.info( - 'n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thresh=%.3f-mean: ' - % (self.n, self.size, x.mean(), best.mean(), - x[x > self.thresh].mean())) - logger.info('%d anchor cluster result: [w, h]' % self.n) - for w, h in centers: - logger.info('[%d, %d]' % (round(w), round(h))) - - def metric(self, whs, centers): - r = whs[:, None] / centers[None] - x = np.minimum(r, 1. / r).min(2) - return x, x.max(1) - - def fitness(self, whs, centers): - _, best = self.metric(whs, centers) - return (best * (best > self.thresh)).mean() - - def calc_anchors(self): - self.whs = self.whs * self.shapes / self.shapes.max( - 1, keepdims=True) * np.array([self.size]) - wh0 = self.whs - i = (wh0 < 3.0).any(1).sum() - if i: - logger.warning('Extremely small objects found. %d of %d' - 'labels are < 3 pixels in width or height' % - (i, len(wh0))) - - wh = wh0[(wh0 >= 2.0).any(1)] - logger.info('Running kmeans for %g anchors on %g points...' 
% - (self.n, len(wh))) - s = wh.std(0) - centers, dist = kmeans(wh / s, self.n, iter=self.iters) - centers *= s - - f, sh, mp, s = self.fitness(wh, centers), centers.shape, 0.9, 0.1 - pbar = tqdm( - range(self.gen_iters), - desc='Evolving anchors with Genetic Algorithm') - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): - v = ((np.random.random(sh) < mp) * np.random.random() * - np.random.randn(*sh) * s + 1).clip(0.3, 3.0) - new_centers = (centers.copy() * v).clip(min=2.0) - new_f = self.fitness(wh, new_centers) - if new_f > f: - f, centers = new_f, new_centers.copy() - pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f - - return centers - - def main(): parser = ArgsParser() parser.add_argument( @@ -303,18 +205,6 @@ def main(): default=1000, type=int, help='num of iterations for kmeans') - parser.add_argument( - '--gen_iters', - '-gi', - default=1000, - type=int, - help='num of iterations for genetic algorithm') - parser.add_argument( - '--thresh', - '-t', - default=0.25, - type=float, - help='anchor scale threshold') parser.add_argument( '--verbose', '-v', default=True, type=bool, help='whether print result') parser.add_argument( @@ -328,7 +218,7 @@ def main(): '-m', default='v2', type=str, - help='cluster method, [v2, v5] are supported now') + help='cluster method, v2 is only supported now') parser.add_argument( '--cache_path', default='cache', type=str, help='cache path') parser.add_argument( @@ -353,18 +243,14 @@ def main(): size = int(FLAGS.size) size = [size, size] - elif 'image_shape' in cfg['TrainReader']['inputs_def']: - size = cfg['TrainReader']['inputs_def']['image_shape'][1:] + elif 'image_shape' in cfg['TestReader']['inputs_def']: + size = cfg['TestReader']['inputs_def']['image_shape'][1:] else: raise ValueError('size is not specified') if FLAGS.method == 'v2': cluster = YOLOv2AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path, FLAGS.cache, FLAGS.iters, FLAGS.verbose) - elif FLAGS.method == 'v5': - cluster = YOLOv5AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path, - FLAGS.cache, FLAGS.iters, FLAGS.gen_iters, - FLAGS.thresh, FLAGS.verbose) else: raise ValueError('cluster method: %s is not supported' % FLAGS.method) diff --git a/tools/anchor_cluster.py b/tools/anchor_cluster.py index 688c37fad..ca4f7c094 100644 --- a/tools/anchor_cluster.py +++ b/tools/anchor_cluster.py @@ -111,8 +111,7 @@ class YOLOv2AnchorCluster(BaseAnchorCluster): """ YOLOv2 Anchor Cluster - Reference: - https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py + The code is based on https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py Args: n (int): number of clusters @@ -182,103 +181,6 @@ class YOLOv2AnchorCluster(BaseAnchorCluster): return centers -class YOLOv5AnchorCluster(BaseAnchorCluster): - def __init__(self, - n, - dataset, - size, - cache_path, - cache, - iters=300, - gen_iters=1000, - thresh=0.25, - verbose=True): - super(YOLOv5AnchorCluster, self).__init__( - n, cache_path, cache, verbose=verbose) - """ - YOLOv5 Anchor Cluster - - Reference: - https://github.com/ultralytics/yolov5/blob/master/utils/general.py - - Args: - n (int): number of clusters - dataset (DataSet): DataSet instance, VOC or COCO - size (list): [w, h] - cache_path (str): cache directory path - cache (bool): whether using cache - iters (int): iters of kmeans algorithm - gen_iters (int): iters of genetic algorithm - threshold (float): anchor scale threshold - verbose (bool): whether print results - """ - self.dataset = dataset - self.size = size - self.iters 
= iters - self.gen_iters = gen_iters - self.thresh = thresh - - def print_result(self, centers): - whs = self.whs - centers = centers[np.argsort(centers.prod(1))] - x, best = self.metric(whs, centers) - bpr, aat = ( - best > self.thresh).mean(), (x > self.thresh).mean() * self.n - logger.info( - 'thresh=%.2f: %.4f best possible recall, %.2f anchors past thr' % - (self.thresh, bpr, aat)) - logger.info( - 'n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thresh=%.3f-mean: ' - % (self.n, self.size, x.mean(), best.mean(), - x[x > self.thresh].mean())) - logger.info('%d anchor cluster result: [w, h]' % self.n) - for w, h in centers: - logger.info('[%d, %d]' % (round(w), round(h))) - - def metric(self, whs, centers): - r = whs[:, None] / centers[None] - x = np.minimum(r, 1. / r).min(2) - return x, x.max(1) - - def fitness(self, whs, centers): - _, best = self.metric(whs, centers) - return (best * (best > self.thresh)).mean() - - def calc_anchors(self): - self.whs = self.whs * self.shapes / self.shapes.max( - 1, keepdims=True) * np.array([self.size]) - wh0 = self.whs - i = (wh0 < 3.0).any(1).sum() - if i: - logger.warning('Extremely small objects found. %d of %d' - 'labels are < 3 pixels in width or height' % - (i, len(wh0))) - - wh = wh0[(wh0 >= 2.0).any(1)] - logger.info('Running kmeans for %g anchors on %g points...' % - (self.n, len(wh))) - s = wh.std(0) - centers, dist = kmeans(wh / s, self.n, iter=self.iters) - centers *= s - - f, sh, mp, s = self.fitness(wh, centers), centers.shape, 0.9, 0.1 - pbar = tqdm( - range(self.gen_iters), - desc='Evolving anchors with Genetic Algorithm') - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): - v = ((np.random.random(sh) < mp) * np.random.random() * - np.random.randn(*sh) * s + 1).clip(0.3, 3.0) - new_centers = (centers.copy() * v).clip(min=2.0) - new_f = self.fitness(wh, new_centers) - if new_f > f: - f, centers = new_f, new_centers.copy() - pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f - - return centers - - def main(): parser = ArgsParser() parser.add_argument( @@ -289,18 +191,6 @@ def main(): default=1000, type=int, help='num of iterations for kmeans') - parser.add_argument( - '--gen_iters', - '-gi', - default=1000, - type=int, - help='num of iterations for genetic algorithm') - parser.add_argument( - '--thresh', - '-t', - default=0.25, - type=float, - help='anchor scale threshold') parser.add_argument( '--verbose', '-v', default=True, type=bool, help='whether print result') parser.add_argument( @@ -314,7 +204,7 @@ def main(): '-m', default='v2', type=str, - help='cluster method, [v2, v5] are supported now') + help='cluster method, v2 is only supported now') parser.add_argument( '--cache_path', default='cache', type=str, help='cache path') parser.add_argument( @@ -338,19 +228,15 @@ def main(): else: size = int(FLAGS.size) size = [size, size] - elif 'inputs_def' in cfg['TrainReader'] and 'image_shape' in cfg[ - 'TrainReader']['inputs_def']: - size = cfg['TrainReader']['inputs_def']['image_shape'][1:] + elif 'inputs_def' in cfg['TestReader'] and 'image_shape' in cfg[ + 'TestReader']['inputs_def']: + size = cfg['TestReader']['inputs_def']['image_shape'][1:] else: raise ValueError('size is not specified') if FLAGS.method == 'v2': cluster = YOLOv2AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path, FLAGS.cache, FLAGS.iters, FLAGS.verbose) - elif FLAGS.method == 'v5': - cluster = YOLOv5AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path, - FLAGS.cache, FLAGS.iters, FLAGS.gen_iters, - FLAGS.thresh, 
FLAGS.verbose) else: raise ValueError('cluster method: %s is not supported' % FLAGS.method) -- GitLab
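
---

For context on what this patch keeps: the remaining `v2` method in `tools/anchor_cluster.py` follows the k-means-over-box-sizes idea of the referenced darknet `gen_anchors.py` script, clustering dataset `[w, h]` pairs with an IoU-based distance. The sketch below is only an illustration of that idea under assumptions — it is not the tool's actual implementation, and the function names, defaults, and toy data are made up for demonstration.

```python
# Minimal sketch of YOLOv2-style anchor clustering (k-means with 1 - IoU as
# the distance). Illustrative only; names and defaults are assumptions.
import numpy as np

def iou_wh(whs, centers):
    """IoU between boxes and centers when both are anchored at the origin.

    whs: (N, 2) array of [w, h]; centers: (K, 2) array of [w, h].
    Returns an (N, K) IoU matrix.
    """
    inter = np.minimum(whs[:, None, 0], centers[None, :, 0]) * \
            np.minimum(whs[:, None, 1], centers[None, :, 1])
    union = whs[:, 0:1] * whs[:, 1:2] \
        + (centers[:, 0] * centers[:, 1])[None, :] - inter
    return inter / (union + 1e-16)

def kmeans_anchors(whs, n=9, iters=1000, seed=0):
    """Cluster [w, h] pairs into n anchors using IoU-based k-means."""
    rng = np.random.default_rng(seed)
    centers = whs[rng.choice(len(whs), n, replace=False)]
    assign = np.zeros(len(whs), dtype=np.int64)
    for _ in range(iters):
        # assign each box to the center with the highest IoU
        new_assign = np.argmax(iou_wh(whs, centers), axis=1)
        if (new_assign == assign).all():
            break  # assignments stable: converged
        assign = new_assign
        for k in range(n):
            if (assign == k).any():
                # recompute each cluster center as the mean [w, h]
                centers[k] = whs[assign == k].mean(axis=0)
    # sort anchors by area, as the tool's printed result does
    return centers[np.argsort(centers.prod(1))]

if __name__ == '__main__':
    # Toy box sizes standing in for a dataset scaled to a 608x608 input.
    whs = np.abs(np.random.randn(500, 2)) * 60 + 10
    print(kmeans_anchors(whs, n=9))
```

The real tool additionally caches dataset statistics and, as the patch shows, falls back to the `TestReader` `image_shape` from the config when `-s/--size` is not given.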