diff --git a/ppdet/data/source/dataset.py b/ppdet/data/source/dataset.py
index 631627b3c241687b7d0b0e1737e396211afc4e60..d8361eea9c36d5d02703c7371232fe3cf4db2fe5 100644
--- a/ppdet/data/source/dataset.py
+++ b/ppdet/data/source/dataset.py
@@ -208,6 +208,10 @@ class ImageFolder(DetDataset):
         self.image_dir = images
         self.roidbs = self._load_images()
 
+    def get_label_list(self):
+        # Only VOC dataset needs label list in ImageFolder
+        return self.anno_path
+
 
 @register
 class CommonDataset(object):
diff --git a/ppdet/engine/trainer.py b/ppdet/engine/trainer.py
index dc63f83eccfcd89ce2280bd2bc25f487800950bd..4983a1eb0394f3bbd6aacf7936ed6aca1a7d4f54 100644
--- a/ppdet/engine/trainer.py
+++ b/ppdet/engine/trainer.py
@@ -287,12 +287,18 @@ class Trainer(object):
                     save_prediction_only=save_prediction_only)
             ]
         elif self.cfg.metric == 'VOC':
+            output_eval = self.cfg['output_eval'] \
+                if 'output_eval' in self.cfg else None
+            save_prediction_only = self.cfg.get('save_prediction_only', False)
+
             self._metrics = [
                 VOCMetric(
                     label_list=self.dataset.get_label_list(),
                     class_num=self.cfg.num_classes,
                     map_type=self.cfg.map_type,
-                    classwise=classwise)
+                    classwise=classwise,
+                    output_eval=output_eval,
+                    save_prediction_only=save_prediction_only)
             ]
         elif self.cfg.metric == 'WiderFace':
             multi_scale = self.cfg.multi_scale_eval if 'multi_scale_eval' in self.cfg else True
diff --git a/ppdet/metrics/metrics.py b/ppdet/metrics/metrics.py
index 270071b49499bfe8012b5640179b0eaf80310745..b20a569a0434ed7bac7f461399cb280f08ff888a 100644
--- a/ppdet/metrics/metrics.py
+++ b/ppdet/metrics/metrics.py
@@ -225,7 +225,9 @@ class VOCMetric(Metric):
                  map_type='11point',
                  is_bbox_normalized=False,
                  evaluate_difficult=False,
-                 classwise=False):
+                 classwise=False,
+                 output_eval=None,
+                 save_prediction_only=False):
         assert os.path.isfile(label_list), \
             "label_list {} not a file".format(label_list)
         self.clsid2catid, self.catid2name = get_categories('VOC', label_list)
@@ -233,6 +235,8 @@ class VOCMetric(Metric):
         self.overlap_thresh = overlap_thresh
         self.map_type = map_type
         self.evaluate_difficult = evaluate_difficult
+        self.output_eval = output_eval
+        self.save_prediction_only = save_prediction_only
         self.detection_map = DetectionMAP(
             class_num=class_num,
             overlap_thresh=overlap_thresh,
@@ -245,6 +249,7 @@ class VOCMetric(Metric):
         self.reset()
 
     def reset(self):
+        self.results = {'bbox': [], 'score': [], 'label': []}
         self.detection_map.reset()
 
     def update(self, inputs, outputs):
@@ -256,8 +261,15 @@ class VOCMetric(Metric):
         bbox_lengths = outputs['bbox_num'].numpy() if isinstance(
             outputs['bbox_num'], paddle.Tensor) else outputs['bbox_num']
 
+        self.results['bbox'].append(bboxes.tolist())
+        self.results['score'].append(scores.tolist())
+        self.results['label'].append(labels.tolist())
+
         if bboxes.shape == (1, 1) or bboxes is None:
             return
+        if self.save_prediction_only:
+            return
+
         gt_boxes = inputs['gt_bbox']
         gt_labels = inputs['gt_class']
         difficults = inputs['difficult'] if not self.evaluate_difficult \
@@ -294,6 +306,15 @@ class VOCMetric(Metric):
             bbox_idx += bbox_num
 
     def accumulate(self):
+        output = "bbox.json"
+        if self.output_eval:
+            output = os.path.join(self.output_eval, output)
+        with open(output, 'w') as f:
+            json.dump(self.results, f)
+        logger.info('The bbox result is saved to bbox.json.')
+        if self.save_prediction_only:
+            return
+
         logger.info("Accumulating evaluatation results...")
         self.detection_map.accumulate()
 
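
Usage note (not part of the patch): a minimal sketch of how the new VOCMetric arguments could be exercised directly, assuming a VOC label list exists at the placeholder path below; in normal use the trainer wires these up from the `output_eval` and `save_prediction_only` config keys as shown in the trainer.py hunk above.

from ppdet.metrics import VOCMetric

# Placeholder label_list path and class count; adjust to the actual VOC dataset.
metric = VOCMetric(
    label_list='dataset/voc/label_list.txt',
    class_num=20,
    map_type='11point',
    output_eval='output_eval',      # bbox.json is written into this directory
    save_prediction_only=True)      # only dump predictions, skip mAP accumulation

# Per batch, metric.update(inputs, outputs) appends predictions to metric.results;
# at the end, metric.accumulate() writes bbox.json and returns early because
# save_prediction_only=True, so no mAP is computed.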