Unverified commit ce4b9969, authored by Feng Wang, committed by GitHub

fix(evaluator): compat bug and update requirement (#1416)

Parent: 48fd2a95
requirements.txt
@@ -3,14 +3,11 @@ numpy
 torch>=1.7
 opencv_python
 loguru
-scikit-image
 tqdm
 torchvision
 Pillow
 thop
 ninja
 tabulate
 tensorboard
 # verified versions
 # pycocotools corresponds to https://github.com/ppwwyyxx/cocoapi
setup.cfg
@@ -3,8 +3,8 @@ line_length = 100
 multi_line_output = 3
 balanced_wrapping = True
 known_standard_library = setuptools
-known_third_party = tqdm,loguru
-known_data_processing = cv2,numpy,scipy,PIL,matplotlib,scikit_image
+known_third_party = tqdm,loguru,tabulate
+known_data_processing = cv2,numpy,scipy,PIL,matplotlib
 known_datasets = pycocotools
 known_deeplearning = torch,torchvision,caffe2,onnx,apex,timm,thop,torch2trt,tensorrt,openvino,onnxruntime
 known_myself = yolox
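The two isort keys above track the requirements change: `scikit_image` is dropped and `tabulate` is now recognized as third-party. Assuming the custom sections are ordered roughly as they are declared, a conforming import block would look like this sketch (the exact imports are illustrative, not taken from the diff):

```python
# Hypothetical import block grouped the way this isort config sorts it.
import setuptools                    # known_standard_library

from loguru import logger            # known_third_party
from tabulate import tabulate

import cv2                           # known_data_processing
import numpy as np

from pycocotools.coco import COCO    # known_datasets

import torch                         # known_deeplearning
import torchvision

from yolox.exp import get_exp        # known_myself (project-local)
```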
yolox/evaluators/coco_evaluator.py
@@ -114,14 +114,8 @@ class COCOEvaluator:
         self.per_class_AR = per_class_AR

     def evaluate(
-        self,
-        model,
-        distributed=False,
-        half=False,
-        trt_file=None,
-        decoder=None,
-        test_size=None,
-        return_outputs=False
+        self, model, distributed=False, half=False, trt_file=None,
+        decoder=None, test_size=None, return_outputs=False
     ):
         """
         COCO average precision (AP) Evaluation. Iterate inference on the test dataset
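The signature is only reflowed here; the behavior is what the parameters already imply: `return_outputs` switches between returning just the metrics and also returning the raw predictions. A hedged usage sketch (construction of `evaluator` and `model` is elided):

```python
# Sketch: COCOEvaluator.evaluate with and without return_outputs.
# `evaluator` is an initialized COCOEvaluator, `model` a YOLOX model.
eval_results = evaluator.evaluate(model)                                # metrics only
eval_results, outputs = evaluator.evaluate(model, return_outputs=True)  # metrics + raw outputs
```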
yolox/evaluators/voc_eval.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 # Code are based on
 # https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
 # Copyright (c) Bharath Hariharan.
@@ -13,7 +12,7 @@ import numpy as np

 def parse_rec(filename):
-    """ Parse a PASCAL VOC xml file """
+    """Parse a PASCAL VOC xml file"""
     tree = ET.parse(filename)
     objects = []
     for obj in tree.findall("object"):
@@ -35,7 +34,7 @@ def parse_rec(filename):

 def voc_ap(rec, prec, use_07_metric=False):
-    """ap = voc_ap(rec, prec, [use_07_metric])
+    """
     Compute VOC AP given precision and recall.
     If use_07_metric is true, uses the
     VOC 07 11 point method (default:False).
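For reference, the "VOC 07 11 point method" named in this docstring averages the best achievable precision at the 11 recall thresholds 0.0, 0.1, ..., 1.0. A self-contained sketch of just that branch (the file's real `voc_ap` also implements the newer all-points variant):

```python
import numpy as np

def voc_ap_11point(rec: np.ndarray, prec: np.ndarray) -> float:
    """11-point interpolated AP (the use_07_metric=True branch)."""
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        if np.sum(rec >= t) == 0:
            p = 0.0                     # no prediction reaches this recall level
        else:
            p = np.max(prec[rec >= t])  # best precision at recall >= t
        ap += p / 11.0
    return ap
```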
@@ -92,9 +91,9 @@ def voc_eval(
         for i, imagename in enumerate(imagenames):
             recs[imagename] = parse_rec(annopath.format(imagename))
             if i % 100 == 0:
-                print("Reading annotation for {:d}/{:d}".format(i + 1, len(imagenames)))
+                print(f"Reading annotation for {i + 1}/{len(imagenames)}")
         # save
-        print("Saving cached annotations to {:s}".format(cachefile))
+        print(f"Saving cached annotations to {cachefile}")
         with open(cachefile, "wb") as f:
             pickle.dump(recs, f)
     else:
@@ -155,8 +154,7 @@ def voc_eval(
             # union
             uni = (
                 (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
-                + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
-                - inters
+                + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
             )

             overlaps = inters / uni
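The reflowed `uni` expression is the denominator of a standard VOC IoU: the `+ 1.0` terms exist because VOC box coordinates are inclusive pixel indices, so a box's width is `xmax - xmin + 1`. The same computation written out as a standalone sketch:

```python
import numpy as np

def voc_iou(bb: np.ndarray, BBGT: np.ndarray) -> np.ndarray:
    """IoU of one box `bb` against ground-truth boxes `BBGT` (N x 4),
    both in VOC's inclusive [xmin, ymin, xmax, ymax] convention."""
    ixmin = np.maximum(BBGT[:, 0], bb[0])
    iymin = np.maximum(BBGT[:, 1], bb[1])
    ixmax = np.minimum(BBGT[:, 2], bb[2])
    iymax = np.minimum(BBGT[:, 3], bb[3])
    iw = np.maximum(ixmax - ixmin + 1.0, 0.0)   # clamp: no overlap -> width 0
    ih = np.maximum(iymax - iymin + 1.0, 0.0)
    inters = iw * ih
    uni = (
        (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
        + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) - inters
    )
    return inters / uni
```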
yolox/evaluators/voc_evaluator.py
@@ -21,14 +21,7 @@ class VOCEvaluator:
     VOC AP Evaluation class.
     """

-    def __init__(
-        self,
-        dataloader,
-        img_size,
-        confthre,
-        nmsthre,
-        num_classes,
-    ):
+    def __init__(self, dataloader, img_size, confthre, nmsthre, num_classes):
         """
         Args:
             dataloader (Dataloader): evaluate dataloader.
@@ -46,13 +39,8 @@ class VOCEvaluator:
         self.num_images = len(dataloader.dataset)

     def evaluate(
-        self,
-        model,
-        distributed=False,
-        half=False,
-        trt_file=None,
-        decoder=None,
-        test_size=None,
+        self, model, distributed=False, half=False, trt_file=None,
+        decoder=None, test_size=None, return_outputs=False,
     ):
         """
         VOC average precision (AP) Evaluation. Iterate inference on the test dataset
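This hunk appears to be the compatibility fix the commit title refers to: `VOCEvaluator.evaluate` previously lacked `return_outputs`, so a caller passing it (as is valid for `COCOEvaluator.evaluate`, above) would hit `TypeError: evaluate() got an unexpected keyword argument 'return_outputs'`. With the added parameter, both evaluators accept the same call (sketch, construction elided):

```python
# After this commit, the same call shape works for both evaluator classes.
eval_results, outputs = evaluator.evaluate(model, return_outputs=True)
```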
@@ -91,9 +79,7 @@ class VOCEvaluator:
             model(x)
             model = model_trt

-        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
-            progress_bar(self.dataloader)
-        ):
+        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(progress_bar(self.dataloader)):
             with torch.no_grad():
                 imgs = imgs.type(tensor_type)
@@ -127,13 +113,13 @@ class VOCEvaluator:
         eval_results = self.evaluate_prediction(data_list, statistics)
         synchronize()
+        if return_outputs:
+            return eval_results, data_list
         return eval_results

     def convert_to_voc_format(self, outputs, info_imgs, ids):
         predictions = {}
-        for (output, img_h, img_w, img_id) in zip(
-            outputs, info_imgs[0], info_imgs[1], ids
-        ):
+        for output, img_h, img_w, img_id in zip(outputs, info_imgs[0], info_imgs[1], ids):
             if output is None:
                 predictions[int(img_id)] = (None, None, None)
                 continue
@@ -142,9 +128,7 @@ class VOCEvaluator:
             bboxes = output[:, 0:4]

             # preprocessing: resize
-            scale = min(
-                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)
-            )
+            scale = min(self.img_size[0] / float(img_h), self.img_size[1] / float(img_w))
             bboxes /= scale
             cls = output[:, 6]
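The collapsed `scale` line computes the letterbox resize factor applied during preprocessing; dividing `bboxes` by it maps predictions from network input space back to the original image. A small worked sketch with assumed sizes (the numbers are illustrative):

```python
# Assume the evaluator's input size is 640 x 640 and the original image
# is 320 x 426 (img_h x img_w); both values are made up for illustration.
img_size = (640, 640)
img_h, img_w = 320, 426

scale = min(img_size[0] / float(img_h), img_size[1] / float(img_w))
# scale = min(2.0, 1.5023...) = 1.5023..., i.e. the image was upscaled by
# ~1.5x to fit the 640 x 640 canvas, so bboxes /= scale undoes the resize.
```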
@@ -175,7 +159,6 @@ class VOCEvaluator:
                 )
             ]
         )
-
         info = time_info + "\n"

         all_boxes = [
@@ -196,13 +179,9 @@ class VOCEvaluator:
                 c_dets = torch.cat((bboxes, scores.unsqueeze(1)), dim=1)
                 all_boxes[j][img_num] = c_dets[mask_c].numpy()

-            sys.stdout.write(
-                "im_eval: {:d}/{:d} \r".format(img_num + 1, self.num_images)
-            )
+            sys.stdout.write(f"im_eval: {img_num + 1}/{self.num_images} \r")
             sys.stdout.flush()

         with tempfile.TemporaryDirectory() as tempdir:
-            mAP50, mAP70 = self.dataloader.dataset.evaluate_detections(
-                all_boxes, tempdir
-            )
+            mAP50, mAP70 = self.dataloader.dataset.evaluate_detections(all_boxes, tempdir)
             return mAP50, mAP70, info
yolox/utils/model_utils.py
@@ -8,7 +8,6 @@ from typing import Sequence

 import torch
 import torch.nn as nn
-from thop import profile

 __all__ = [
     "fuse_conv_and_bn",
@@ -21,6 +20,8 @@ __all__ = [

 def get_model_info(model: nn.Module, tsize: Sequence[int]) -> str:
+    from thop import profile
+
     stride = 64
     img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
     flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
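Moving `from thop import profile` from module level into `get_model_info` defers the dependency until the function is actually called, so importing `yolox.utils` no longer requires thop to be installed (which fits the requirements cleanup in this commit). A hedged sketch of the same deferred-import pattern with an explicit error message; the trailing FLOPs scaling mirrors what YOLOX's helper does but is reconstructed here from the visible lines plus assumptions:

```python
from copy import deepcopy
from typing import Sequence

import torch
import torch.nn as nn

def get_model_info_sketch(model: nn.Module, tsize: Sequence[int]) -> str:
    # Deferred import: thop becomes an optional dependency, needed only
    # when model statistics are actually requested.
    try:
        from thop import profile
    except ImportError as exc:
        raise ImportError("thop is required by get_model_info") from exc

    stride = 64
    img = torch.zeros((1, 3, stride, stride), device=next(model.parameters()).device)
    flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)
    params /= 1e6                                        # parameters in millions
    flops /= 1e9                                         # FLOPs in GFLOPs
    flops *= tsize[0] * tsize[1] / stride / stride * 2   # rescale to target input size
    return f"Params: {params:.2f}M, Gflops: {flops:.2f}"
```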