Commit 2023-06-12, authored by: Yuantao Feng; committed by: GitHub

Decoupling metrics from benchmark to allow different kinds of forward process (#14)

* create metrics for different types of behavior

* workable impl calling utils.METRICS in benchmark
Parent 661ca25c
......@@ -2,61 +2,30 @@ import os
import argparse
import yaml
import tqdm
import numpy as np
import cv2 as cv
# from ..models import MODELS
from models import MODELS
from utils import METRICS
# Command-line interface: a single option pointing at the YAML benchmark
# config. Parsed at import time, so this module is meant to run as a script.
parser = argparse.ArgumentParser("Benchmarks for OpenCV Zoo.")
parser.add_argument('--cfg', '-c', type=str,
                    help='Benchmarking on the given config.')
args = parser.parse_args()
class Timer:
    '''Per-call wall-clock timer (milliseconds) built on cv.TickMeter.

    warmup:    number of leading records discarded in getResult().
    reduction: 'median' or 'gmean' — how records are reduced to one number.

    NOTE(review): in this diff view the `_getGMean` method referenced by
    getResult() appears displaced below this class — confirm it belongs here.
    '''
    def __init__(self, warmup=0, reduction='median'):
        self._warmup = warmup
        self._reduction = reduction
        self._tm = cv.TickMeter()
        self._time_record = []  # elapsed ms per start/stop pair, chronological
        self._calls = 0         # counts every stop(), including warmup calls

    def start(self):
        self._tm.start()

    def stop(self):
        # Record the elapsed time for this start/stop pair, then reset the
        # meter so the next pair starts from zero.
        self._tm.stop()
        self._calls += 1
        self._time_record.append(self._tm.getTimeMilli())
        self._tm.reset()

    def reset(self):
        self._time_record = []
        self._calls = 0

    def getResult(self):
        # Reduce the records, skipping the first `warmup` entries.
        if self._reduction == 'median':
            return self._getMedian(self._time_record[self._warmup:])
        elif self._reduction == 'gmean':
            return self._getGMean(self._time_record[self._warmup:])
        else:
            raise NotImplementedError()

    def _getMedian(self, records):
        ''' Return median time
        '''
        # NOTE(review): records arrive in chronological order and are NOT
        # sorted here, so this returns the middle sample rather than the true
        # median — confirm whether sorting was intended.
        l = len(records)
        mid = int(l / 2)
        if l % 2 == 0:
            return (records[mid] + records[mid - 1]) / 2
        else:
            return records[mid]
def build_from_cfg(cfg, registery, key='name'):
    '''Instantiate the object named by cfg[key] from the given registry.

    Pops `key` out of cfg (mutating it) and passes the remaining entries
    as keyword arguments to the registered constructor.
    '''
    return registery.get(cfg.pop(key))(**cfg)
def _getGMean(self, records, drop_largest=3):
    ''' Return geometric mean of time
    '''
    # NOTE(review): despite the name, this computes an arithmetic mean, not
    # a geometric one.
    # NOTE(review): `time_record_sorted` is computed but never used — the
    # sum below slices the UNSORTED `records`, dropping the first
    # `drop_largest` chronological samples instead of the largest ones.
    # NOTE(review): the divisor uses `self._calls` (which also counts warmup
    # calls) rather than `len(records)`; the two differ once warmup > 0 —
    # confirm the intended denominator.
    time_record_sorted = sorted(records, reverse=True)
    return sum(records[drop_largest:]) / (self._calls - drop_largest)
def prepend_pythonpath(cfg):
    '''Recursively prefix $PYTHONPATH to every value whose key contains
    "path" (case-insensitive). Nested dicts are walked; cfg is modified
    in place.
    '''
    for key in cfg:
        value = cfg[key]
        if isinstance(value, dict):
            prepend_pythonpath(value)
        elif 'path' in key.lower():
            cfg[key] = os.path.join(os.environ['PYTHONPATH'], value)
class Data:
def __init__(self, **kwargs):
......@@ -105,56 +74,6 @@ class Data:
else:
return self._files[idx], image
class Metric:
    '''Times model inference and reduces the recorded latencies.

    Config keys (all optional):
        sizes:     [[w, h], ...] input sizes; defaults to the image's own size.
        warmup:    leading timing records discarded by the Timer (default 3).
        repeat:    extra timed iterations per size/bbox (default 10).
        batchSize: reserved for batched inference (default 1, unused yet).
        reduction: 'median' or 'gmean' (default 'median').
    '''
    def __init__(self, **kwargs):
        self._sizes = kwargs.pop('sizes', None)
        self._warmup = kwargs.pop('warmup', 3)
        self._repeat = kwargs.pop('repeat', 10)
        assert self._warmup < self._repeat, 'The value of warmup must be smaller than the value of repeat.'
        self._batch_size = kwargs.pop('batchSize', 1)
        self._reduction = kwargs.pop('reduction', 'median')
        self._timer = Timer(self._warmup, self._reduction)

    def getReduction(self):
        '''Name of the configured reduction, for reporting.'''
        return self._reduction

    def forward(self, model, *args, **kwargs):
        '''Benchmark model.infer(...).

        With a single positional arg (an image), runs once per configured
        size; with (image, bboxes), runs once per bbox. Returns a dict
        mapping a label (str(size) or 'bbox<i>') to the reduced latency (ms).
        '''
        img = args[0]
        h, w, _ = img.shape
        if not self._sizes:
            self._sizes = [[w, h]]
        results = dict()
        self._timer.reset()
        if len(args) == 1:
            for size in self._sizes:
                img_r = cv.resize(img, size)
                # Best effort: not every model exposes setInputSize.
                # Narrowed from a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                try:
                    model.setInputSize(size)
                except Exception:
                    pass
                # TODO: batched inference
                # input_data = [img] * self._batch_size
                input_data = img_r
                for _ in range(self._repeat + self._warmup):
                    self._timer.start()
                    model.infer(input_data)
                    self._timer.stop()
                results[str(size)] = self._timer.getResult()
        else:
            # TODO: batched inference
            # input_data = [args] * self._batch_size
            bboxes = args[1]
            for idx, bbox in enumerate(bboxes):
                for _ in range(self._repeat + self._warmup):
                    self._timer.start()
                    model.infer(img, bbox)
                    self._timer.stop()
                results['bbox{}'.format(idx)] = self._timer.getResult()
        return results
class Benchmark:
def __init__(self, **kwargs):
self._data_dict = kwargs.pop('data', None)
......@@ -162,7 +81,8 @@ class Benchmark:
self._data = Data(**self._data_dict)
self._metric_dict = kwargs.pop('metric', None)
self._metric = Metric(**self._metric_dict)
# self._metric = Metric(**self._metric_dict)
self._metric = build_from_cfg(self._metric_dict, registery=METRICS, key='type')
backend_id = kwargs.pop('backend', 'default')
available_backends = dict(
......@@ -206,20 +126,6 @@ class Benchmark:
total_latency += latency
print(' {}, latency ({}): {:.4f} ms'.format(key, self._metric.getReduction(), latency))
def build_from_cfg(cfg, registery):
    '''Look up cfg['name'] in the registry and construct that object with
    the remaining config entries as keyword arguments. Mutates cfg by
    popping 'name'.
    '''
    target_name = cfg.pop('name')
    constructor = registery.get(target_name)
    return constructor(**cfg)
def prepend_pythonpath(cfg):
    '''Walk cfg recursively and, for every non-dict value whose key mentions
    "path" (any case), replace it with $PYTHONPATH joined to the value.
    Operates in place.
    '''
    for key, value in cfg.items():
        if isinstance(value, dict):
            prepend_pythonpath(value)
        elif 'path' in key.lower():
            cfg[key] = os.path.join(os.environ['PYTHONPATH'], value)
if __name__ == '__main__':
assert args.cfg.endswith('yaml'), 'Currently support configs of yaml format only.'
with open(args.cfg, 'r') as f:
......
......@@ -4,13 +4,13 @@ Benchmark:
path: "benchmark/data/face/detection"
files: ["group.jpg", "concerts.jpg", "dance.jpg"]
metric:
type: "Detection"
sizes: # [[w1, h1], ...], Omit to run at original scale
- [160, 120]
- [640, 480]
warmup: 3
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"
......
......@@ -5,10 +5,10 @@ Benchmark:
files: ["Aaron_Tippin_0001.jpg", "Alvaro_Uribe_0028.jpg", "Alvaro_Uribe_0029.jpg", "Jose_Luis_Rodriguez_Zapatero_0001.jpg"]
useLabel: True
metric: # 'sizes' is omitted since this model requires input of fixed size
warmup: 3
type: "Recognition"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"
......
......@@ -6,10 +6,10 @@ Benchmark:
toRGB: True
resize: [192, 192]
metric:
warmup: 3
type: "Base"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"
......
......@@ -7,10 +7,10 @@ Benchmark:
resize: [256, 256]
centerCrop: 224
metric:
warmup: 3
type: "Base"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"
......
......@@ -4,10 +4,11 @@ Benchmark:
path: "benchmark/data/qrcode"
files: ["opencv.png", "opencv_zoo.png"]
metric:
type: "Detection"
sizes:
- [100, 100]
- [300, 300]
warmup: 3
warmup: 30
repeat: 10
reduction: "median"
backend: "default"
......@@ -15,7 +16,7 @@ Benchmark:
Model:
name: "WeChatQRCode"
detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021sep.prototxt"
detect_model_path: "models/qrcode_wechatqrcode/detect_2021sep.caffemodel"
sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021sep.prototxt"
sr_model_path: "models/qrcode_wechatqrcode/sr_2021sep.caffemodel"
\ No newline at end of file
detect_prototxt_path: "models/qrcode_wechatqrcode/detect_2021nov.prototxt"
detect_model_path: "models/qrcode_wechatqrcode/detect_2021nov.caffemodel"
sr_prototxt_path: "models/qrcode_wechatqrcode/sr_2021nov.prototxt"
sr_model_path: "models/qrcode_wechatqrcode/sr_2021nov.caffemodel"
\ No newline at end of file
......@@ -4,12 +4,12 @@ Benchmark:
path: "benchmark/data/text"
files: ["1.jpg", "2.jpg", "3.jpg"]
metric:
type: "Detection"
sizes: # [[w1, h1], ...], Omit to run at original scale
- [640, 480]
warmup: 3
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"
......
......@@ -5,10 +5,10 @@ Benchmark:
files: ["1.jpg", "2.jpg", "3.jpg"]
useLabel: True
metric: # 'sizes' is omitted since this model requires input of fixed size
warmup: 3
type: "Recognition"
warmup: 30
repeat: 10
batchSize: 1
reduction: 'median'
reduction: "median"
backend: "default"
target: "cpu"
......
numpy==1.21.2
numpy
opencv-python==4.5.4.58
tqdm
pyyaml
requests
\ No newline at end of file
from .factory import (METRICS, DATALOADERS)
from .metrics import *
__all__ = ['METRICS', 'DATALOADERS']
\ No newline at end of file
class Registery:
    '''Minimal name -> object registry (class name keeps the project's
    original spelling).

    Usage:
        REG = Registery('Things')

        @REG.register
        class Thing: ...

        REG.get('Thing')  # -> Thing
    '''
    def __init__(self, name):
        self._name = name
        self._dict = dict()

    def get(self, key):
        '''Return the item registered under `key`.

        Raises a descriptive KeyError (instead of a bare one) when the key
        is unknown, listing what is actually registered.
        '''
        try:
            return self._dict[key]
        except KeyError:
            raise KeyError('{} is not registered in {}. Available: {}'.format(
                key, self._name, sorted(self._dict))) from None

    def register(self, item):
        '''Register `item` under its __name__ and return it.

        Returning `item` makes this usable as a decorator: previously it
        returned None, which rebound the decorated class/function name to
        None in its defining module.
        '''
        self._dict[item.__name__] = item
        return item
\ No newline at end of file
from .base import Base
from .detection import Detection
from .recognition import Recognition
__all__ = ['Base', 'Detection', 'Recognition']
\ No newline at end of file
import cv2 as cv
from .base_metric import BaseMetric
from ..factory import METRICS
@METRICS.register
class Base(BaseMetric):
    '''Plain latency metric: resizes the input image to each configured size
    and times repeated calls to model.infer.
    '''
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, model, *args, **kwargs):
        '''Return {str(size): latency_ms} for each configured size.'''
        image = args[0]
        if not self._sizes:
            # No sizes configured: benchmark at the image's native size.
            height, width, _ = image.shape
            self._sizes.append([width, height])
        latencies = dict()
        self._timer.reset()
        for size in self._sizes:
            blob = cv.resize(image, size)
            # Untimed warmup runs first, then timed repeats.
            for _ in range(self._warmup):
                model.infer(blob)
            for _ in range(self._repeat):
                self._timer.start()
                model.infer(blob)
                self._timer.stop()
            latencies[str(size)] = self._getResult()
        return latencies
\ No newline at end of file
import cv2 as cv
from ..timer import Timer
class BaseMetric:
    '''Shared machinery for latency metrics: config parsing, timing, and
    reduction of the recorded per-inference times (milliseconds).

    Config keys (all optional):
        sizes:     [[w, h], ...] input sizes; empty -> subclasses use the
                   image's own size.
        warmup:    untimed inference runs before measuring (default 3).
        repeat:    timed inference runs per size/bbox (default 10).
        reduction: 'median' or 'gmean' (default 'median').
    '''
    def __init__(self, **kwargs):
        self._sizes = kwargs.pop('sizes', None)
        if self._sizes is None:
            self._sizes = []
        self._warmup = kwargs.pop('warmup', 3)
        self._repeat = kwargs.pop('repeat', 10)
        self._reduction = kwargs.pop('reduction', 'median')
        self._timer = Timer()

    def _calcMedian(self, records):
        ''' Return the median of records
        '''
        # Records arrive in chronological order; sort them first, otherwise
        # the middle element is just the middle sample, not the median.
        records = sorted(records)
        l = len(records)
        mid = int(l / 2)
        if l % 2 == 0:
            return (records[mid] + records[mid - 1]) / 2
        else:
            return records[mid]

    def _calcGMean(self, records, drop_largest=3):
        ''' Return the mean of records after dropping the `drop_largest`
        largest values. (Despite the name this is an arithmetic mean; the
        name is kept so the 'gmean' reduction stays backward compatible.)
        '''
        l = len(records)
        if l <= drop_largest:
            # Too few samples to drop any: average them all instead of
            # dividing by a zero or negative count as before.
            print('len(records)({}) <= drop_largest({}), stop dropping.'.format(l, drop_largest))
            drop_largest = 0
        records_sorted = sorted(records, reverse=True)
        return sum(records_sorted[drop_largest:]) / (l - drop_largest)

    def _getResult(self):
        '''Reduce the timer's recorded latencies with the configured reduction.'''
        records = self._timer.getRecords()
        if self._reduction == 'median':
            return self._calcMedian(records)
        elif self._reduction == 'gmean':
            return self._calcGMean(records)
        else:
            raise NotImplementedError('Reduction {} is not supported'.format(self._reduction))

    def getReduction(self):
        '''Name of the configured reduction, for reporting.'''
        return self._reduction

    def forward(self, model, *args, **kwargs):
        '''Subclasses run timed inference here and return {label: latency_ms}.'''
        raise NotImplementedError('Not implemented')
\ No newline at end of file
import cv2 as cv
from .base_metric import BaseMetric
from ..factory import METRICS
@METRICS.register
class Detection(BaseMetric):
    '''Latency metric for detection models: resizes the input image to each
    configured size, informs the model of the new input size when it
    supports that, and times repeated inference.
    '''
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, model, *args, **kwargs):
        '''Return {str(size): latency_ms} for each configured size.'''
        img = args[0]
        if not self._sizes:
            # No sizes configured: benchmark at the image's native size.
            h, w, _ = img.shape
            self._sizes.append([w, h])
        results = dict()
        self._timer.reset()
        for size in self._sizes:
            input_data = cv.resize(img, size)
            # Best effort: not every model exposes setInputSize. Narrowed
            # from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                model.setInputSize(size)
            except Exception:
                pass
            for _ in range(self._warmup):
                model.infer(input_data)
            for _ in range(self._repeat):
                self._timer.start()
                model.infer(input_data)
                self._timer.stop()
            results[str(size)] = self._getResult()
        return results
\ No newline at end of file
import cv2 as cv
from .base_metric import BaseMetric
from ..factory import METRICS
@METRICS.register
class Recognition(BaseMetric):
    '''Latency metric for recognition models: times repeated inference of
    (image, bbox) pairs, producing one result entry per bounding box.
    '''
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, model, *args, **kwargs):
        '''Return {'bbox<i>': latency_ms} for each bbox in args[1].'''
        image, bboxes = args
        if not self._sizes:
            # Kept for parity with the other metrics; the sizes list is not
            # read below since recognition models work per bounding box.
            rows, cols, _ = image.shape
            self._sizes.append([cols, rows])
        latencies = dict()
        self._timer.reset()
        for index, box in enumerate(bboxes):
            for _ in range(self._warmup):
                model.infer(image, box)
            for _ in range(self._repeat):
                self._timer.start()
                model.infer(image, box)
                self._timer.stop()
            latencies['bbox{}'.format(index)] = self._getResult()
        return latencies
\ No newline at end of file
import cv2 as cv
class Timer:
    '''Accumulates per-call wall-clock timings in milliseconds using
    cv.TickMeter. Each start()/stop() pair appends one record.
    '''
    def __init__(self):
        self._tick_meter = cv.TickMeter()
        self._records = []

    def start(self):
        self._tick_meter.start()

    def stop(self):
        # Store the elapsed ms for this pair and zero the meter so the next
        # measurement starts fresh.
        self._tick_meter.stop()
        self._records.append(self._tick_meter.getTimeMilli())
        self._tick_meter.reset()

    def reset(self):
        self._records = []

    def getRecords(self):
        return self._records
\ No newline at end of file
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.