Commit 8e754247 authored by chenkai

Merge branch 'hd/report_accuracy' into 'master'

Hd/report accuracy

See merge request open-mmlab/mmaction-lite!253
jf7RDuUTrsQ 300 325
JTlatknwOrY 301 233
8UxlDNur-Z0 300 262
y9r115bgfNk 300 320
ZnIDviwA8CE 300 244
c8ln_nWYMyM 300 333
9GFfKVeoGm0 300 98
F5Y_gGsg4x8 300 193
AuqIu3x_lhY 300 36
1Hi5GMotrjs 300 26
czhL0iDbNT8 300 46
DYpTE_n-Wvk 177 208
R-xmgefs-M4 300 101
KPP2qRzMdos 300 131
PmgfU9ocx5A 300 193
GI7nIyMEQi4 300 173
A8TIWMvJVDU 300 72
ustVqWMM56c 300 289
03dk7mneDU0 300 254
jqkyelS4GJk 300 279
a58tBGuDIg0 231 382
5l1ajLjqaPo 300 226
-5wLopwbGX0 300 132
NUG7kwJ-614 300 103
wHUvw_R2iv8 300 97
44Mak5_s6Fk 300 256
y5vsk8Mj-3w 300 77
TEj_A_BC-aU 300 393
fUdu6hpMt_c 299 40
C5Z1sRArUR0 300 254
-orecnYvpNw 300 284
Urmbp1ulIXI 300 319
bLgdi4w7OAk 299 36
cVv_XMw4W2U 300 27
dV8JmKwDUzM 300 312
yZ9hIqW4bRc 300 239
9ykbMdR9Jss 213 257
G8fEnqIOkiA 300 158
6P2eVJ-Qp1g 300 131
Y-acp_jXG1Q 302 315
xthWPdx21r8 301 62
LExCUx4STW0 300 9
p2UMwzWsY0U 300 248
c0UI7f3Plro 300 383
1MmjE51PeIE 300 93
OU5dJpNHATk 300 342
38Uv6dbQkWc 281 44
5ZNdkbmv274 300 59
DrSL3Uddj6s 300 283
aNJ1-bvRox8 175 384
b5U7A_crvE0 194 377
xeWO9Bl9aWA 300 86
Zy8Ta83mrXo 300 223
AXnDRH7o2DQ 300 146
fTPDXmcygjw 300 11
EhRxb8-cNzQ 164 325
iO8RYYQzNiE 299 191
XbCncZcXuTI 300 55
pSCunaRn45A 300 265
UqI--TBQRgg 300 165
yD42KW6cm-A 300 186
VseX7hoxhbM 300 61
1FEcfy-moBM 300 8
BUT8oefH9Nw 300 120
-49tMSUTnZg 300 227
cZKPTt_FcFs 300 85
fiKJm0eavfw 300 323
gJcVljRRxGE 302 87
de1rSoht9t4 300 253
UAIJnI7fQYo 300 284
c4eIDxmVmCw 300 95
3LGce3efz7M 300 332
EC8iyn_q-NM 300 92
eo15donXwmY 300 351
NsG31u7Pd2Q 300 87
ILkPWpZYlPE 300 137
n5ZHSJRZl1U 300 338
UoQE44FEqLQ 300 260
5I-4meP_5wY 300 185
udLMOf77S3U 300 209
a4Ye18Mnblk 262 172
QbDMgHWwt_s 236 395
S6iAYBBMnwk 300 267
DNMfmNV8Uug 300 131
AJdp07pp43c 300 293
tVuop87KbDY 300 103
o79s5eOAF-c 300 246
dMt_nuBNdeY 300 168
RJU9NV1R4Fw 300 128
Zhux7Vy-hHc 300 82
47Cj6jwQKjo 300 228
a7Mc-0lwAuE 300 129
taZtEzvkg3M 300 264
bVDZohQJhBI 240 129
sBJk5li0O5o 216 154
DQUNZmbQI_g 300 29
-zpKHNrNsn4 300 244
Dcz0r8q-sx0 300 249
hfRKTH9pOMA 165 116
8CdUbOHDtes 300 222
文件已添加
文件已添加
# Public evaluation API: metric functions from .accuracy plus eval hooks.
# (The rendered diff had duplicated old/new import lines merged together;
# this is the reconstructed post-merge state, including the newly exported
# ``get_weighted_score``.)
from .accuracy import (average_recall_at_avg_proposals, confusion_matrix,
                       get_weighted_score, mean_average_precision,
                       mean_class_accuracy, pairwise_temporal_iou,
                       top_k_accuracy)
from .eval_hooks import DistEvalHook, EvalHook

__all__ = [
    'DistEvalHook', 'EvalHook', 'top_k_accuracy', 'mean_class_accuracy',
    'confusion_matrix', 'mean_average_precision', 'get_weighted_score',
    'average_recall_at_avg_proposals', 'pairwise_temporal_iou'
]
......@@ -305,3 +305,30 @@ def average_recall_at_avg_proposals(ground_truth,
area_under_curve = np.trapz(avg_recall, proposals_per_video)
auc = 100. * float(area_under_curve) / proposals_per_video[-1]
return recall, avg_recall, proposals_per_video, auc
def get_weighted_score(score_list, coeff_list):
    """Fuse several sets of prediction scores by a weighted sum.

    Given n predictions [score_1, ..., score_n] (``score_list``) and their
    coefficients [coeff_1, ..., coeff_n] (``coeff_list``), the fused score
    is ``score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n``.

    Args:
        score_list (list[list[np.ndarray]]): List of list of scores, with
            shape n (number of predictions) X num_samples X num_classes.
        coeff_list (list[float]): List of coefficients, with shape n.

    Return:
        list[np.ndarray]: List of weighted scores, one array per sample.
    """
    assert len(score_list) == len(coeff_list)
    num_samples = len(score_list[0])
    # Every prediction set must cover the same samples.
    for scores in score_list[1:]:
        assert len(scores) == num_samples

    stacked = np.array(score_list)  # (num_coeff, num_samples, num_classes)
    weights = np.array(coeff_list)  # (num_coeff, )
    # Weighted sum over the prediction axis -> (num_samples, num_classes).
    fused = np.einsum('i,ijk->jk', weights, stacked)
    return list(fused)
......@@ -19,6 +19,6 @@ line_length = 79
multi_line_output = 0
known_standard_library = pkg_resources,setuptools
known_first_party = mmaction
known_third_party = cv2,matplotlib,mmcv,numpy,pytest,scipy,seaborn,torch,torchvision
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
......@@ -5,8 +5,9 @@ import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
# Reconstructed post-merge import (the rendered diff merged the old and new
# line sets, leaving duplicated, syntactically invalid continuation lines).
from mmaction.core import (average_recall_at_avg_proposals, confusion_matrix,
                           get_weighted_score, mean_average_precision,
                           mean_class_accuracy, pairwise_temporal_iou,
                           top_k_accuracy)
def gt_confusion_matrix(gt_labels, pred_labels):
......@@ -183,3 +184,26 @@ def test_average_recall_at_avg_proposals():
assert_array_almost_equal(
proposals_per_video, np.arange(1, 101, 1), decimal=10)
assert auc == 99.0
def test_get_weighted_score():
    """Check identity fusion with a single input and an explicit weighted
    sum with two inputs."""
    score_a = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]
    # score_b is score_a cyclically shifted by one sample.
    score_b = score_a[1:] + score_a[:1]

    # Fusing a single prediction set with coefficient 1 is the identity.
    fused = get_weighted_score([score_a], [1])
    assert np.all(np.isclose(np.array(score_a), np.array(fused)))

    coeff_a, coeff_b = 2., 1.
    fused = get_weighted_score([score_a, score_b], [coeff_a, coeff_b])
    expected = [
        x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b)
    ]
    assert np.all(np.isclose(np.array(expected), np.array(fused)))
import argparse
from mmcv import load
from scipy.special import softmax
from mmaction.core.evaluation import (get_weighted_score, mean_class_accuracy,
top_k_accuracy)
def parse_args():
    """Build and parse the command line options for score fusion."""
    arg_parser = argparse.ArgumentParser(description='Fusing multiple scores')
    arg_parser.add_argument(
        '--scores',
        nargs='+',
        default=['demo/fuse/rgb.pkl', 'demo/fuse/flow.pkl'],
        help='list of scores')
    arg_parser.add_argument(
        '--coefficients',
        nargs='+',
        type=float,
        default=[1.0, 1.0],
        help='coefficients of each score file')
    arg_parser.add_argument(
        '--datalist',
        default='demo/fuse/data_list.txt',
        help='list of testing data')
    # Optionally turn raw scores into probabilities before fusing.
    arg_parser.add_argument('--apply-softmax', action='store_true')
    return arg_parser.parse_args()
def main():
    """Fuse several score files with given coefficients and report accuracy.

    Loads each pickled score file, optionally applies a per-sample softmax,
    computes the weighted sum of the score sets, then prints mean class
    accuracy and top-1/top-5 accuracy against the labels in ``--datalist``
    (last whitespace-separated field of each line is the integer label).
    """
    args = parse_args()
    assert len(args.scores) == len(args.coefficients)
    score_list = [load(f) for f in args.scores]

    if args.apply_softmax:

        def apply_softmax(scores):
            # Normalize each sample's raw scores into probabilities.
            return [softmax(score) for score in scores]

        score_list = [apply_softmax(scores) for scores in score_list]

    weighted_scores = get_weighted_score(score_list, args.coefficients)
    # Use a context manager so the data list file is closed promptly
    # (the original ``open(...).readlines()`` leaked the handle).
    with open(args.datalist) as f:
        labels = [int(line.strip().split()[-1]) for line in f]
    mean_class_acc = mean_class_accuracy(weighted_scores, labels)
    top_1_acc, top_5_acc = top_k_accuracy(weighted_scores, labels, [1, 5])
    print(f'Mean Class Accuracy: {mean_class_acc:.04f}')
    print(f'Top 1 Accuracy: {top_1_acc:.04f}')
    print(f'Top 5 Accuracy: {top_5_acc:.04f}')
# Entry point when this module is executed directly as a script.
if __name__ == '__main__':
    main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册