s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection; in sync with the fork source)
Commit 8deaf352 (unverified)
Authored Mar 31, 2022 by YixinKristy; committed via GitHub on Mar 31, 2022

Merge branch 'PaddlePaddle:release/2.4' into release/2.4

Parents: f932438d, 3b35926d
Showing 15 changed files, with 247 additions and 224 deletions (+247, -224).
configs/datasets/coco_detection.yml (+1, -0)
configs/datasets/coco_instance.yml (+1, -0)
configs/datasets/dota.yml (+1, -0)
configs/mot/bytetrack/_base_/mot17.yml (+1, -0)
configs/mot/deepsort/_base_/mot17.yml (+1, -0)
configs/picodet/README.md (+95, -91)
configs/picodet/README_en.md (+92, -95)
deploy/pphuman/datacollector.py (+3, -1)
deploy/pphuman/mtmct.py (+3, -4)
deploy/pphuman/pipeline.py (+3, -1)
deploy/python/infer.py (+3, -3)
ppdet/data/source/category.py (+10, -1)
ppdet/metrics/mcmot_metrics.py (+3, -3)
ppdet/metrics/mot_metrics.py (+8, -7)
ppdet/modeling/architectures/meta_arch.py (+22, -18)
configs/datasets/coco_detection.yml

@@ -17,3 +17,4 @@ EvalDataset:
 TestDataset:
   !ImageFolder
     anno_path: annotations/instances_val2017.json
+    dataset_dir: dataset/coco
configs/datasets/coco_instance.yml

@@ -17,3 +17,4 @@ EvalDataset:
 TestDataset:
   !ImageFolder
     anno_path: annotations/instances_val2017.json
+    dataset_dir: dataset/coco
configs/datasets/dota.yml

@@ -17,3 +17,4 @@ EvalDataset:
 TestDataset:
   !ImageFolder
     anno_path: trainval_split/s2anet_trainval_paddle_coco.json
+    dataset_dir: dataset/DOTA_1024_s2anet/
configs/mot/bytetrack/_base_/mot17.yml

@@ -17,6 +17,7 @@ EvalDataset:
 TestDataset:
   !ImageFolder
     dataset_dir: dataset/mot/MOT17
+    anno_path: annotations/val_half.json
configs/mot/deepsort/_base_/mot17.yml

@@ -17,6 +17,7 @@ EvalDataset:
 TestDataset:
   !ImageFolder
     dataset_dir: dataset/mot/MOT17
+    anno_path: annotations/val_half.json
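All five dataset configs now give TestDataset both a dataset_dir and an anno_path; the loader typically resolves the annotation file as 'dataset_dir/anno_path' when dataset_dir is set, so test-time inference can read real category names instead of defaults. A minimal sketch of that resolution; resolve_test_anno is a hypothetical helper for illustration, not a PaddleDetection API:

    import os

    def resolve_test_anno(dataset_dir, anno_path):
        # Hypothetical helper: mirrors the 'dataset_dir/anno_path' resolution
        # implied by these config changes.
        return os.path.join(dataset_dir, anno_path) if dataset_dir else anno_path

    print(resolve_test_anno('dataset/coco', 'annotations/instances_val2017.json'))
    # dataset/coco/annotations/instances_val2017.json
    print(resolve_test_anno('dataset/mot/MOT17', 'annotations/val_half.json'))
    # dataset/mot/MOT17/annotations/val_half.json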
configs/picodet/README.md

This diff is collapsed.
configs/picodet/README_cn.md → configs/picodet/README_en.md

This diff is collapsed.
deploy/pphuman/datacollector.py

@@ -35,6 +35,9 @@ class Result(object):
             return self.res_dict[name]
         return None
 
+    def clear(self, name):
+        self.res_dict[name].clear()
+
 
 class DataCollector(object):
     """
@@ -80,7 +83,6 @@ class DataCollector(object):
             ids = int(mot_item[0])
             if ids not in self.collector:
                 self.collector[ids] = copy.deepcopy(self.mots)
             self.collector[ids]["frames"].append(frameid)
             self.collector[ids]["rects"].append([mot_item[2:]])
             if attr_res:
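The added Result.clear() gives the PP-Human pipeline a way to drop one task's stale output between frames. A minimal, self-contained sketch of how the method behaves; the 'reid' key and clear() mirror the diff, while the condensed __init__ and update() shown here are illustrative:

    class Result(object):
        # Condensed from deploy/pphuman/datacollector.py; only the parts
        # relevant to the new clear() method are kept.
        def __init__(self):
            self.res_dict = {'mot': {}, 'reid': {}}

        def update(self, res, name):
            self.res_dict[name].update(res)

        def get(self, name):
            if name in self.res_dict and len(self.res_dict[name]) > 0:
                return self.res_dict[name]
            return None

        def clear(self, name):
            self.res_dict[name].clear()

    res = Result()
    res.update({'features': [0.1, 0.2]}, 'reid')
    res.clear('reid')            # wipe ReID output for a frame where it is not recomputed
    assert res.get('reid') is None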
deploy/pphuman/mtmct.py

@@ -297,10 +297,9 @@ def distill_idfeat(mot_res):
     feature_new = feature_list
 
     #if available frames number is more than 200, take one frame data per 20 frames
-    if len(qualities_new) > 200:
-        skipf = 20
-    else:
-        skipf = max(10, len(qualities_new) // 10)
+    skipf = 1
+    if len(qualities_new) > 20:
+        skipf = 2
     quality_skip = np.array(qualities_new[::skipf])
     feature_skip = np.array(feature_new[::skipf])
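The rewritten skip logic keeps far more ReID feature samples per tracked identity: every frame when at most 20 quality scores are available, every second frame otherwise, instead of the old 1-in-10 or 1-in-20 sampling. A small sketch comparing the two policies on a made-up list of 50 quality scores:

    # Hypothetical per-frame quality scores; only the list length matters here.
    qualities_new = [0.5] * 50

    # Old policy (removed by this commit).
    skipf_old = 20 if len(qualities_new) > 200 else max(10, len(qualities_new) // 10)

    # New policy (added by this commit).
    skipf_new = 1
    if len(qualities_new) > 20:
        skipf_new = 2

    print(len(qualities_new[::skipf_old]))   # 5 samples survive the old rule
    print(len(qualities_new[::skipf_new]))   # 25 samples survive the new rule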
deploy/pphuman/pipeline.py

@@ -587,7 +587,7 @@ class PipePredictor(object):
                 if self.cfg['visual']:
                     self.action_visual_helper.update(action_res)
 
-            if self.with_mtmct:
+            if self.with_mtmct and frame_id % 10 == 0:
                 crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(
                     frame, mot_res)
                 if frame_id > self.warmup_frame:
@@ -603,6 +603,8 @@ class PipePredictor(object):
                     "rects": rects
                 }
                 self.pipeline_res.update(reid_res_dict, 'reid')
+            else:
+                self.pipeline_res.clear('reid')
 
             self.collector.append(frame_id, self.pipeline_res)
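Combined with the new Result.clear(), this gating runs the cross-camera ReID branch only on every 10th frame and empties the stale 'reid' entry on all other frames, so the DataCollector never re-records old features. A schematic sketch of the pattern; PipeRes is a stand-in for the real pipeline Result class, not PaddleDetection code:

    class PipeRes(object):
        # Stand-in for the pipeline Result used in deploy/pphuman.
        def __init__(self):
            self.res_dict = {'reid': {}}

        def update(self, res, name):
            self.res_dict[name].update(res)

        def clear(self, name):
            self.res_dict[name].clear()

    res = PipeRes()
    for frame_id in range(30):
        if frame_id % 10 == 0:
            # Every 10th frame: recompute ReID features (stubbed out here).
            res.update({'features': [frame_id]}, 'reid')
        else:
            # Other frames: drop the stale entry so the collector stores nothing.
            res.clear('reid')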
deploy/python/infer.py

@@ -231,7 +231,7 @@ class Detector(object):
         self.det_times.preprocess_time_s.end()
 
         # model prediction
-        result = self.predict(repeats=repeats)  # warmup
+        result = self.predict(repeats=50)  # warmup
         self.det_times.inference_time_s.start()
         result = self.predict(repeats=repeats)
         self.det_times.inference_time_s.end(repeats=repeats)
@@ -296,7 +296,7 @@ class Detector(object):
         if not os.path.exists(self.output_dir):
             os.makedirs(self.output_dir)
         out_path = os.path.join(self.output_dir, video_out_name)
         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
         writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
         index = 1
         while (1):
@@ -790,7 +790,7 @@ def main():
     if FLAGS.image_dir is None and FLAGS.image_file is not None:
         assert FLAGS.batch_size == 1, "batch_size should be 1, when image_file is not None"
     img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
-    detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)
+    detector.predict_image(img_list, FLAGS.run_benchmark, repeats=100)
     if not FLAGS.run_benchmark:
         detector.det_times.info(average=True)
     else:
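Pinning warmup to 50 repeats and raising the benchmark run to 100 repeats separates cache and graph warmup from the timed loop, so the benchmark measures steady-state latency only. A minimal sketch of that warmup-then-measure pattern; the predict stub below stands in for Detector.predict and is not the real inference call:

    import time

    def predict(repeats=1):
        # Stand-in for Detector.predict(); the real code runs Paddle inference.
        for _ in range(repeats):
            sum(i * i for i in range(10000))

    predict(repeats=50)                      # warmup, excluded from timing
    repeats = 100
    start = time.time()
    predict(repeats=repeats)
    avg_ms = (time.time() - start) / repeats * 1000
    print("average inference time: %.3f ms" % avg_ms)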
ppdet/data/source/category.py

@@ -39,6 +39,11 @@ def get_categories(metric_type, anno_file=None, arch=None):
     if arch == 'keypoint_arch':
         return (None, {'id': 'keypoint'})
 
+    if anno_file == None or (not os.path.isfile(anno_file)):
+        logger.warning(
+            "anno_file '{}' is None or not set or not exist, "
+            "please recheck TrainDataset/EvalDataset/TestDataset.anno_path, "
+            "otherwise the default categories will be used by metric_type.".
+            format(anno_file))
+
     if metric_type.lower() == 'coco' or metric_type.lower(
     ) == 'rbox' or metric_type.lower() == 'snipercoco':
         if anno_file and os.path.isfile(anno_file):
@@ -55,8 +60,9 @@ def get_categories(metric_type, anno_file=None, arch=None):
         # anno file not exist, load default categories of COCO17
         else:
             if metric_type.lower() == 'rbox':
+                logger.warning("metric_type: {}, load default categories of DOTA.".
+                               format(metric_type))
                 return _dota_category()
+            logger.warning("metric_type: {}, load default categories of COCO.".
+                           format(metric_type))
             return _coco17_category()
 
     elif metric_type.lower() == 'voc':
@@ -77,6 +83,7 @@ def get_categories(metric_type, anno_file=None, arch=None):
         # anno file not exist, load default categories of
         # VOC all 20 categories
         else:
+            logger.warning("metric_type: {}, load default categories of VOC.".
+                           format(metric_type))
             return _vocall_category()
 
     elif metric_type.lower() == 'oid':
@@ -104,6 +111,7 @@ def get_categories(metric_type, anno_file=None, arch=None):
             return clsid2catid, catid2name
         # anno file not exist, load default category 'pedestrian'.
         else:
+            logger.warning("metric_type: {}, load default categories of pedestrian MOT.".
+                           format(metric_type))
             return _mot_category(category='pedestrian')
 
     elif metric_type.lower() in ['kitti', 'bdd100kmot']:
@@ -122,6 +130,7 @@ def get_categories(metric_type, anno_file=None, arch=None):
             return clsid2catid, catid2name
         # anno file not exist, load default categories of visdrone all 10 categories
         else:
+            logger.warning("metric_type: {}, load default categories of VisDrone.".
+                           format(metric_type))
             return _visdrone_category()
 
     else:
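The new warnings fire whenever get_categories has to fall back to built-in label maps because anno_path is unset or the file is missing. A short sketch of that fallback, assuming a PaddleDetection environment is installed; the function signature comes from the hunk header above:

    from ppdet.data.source.category import get_categories

    # No annotation file: the added warning is logged and the default
    # COCO17 label map is returned for the 'COCO' metric type.
    clsid2catid, catid2name = get_categories('COCO', anno_file=None)
    print(list(catid2name.items())[:3])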
ppdet/metrics/mcmot_metrics.py

@@ -26,8 +26,6 @@ from motmetrics.math_util import quiet_divide
 import numpy as np
 import pandas as pd
 
-import paddle
-import paddle.nn.functional as F
 from .metrics import Metric
 import motmetrics as mm
 import openpyxl
@@ -311,7 +309,9 @@ class MCMOTEvaluator(object):
         self.gt_filename = os.path.join(self.data_root, '../', 'sequences',
                                         '{}.txt'.format(self.seq_name))
+        if not os.path.exists(self.gt_filename):
+            logger.warning(
+                "gt_filename '{}' of MCMOTEvaluator is not exist, so the MOTA will be -inf.")
 
     def reset_accumulator(self):
         import motmetrics as mm
         mm.lap.default_solver = 'lap'
ppdet/metrics/mot_metrics.py

@@ -22,8 +22,7 @@ import sys
 import math
 from collections import defaultdict
 import numpy as np
-import paddle
-import paddle.nn.functional as F
 from ppdet.modeling.bbox_utils import bbox_iou_np_expand
 from .map_utils import ap_per_class
 from .metrics import Metric
@@ -36,8 +35,10 @@ __all__ = ['MOTEvaluator', 'MOTMetric', 'JDEDetMetric', 'KITTIMOTMetric']
 def read_mot_results(filename, is_gt=False, is_ignore=False):
-    valid_labels = {1}
-    ignore_labels = {2, 7, 8, 12}  # only in motchallenge datasets like 'MOT16'
+    valid_label = [1]
+    ignore_labels = [2, 7, 8, 12]  # only in motchallenge datasets like 'MOT16'
+    logger.info(
+        "In MOT16/17 dataset the valid_label of ground truth is '{}', "
+        "in other dataset it should be '0' for single classs MOT.".format(
+            valid_label[0]))
     results_dict = dict()
     if os.path.isfile(filename):
         with open(filename, 'r') as f:
@@ -50,12 +51,10 @@ def read_mot_results(filename, is_gt=False, is_ignore=False):
                     continue
                 results_dict.setdefault(fid, list())
 
                 box_size = float(linelist[4]) * float(linelist[5])
 
                 if is_gt:
                     label = int(float(linelist[7]))
                     mark = int(float(linelist[6]))
-                    if mark == 0 or label not in valid_labels:
+                    if mark == 0 or label not in valid_label:
                         continue
                     score = 1
                 elif is_ignore:
@@ -118,6 +117,8 @@ class MOTEvaluator(object):
         assert self.data_type == 'mot'
         gt_filename = os.path.join(self.data_root, self.seq_name, 'gt',
                                    'gt.txt')
+        if not os.path.exists(gt_filename):
+            logger.warning(
+                "gt_filename '{}' of MOTEvaluator is not exist, so the MOTA will be -inf.")
         self.gt_frame_dict = read_mot_results(gt_filename, is_gt=True)
         self.gt_ignore_frame_dict = read_mot_results(gt_filename, is_ignore=True)
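read_mot_results keeps a ground-truth box only when its mark flag is nonzero and its class label is listed in valid_label, which is what the new log message documents. A small sketch of that filter on one MOT16-style gt.txt record (fields: frame, id, x, y, w, h, mark, class, visibility); the sample line is made up:

    valid_label = [1]                         # pedestrian class in MOT16/17 ground truth
    line = "1,1,912,484,97,109,1,1,0.8"       # hypothetical gt.txt record
    linelist = line.split(',')

    fid = int(linelist[0])
    mark = int(float(linelist[6]))
    label = int(float(linelist[7]))
    if mark == 0 or label not in valid_label:
        print("skipped")
    else:
        tlwh = tuple(map(float, linelist[2:6]))
        print(fid, tlwh)                      # 1 (912.0, 484.0, 97.0, 109.0)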
ppdet/modeling/architectures/meta_arch.py

@@ -22,22 +22,23 @@ class BaseArch(nn.Layer):
         self.fuse_norm = False
 
     def load_meanstd(self, cfg_transform):
-        self.scale = 1.
-        self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))
-        self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))
+        scale = 1.
+        mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
+        std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
         for item in cfg_transform:
             if 'NormalizeImage' in item:
-                self.mean = paddle.to_tensor(item['NormalizeImage']['mean']).reshape((1, 3, 1, 1))
-                self.std = paddle.to_tensor(item['NormalizeImage']['std']).reshape((1, 3, 1, 1))
+                mean = np.array(item['NormalizeImage']['mean'], dtype=np.float32)
+                std = np.array(item['NormalizeImage']['std'], dtype=np.float32)
                 if item['NormalizeImage'].get('is_scale', True):
-                    self.scale = 1. / 255.
+                    scale = 1. / 255.
                 break
         if self.data_format == 'NHWC':
-            self.mean = self.mean.reshape(1, 1, 1, 3)
-            self.std = self.std.reshape(1, 1, 1, 3)
+            self.scale = paddle.to_tensor(scale / std).reshape((1, 1, 1, 3))
+            self.bias = paddle.to_tensor(-mean / std).reshape((1, 1, 1, 3))
+        else:
+            self.scale = paddle.to_tensor(scale / std).reshape((1, 3, 1, 1))
+            self.bias = paddle.to_tensor(-mean / std).reshape((1, 3, 1, 1))
 
     def forward(self, inputs):
         if self.data_format == 'NHWC':
@@ -46,7 +47,7 @@ class BaseArch(nn.Layer):
         if self.fuse_norm:
             image = inputs['image']
-            self.inputs['image'] = (image * self.scale - self.mean) / self.std
+            self.inputs['image'] = image * self.scale + self.bias
             self.inputs['im_shape'] = inputs['im_shape']
             self.inputs['scale_factor'] = inputs['scale_factor']
         else:
@@ -66,8 +67,7 @@ class BaseArch(nn.Layer):
         outs = []
         for inp in inputs_list:
             if self.fuse_norm:
-                self.inputs['image'] = (inp['image'] * self.scale - self.mean) / self.std
+                self.inputs['image'] = inp['image'] * self.scale + self.bias
                 self.inputs['im_shape'] = inp['im_shape']
                 self.inputs['scale_factor'] = inp['scale_factor']
             else:
@@ -75,7 +75,7 @@ class BaseArch(nn.Layer):
             outs.append(self.get_pred())
 
         # multi-scale test
         if len(outs) > 1:
             out = self.merge_multi_scale_predictions(outs)
         else:
             out = outs[0]
@@ -92,7 +92,9 @@ class BaseArch(nn.Layer):
             keep_top_k = self.bbox_post_process.nms.keep_top_k
             nms_threshold = self.bbox_post_process.nms.nms_threshold
         else:
             raise Exception("Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now")
 
         final_boxes = []
         all_scale_outs = paddle.concat([o['bbox'] for o in outs]).numpy()
@@ -101,9 +103,11 @@ class BaseArch(nn.Layer):
             if np.count_nonzero(idxs) == 0:
                 continue
             r = nms(all_scale_outs[idxs, 1:], nms_threshold)
             final_boxes.append(np.concatenate([np.full((r.shape[0], 1), c), r], 1))
         out = np.concatenate(final_boxes)
         out = np.concatenate(sorted(out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6))
         out = {
             'bbox': paddle.to_tensor(out),
             'bbox_num': paddle.to_tensor(np.array([out.shape[0], ]))
         }
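The load_meanstd refactor folds normalization into a single fused multiply-add: instead of evaluating (image * scale - mean) / std on every forward pass, it precomputes scale/std and -mean/std once, so the forward pass only computes image * self.scale + self.bias. A quick numpy check of that identity, using the ImageNet defaults from the hunk:

    import numpy as np

    scale = 1. / 255.
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape((1, 3, 1, 1))
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape((1, 3, 1, 1))

    image = np.random.randint(0, 256, size=(1, 3, 4, 4)).astype(np.float32)

    old = (image * scale - mean) / std        # pre-refactor form
    fused_scale = scale / std                 # precomputed once in load_meanstd
    fused_bias = -mean / std
    new = image * fused_scale + fused_bias    # post-refactor fused form

    print(np.allclose(old, new, atol=1e-5))   # True: the two forms match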