Commit 19124833 (unverified)
Author: zhiboniu — authored May 20, 2021; committed via GitHub on May 20, 2021

add mot_pose_demo;sych with det benchmark codes (#3079)

Parent: 47101cfb
7 changed files with 56 additions and 45 deletions (+56 −45).
Changed files:
- README_cn.md (+1 −0)
- configs/keypoint/README.md (+1 −1)
- deploy/python/infer.py (+7 −7)
- deploy/python/keypoint_det_unite_infer.py (+4 −7)
- deploy/python/keypoint_infer.py (+29 −15)
- deploy/python/keypoint_visualize.py (+14 −15)
- docs/images/mot_pose_demo_640x360.gif (+0 −0)
README_cn.md

@@ -18,6 +18,7 @@ PaddleDetection模块化地实现了多种主流目标检测算法,提供了
 <div align="center">
   <img src="static/docs/images/football.gif" width='800'/>
+  <img src="docs/images/mot_pose_demo_640x360.gif" width='800'/>
 </div>

 ### 产品动态
configs/keypoint/README.md

@@ -76,5 +76,5 @@ python deploy/python/keypoint_infer.py --model_dir=output_inference/higherhrnet_
 python deploy/python/keypoint_infer.py --model_dir=output_inference/hrnet_w32_384x288/ --image_file=./demo/hrnet_demo.jpg --use_gpu=True --threshold=0.5

 #keypoint top-down模型 + detector 检测联合部署推理(联合推理只支持top-down方式)
-python deploy/python/keypoint_det_unite_infer.py --det_model_dir=output_inference/ppyolo_r50vd_dcn_2x_coco/ --keypoint_model_dir=output_inference/hrnet_w32_384x288/ --video_file=../video/xxx.mp4
+python deploy/python/keypoint_det_unite_infer.py --det_model_dir=output_inference/ppyolo_r50vd_dcn_2x_coco/ --keypoint_model_dir=output_inference/hrnet_w32_384x288/ --video_file=../video/xxx.mp4 --use_gpu=True
 ```
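For context, the joint command above drives a two-stage, top-down pipeline: the detector finds person boxes, and the keypoint model runs on each cropped box. Below is a minimal sketch of that data flow only; `fake_person_detector` and `fake_keypoint_model` are hypothetical stand-ins, not PaddleDetection APIs, and the real script additionally expands each box before cropping.

```python
# Illustrative sketch of the top-down keypoint pipeline data flow.
import numpy as np


def fake_person_detector(image):
    """Pretend detector: returns person boxes as [xmin, ymin, xmax, ymax]."""
    h, w = image.shape[:2]
    return np.array([[w * 0.25, h * 0.1, w * 0.75, h * 0.9]])


def fake_keypoint_model(crop):
    """Pretend top-down keypoint model: 17 keypoints in crop coordinates."""
    h, w = crop.shape[:2]
    xs = np.random.uniform(0, w, size=17)
    ys = np.random.uniform(0, h, size=17)
    scores = np.random.uniform(0.5, 1.0, size=17)
    return np.stack([xs, ys, scores], axis=1)  # shape (17, 3)


def topdown_pipeline(image):
    keypoints_per_person = []
    for xmin, ymin, xmax, ymax in fake_person_detector(image):
        # Crop the detected person region.
        crop = image[int(ymin):int(ymax), int(xmin):int(xmax)]
        if crop.size == 0:          # skip empty crops, as the patched script does
            continue
        kpts = fake_keypoint_model(crop)
        kpts[:, 0] += xmin          # shift x back to full-image coordinates
        kpts[:, 1] += ymin          # shift y back to full-image coordinates
        keypoints_per_person.append(kpts)
    return keypoints_per_person


if __name__ == '__main__':
    frame = np.zeros((360, 640, 3), dtype=np.uint8)
    print(len(topdown_pipeline(frame)), 'person(s) with keypoints')
```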
deploy/python/infer.py

@@ -191,7 +191,7 @@ class DetectorSOLOv2(Detector):
                  cpu_threads=1,
                  enable_mkldnn=False):
         self.pred_config = pred_config
-        self.predictor, self.config = load_predictor(
+        self.predictor, self.config = load_predictor(
             model_dir,
             run_mode=run_mode,
             min_subgraph_size=self.pred_config.min_subgraph_size,

@@ -541,8 +541,8 @@ def main():
         detector.det_times.info(average=True)
     else:
         mems = {
-            'cpu_rss': detector.cpu_mem / len(img_list),
-            'gpu_rss': detector.gpu_mem / len(img_list),
+            'cpu_rss_mb': detector.cpu_mem / len(img_list),
+            'gpu_rss_mb': detector.gpu_mem / len(img_list),
             'gpu_util': detector.gpu_util * 100 / len(img_list)
         }

@@ -550,16 +550,16 @@ def main():
         model_dir = FLAGS.model_dir
         mode = FLAGS.run_mode
         model_info = {
-            'model_name': model_dir.strip('/').split('/')[-1],
-            'precision': mode.split('_')[-1]
+            'model_name': model_dir.strip('/').split('/')[-1],
+            'precision': mode.split('_')[-1]
         }
         data_info = {
             'batch_size': 1,
             'shape': "dynamic_shape",
             'data_num': perf_info['img_num']
         }
-        det_log = PaddleInferBenchmark(detector.config, model_info, data_info, perf_info, mems)
+        det_log = PaddleInferBenchmark(detector.config, model_info, data_info, perf_info, mems)
         det_log('Det')
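The main() changes above rename the memory keys to cpu_rss_mb / gpu_rss_mb and feed the averaged timing report into the shared benchmark logger. A rough sketch of that reporting shape follows; `report_benchmark` is a hypothetical stand-in for `PaddleInferBenchmark` from deploy/python/benchmark_utils.py, and the sample numbers are made up. Only the dict layout mirrors what infer.py builds.

```python
# Sketch of the synced benchmark-report layout (stand-in logger, fake values).
def report_benchmark(model_info, data_info, perf_info, mems, name='Det'):
    print('[%s] model: %s (%s)' % (name, model_info['model_name'],
                                   model_info['precision']))
    print('[%s] data : batch=%d, num=%d' % (name, data_info['batch_size'],
                                            data_info['data_num']))
    print('[%s] mem  : cpu %.1f MB, gpu %.1f MB, gpu util %.1f%%' %
          (name, mems['cpu_rss_mb'], mems['gpu_rss_mb'], mems['gpu_util']))


model_dir = 'output_inference/ppyolo_r50vd_dcn_2x_coco/'
run_mode = 'trt_fp16'                   # precision is taken from the suffix
num_images = 8
model_info = {
    'model_name': model_dir.strip('/').split('/')[-1],
    'precision': run_mode.split('_')[-1]
}
data_info = {'batch_size': 1, 'shape': 'dynamic_shape', 'data_num': num_images}
perf_info = {'img_num': num_images}     # the real dict comes from det_times.report()
mems = {'cpu_rss_mb': 812.5, 'gpu_rss_mb': 1530.0, 'gpu_util': 37.5}
report_benchmark(model_info, data_info, perf_info, mems)
```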
deploy/python/keypoint_det_unite_infer.py

@@ -13,7 +13,6 @@
 # limitations under the License.
 import os
-from PIL import Image
 import cv2
 import numpy as np

@@ -52,7 +51,7 @@ def get_person_from_rect(images, results):
     org_rects = []
     for rect in valid_rects:
         rect_image, new_rect, org_rect = expand_crop(images, rect)
-        if rect_image is None:
+        if rect_image is None or rect_image.size == 0:
             continue
         image_buff.append([rect_image, new_rect])
         org_rects.append(org_rect)

@@ -113,13 +112,13 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):
         os.makedirs(FLAGS.output_dir)
     out_path = os.path.join(FLAGS.output_dir, video_name)
     writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
-    index = 1
+    index = 0
     while (1):
         ret, frame = capture.read()
         if not ret:
             break
-        print('detect frame:%d' % (index))
         index += 1
+        print('detect frame:%d' % (index))
         frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         results = detector.predict(frame2, FLAGS.det_threshold)

@@ -136,7 +135,7 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):
         keypoint_res = {}
         keypoint_res['keypoint'] = [
             np.vstack(keypoint_vector), np.vstack(score_vector)
-        ]
+        ] if len(keypoint_vector) > 0 else [[], []]
         keypoint_res['bbox'] = rect_vecotr
         im = draw_pose(
             frame,

@@ -189,8 +188,6 @@ def main():
     # predict from image
     img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
     topdown_unite_predict(detector, topdown_keypoint_detector, img_list)
-    detector.det_times.info(average=True)
-    topdown_keypoint_detector.det_times.info(average=True)

 if __name__ == '__main__':
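The `rect_image.size == 0` guard added in get_person_from_rect matters because a crop taken from a degenerate or fully out-of-frame box is still a valid numpy array, just one with zero elements rather than None. A small illustration; `expand_and_crop` below is a simplified stand-in for the script's `expand_crop` helper, not the real function.

```python
# Why the empty-crop check is needed (stand-in for expand_crop).
import numpy as np


def expand_and_crop(image, rect):
    """Clip the box to the image bounds and crop (simplified)."""
    h, w = image.shape[:2]
    xmin, ymin, xmax, ymax = [int(v) for v in rect]
    xmin, ymin = max(0, xmin), max(0, ymin)
    xmax, ymax = min(w, xmax), min(h, ymax)
    return image[ymin:ymax, xmin:xmax]


image = np.zeros((360, 640, 3), dtype=np.uint8)
degenerate_box = [700, 100, 800, 200]       # entirely outside the 640-wide frame
crop = expand_and_crop(image, degenerate_box)

print(crop is None)        # False: still an ndarray...
print(crop.size == 0)      # True : ...but it holds zero pixels
# Hence the patched check: `if rect_image is None or rect_image.size == 0: continue`
```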
deploy/python/keypoint_infer.py

@@ -28,7 +28,8 @@ from keypoint_postprocess import HrHRNetPostProcess, HRNetPostProcess
 from keypoint_visualize import draw_pose
 from paddle.inference import Config
 from paddle.inference import create_predictor
-from utils import argsparser, Timer, get_current_memory_mb, LoggerHelper
+from utils import argsparser, Timer, get_current_memory_mb
+from benchmark_utils import PaddleInferBenchmark
 from infer import get_test_images, print_arguments

 # Global dictionary

@@ -66,7 +67,7 @@ class KeyPoint_Detector(object):
                  cpu_threads=1,
                  enable_mkldnn=False):
         self.pred_config = pred_config
-        self.predictor = load_predictor(
+        self.predictor, self.config = load_predictor(
             model_dir,
             run_mode=run_mode,
             min_subgraph_size=self.pred_config.min_subgraph_size,

@@ -129,7 +130,7 @@ class KeyPoint_Detector(object):
             MaskRCNN's results include 'masks': np.ndarray:
             shape: [N, im_h, im_w]
         '''
-        self.det_times.preprocess_time.start()
+        self.det_times.preprocess_time_s.start()
         inputs = self.preprocess(image)
         np_boxes, np_masks = None, None
         input_names = self.predictor.get_input_names()

@@ -137,7 +138,7 @@ class KeyPoint_Detector(object):
         for i in range(len(input_names)):
             input_tensor = self.predictor.get_input_handle(input_names[i])
             input_tensor.copy_from_cpu(inputs[input_names[i]])
-        self.det_times.preprocess_time.end()
+        self.det_times.preprocess_time_s.end()
         for i in range(warmup):
             self.predictor.run()
             output_names = self.predictor.get_output_names()

@@ -152,7 +153,7 @@ class KeyPoint_Detector(object):
                 inds_k.copy_to_cpu()
             ]
-        self.det_times.inference_time.start()
+        self.det_times.inference_time_s.start()
         for i in range(repeats):
             self.predictor.run()
             output_names = self.predictor.get_output_names()

@@ -166,12 +167,12 @@ class KeyPoint_Detector(object):
                 masks_tensor.copy_to_cpu(), heat_k.copy_to_cpu(),
                 inds_k.copy_to_cpu()
             ]
-        self.det_times.inference_time.end(repeats=repeats)
+        self.det_times.inference_time_s.end(repeats=repeats)

-        self.det_times.postprocess_time.start()
+        self.det_times.postprocess_time_s.start()
         results = self.postprocess(np_boxes, np_masks, inputs, threshold=threshold)
-        self.det_times.postprocess_time.end()
+        self.det_times.postprocess_time_s.end()
         self.det_times.img_num += 1
         return results

@@ -318,7 +319,7 @@ def load_predictor(model_dir,
     # disable feed, fetch OP, needed by zero_copy_run
     config.switch_use_feed_fetch_ops(False)
     predictor = create_predictor(config)
-    return predictor
+    return predictor, config

 def predict_image(detector, image_list):

@@ -347,7 +348,8 @@ def predict_video(detector, camera_id):
         video_name = 'output.mp4'
     else:
         capture = cv2.VideoCapture(FLAGS.video_file)
-        video_name = os.path.basename(os.path.split(FLAGS.video_file)[-1])
+        video_name = os.path.splitext(os.path.basename(FLAGS.video_file))[0] + '.mp4'
     fps = 30
     width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
     height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

@@ -403,13 +405,25 @@ def main():
         detector.det_times.info(average=True)
     else:
         mems = {
-            'cpu_rss': detector.cpu_mem / len(img_list),
-            'gpu_rss': detector.gpu_mem / len(img_list),
+            'cpu_rss_mb': detector.cpu_mem / len(img_list),
+            'gpu_rss_mb': detector.gpu_mem / len(img_list),
             'gpu_util': detector.gpu_util * 100 / len(img_list)
         }
-        det_logger = LoggerHelper(FLAGS, detector.det_times.report(average=True), mems)
-        det_logger.report()
+        perf_info = detector.det_times.report(average=True)
+        model_dir = FLAGS.model_dir
+        mode = FLAGS.run_mode
+        model_info = {
+            'model_name': model_dir.strip('/').split('/')[-1],
+            'precision': mode.split('_')[-1]
+        }
+        data_info = {
+            'batch_size': 1,
+            'shape': "dynamic_shape",
+            'data_num': perf_info['img_num']
+        }
+        det_log = PaddleInferBenchmark(detector.config, model_info, data_info, perf_info, mems)
+        det_log('KeyPoint')

 if __name__ == '__main__':
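Most of the predict() changes above are mechanical renames to the `_s`-suffixed phase timers used by the shared benchmark utilities, and load_predictor now returns the Config alongside the predictor so the benchmark logger can inspect it. A toy sketch of that phase-timing pattern follows; `PhaseTimer` and `Times` are illustrative stand-ins, not the real Timer in deploy/python/utils.py.

```python
# Toy version of the *_time_s phase timers bracketing predict() stages.
import time


class PhaseTimer:
    """Accumulates wall-clock seconds for one phase."""

    def __init__(self):
        self.total = 0.0
        self._t0 = None

    def start(self):
        self._t0 = time.time()

    def end(self, repeats=1):
        # Divide by `repeats` so repeated inference reports per-run latency.
        self.total += (time.time() - self._t0) / repeats


class Times:
    def __init__(self):
        self.preprocess_time_s = PhaseTimer()
        self.inference_time_s = PhaseTimer()
        self.postprocess_time_s = PhaseTimer()
        self.img_num = 0

    def report(self, average=True):
        # `average` kept only for signature parity with the real helper.
        return {
            'preprocess_time': self.preprocess_time_s.total,
            'inference_time': self.inference_time_s.total,
            'postprocess_time': self.postprocess_time_s.total,
            'img_num': self.img_num,
        }


det_times = Times()
det_times.preprocess_time_s.start()
time.sleep(0.01)                       # pretend preprocessing
det_times.preprocess_time_s.end()
det_times.inference_time_s.start()
time.sleep(0.02)                       # pretend two inference repeats
det_times.inference_time_s.end(repeats=2)
det_times.img_num += 1
print(det_times.report(average=True))
```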
deploy/python/keypoint_visualize.py

@@ -19,11 +19,6 @@ import numpy as np
 import math

-def map_coco_to_personlab(keypoints):
-    permute = [0, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
-    return keypoints[:, permute, :]

 def draw_pose(imgfile,
               results,
               visual_thread=0.6,

@@ -39,9 +34,9 @@ def draw_pose(imgfile,
                      'for example: `pip install matplotlib`.')
         raise e
-    EDGES = [(0, 14), (0, 13), (0, 4), (0, 1), (14, 16), (13, 15), (4, 10), (1, 7), (10, 11), (7, 8), (11, 12), (8, 9), (4, 5), (1, 2), (5, 6), (2, 3)]
+    EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8), (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14), (13, 15), (14, 16), (11, 12)]
     NUM_EDGES = len(EDGES)
     colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \

@@ -52,25 +47,28 @@ def draw_pose(imgfile,
     img = cv2.imread(imgfile) if type(imgfile) == str else imgfile
     skeletons, scores = results['keypoint']
+    color_set = results['colors'] if 'colors' in results else None
     if 'bbox' in results:
         bboxs = results['bbox']
-        for idx, rect in enumerate(bboxs):
+        for j, rect in enumerate(bboxs):
             xmin, ymin, xmax, ymax = rect
-            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), colors[0], 1)
+            color = colors[0] if color_set is None else colors[color_set[j] % len(colors)]
+            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
     canvas = img.copy()
     for i in range(17):
         rgba = np.array(cmap(1 - i / 17. - 1. / 34))
         rgba[0:3] *= 255
         for j in range(len(skeletons)):
             if skeletons[j][i, 2] < visual_thread:
                 continue
-            cv2.circle(canvas, tuple(skeletons[j][i, 0:2].astype('int32')), 2, colors[i], thickness=-1)
+            color = colors[i] if color_set is None else colors[color_set[j] % len(colors)]
+            cv2.circle(canvas, tuple(skeletons[j][i, 0:2].astype('int32')), 2, color, thickness=-1)
     to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)

@@ -78,7 +76,6 @@ def draw_pose(imgfile,
     stickwidth = 2
-    skeletons = map_coco_to_personlab(skeletons)
     for i in range(NUM_EDGES):
         for j in range(len(skeletons)):
             edge = EDGES[i]

@@ -96,7 +93,9 @@ def draw_pose(imgfile,
             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
-            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
+            color = colors[i] if color_set is None else colors[color_set[j] % len(colors)]
+            cv2.fillConvexPoly(cur_canvas, polygon, color)
             canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
     if returnimg:
         return canvas
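The visualizer now indexes keypoints directly in COCO order (so map_coco_to_personlab is removed) and can pick a per-person color from an optional results['colors'] list. A small self-contained drawing example with the same COCO edge list; `draw_coco_skeleton` is illustrative, not the repo's draw_pose, and the keypoints are random.

```python
# Drawing a COCO-17 skeleton with the edge list used by the updated visualizer.
import cv2
import numpy as np

EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
         (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14), (13, 15),
         (14, 16), (11, 12)]
COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0],
          [170, 255, 0], [85, 255, 0], [0, 255, 0]]


def draw_coco_skeleton(canvas, keypoints, person_color_id=0, thresh=0.6):
    """keypoints: (17, 3) array of (x, y, score) in image coordinates."""
    color = COLORS[person_color_id % len(COLORS)]
    for x, y, score in keypoints:
        if score >= thresh:
            cv2.circle(canvas, (int(x), int(y)), 2, color, thickness=-1)
    for a, b in EDGES:
        # Only connect joints whose confidence clears the threshold.
        if keypoints[a, 2] >= thresh and keypoints[b, 2] >= thresh:
            pa = (int(keypoints[a, 0]), int(keypoints[a, 1]))
            pb = (int(keypoints[b, 0]), int(keypoints[b, 1]))
            cv2.line(canvas, pa, pb, color, 2)
    return canvas


img = np.zeros((360, 640, 3), dtype=np.uint8)
kpts = np.hstack([np.random.uniform(50, 300, (17, 2)),
                  np.random.uniform(0.5, 1.0, (17, 1))])
cv2.imwrite('coco_skeleton_demo.jpg', draw_coco_skeleton(img, kpts))
```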
docs/images/mot_pose_demo_640x360.gif (new file, mode 0 → 100644)

Binary image added; the image diff is too large to display (view the blob instead).