Commit 5f9e8f01 — s920243400/PaddleDetection (forked from PaddlePaddle/PaddleDetection)
Authored by zhiboniu on Jun 20, 2022; committed by zhiboniu on Jun 21, 2022
move vehicle plate to new path
Parent: ac252a34

Showing 6 changed files with 46 additions and 179 deletions (+46 −179)
deploy/pphuman/pipe_utils.py                      +12   −0
deploy/pphuman/ppvechile/rec_word_dict.txt         +0   −0
deploy/pphuman/ppvechile/vechile_plate.py          +9  −92
deploy/pphuman/ppvechile/vechile_plateutils.py    +25  −79
deploy/pphuman/ppvechile/vecplatepostprocess.py    +0   −0
deploy/python/utils.py                             +0   −8
deploy/pphuman/pipe_utils.py
@@ -32,6 +32,18 @@ def argsparser():
         default=None,
         help=("Path of configure"),
         required=True)
+    parser.add_argument("--det_algorithm", type=str, default='DB')
+    parser.add_argument("--det_model_dir", type=str)
+    parser.add_argument("--det_limit_side_len", type=float, default=960)
+    parser.add_argument("--det_limit_type", type=str, default='max')
+    parser.add_argument("--rec_algorithm", type=str, default='SVTR_LCNet')
+    parser.add_argument("--rec_model_dir", type=str)
+    parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
+    parser.add_argument("--rec_batch_num", type=int, default=6)
+    parser.add_argument(
+        "--word_dict_path",
+        type=str,
+        default="deploy/pphuman/rec_word_dict.txt")
     parser.add_argument(
         "--image_file", type=str, default=None, help="Path of image file.")
     parser.add_argument(
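The det_*/rec_* flags added above are consumed by the plate detector and text recognizer in vechile_plate.py. A minimal sketch (not part of the commit; defaults taken from the diff) of how the comma-separated --rec_image_shape value is parsed downstream, mirroring the split in TextRecognizer.__init__:

```python
# Sketch only: parse the OCR-related defaults the way TextRecognizer does.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
parser.add_argument("--rec_batch_num", type=int, default=6)
args = parser.parse_args([])  # fall back to the defaults shown in the diff

# TextRecognizer.__init__ splits the string into [channels, height, width]
rec_image_shape = [int(v) for v in args.rec_image_shape.split(",")]
print(rec_image_shape)      # [3, 48, 320]
print(args.rec_batch_num)   # 6
```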
deploy/pphuman/rec_word_dict.txt → deploy/pphuman/ppvechile/rec_word_dict.txt
File moved.
deploy/python/vechile_plate.py → deploy/pphuman/ppvechile/vechile_plate.py
@@ -25,13 +25,15 @@ import paddle
 import sys
-# add deploy path of PadleDetection to sys.path
-parent_path = os.path.abspath(os.path.join(__file__, *(['..'])))
+# add deploy path of PadleDetection to sys.path
+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))
 sys.path.insert(0, parent_path)

-from infer import get_test_images, print_arguments
-from vechile_plateutils import create_predictor, get_infer_gpuid, argsparser, get_rotate_crop_image, draw_boxes
+from python.infer import get_test_images, print_arguments
+from vechile_plateutils import create_predictor, get_infer_gpuid, get_rotate_crop_image, draw_boxes
 from vecplatepostprocess import build_post_process
-from preprocess import preprocess, NormalizeImage, Permute, Resize_Mult32
+from python.preprocess import preprocess, NormalizeImage, Permute, Resize_Mult32
+from vechile_plateutils import argsparser


 class PlateDetector(object):
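The switch from *(['..']) to *(['..'] * 3) tracks the file's new depth: from deploy/pphuman/ppvechile/, three '..' hops land on deploy/, which is why the local imports become python.infer and python.preprocess. A small sketch (illustrative paths, not from the commit) of how the join resolves:

```python
# Sketch: resolve parent_path for the old and new file locations.
import os

old_file = "deploy/python/vechile_plate.py"
new_file = "deploy/pphuman/ppvechile/vechile_plate.py"

# old location: one '..' strips the filename -> .../deploy/python
print(os.path.abspath(os.path.join(old_file, *(['..']))))

# new location: three '..' hops -> .../deploy, so `python.infer` is importable
print(os.path.abspath(os.path.join(new_file, *(['..'] * 3))))
```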
@@ -63,25 +65,6 @@ class PlateDetector(object):
         self.predictor, self.input_tensor, self.output_tensors, self.config = create_predictor(
             args, 'det')
-        if args.run_benchmark:
-            import auto_log
-            pid = os.getpid()
-            gpu_id = get_infer_gpuid()
-            self.autolog = auto_log.AutoLogger(
-                model_name="det",
-                model_precision="fp32",
-                batch_size=1,
-                data_shape="dynamic",
-                save_path=None,
-                inference_config=self.config,
-                pids=pid,
-                process_name=None,
-                gpu_ids=gpu_id if args.device == "GPU" else None,
-                time_keys=[
-                    'preprocess_time', 'inference_time', 'postprocess_time'
-                ],
-                warmup=2, )

     def preprocess(self, image_list):
         preprocess_ops = []
         for op_type, new_op_info in self.pre_process_list.items():
@@ -139,24 +122,16 @@ class PlateDetector(object):
     def predict_image(self, img_list):
         st = time.time()
-        if self.args.run_benchmark:
-            self.autolog.times.start()
         img, shape_list = self.preprocess(img_list)
         if img is None:
             return None, 0
-        if self.args.run_benchmark:
-            self.autolog.times.stamp()
         self.input_tensor.copy_from_cpu(img)
         self.predictor.run()
         outputs = []
         for output_tensor in self.output_tensors:
             output = output_tensor.copy_to_cpu()
             outputs.append(output)
-        if self.args.run_benchmark:
-            self.autolog.times.stamp()
         preds = {}
         preds['maps'] = outputs[0]
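The removed self.autolog.times.start()/stamp()/end() calls bracketed the preprocess, inference, and postprocess stages; predict_image still reports overall latency through the st/et time.time() pair. A stand-alone sketch (simulated stage durations, no auto_log dependency) of the same checkpoint pattern:

```python
# Sketch: the start/stamp/stamp/end checkpoint pattern, without auto_log.
import time

checkpoints = [time.perf_counter()]      # times.start()
time.sleep(0.010)                        # preprocess (simulated)
checkpoints.append(time.perf_counter())  # times.stamp()
time.sleep(0.020)                        # inference (simulated)
checkpoints.append(time.perf_counter())  # times.stamp()
time.sleep(0.005)                        # postprocess (simulated)
checkpoints.append(time.perf_counter())  # times.end(stamp=True)

for name, t0, t1 in zip(["preprocess", "inference", "postprocess"],
                        checkpoints, checkpoints[1:]):
    print(f"{name}: {(t1 - t0) * 1000:.2f} ms")
```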
@@ -171,14 +146,12 @@ class PlateDetector(object):
             dt_boxes = self.filter_tag_det_res(dt_boxes, org_shape)
             dt_batch_boxes.append(dt_boxes)

-        if self.args.run_benchmark:
-            self.autolog.times.end(stamp=True)
         et = time.time()
         return dt_batch_boxes, et - st


 class TextRecognizer(object):
-    def __init__(self, FLAGS, use_gpu=True, benchmark=False):
+    def __init__(self, FLAGS, use_gpu=True):
         self.rec_image_shape = [int(v) for v in FLAGS.rec_image_shape.split(",")]
@@ -219,26 +192,7 @@ class TextRecognizer(object):
         self.postprocess_op = build_post_process(postprocess_params)
         self.predictor, self.input_tensor, self.output_tensors, self.config = \
             create_predictor(FLAGS, 'rec')
-        self.benchmark = benchmark
         self.use_onnx = False
-        if benchmark:
-            import auto_log
-            pid = os.getpid()
-            gpu_id = get_infer_gpuid()
-            self.autolog = auto_log.AutoLogger(
-                model_name="rec",
-                model_precision='fp32',
-                batch_size=batch_size,
-                data_shape="dynamic",
-                save_path=None,  #save_log_path,
-                inference_config=self.config,
-                pids=pid,
-                process_name=None,
-                gpu_ids=gpu_id if use_gpu else None,
-                time_keys=[
-                    'preprocess_time', 'inference_time', 'postprocess_time'
-                ],
-                warmup=0)

     def resize_norm_img(self, img, max_wh_ratio):
         imgC, imgH, imgW = self.rec_image_shape
@@ -407,8 +361,6 @@ class TextRecognizer(object):
         rec_res = [['', 0.0]] * img_num
         batch_num = self.rec_batch_num
         st = time.time()
-        if self.benchmark:
-            self.autolog.times.start()
         for beg_img_no in range(0, img_num, batch_num):
             end_img_no = min(img_num, beg_img_no + batch_num)
             norm_img_batch = []
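The loop kept in this hunk processes images in slices of rec_batch_num and writes results back through an index mapping; only the auto_log bookkeeping is dropped. A toy sketch of the slicing (synthetic sizes, and indices assumed to be an identity mapping here):

```python
# Sketch: batch slicing as in TextRecognizer, with synthetic data.
img_num = 7
batch_num = 6                       # the --rec_batch_num default
rec_res = [['', 0.0]] * img_num
indices = list(range(img_num))      # identity mapping for this sketch

for beg_img_no in range(0, img_num, batch_num):
    end_img_no = min(img_num, beg_img_no + batch_num)
    for rno in range(end_img_no - beg_img_no):
        rec_res[indices[beg_img_no + rno]] = [f"text_{beg_img_no + rno}", 1.0]

print(rec_res)  # every slot filled batch by batch
```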
@@ -453,8 +405,6 @@ class TextRecognizer(object):
                 norm_img_batch.append(norm_img)
             norm_img_batch = np.concatenate(norm_img_batch)
             norm_img_batch = norm_img_batch.copy()
-            if self.benchmark:
-                self.autolog.times.stamp()

             if self.rec_algorithm == "SRN":
                 encoder_word_pos_list = np.concatenate(encoder_word_pos_list)
@@ -488,8 +438,6 @@ class TextRecognizer(object):
                     for output_tensor in self.output_tensors:
                         output = output_tensor.copy_to_cpu()
                         outputs.append(output)
-                    if self.benchmark:
-                        self.autolog.times.stamp()
                     preds = {"predict": outputs[2]}
             elif self.rec_algorithm == "SAR":
                 valid_ratios = np.concatenate(valid_ratios)
@@ -514,8 +462,6 @@ class TextRecognizer(object):
                     for output_tensor in self.output_tensors:
                         output = output_tensor.copy_to_cpu()
                         outputs.append(output)
-                    if self.benchmark:
-                        self.autolog.times.stamp()
                     preds = outputs[0]
             else:
                 if self.use_onnx:
@@ -531,8 +477,6 @@ class TextRecognizer(object):
                     for output_tensor in self.output_tensors:
                         output = output_tensor.copy_to_cpu()
                         outputs.append(output)
-                    if self.benchmark:
-                        self.autolog.times.stamp()
                 if len(outputs) != 1:
                     preds = outputs
                 else:
@@ -540,8 +484,6 @@ class TextRecognizer(object):
             rec_result = self.postprocess_op(preds)
             for rno in range(len(rec_result)):
                 rec_res[indices[beg_img_no + rno]] = rec_result[rno]
-        if self.benchmark:
-            self.autolog.times.end(stamp=True)
         return rec_res, time.time() - st
@@ -549,8 +491,7 @@ class PlateRecognizer(object):
     def __init__(self):
         use_gpu = FLAGS.device.lower() == "gpu"
         self.platedetector = PlateDetector(FLAGS)
-        self.textrecognizer = TextRecognizer(
-            FLAGS, use_gpu=use_gpu, benchmark=FLAGS.run_benchmark)
+        self.textrecognizer = TextRecognizer(FLAGS, use_gpu=use_gpu)

     def get_platelicense(self, image_list):
         plate_text_list = []
@@ -582,35 +523,11 @@ class PlateRecognizer(object):
 def main():
     detector = PlateRecognizer()
     # predict from image
     if FLAGS.image_dir is None and FLAGS.image_file is not None:
         assert FLAGS.batch_size == 1, "batch_size should be 1, when image_file is not None"
     img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
     for img in img_list:
         image = cv2.imread(img)
         # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
         results = detector.get_platelicense([image])
-    if FLAGS.run_benchmark:
-        mems = {
-            'cpu_rss_mb': detector.cpu_mem / len(img_list),
-            'gpu_rss_mb': detector.gpu_mem / len(img_list),
-            'gpu_util': detector.gpu_util * 100 / len(img_list)
-        }
-        perf_info = detector.self.autolog.times.report(average=True)
-        model_dir = FLAGS.model_dir
-        mode = FLAGS.run_mode
-        model_info = {
-            'model_name': model_dir.strip('/').split('/')[-1],
-            'precision': mode.split('_')[-1]
-        }
-        data_info = {
-            'batch_size': FLAGS.batch_size,
-            'shape': "dynamic_shape",
-            'data_num': perf_info['img_num']
-        }
-        det_log = PaddleInferBenchmark(detector.config, model_info, data_info,
-                                       perf_info, mems)
-        det_log('Attr')


 if __name__ == '__main__':
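The deleted run_benchmark branch averaged accumulated memory and GPU-utilization counters over the number of processed images before handing them to PaddleInferBenchmark. The same arithmetic, re-created with made-up totals (values and variable bindings here are illustrative only):

```python
# Sketch: per-image averaging of accumulated benchmark counters.
img_list = ["demo_%02d.jpg" % i for i in range(4)]   # placeholder paths
cpu_mem, gpu_mem, gpu_util = 2048.0, 1024.0, 3.2     # accumulated totals (made up)

mems = {
    'cpu_rss_mb': cpu_mem / len(img_list),
    'gpu_rss_mb': gpu_mem / len(img_list),
    'gpu_util': gpu_util * 100 / len(img_list),
}
print(mems)   # {'cpu_rss_mb': 512.0, 'gpu_rss_mb': 256.0, 'gpu_util': 80.0}
```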
@@ -621,6 +538,6 @@ if __name__ == '__main__':
     FLAGS.device = FLAGS.device.upper()
     assert FLAGS.device in ['CPU', 'GPU', 'XPU'
                             ], "device should be CPU, GPU or XPU"
-    assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"
+    #    assert not FLAGS.use_gpu, "use_gpu has been deprecated, please use --device"

     main()
deploy/python/vechile_plateutils.py → deploy/pphuman/ppvechile/vechile_plateutils.py
@@ -26,12 +26,10 @@ import time
 import ast


 def str2bool(v):
     return v.lower() in ("true", "t", "1")


 def argsparser():
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument(
         "--config", type=str, default=None, help=("Path of configure"))
     parser.add_argument("--det_algorithm", type=str, default='DB')
     parser.add_argument("--det_model_dir", type=str)
     parser.add_argument("--det_limit_side_len", type=float, default=960)
@@ -51,21 +49,24 @@ def argsparser():
         type=str,
         default=None,
         help="Dir of image file, `image_file` has a higher priority.")
     parser.add_argument(
         "--batch_size", type=int, default=1, help="batch_size for inference.")
     parser.add_argument(
         "--video_file",
         type=str,
         default=None,
         help="Path of video file, `video_file` or `camera_id` has a highest priority."
     )
     parser.add_argument(
         "--video_dir",
         type=str,
         default=None,
         help="Dir of video file, `video_file` has a higher priority.")
     parser.add_argument(
         "--model_dir", nargs='*', help="set model dir in pipeline")
     parser.add_argument(
         "--camera_id",
         type=int,
         default=-1,
         help="device id of camera to predict.")
     parser.add_argument(
         "--threshold", type=float, default=0.5, help="Threshold of score.")
     parser.add_argument(
         "--output_dir",
         type=str,
@@ -82,26 +83,11 @@ def argsparser():
         default='cpu',
         help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
     )
     parser.add_argument(
         "--use_gpu",
         type=ast.literal_eval,
         default=False,
         help="Deprecated, please use `--device`.")
     parser.add_argument(
         "--run_benchmark",
         type=ast.literal_eval,
         default=False,
         help="Whether to predict a image_file repeatedly for benchmark")
     parser.add_argument(
         "--enable_mkldnn",
         type=ast.literal_eval,
         default=False,
         help="Whether use mkldnn with CPU.")
     parser.add_argument(
         "--enable_mkldnn_bfloat16",
         type=ast.literal_eval,
         default=False,
         help="Whether use mkldnn bfloat16 inference with CPU.")
     parser.add_argument(
         "--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
     parser.add_argument(
@@ -123,62 +109,20 @@ def argsparser():
         help="If the model is produced by TRT offline quantitative "
         "calibration, trt_calib_mode need to set True.")
     parser.add_argument(
         '--save_images',
         action='store_true',
         help='Save visualization image results.')
     parser.add_argument(
         '--save_mot_txts',
         action='store_true',
         help='Save tracking results (txt).')
     parser.add_argument(
-        '--save_mot_txt_per_img',
+        "--do_entrance_counting",
         action='store_true',
-        help='Save tracking results (txt) for each image.')
+        help="Whether counting the numbers of identifiers entering "
+        "or getting out from the entrance. Note that only support one-class"
+        "counting, multi-class counting is coming soon.")
-    parser.add_argument(
-        '--scaled',
-        type=bool,
-        default=False,
-        help="Whether coords after detector outputs are scaled, False in JDE YOLOv3 "
-        "True in general detector.")
-    parser.add_argument(
-        "--tracker_config", type=str, default=None, help=("tracker donfig"))
-    parser.add_argument(
-        "--reid_model_dir",
-        type=str,
-        default=None,
-        help=("Directory include:'model.pdiparams', 'model.pdmodel', "
-              "'infer_cfg.yml', created by tools/export_model.py."))
-    parser.add_argument(
-        "--reid_batch_size",
-        type=int,
-        default=50,
-        help="max batch_size for reid model inference.")
-    parser.add_argument(
-        '--use_dark',
-        type=ast.literal_eval,
-        default=True,
-        help='whether to use darkpose to get better keypoint position predict ')
-    parser.add_argument(
-        "--action_file",
-        type=str,
-        default=None,
-        help="Path of input file for action recognition.")
     parser.add_argument(
-        "--window_size",
+        "--secs_interval",
         type=int,
-        default=50,
-        help="Temporal size of skeleton feature for action recognition.")
-    parser.add_argument(
-        "--random_pad",
-        type=ast.literal_eval,
-        default=False,
-        help="Whether do random padding for action recognition.")
+        default=2,
+        help="The seconds interval to count after tracking")
     parser.add_argument(
-        "--save_results",
-        type=bool,
-        default=False,
-        help="Whether save detection result to file using coco format")
+        "--draw_center_traj",
+        action='store_true',
+        help="Whether drawing the trajectory of center")
     return parser
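The MOT/keypoint/action-recognition arguments give way to pipeline tracking flags (--do_entrance_counting, --secs_interval, --draw_center_traj). A quick sketch showing how the new flags parse (command-line values below are examples only):

```python
# Sketch: parsing the tracking-related flags added in this hunk.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--do_entrance_counting", action='store_true')
parser.add_argument("--secs_interval", type=int, default=2)
parser.add_argument("--draw_center_traj", action='store_true')

args = parser.parse_args(["--do_entrance_counting", "--secs_interval", "10"])
print(args.do_entrance_counting, args.secs_interval, args.draw_center_traj)
# True 10 False
```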
@@ -208,6 +152,8 @@ def create_predictor(args, mode):
     config = inference.Config(model_file_path, params_file_path)
+    batch_size = 1
     if args.device == "GPU":
         gpu_id = get_infer_gpuid()
         if gpu_id is None:
@@ -299,12 +245,12 @@ def create_predictor(args, mode):
         elif mode == "rec":
             imgH = int(args.rec_image_shape.split(',')[-2])
             min_input_shape = {"x": [1, 3, imgH, 10]}
-            max_input_shape = {"x": [args.batch_size, 3, imgH, 2304]}
-            opt_input_shape = {"x": [args.batch_size, 3, imgH, 320]}
+            max_input_shape = {"x": [batch_size, 3, imgH, 2304]}
+            opt_input_shape = {"x": [batch_size, 3, imgH, 320]}
         elif mode == "cls":
             min_input_shape = {"x": [1, 3, 48, 10]}
-            max_input_shape = {"x": [args.batch_size, 3, 48, 1024]}
-            opt_input_shape = {"x": [args.batch_size, 3, 48, 320]}
+            max_input_shape = {"x": [batch_size, 3, 48, 1024]}
+            opt_input_shape = {"x": [batch_size, 3, 48, 320]}
         else:
             use_dynamic_shape = False
         if use_dynamic_shape:
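After this change the TensorRT dynamic-shape profiles for the rec and cls branches use the locally fixed batch_size = 1 (set near the top of create_predictor) instead of args.batch_size. A sketch of how the rec profile is derived from --rec_image_shape (values mirror the diff; no Paddle dependency needed to follow the arithmetic):

```python
# Sketch: derive the rec-branch dynamic-shape profile as create_predictor does.
rec_image_shape = "3, 48, 320"   # the --rec_image_shape default
batch_size = 1                   # hard-coded in create_predictor after this commit

imgH = int(rec_image_shape.split(',')[-2])            # 48
min_input_shape = {"x": [1, 3, imgH, 10]}
max_input_shape = {"x": [batch_size, 3, imgH, 2304]}
opt_input_shape = {"x": [batch_size, 3, imgH, 320]}
print(min_input_shape, max_input_shape, opt_input_shape)
```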
deploy/python/vecplatepostprocess.py → deploy/pphuman/ppvechile/vecplatepostprocess.py
File moved.
deploy/python/utils.py
@@ -27,14 +27,6 @@ def argsparser():
         help=("Directory include:'model.pdiparams', 'model.pdmodel', "
               "'infer_cfg.yml', created by tools/export_model.py."),
         required=True)
-    parser.add_argument("--det_algorithm", type=str, default='DB')
-    parser.add_argument("--det_model_dir", type=str)
-    parser.add_argument("--det_limit_side_len", type=float, default=960)
-    parser.add_argument("--det_limit_type", type=str, default='max')
-    parser.add_argument("--rec_algorithm", type=str, default='SVTR_LCNet')
-    parser.add_argument("--rec_model_dir", type=str)
-    parser.add_argument("--rec_image_shape", type=str, default="3, 48, 320")
-    parser.add_argument("--rec_batch_num", type=int, default=6)
     parser.add_argument(
         "--image_file", type=str, default=None, help="Path of image file.")
     parser.add_argument(