s920243400 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)
Commit 7588fc08
Authored June 17, 2021 by LDOUBLEV
Parent: 0466fb0c

delete benchmark utils

Showing 1 changed file with 5 additions and 55 deletions.

tools/infer/predict_det.py    +5 -55
tools/infer/predict_det.py

@@ -31,7 +31,7 @@ from ppocr.utils.utility import get_image_file_list, check_and_read_gif
 from ppocr.data import create_operators, transform
 from ppocr.postprocess import build_post_process
-import tools.infer.benchmark_utils as benchmark_utils
+# import tools.infer.benchmark_utils as benchmark_utils

 logger = get_logger()

@@ -100,8 +100,6 @@ class TextDetector(object):
         self.predictor, self.input_tensor, self.output_tensors, self.config = utility.create_predictor(
             args, 'det', logger)

-        self.det_times = utility.Timer()
-
     def order_points_clockwise(self, pts):
         """
         reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py

@@ -158,8 +156,8 @@ class TextDetector(object):
     def __call__(self, img):
         ori_im = img.copy()
         data = {'image': img}
-        self.det_times.total_time.start()
-        self.det_times.preprocess_time.start()
+        st = time.time()
         data = transform(data, self.preprocess_op)
         img, shape_list = data
         if img is None:

@@ -168,16 +166,12 @@ class TextDetector(object):
         shape_list = np.expand_dims(shape_list, axis=0)
         img = img.copy()
-        self.det_times.preprocess_time.end()
-        self.det_times.inference_time.start()
         self.input_tensor.copy_from_cpu(img)
         self.predictor.run()
         outputs = []
         for output_tensor in self.output_tensors:
             output = output_tensor.copy_to_cpu()
             outputs.append(output)
-        self.det_times.inference_time.end()

         preds = {}
         if self.det_algorithm == "EAST":

@@ -193,8 +187,6 @@ class TextDetector(object):
         else:
             raise NotImplementedError
-        self.det_times.postprocess_time.start()
         self.predictor.try_shrink_memory()
         post_result = self.postprocess_op(preds, shape_list)
         dt_boxes = post_result[0]['points']

@@ -203,10 +195,8 @@ class TextDetector(object):
         else:
             dt_boxes = self.filter_tag_det_res(dt_boxes, ori_im.shape)
-        self.det_times.postprocess_time.end()
-        self.det_times.total_time.end()
-        self.det_times.img_num += 1
-        return dt_boxes, self.det_times.total_time.value()
+        et = time.time()
+        return dt_boxes, et - st


 if __name__ == "__main__":
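
Note: after the hunks above, TextDetector.__call__ no longer drives the utility.Timer-based det_times counters; it simply brackets the whole call with time.time() and returns the detected boxes together with the elapsed wall-clock seconds. A minimal sketch of how a caller consumes the new return value (parse_args and TextDetector are the names this script already uses; the image path is a placeholder):

import cv2
import tools.infer.utility as utility
from tools.infer.predict_det import TextDetector

args = utility.parse_args()            # same CLI parser predict_det.py uses in __main__
text_detector = TextDetector(args)     # builds the Paddle predictor plus pre/post ops

img = cv2.imread("./doc/imgs_en/img_10.jpg")   # placeholder test image path
dt_boxes, elapse = text_detector(img)  # elapse is now the plain et - st wall-clock time
print("detected {} boxes in {:.3f}s".format(len(dt_boxes), elapse))
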
@@ -216,7 +206,6 @@ if __name__ == "__main__":
     count = 0
     total_time = 0
     draw_img_save = "./inference_results"
-    cpu_mem, gpu_mem, gpu_util = 0, 0, 0

     # warmup 10 times
     fake_img = np.random.uniform(-1, 1, [640, 640, 3]).astype(np.float32)

@@ -239,12 +228,6 @@ if __name__ == "__main__":
         total_time += elapse
         count += 1
-        if args.benchmark:
-            cm, gm, gu = utility.get_current_memory_mb(0)
-            cpu_mem += cm
-            gpu_mem += gm
-            gpu_util += gu
-
         logger.info("Predict time of {}: {}".format(image_file, elapse))
         src_im = utility.draw_text_det_res(dt_boxes, image_file)
         img_name_pure = os.path.split(image_file)[-1]

@@ -252,36 +235,3 @@ if __name__ == "__main__":
                                 "det_res_{}".format(img_name_pure))
         logger.info("The visualized image saved in {}".format(img_path))
-
-    # print the information about memory and time-spent
-    if args.benchmark:
-        mems = {
-            'cpu_rss_mb': cpu_mem / count,
-            'gpu_rss_mb': gpu_mem / count,
-            'gpu_util': gpu_util * 100 / count
-        }
-    else:
-        mems = None
-    logger.info("The predict time about detection module is as follows: ")
-    det_time_dict = text_detector.det_times.report(average=True)
-    det_model_name = args.det_model_dir
-
-    if args.benchmark:
-        # construct log information
-        model_info = {
-            'model_name': args.det_model_dir.split('/')[-1],
-            'precision': args.precision
-        }
-        data_info = {
-            'batch_size': 1,
-            'shape': 'dynamic_shape',
-            'data_num': det_time_dict['img_num']
-        }
-        perf_info = {
-            'preprocess_time_s': det_time_dict['preprocess_time'],
-            'inference_time_s': det_time_dict['inference_time'],
-            'postprocess_time_s': det_time_dict['postprocess_time'],
-            'total_time_s': det_time_dict['total_time']
-        }
-        benchmark_log = benchmark_utils.PaddleInferBenchmark(
-            text_detector.config, model_info, data_info, perf_info, mems)
-        benchmark_log("Det")
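
Note: with the PaddleInferBenchmark block deleted, the __main__ loop keeps only the plain accumulation it already had (total_time += elapse, count += 1). Since this commit changes predict_det.py alone, utility.get_current_memory_mb is presumably still available, so the averages the deleted code logged can be recomputed by hand if they are ever needed. A rough sketch under those assumptions, reusing args and text_detector constructed as in the note above:

import cv2
import tools.infer.utility as utility
from ppocr.utils.utility import get_image_file_list

cpu_mem = gpu_mem = gpu_util = 0
total_time, count = 0.0, 0

for image_file in get_image_file_list(args.image_dir):
    img = cv2.imread(image_file)
    if img is None:
        continue
    dt_boxes, elapse = text_detector(img)            # elapse = et - st from the simplified __call__
    total_time += elapse
    count += 1
    cm, gm, gu = utility.get_current_memory_mb(0)    # same helper the deleted loop called
    cpu_mem, gpu_mem, gpu_util = cpu_mem + cm, gpu_mem + gm, gpu_util + gu

if count:
    print("avg time {:.4f}s, cpu_rss_mb {:.1f}, gpu_rss_mb {:.1f}, gpu_util {:.1f}%".format(
        total_time / count, cpu_mem / count, gpu_mem / count, gpu_util * 100 / count))
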