PaddlePaddle / PaddleOCR · Commit 316ca2f8
Commit 316ca2f8 (unverified)
Authored Mar 11, 2022 by zhoujun; committed via GitHub on Mar 11, 2022.

Merge pull request #5693 from WenmuZhou/fix_cpp_lite_android

[Cpp Infer] fix bug in mem copy

Parents: 0ed88f83, 6fcc2e71
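What the fix addresses: the old code sized the recognizer's input buffer from the aspect-ratio estimate int(ceilf(32 * max_wh_ratio)) - 1, but the resize op can produce a batch member whose real width exceeds that estimate (the trailing "- 1" alone already leaves the buffer a column short), so the subsequent permute/copy wrote past the end of the buffer. The new code sizes the buffer from the widest image the resize op actually produced. A minimal standalone sketch of the two sizing strategies, with hypothetical values that are not from the repository:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // Suppose one crop in the batch has aspect ratio w/h = 10.0 and the
  // resize op scales it to height 32 and width 32 * 10.0 = 320 columns.
  float max_wh_ratio = 10.0f;
  int actual_resized_cols = 320; // what resize_op_ really produced

  // Old sizing (mirrors ceilf in the diff): estimated from the ratio,
  // minus one, so at least one column short of the widest resized image.
  int old_batch_width = int(std::ceil(32 * max_wh_ratio)) - 1; // 319

  // New sizing: track the widest image actually present in the batch.
  int new_batch_width = std::max(actual_resized_cols, 0); // 320

  std::printf("old=%d new=%d actual=%d\n", old_batch_width, new_batch_width,
              actual_resized_cols);
  // Copying 3 * 32 * 320 floats per image into a buffer sized for
  // 3 * 32 * 319 floats writes out of bounds -- the "mem copy" bug.
  return 0;
}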
Changes: 1, showing 1 changed file with 111 additions and 110 deletions.

deploy/cpp_infer/src/ocr_rec.cpp  (+111, −110)  @ 316ca2f8
...
@@ -15,108 +15,115 @@

The body of CRNNRecognizer::Run is rewritten wholesale. The substantive change: the old version computed the batch buffer width as int(ceilf(32 * max_wh_ratio)) - 1, an estimate derived from the widest aspect ratio, while the new version tracks the widest image the resize op actually produced (batch_width = max(resize_img.cols, batch_width)) and sizes the input buffer from that. The function after the change:

#include <include/ocr_rec.h>

namespace PaddleOCR {

void CRNNRecognizer::Run(std::vector<cv::Mat> img_list,
                         std::vector<double> *times) {
  std::chrono::duration<float> preprocess_diff =
      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
  std::chrono::duration<float> inference_diff =
      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();
  std::chrono::duration<float> postprocess_diff =
      std::chrono::steady_clock::now() - std::chrono::steady_clock::now();

  int img_num = img_list.size();
  std::vector<float> width_list;
  for (int i = 0; i < img_num; i++) {
    width_list.push_back(float(img_list[i].cols) / img_list[i].rows);
  }
  std::vector<int> indices = Utility::argsort(width_list);

  for (int beg_img_no = 0; beg_img_no < img_num;
       beg_img_no += this->rec_batch_num_) {
    auto preprocess_start = std::chrono::steady_clock::now();
    int end_img_no = min(img_num, beg_img_no + this->rec_batch_num_);
    float max_wh_ratio = 0;
    for (int ino = beg_img_no; ino < end_img_no; ino++) {
      int h = img_list[indices[ino]].rows;
      int w = img_list[indices[ino]].cols;
      float wh_ratio = w * 1.0 / h;
      max_wh_ratio = max(max_wh_ratio, wh_ratio);
    }

    int batch_width = 0;
    std::vector<cv::Mat> norm_img_batch;
    for (int ino = beg_img_no; ino < end_img_no; ino++) {
      cv::Mat srcimg;
      img_list[indices[ino]].copyTo(srcimg);
      cv::Mat resize_img;
      this->resize_op_.Run(srcimg, resize_img, max_wh_ratio,
                           this->use_tensorrt_);
      this->normalize_op_.Run(&resize_img, this->mean_, this->scale_,
                              this->is_scale_);
      norm_img_batch.push_back(resize_img);
      batch_width = max(resize_img.cols, batch_width);
    }

    std::vector<float> input(this->rec_batch_num_ * 3 * 32 * batch_width,
                             0.0f);
    this->permute_op_.Run(norm_img_batch, input.data());
    auto preprocess_end = std::chrono::steady_clock::now();
    preprocess_diff += preprocess_end - preprocess_start;

    // Inference.
    auto input_names = this->predictor_->GetInputNames();
    auto input_t = this->predictor_->GetInputHandle(input_names[0]);
    input_t->Reshape({this->rec_batch_num_, 3, 32, batch_width});
    auto inference_start = std::chrono::steady_clock::now();
    input_t->CopyFromCpu(input.data());
    this->predictor_->Run();

    std::vector<float> predict_batch;
    auto output_names = this->predictor_->GetOutputNames();
    auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
    auto predict_shape = output_t->shape();

    int out_num = std::accumulate(predict_shape.begin(), predict_shape.end(),
                                  1, std::multiplies<int>());
    predict_batch.resize(out_num);

    output_t->CopyToCpu(predict_batch.data());
    auto inference_end = std::chrono::steady_clock::now();
    inference_diff += inference_end - inference_start;

    // ctc decode
    auto postprocess_start = std::chrono::steady_clock::now();
    for (int m = 0; m < predict_shape[0]; m++) {
      std::vector<std::string> str_res;
      int argmax_idx;
      int last_index = 0;
      float score = 0.f;
      int count = 0;
      float max_value = 0.0f;

      for (int n = 0; n < predict_shape[1]; n++) {
        argmax_idx = int(Utility::argmax(
            &predict_batch[(m * predict_shape[1] + n) * predict_shape[2]],
            &predict_batch[(m * predict_shape[1] + n + 1) * predict_shape[2]]));
        max_value = float(*std::max_element(
            &predict_batch[(m * predict_shape[1] + n) * predict_shape[2]],
            &predict_batch[(m * predict_shape[1] + n + 1) * predict_shape[2]]));

        if (argmax_idx > 0 && (!(n > 0 && argmax_idx == last_index))) {
          score += max_value;
          count += 1;
          str_res.push_back(label_list_[argmax_idx]);
        }
        last_index = argmax_idx;
      }
      score /= count;
      if (isnan(score))
        continue;
      for (int i = 0; i < str_res.size(); i++) {
        std::cout << str_res[i];
      }
      std::cout << "\tscore: " << score << std::endl;
    }
    auto postprocess_end = std::chrono::steady_clock::now();
    postprocess_diff += postprocess_end - postprocess_start;
  }
  times->push_back(double(preprocess_diff.count() * 1000));
  times->push_back(double(inference_diff.count() * 1000));
  times->push_back(double(postprocess_diff.count() * 1000));
}
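Two notes on the design, since the diff only shows the mechanics. Crops are sorted by aspect ratio (Utility::argsort(width_list)) so that each batch groups images of similar width and the padding to batch_width stays small. And the timing contract is unchanged: Run appends exactly three values to *times, the preprocess, inference, and postprocess times in milliseconds. A hypothetical caller (rec and crops are assumed names, not from this file) could read them like this:

// Hypothetical usage sketch: assumes a constructed CRNNRecognizer `rec`
// and a std::vector<cv::Mat> of cropped text-line images `crops`.
std::vector<double> times;
rec.Run(crops, &times);
std::cout << "preprocess:  " << times[0] << " ms\n"
          << "inference:   " << times[1] << " ms\n"
          << "postprocess: " << times[2] << " ms\n";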
void CRNNRecognizer::LoadModel(const std::string &model_dir) {
  // AnalysisConfig config;
  paddle_infer::Config config;
...
@@ -130,23 +137,17 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
    if (this->precision_ == "fp16") {
      precision = paddle_infer::Config::Precision::kHalf;
    }
    if (this->precision_ == "int8") {
      precision = paddle_infer::Config::Precision::kInt8;
    }
    config.EnableTensorRtEngine(1 << 20, 10, 3, precision, false, false);
    std::map<std::string, std::vector<int>> min_input_shape = {
        {"x", {1, 3, 32, 10}}, {"lstm_0.tmp_0", {10, 1, 96}}};
    std::map<std::string, std::vector<int>> max_input_shape = {
        {"x", {1, 3, 32, 2000}}, {"lstm_0.tmp_0", {1000, 1, 96}}};
    std::map<std::string, std::vector<int>> opt_input_shape = {
        {"x", {1, 3, 32, 320}}, {"lstm_0.tmp_0", {25, 1, 96}}};
    config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape,
                                  opt_input_shape);
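This block is unchanged context in the diff, but it explains why the preprocessing above is shape-conscious: the recognizer input "x" is NCHW with a fixed height of 32 and a dynamic width, so the TensorRT profile spans widths 10 to 2000 with 320 as the optimum, and "lstm_0.tmp_0" covers the matching LSTM sequence-length range (10 to 1000, optimum 25). A batch wider than 2000 pixels would fall outside the declared profile, which is presumably part of why the batch width is kept as tight as possible. For reference, the EnableTensorRtEngine arguments are, in order: workspace size (1 << 20 bytes), max batch size (10), minimum subgraph size (3), precision, and the use-static and use-calib-mode flags (both false).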
...
@@ -168,7 +169,7 @@ void CRNNRecognizer::LoadModel(const std::string &model_dir) {
  config.SwitchIrOptim(true);
  config.EnableMemoryOptim();
  // config.DisableGlogInfo();

  this->predictor_ = CreatePredictor(config);
}
...