Commit f6097cbd
Authored on Nov 18, 2021 by cuicheng01

add tipc lite multi-predictor & arm_gpu_opencl chains

Parent: 1c2c2698
Showing 10 changed files with 398 additions and 84 deletions (+398, -84)
deploy/lite/ocr_db_crnn.cc  (+67, -11)
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt  (+3, -2)
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt  (+13, -0)
test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt  (+13, -0)
test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt  (+13, -0)
test_tipc/docs/test_lite_arm_cpp.md  (+29, -9)
test_tipc/prepare_lite_cpp.sh  (+95, -0)
test_tipc/readme.md  (+3, -2)
test_tipc/test_lite_arm_cpp.sh  (+162, -0)
test_tipc/test_lite_arm_cpu_cpp.sh  (+0, -60)
deploy/lite/ocr_db_crnn.cc
...
...
@@ -172,7 +172,10 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
  cv::Mat resize_img;
  int index = 0;
  std::vector<double> time_info = {0, 0, 0};
  for (int i = boxes.size() - 1; i >= 0; i--) {
    auto preprocess_start = std::chrono::steady_clock::now();
    crop_img = GetRotateCropImage(srcimg, boxes[i]);
    if (use_direction_classify >= 1) {
      crop_img = RunClsModel(crop_img, predictor_cls);
...
...
@@ -191,7 +194,9 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
    auto *data0 = input_tensor0->mutable_data<float>();
    NeonMeanScale(dimg, data0, resize_img.rows * resize_img.cols, mean, scale);
    auto preprocess_end = std::chrono::steady_clock::now();
    //// Run CRNN predictor
    auto inference_start = std::chrono::steady_clock::now();
    predictor_crnn->Run();

    // Get output and run postprocess
...
...
@@ -199,8 +204,10 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
        std::move(predictor_crnn->GetOutput(0)));
    auto *predict_batch = output_tensor0->data<float>();
    auto predict_shape = output_tensor0->shape();
    auto inference_end = std::chrono::steady_clock::now();

    // ctc decode
    auto postprocess_start = std::chrono::steady_clock::now();
    std::string str_res;
    int argmax_idx;
    int last_index = 0;
...
...
@@ -224,7 +231,20 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
    score /= count;
    rec_text.push_back(str_res);
    rec_text_score.push_back(score);
    auto postprocess_end = std::chrono::steady_clock::now();
    std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
    time_info[0] += double(preprocess_diff.count() * 1000);
    std::chrono::duration<float> inference_diff = inference_end - inference_start;
    time_info[1] += double(inference_diff.count() * 1000);
    std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
    time_info[2] += double(postprocess_diff.count() * 1000);
  }
  times->push_back(time_info[0]);
  times->push_back(time_info[1]);
  times->push_back(time_info[2]);
}

std::vector<std::vector<std::vector<int>>>
...
...
@@ -312,7 +332,7 @@ std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, int num_threa
  config.set_model_from_file(model_file);
  config.set_threads(num_threads);
  std::cout << num_threads << std::endl;
  std::shared_ptr<PaddlePredictor> predictor =
      CreatePaddlePredictor<MobileConfig>(config);
  return predictor;
...
...
@@ -434,6 +454,9 @@ void system(char **argv){
  auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
  auto cls_predictor = loadModel(cls_model_file, std::stoi(num_threads));

  std::vector<double> det_time_info = {0, 0, 0};
  std::vector<double> rec_time_info = {0, 0, 0};
  for (int i = 0; i < cv_all_img_names.size(); ++i) {
    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
...
...
@@ -460,7 +483,37 @@ void system(char **argv){
    for (int i = 0; i < rec_text.size(); i++) {
      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
                << std::endl;
    }

    det_time_info[0] += det_times[0];
    det_time_info[1] += det_times[1];
    det_time_info[2] += det_times[2];
    rec_time_info[0] += rec_times[0];
    rec_time_info[1] += rec_times[1];
    rec_time_info[2] += rec_times[2];
  }
  if (strcmp(argv[12], "True") == 0) {
    AutoLogger autolog_det(det_model_file, runtime_device,
                           std::stoi(num_threads), std::stoi(batchsize),
                           "dynamic", precision, det_time_info,
                           cv_all_img_names.size());
    AutoLogger autolog_rec(rec_model_file, runtime_device,
                           std::stoi(num_threads), std::stoi(batchsize),
                           "dynamic", precision, rec_time_info,
                           cv_all_img_names.size());
    autolog_det.report();
    std::cout << std::endl;
    autolog_rec.report();
  }
}
...
...
@@ -503,15 +556,15 @@ void det(int argc, char **argv) {
  auto img_vis = Visualization(srcimg, boxes);
  std::cout << boxes.size() << " bboxes have detected:" << std::endl;

  // for (int i=0; i<boxes.size(); i++){
  //   std::cout << "The " << i << " box:" << std::endl;
  //   for (int j=0; j<4; j++){
  //     for (int k=0; k<2; k++){
  //       std::cout << boxes[i][j][k] << "\t";
  //     }
  //   }
  //   std::cout << std::endl;
  // }
  for (int i = 0; i < boxes.size(); i++) {
    std::cout << "The " << i << " box:" << std::endl;
    for (int j = 0; j < 4; j++) {
      for (int k = 0; k < 2; k++) {
        std::cout << boxes[i][j][k] << "\t";
      }
    }
    std::cout << std::endl;
  }
  time_info[0] += times[0];
  time_info[1] += times[1];
  time_info[2] += times[2];
...
...
@@ -585,6 +638,9 @@ void rec(int argc, char **argv) {
    std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
              << std::endl;
  }
  time_info[0] += times[0];
  time_info[1] += times[1];
  time_info[2] += times[2];
}
  // TODO: support autolog
  if (strcmp(argv[9], "True") == 0) {
...
...
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
runtime_device:ARM_CPU
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
rec_infer_model:ch_PP-OCRv2_rec_infer|ch_PP-OCRv2_rec_slim_quant_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--system_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
...
...
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
new file mode 100644
===========================lite_params===========================
inference:./ocr_db_crnn det
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
rec_infer_model:ch_PP-OCRv2_rec_infer|ch_PP-OCRv2_rec_slim_quant_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
new file mode 100644
===========================lite_params===========================
inference:./ocr_db_crnn system
runtime_device:ARM_CPU
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
rec_infer_model:ch_PP-OCRv2_rec_infer|ch_PP-OCRv2_rec_slim_quant_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
new file mode 100644
===========================lite_params===========================
inference:./ocr_db_crnn system
runtime_device:ARM_GPU_OPENCL
det_infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
rec_infer_model:ch_PP-OCRv2_rec_infer|ch_PP-OCRv2_rec_slim_quant_infer
cls_infer_model:ch_ppocr_mobile_v2.0_cls_infer|ch_ppocr_mobile_v2.0_cls_slim_infer
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
test_tipc/docs/test_lite_arm_cpu_cpp.md → test_tipc/docs/test_lite_arm_cpp.md
# Lite\_arm\_cpu\_cpp prediction function test
# Lite\_arm\_cpp prediction function test

The main program of the Lite\_arm\_cpu\_cpp prediction function test is `test_lite_arm_cpu_cpp.sh`, which tests the C++ inference of models on ARM CPU based on the Lite inference library.

The main program of the Lite\_arm\_cpp prediction function test is `test_lite_arm_cpp.sh`, which tests the C++ inference of models on ARM CPU based on the Lite inference library.
## 1. Summary of test conclusions
...
...
@@ -12,10 +12,11 @@ Lite\_arm\_cpu\_cpp预测功能测试的主程序为`test_lite_arm_cpu_cpp.sh`
- threads: 1 and 4
- number of predictors: multi-predictor prediction and single-predictor prediction
- inference library source: downloaded or compiled from source
- test hardware: ARM\_CPU / ARM\_GPU_OPENCL

| Model type | batch-size | threads | Number of predictors | Inference library source |
| :----: | :----: | :----: | :----: | :----: |
| Normal model / quantized model | 1 | 1/4 | 1 | Download |

| Model type | batch-size | threads | Number of predictors | Inference library source | Test hardware |
| :----: | :----: | :----: | :----: | :----: | :----: |
| Normal model / quantized model | 1 | 1/4 | 1/2 | Download | ARM\_CPU/ARM\_GPU_OPENCL |
## 2. Test procedure
...
...
@@ -23,19 +24,38 @@ Lite\_arm\_cpu\_cpp预测功能测试的主程序为`test_lite_arm_cpu_cpp.sh`
### 2.1 Function test
First run `prepare_lite.sh`. After it finishes, `test_lite.tar` is generated in the current directory; it contains the test data, the test models and the executable used for prediction. Upload `test_lite.tar` to the phone under test, extract it in the phone's terminal, enter the `test_lite` directory, and then run `test_lite_arm_cpu_cpp.sh` to run the test. Log files with the `lite_*.log` suffix are finally generated in the `test_lite/output` directory.

First run `prepare_lite_cpp.sh`. After it finishes, `test_lite.tar` is generated in the current directory; it contains the test data, the test models and the executable used for prediction. Upload `test_lite.tar` to the phone under test, extract it in the phone's terminal, enter the `test_lite` directory, and then run `test_lite_arm_cpp.sh` to run the test. Log files with the `lite_*.log` suffix are finally generated in the `test_lite/output` directory.
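To make the workflow above concrete, the on-phone half of the test, once `test_lite.tar` has been copied to the device, amounts to a few shell commands. This is only a minimal sketch for illustration, assuming a termux-style shell on the phone and the ARM_CPU config used below; the file-transfer step itself is left open:

```shell
# On the phone, in the directory that received test_lite.tar
tar -xf test_lite.tar          # unpack test data, models and the prediction executable
cd test_lite
# Run the test driver against one of the prepared configs
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
# Per-configuration logs (lite_*.log) end up under test_lite/output
ls output/lite_*.log
```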
#### 2.1.1 Test ARM\_CPU
```shell
# Data and model preparation
bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
# Test on the phone:
bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
```
**Note**: since running this project requires commands such as bash, the traditional adb approach cannot install them well, so it is recommended to connect to the computer by opening a virtual terminal on the phone; for how to connect, see [Connecting an Android phone to a computer with termux](./termux_for_android.md).
#### 2.1.2 ARM\_GPU\_OPENCL
```shell
# Data and model preparation
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
# Test on the phone:
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_gpu_opencl.txt
```
**Note**:
1. Since running this project requires commands such as bash, the traditional adb approach cannot install them well, so it is recommended to connect to the computer by opening a virtual terminal on the phone; for how to connect, see [Connecting an Android phone to a computer with termux](./termux_for_android.md).
2. To test the full text detection and recognition pipeline, replace the config file with `test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt` when running `prepare_lite_cpp.sh`, and use the same config file in the on-phone test stage.
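For note 2, a full det+rec pipeline run only swaps in the `ppocr_system_mobile` config on both sides; a minimal sketch of the two commands, following the same pattern as the ARM_CPU example above:

```shell
# Host side: prepare data, models and executables for the full system pipeline
bash test_tipc/prepare_lite_cpp.sh ./test_tipc/configs/ppocr_system_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
# Phone side: run the same config through the test driver
bash test_lite_arm_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
```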
#### Results
...
...
test_tipc/prepare_lite.sh → test_tipc/prepare_lite_cpp.sh
...
...
@@ -6,22 +6,59 @@ dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'

lite_model_list=$(func_parser_value "${lines[2]}")
inference_cmd=$(func_parser_value "${lines[1]}")
DEVICE=$(func_parser_value "${lines[2]}")
det_lite_model_list=$(func_parser_value "${lines[3]}")
rec_lite_model_list=$(func_parser_value "${lines[4]}")
cls_lite_model_list=$(func_parser_value "${lines[5]}")

if [[ $inference_cmd =~ "det" ]]; then
    lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
    lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
    lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
    echo "inference_cmd is wrong, please check."
    exit 1
fi

if [ ${DEVICE} = "ARM_CPU" ]; then
    valid_targets="arm"
    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
    end_index="66"
elif [ ${DEVICE} = "ARM_GPU_OPENCL" ]; then
    valid_targets="opencl"
    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
    end_index="71"
else
    echo "DEVICE only suport ARM_CPU, ARM_GPU_OPENCL."
    exit 2
fi

# prepare lite .nb model
pip install paddlelite==2.9
pip install paddlelite==2.10-rc
current_dir=${PWD}
IFS="|"
model_path=./inference_models

for model in ${lite_model_list[*]}; do
    if [[ $model =~ "PP-OCRv2" ]]; then
        inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
    elif [[ $model =~ "v2.0" ]]; then
        inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
    else
        echo "Model is wrong, please check."
        exit 3
    fi
    inference_model=${inference_model_url##*/}
    wget -nc -P ${model_path} ${inference_model_url}
    cd ${model_path} && tar -xf ${inference_model} && cd ../
    model_dir=${model_path}/${inference_model%.*}
    model_file=${model_dir}/inference.pdmodel
    param_file=${model_dir}/inference.pdiparams
    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt
    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=${valid_targets} --optimize_out=${model_dir}_opt
done
# prepare test data
...
...
@@ -35,18 +72,21 @@ cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../

# prepare lite env
paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
export http_proxy=http://172.19.57.45:3128
export https_proxy=http://172.19.57.45:3128
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:66}
paddlelite_file=${paddlelite_zipfile:0:${end_index}}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git
unset http_proxy
unset https_proxy
make -j
sleep 1
make -j
...
...
test_tipc/readme.md
...
...
@@ -81,10 +81,11 @@ test_tipc/
├── cpp_ppocr_det_mobile_results_fp16.txt   # pre-stored fp16 results of C++ prediction for the mobile ppocr detection model
├── ...
├── prepare.sh                              # downloads the data and models needed to run test_*.sh
├── prepare_lite_cpp.sh                     # prepares the data, models and executables needed to run test_*.sh on the phone
├── test_train_inference_python.sh          # main program for testing python training and prediction
├── test_inference_cpp.sh                   # main program for testing C++ prediction
├── test_serving.sh                         # main program for testing serving deployment prediction
├── test_lite_arm_cpu_cpp.sh                # main program for testing C++ prediction with lite deployed on arm_cpu
├── test_lite_arm_cpp.sh                    # main program for testing C++ prediction with lite deployed on arm
├── compare_results.py                      # checks whether the prediction results in the logs match the pre-stored results within the allowed error
└── readme.md                               # usage documentation
```
...
...
@@ -123,5 +124,5 @@ test_tipc/
[test_train_inference_python usage](docs/test_train_inference_python.md)
[test_inference_cpp usage](docs/test_inference_cpp.md)
[test_serving usage](docs/test_serving.md)
[test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md)
[test_lite_arm_cpp usage](docs/test_lite_arm_cpp.md)
[test_paddle2onnx usage](docs/test_paddle2onnx.md)
test_tipc/test_lite_arm_cpp.sh
new file mode 100644
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH

FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})

# parser lite inference
inference_cmd=$(func_parser_value "${lines[1]}")
runtime_device=$(func_parser_value "${lines[2]}")
det_model_list=$(func_parser_value "${lines[3]}")
rec_model_list=$(func_parser_value "${lines[4]}")
cls_model_list=$(func_parser_value "${lines[5]}")
cpu_threads_list=$(func_parser_value "${lines[6]}")
det_batch_size_list=$(func_parser_value "${lines[7]}")
rec_batch_size_list=$(func_parser_value "${lines[8]}")
infer_img_dir_list=$(func_parser_value "${lines[9]}")
config_dir=$(func_parser_value "${lines[10]}")
rec_dict_dir=$(func_parser_value "${lines[11]}")
benchmark_value=$(func_parser_value "${lines[12]}")

if [[ $inference_cmd =~ "det" ]]; then
    lite_model_list=${det_lite_model_list}
elif [[ $inference_cmd =~ "rec" ]]; then
    lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
elif [[ $inference_cmd =~ "system" ]]; then
    lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
else
    echo "inference_cmd is wrong, please check."
    exit 1
fi

LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"

function func_test_det(){
    IFS='|'
    _script=$1
    _det_model=$2
    _log_path=$3
    _img_dir=$4
    _config=$5
    if [[ $_det_model =~ "slim" ]]; then
        precision="INT8"
    else
        precision="FP32"
    fi

    # lite inference
    for num_threads in ${cpu_threads_list[*]}; do
        for det_batchsize in ${det_batch_size_list[*]}; do
            _save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
            command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
            echo ${command}
            eval ${command}
            status_check $? "${command}" "${status_log}"
        done
    done
}

function func_test_rec(){
    IFS='|'
    _script=$1
    _rec_model=$2
    _cls_model=$3
    _log_path=$4
    _img_dir=$5
    _config=$6
    _rec_dict_dir=$7
    if [[ $_det_model =~ "slim" ]]; then
        _precision="INT8"
    else
        _precision="FP32"
    fi

    # lite inference
    for num_threads in ${cpu_threads_list[*]}; do
        for rec_batchsize in ${rec_batch_size_list[*]}; do
            _save_log_path="${_log_path}/lite_${_rec_model}_${cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
            command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
            echo ${command}
            eval ${command}
            status_check $? "${command}" "${status_log}"
        done
    done
}

function func_test_system(){
    IFS='|'
    _script=$1
    _det_model=$2
    _rec_model=$3
    _cls_model=$4
    _log_path=$5
    _img_dir=$6
    _config=$7
    _rec_dict_dir=$8
    if [[ $_det_model =~ "slim" ]]; then
        _precision="INT8"
    else
        _precision="FP32"
    fi

    # lite inference
    for num_threads in ${cpu_threads_list[*]}; do
        for det_batchsize in ${det_batch_size_list[*]}; do
            for rec_batchsize in ${rec_batch_size_list[*]}; do
                _save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log"
                command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1"
                echo ${command}
                eval ${command}
                status_check $? "${command}" "${status_log}"
            done
        done
    done
}

echo "################### run test ###################"

if [[ $inference_cmd =~ "det" ]]; then
    IFS="|"
    det_model_list=(${det_model_list[*]})

    for i in {0..1}; do
        #run lite inference
        for img_dir in ${infer_img_dir_list[*]}; do
            func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}"
        done
    done

elif [[ $inference_cmd =~ "rec" ]]; then
    IFS="|"
    rec_model_list=(${rec_model_list[*]})
    cls_model_list=(${cls_model_list[*]})

    for i in {0..1}; do
        #run lite inference
        for img_dir in ${infer_img_dir_list[*]}; do
            func_test_rec "${inference_cmd}" "${rec_model}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${rec_dict_dir}" "${config_dir}"
        done
    done

elif [[ $inference_cmd =~ "system" ]]; then
    IFS="|"
    det_model_list=(${det_model_list[*]})
    rec_model_list=(${rec_model_list[*]})
    cls_model_list=(${cls_model_list[*]})

    for i in {0..1}; do
        #run lite inference
        for img_dir in ${infer_img_dir_list[*]}; do
            func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}"
        done
    done
fi
test_tipc/test_lite_arm_cpu_cpp.sh
deleted file mode 100644 → 0
#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH

FILENAME=$1
dataline=$(cat $FILENAME)
# parser params
IFS=$'\n'
lines=(${dataline})

# parser lite inference
lite_inference_cmd=$(func_parser_value "${lines[1]}")
lite_model_dir_list=$(func_parser_value "${lines[2]}")
runtime_device=$(func_parser_value "${lines[3]}")
lite_cpu_threads_list=$(func_parser_value "${lines[4]}")
lite_batch_size_list=$(func_parser_value "${lines[5]}")
lite_infer_img_dir_list=$(func_parser_value "${lines[8]}")
lite_config_dir=$(func_parser_value "${lines[9]}")
lite_rec_dict_dir=$(func_parser_value "${lines[10]}")
lite_benchmark_value=$(func_parser_value "${lines[11]}")

LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"

function func_lite(){
    IFS='|'
    _script=$1
    _lite_model=$2
    _log_path=$3
    _img_dir=$4
    _config=$5
    if [[ $lite_model =~ "slim" ]]; then
        precision="INT8"
    else
        precision="FP32"
    fi

    # lite inference
    for num_threads in ${lite_cpu_threads_list[*]}; do
        for batchsize in ${lite_batch_size_list[*]}; do
            _save_log_path="${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log"
            command="${_script} ${_lite_model} ${runtime_device} ${precision} ${num_threads} ${batchsize} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
            eval ${command}
            status_check $? "${command}" "${status_log}"
        done
    done
}

echo "################### run test ###################"
IFS="|"
for lite_model in ${lite_model_dir_list[*]}; do
    #run lite inference
    for img_dir in ${lite_infer_img_dir_list[*]}; do
        func_lite "${lite_inference_cmd}" "${lite_model}_opt.nb" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
    done
done