Commit d515dd51
Authored Nov 11, 2021 by cuicheng01

update tipc lite demo

Parent: 08dcbba4

Showing 10 changed files with 227 additions and 79 deletions (+227, -79).
deploy/lite/ocr_db_crnn.cc  (+26, -43)
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt  (+12, -0)
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_metal_arm_gpu.txt  (+0, -0)
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_opencl_arm_gpu.txt  (+0, -0)
test_tipc/docs/lite_auto_log.png  (+0, -0)
test_tipc/docs/lite_log.png  (+0, -0)
test_tipc/docs/test_lite_arm_cpu_cpp.md  (+71, -0)
test_tipc/prepare.sh  (+2, -36)
test_tipc/prepare_lite.sh  (+56, -0)
test_tipc/test_lite_arm_cpu_cpp.sh  (+60, -0)
deploy/lite/ocr_db_crnn.cc

```diff
@@ -307,21 +307,10 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
   return filter_boxes;
 }
 
-std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, std::string power_mode, int num_threads) {
+std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, int num_threads) {
   MobileConfig config;
   config.set_model_from_file(model_file);
-  if (power_mode == "LITE_POWER_HIGH") {
-    config.set_power_mode(LITE_POWER_HIGH);
-  } else {
-    if (power_mode == "LITE_POWER_LOW") {
-      config.set_power_mode(LITE_POWER_HIGH);
-    } else {
-      std::cerr << "Only support LITE_POWER_HIGH or LITE_POWER_HIGH." << std::endl;
-      exit(1);
-    }
-  }
   config.set_threads(num_threads);
   std::shared_ptr<PaddlePredictor> predictor =
```
```diff
@@ -391,7 +380,7 @@ void check_params(int argc, char **argv) {
   if (strcmp(argv[1], "det") == 0) {
     if (argc < 9){
       std::cerr << "[ERROR] usage:" << argv[0]
-                << " det det_model num_threads batchsize power_mode img_dir det_config lite_benchmark_value"
-                << std::endl;
+                << " det det_model runtime_device num_threads batchsize img_dir det_config lite_benchmark_value"
+                << std::endl;
       exit(1);
     }
   }
```
```diff
@@ -399,7 +388,7 @@ void check_params(int argc, char **argv) {
   if (strcmp(argv[1], "rec") == 0) {
     if (argc < 9){
       std::cerr << "[ERROR] usage:" << argv[0]
-                << " rec rec_model num_threads batchsize power_mode img_dir key_txt lite_benchmark_value"
-                << std::endl;
+                << " rec rec_model runtime_device num_threads batchsize img_dir key_txt lite_benchmark_value"
+                << std::endl;
       exit(1);
     }
   }
```
```diff
@@ -407,7 +396,7 @@ void check_params(int argc, char **argv) {
   if (strcmp(argv[1], "system") == 0) {
     if (argc < 12){
       std::cerr << "[ERROR] usage:" << argv[0]
-                << " system det_model rec_model clas_model num_threads batchsize power_mode img_dir det_config key_txt lite_benchmark_value"
-                << std::endl;
+                << " system det_model rec_model clas_model runtime_device num_threads batchsize img_dir det_config key_txt lite_benchmark_value"
+                << std::endl;
       exit(1);
     }
   }
```
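Taken together, these usage strings drop `power_mode` and put `runtime_device` right after the model path(s). Note that the handlers below still read a `precision` argument from argv even though the new usage strings omit it; the sample logs in the updated test_lite_arm_cpu_cpp.md pass it explicitly. A sketch of the new det invocation, with the model, data, and config paths taken from those logs:

```shell
# New argument order implied by the handlers below: mode, model,
# runtime_device, precision, num_threads, batchsize, image dir,
# config, benchmark flag. Paths come from the sample test logs.
./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 \
    ./test_data/icdar2015_lite/text_localization/ch4_test_images/ \
    ./config.txt True
```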
```diff
@@ -417,15 +406,15 @@ void system(char **argv){
   std::string det_model_file = argv[2];
   std::string rec_model_file = argv[3];
   std::string cls_model_file = argv[4];
-  std::string precision = argv[5];
-  std::string num_threads = argv[6];
-  std::string batchsize = argv[7];
-  std::string power_mode = argv[8];
+  std::string runtime_device = argv[5];
+  std::string precision = argv[6];
+  std::string num_threads = argv[7];
+  std::string batchsize = argv[8];
   std::string img_dir = argv[9];
   std::string det_config_path = argv[10];
   std::string dict_path = argv[11];
 
-  if (strcmp(argv[5], "FP32") != 0 && strcmp(argv[5], "INT8") != 0) {
+  if (strcmp(argv[6], "FP32") != 0 && strcmp(argv[6], "INT8") != 0) {
     std::cerr << "Only support FP32 or INT8." << std::endl;
     exit(1);
   }
```
```diff
@@ -441,9 +430,9 @@ void system(char **argv){
   charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
   charactor_dict.push_back(" ");
 
-  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
-  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
-  auto cls_predictor = loadModel(cls_model_file, power_mode, std::stoi(num_threads));
+  auto det_predictor = loadModel(det_model_file, std::stoi(num_threads));
+  auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
+  auto cls_predictor = loadModel(cls_model_file, std::stoi(num_threads));
 
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
     std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
```
```diff
@@ -477,14 +466,14 @@ void system(char **argv){
 void det(int argc, char **argv) {
   std::string det_model_file = argv[2];
-  std::string precision = argv[3];
-  std::string num_threads = argv[4];
-  std::string batchsize = argv[5];
-  std::string power_mode = argv[6];
+  std::string runtime_device = argv[3];
+  std::string precision = argv[4];
+  std::string num_threads = argv[5];
+  std::string batchsize = argv[6];
   std::string img_dir = argv[7];
   std::string det_config_path = argv[8];
 
-  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
+  if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) {
     std::cerr << "Only support FP32 or INT8." << std::endl;
     exit(1);
   }
```
```diff
@@ -495,7 +484,7 @@ void det(int argc, char **argv) {
   //// load config from txt file
   auto Config = LoadConfigTxt(det_config_path);
 
-  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
+  auto det_predictor = loadModel(det_model_file, std::stoi(num_threads));
 
   std::vector<double> time_info = {0, 0, 0};
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
```
```diff
@@ -530,14 +519,11 @@ void det(int argc, char **argv) {
   if (strcmp(argv[9], "True") == 0) {
-    AutoLogger autolog(det_model_file, 0, 0, 0,
-                       std::stoi(num_threads), std::stoi(batchsize),
-                       "dynamic", precision, power_mode,
-                       time_info, cv_all_img_names.size());
+    AutoLogger autolog(det_model_file, 0, 0, 0, runtime_device,
+                       std::stoi(num_threads), std::stoi(batchsize),
+                       "dynamic", precision,
+                       time_info, cv_all_img_names.size());
     autolog.report();
```
```diff
@@ -546,14 +532,14 @@ void det(int argc, char **argv) {
 void rec(int argc, char **argv) {
   std::string rec_model_file = argv[2];
-  std::string precision = argv[3];
-  std::string num_threads = argv[4];
-  std::string batchsize = argv[5];
-  std::string power_mode = argv[6];
+  std::string runtime_device = argv[3];
+  std::string precision = argv[4];
+  std::string num_threads = argv[5];
+  std::string batchsize = argv[6];
   std::string img_dir = argv[7];
   std::string dict_path = argv[8];
 
-  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
+  if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) {
     std::cerr << "Only support FP32 or INT8." << std::endl;
     exit(1);
   }
```
```diff
@@ -565,7 +551,7 @@ void rec(int argc, char **argv) {
   charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
   charactor_dict.push_back(" ");
 
-  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
+  auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
   std::shared_ptr<PaddlePredictor> cls_predictor;
```
```diff
@@ -603,14 +589,11 @@ void rec(int argc, char **argv) {
   // TODO: support autolog
   if (strcmp(argv[9], "True") == 0) {
-    AutoLogger autolog(rec_model_file, 0, 0, 0,
-                       std::stoi(num_threads), std::stoi(batchsize),
-                       "dynamic", precision, power_mode,
-                       time_info, cv_all_img_names.size());
+    AutoLogger autolog(rec_model_file, 0, 0, 0, runtime_device,
+                       std::stoi(num_threads), std::stoi(batchsize),
+                       "dynamic", precision,
+                       time_info, cv_all_img_names.size());
     autolog.report();
```
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt (new file, mode 100644)

```
===========================lite_params===========================
inference:./ocr_db_crnn det
infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer
runtime_device:ARM_CPU
--cpu_threads:1|4
--det_batch_size:1
--rec_batch_size:1
--system_batch_size:1
--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/
--config_dir:./config.txt
--rec_dict_dir:./ppocr_keys_v1.txt
--benchmark:True
```
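Each non-header line is a `key:value` pair that `test_lite_arm_cpu_cpp.sh` reads by line index through `func_parser_value` from `test_tipc/common_func.sh`. A minimal sketch of that parsing, assuming the helper simply splits on `:` and echoes the value field (the shipped implementation lives in common_func.sh and may differ):

```shell
# Assumed behavior of func_parser_value: split a "key:value" line
# on ':' and print the value part. Illustrative only, not the
# actual code from common_func.sh.
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}
}

func_parser_value "runtime_device:ARM_CPU"   # -> ARM_CPU
func_parser_value "--cpu_threads:1|4"        # -> 1|4 (split later on '|')
```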
test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_metal_arm_gpu.txt (deleted, mode 100644 → 0)

test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_opencl_arm_gpu.txt (deleted, mode 100644 → 0)
test_tipc/docs/lite_auto_log.png: binary image replaced (289.9 KB → 209.8 KB)

test_tipc/docs/lite_log.png: binary image replaced (775.5 KB → 168.7 KB)
test_tipc/docs/test_lite.md → test_tipc/docs/test_lite_arm_cpu_cpp.md (renamed)
```diff
-# Lite Prediction Functional Test
+# Lite_arm_cpu_cpp Prediction Functional Test
 
-The main driver of the Lite prediction functional test is `test_lite.sh`, which tests model inference based on the Lite inference library.
+The main driver of the Lite_arm_cpu_cpp prediction functional test is `test_lite_arm_cpu_cpp.sh`, which tests C++ model inference on ARM CPU based on the Lite inference library.
 
 ## 1. Summary of Test Conclusions
 
 Currently, the Lite-side tests support combinations of the following options:
 
 **Field descriptions:**
 
-- Input setup: C++ prediction, Python prediction, Java prediction
-- Model type: normal model (FP32) and quantized model (FP16)
+- Model type: normal model (FP32) and quantized model (INT8)
 - batch-size: 1 and 4
+- threads: 1 and 4
 - Number of predictors: multi-predictor and single-predictor prediction
-- Power mode: high-performance mode (LITE_POWER_HIGH) and power-saving mode (LITE_POWER_LOW)
-- Inference library source: download or build from source; builds target the following hardware: (1) ARM CPU; (2) Linux XPU; (3) OpenCL GPU; (4) Metal GPU
+- Inference library source: download or build from source
 
-| Model type | batch-size | Predictor count | Power mode | Library source | Language |
-| :----: | :----: | :----: | :----: | :----: | :----: |
-| normal/quantized model | 1 | 1 | high-performance/power-saving | download | C++ |
+| Model type | batch-size | threads | Predictor count | Library source |
+| :----: | :----: | :----: | :----: | :----: |
+| normal/quantized model | 1 | 1/4 | 1 | download |
```
````diff
@@ -24,15 +23,15 @@ The main driver of the Lite prediction functional test is `test_lite.sh`
 ## 2. Test Workflow
 
 ### 2.1 Functional Testing
 
-First run `prepare.sh` to prepare the data and models; they are packed into test_lite.tar. Upload test_lite.tar to the phone, unpack it, enter the `test_lite` directory, and run `test_lite.sh` to run the test. Log files with the `lite_*.log` suffix are finally generated under `test_lite/output`.
+First run `prepare_lite.sh`; it generates `test_lite.tar` in the current directory, containing the test data, the test models, and the executable used for prediction. Upload `test_lite.tar` to the phone under test, unpack it in the phone's terminal, enter the `test_lite` directory, and run `test_lite_arm_cpu_cpp.sh` to run the test. Log files with the `lite_*.log` suffix are finally generated under `test_lite/output`.
 
 ```shell
 # Prepare data and models
-bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "lite_infer"
+bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
 # Test on the phone:
-bash test_lite.sh ppocr_det_mobile_params.txt
+bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
 ```
````
````diff
@@ -44,7 +43,7 @@ bash test_lite.sh ppocr_det_mobile_params.txt
 On success, the output is:
 
 ```
-Run successfully with command - ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True > ./output/lite_ch_ppocr_mobile_v2.0_det_slim_opt.nb_precision_INT8_batchsize_1_threads_4_powermode_LITE_POWER_LOW_singleimg_True.log 2>&1!
+Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
 Run successfully with command xxx
 ...
 ```
@@ -52,7 +51,7 @@ Run successfully with command xxx
 On failure, the output is:
 
 ```
-Run failed with command - ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True > ./output/lite_ch_ppocr_mobile_v2.0_det_slim_opt.nb_precision_INT8_batchsize_1_threads_4_powermode_LITE_POWER_LOW_singleimg_True.log 2>&1!
+Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
 Run failed with command xxx
 ...
 ```
````
test_tipc/prepare.sh

```diff
@@ -3,7 +3,7 @@ FILENAME=$1
 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',
 #                 'whole_infer', 'klquant_whole_infer',
-#                 'cpp_infer', 'serving_infer', 'lite_infer']
+#                 'cpp_infer', 'serving_infer']
 MODE=$2
```
```diff
@@ -34,7 +34,7 @@ trainer_list=$(func_parser_value "${lines[14]}")
 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer',
 #                 'whole_infer', 'klquant_whole_infer',
-#                 'cpp_infer', 'serving_infer', 'lite_infer']
+#                 'cpp_infer', 'serving_infer']
 MODE=$2
 if [ ${MODE} = "lite_train_lite_infer" ];then
```
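For reference, `prepare.sh` takes the params file as `$1` and one of the MODE values listed above as `$2`; an illustrative invocation (not part of this commit):

```shell
# Example prepare.sh call; the config path is the one used elsewhere
# in this repo, the mode is one of the values from the comment above.
bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_lite_infer'
```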
```diff
@@ -169,40 +169,6 @@ if [ ${MODE} = "serving_infer" ];then
     cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../
 fi
-if [ ${MODE} = "lite_infer" ];then
-    # prepare lite nb model and test data
-    current_dir=${PWD}
-    wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_opt.nb
-    wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_slim_opt.nb
-    wget -nc -P ./test_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
-    cd ./test_data && tar -xf icdar2015_lite.tar && rm icdar2015_lite.tar && cd ../
-    # prepare lite env
-    export http_proxy=http://172.19.57.45:3128
-    export https_proxy=http://172.19.57.45:3128
-    paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
-    paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
-    paddlelite_file=${paddlelite_zipfile:0:66}
-    wget ${paddlelite_url}
-    tar -xf ${paddlelite_zipfile}
-    mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
-    mv models test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cp ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
-    cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cp test_tipc/configs/ppocr_det_mobile_params.txt test_tipc/test_lite.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
-    cd ${paddlelite_file}/demo/cxx/ocr/
-    git clone https://github.com/LDOUBLEV/AutoLog.git
-    unset http_proxy
-    unset https_proxy
-    make -j
-    sleep 1
-    make -j
-    cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
-    tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
-fi
 if [ ${MODE} = "paddle2onnx_infer" ];then
     # prepare serving env
     python_name=$(func_parser_value "${lines[2]}")
```
test_tipc/prepare_lite.sh (new file, mode 100644)

```shell
#!/bin/bash
source ./test_tipc/common_func.sh

FILENAME=$1
dataline=$(cat ${FILENAME})

# parser params
IFS=$'\n'
lines=(${dataline})
IFS=$'\n'
lite_model_list=$(func_parser_value "${lines[2]}")

# prepare lite .nb model
pip install paddlelite==2.9
current_dir=${PWD}
IFS="|"
model_path=./inference_models

for model in ${lite_model_list[*]}; do
    inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
    inference_model=${inference_model_url##*/}
    wget -nc -P ${model_path} ${inference_model_url}
    cd ${model_path} && tar -xf ${inference_model} && cd ../
    model_dir=${model_path}/${inference_model%.*}
    model_file=${model_dir}/inference.pdmodel
    param_file=${model_dir}/inference.pdiparams
    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt
    echo "paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt"
done

# prepare test data
data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
model_path=./inference_models
inference_model=${inference_model_url##*/}
data_file=${data_url##*/}
wget -nc -P ./inference_models ${inference_model_url}
wget -nc -P ./test_data ${data_url}
cd ./inference_models && tar -xf ${inference_model} && cd ../
cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../

# prepare lite env
paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
paddlelite_file=${paddlelite_zipfile:0:66}
wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
cp ${FILENAME} test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
cd ${paddlelite_file}/demo/cxx/ocr/
git clone https://github.com/cuicheng01/AutoLog.git
make -j
sleep 1
make -j
cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
rm -rf ${paddlelite_file}* && rm -rf ${model_path}
```
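The script leans on a handful of bash parameter expansions; a quick illustration of the ones used above (the URL here is a placeholder):

```shell
url=https://example.com/models/ch_PP-OCRv2_det_infer.tar
file=${url##*/}      # ch_PP-OCRv2_det_infer.tar  (strip through the last '/')
echo ${file%.*}      # ch_PP-OCRv2_det_infer      (strip the shortest '.*' suffix)

zip=inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
echo ${zip:0:66}     # first 66 chars: the directory name the tarball unpacks to
```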
test_tipc/test_lite.sh → test_tipc/test_lite_arm_cpu_cpp.sh (renamed)
```diff
@@ -3,8 +3,7 @@ source ./common_func.sh
 export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
 
 FILENAME=$1
-dataline=$(awk 'NR==102, NR==111{print}' $FILENAME)
-echo $dataline
+dataline=$(cat $FILENAME)
 
 # parser params
 IFS=$'\n'
 lines=(${dataline})
```
```diff
@@ -12,13 +11,14 @@ lines=(${dataline})
 # parser lite inference
 lite_inference_cmd=$(func_parser_value "${lines[1]}")
 lite_model_dir_list=$(func_parser_value "${lines[2]}")
-lite_cpu_threads_list=$(func_parser_value "${lines[3]}")
-lite_batch_size_list=$(func_parser_value "${lines[4]}")
-lite_power_mode_list=$(func_parser_value "${lines[5]}")
-lite_infer_img_dir_list=$(func_parser_value "${lines[6]}")
-lite_config_dir=$(func_parser_value "${lines[7]}")
-lite_rec_dict_dir=$(func_parser_value "${lines[8]}")
-lite_benchmark_value=$(func_parser_value "${lines[9]}")
+runtime_device=$(func_parser_value "${lines[3]}")
+lite_cpu_threads_list=$(func_parser_value "${lines[4]}")
+lite_batch_size_list=$(func_parser_value "${lines[5]}")
+lite_infer_img_dir_list=$(func_parser_value "${lines[8]}")
+lite_config_dir=$(func_parser_value "${lines[9]}")
+lite_rec_dict_dir=$(func_parser_value "${lines[10]}")
+lite_benchmark_value=$(func_parser_value "${lines[11]}")
 
 LOG_PATH="./output"
 mkdir -p ${LOG_PATH}
```
```diff
@@ -37,23 +37,14 @@ function func_lite(){
     else
         precision="FP32"
     fi
-    is_single_img=$(echo $_img_dir | grep -E ".jpg|.jpeg|.png|.JPEG|.JPG")
-    if [[ "$is_single_img" != "" ]]; then
-        single_img="True"
-    else
-        single_img="False"
-    fi
 
     # lite inference
     for num_threads in ${lite_cpu_threads_list[*]}; do
-        for power_mode in ${lite_power_mode_list[*]}; do
-            for batchsize in ${lite_batch_size_list[*]}; do
-                model_name=$(echo $lite_model | awk -F "/" '{print $NF}')
-                _save_log_path="${_log_path}/lite_${model_name}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}_powermode_${power_mode}_singleimg_${single_img}.log"
-                command="${_script} ${lite_model} ${precision} ${num_threads} ${batchsize} ${power_mode} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
-                eval ${command}
-                status_check $? "${command}" "${status_log}"
-            done
+        for batchsize in ${lite_batch_size_list[*]}; do
+            _save_log_path="${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log"
+            command="${_script} ${_lite_model} ${runtime_device} ${precision} ${num_threads} ${batchsize} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
+            eval ${command}
+            status_check $? "${command}" "${status_log}"
         done
     done
 }
```
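With the values from the ARM CPU config above (`runtime_device:ARM_CPU`, one thread, batch size 1) and a detection model converted to `ch_PP-OCRv2_det_infer_opt.nb`, the new log-path template expands as follows, matching the sample log names in test_lite_arm_cpu_cpp.md:

```shell
# One iteration of the loops above, spelled out with sample values.
_log_path=./output
_lite_model=ch_PP-OCRv2_det_infer_opt.nb
runtime_device=ARM_CPU; precision=FP32; batchsize=1; num_threads=1
echo "${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log"
# -> ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log
```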
```diff
@@ -64,6 +55,6 @@ IFS="|"
 for lite_model in ${lite_model_dir_list[*]}; do
     #run lite inference
     for img_dir in ${lite_infer_img_dir_list[*]}; do
-        func_lite "${lite_inference_cmd}" "${lite_model}" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
+        func_lite "${lite_inference_cmd}" "${lite_model}_opt.nb" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
     done
 done
```