PaddlePaddle / PaddleOCR
Commit eb22ce42
Authored Oct 17, 2021 by LDOUBLEV

fix kl quant

Parent 08f25ca4

Showing 3 changed files with 11 additions and 10 deletions:

tests/configs/ppocr_det_mobile_params.txt   +3 -3
tests/test_cpp.sh   +1 -1
tests/test_python.sh   +7 -6

tests/configs/ppocr_det_mobile_params.txt
...
@@ -82,14 +82,14 @@ pipline:pipeline_http_client.py --image_dir=../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
-infer_quant:False
+infer_quant:True
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
 --enable_mkldnn:True|False
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16|int8
+--precision:int8
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
...
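
In this block infer_quant is flipped to True and --precision is pinned to int8 instead of sweeping fp32|fp16|int8, so the kl_quant_params section describes a quantized (KL) inference run. A minimal sketch of reading such colon-separated key:value entries in bash; the parse_value helper below is hypothetical and only stands in for the repo's own func_parser_* helpers:

# Hypothetical sketch: read one key:value entry of a params file such as
# tests/configs/ppocr_det_mobile_params.txt. The real harness uses its own
# func_parser_* helpers; parse_value only illustrates the file format.
parse_value() {
    # Drop everything up to and including the first ':' to keep the value.
    echo "${1#*:}"
}

line="infer_quant:True"
infer_quant=$(parse_value "$line")
echo "infer_quant=${infer_quant}"    # prints: infer_quant=True
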
tests/test_cpp.sh
...
@@ -57,7 +57,7 @@ function func_cpp_inference(){
         for threads in ${cpp_cpu_threads_list[*]}; do
             for batch_size in ${cpp_batch_size_list[*]}; do
                 precision="fp32"
-                if [ ${_flag_quant} = "True" ]; then
+                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                     precison="int8"
                 fi
                 _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
...
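
The single changed line narrows when a CPU run is tagged int8: after this commit the int8 label is chosen only when MKL-DNN is disabled and the quant flag is set, not for every quantized run. A self-contained sketch of that selection logic, with placeholder values for the two flags (in test_cpp.sh they come from the parsed params file); note the script itself assigns to precison while the log path interpolates ${precision}, whereas the sketch uses a single precision variable:

# Sketch of the post-commit precision selection; use_mkldnn and _flag_quant
# carry placeholder values here, not the harness's parsed inputs.
use_mkldnn="False"
_flag_quant="True"

precision="fp32"
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
    precision="int8"
fi
echo "precision=${precision}"    # int8 only when MKL-DNN is off and quant is on
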
tests/test_python.sh
...
@@ -5,11 +5,7 @@ FILENAME=$1
 # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'klquant_infer']
 MODE=$2
-if [ ${MODE} = "klquant_infer" ]; then
-    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
-else
-    dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
-fi
+dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
 # parser params
 IFS=$'\n'
...
@@ -93,6 +89,8 @@ infer_value1=$(func_parser_value "${lines[50]}")
 # parser klquant_infer
 if [ ${MODE} = "klquant_infer" ]; then
+    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+    lines=(${dataline})
     # parser inference model
     infer_model_dir_list=$(func_parser_value "${lines[1]}")
     infer_export_list=$(func_parser_value "${lines[2]}")
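
With these two hunks the kl_quant_params block (lines 82 to 98 of the params file) is read only inside the klquant_infer branch, and lines is rebuilt from it there rather than up front. A small sketch of the two constructs involved; params.txt is a stand-in file name:

# Sketch: pull a line range out of a file with awk and split it into a bash
# array, as the klquant_infer branch does. params.txt is a stand-in name.
IFS=$'\n'                                            # split on newlines only
dataline=$(awk 'NR==82, NR==98{print}' params.txt)   # lines 82..98 inclusive
lines=(${dataline})                                  # one element per line
echo "entry 1: ${lines[1]}"                          # e.g. the infer_model line
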
...
@@ -144,7 +142,7 @@ function func_inference(){
         for threads in ${cpu_threads_list[*]}; do
             for batch_size in ${batch_size_list[*]}; do
                 precison="fp32"
-                if [ ${_flag_quant} = "True" ]; then
+                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                     precision="int8"
                 fi
                 _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
...
@@ -228,6 +226,9 @@ if [ ${MODE} = "infer" ] || [ ${MODE} = "klquant_infer" ]; then
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
+        if [ ${MODE} = "klquant_infer" ]; then
+            is_quant="True"
+        fi
         func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done
...
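
The final hunk forces the quant flag on whenever the whole run is in klquant_infer mode, overriding the per-model infer_quant_flag entry. A rough sketch of that override, with placeholder values for MODE, Count, and the flag array:

# Sketch of the is_quant override added here; MODE, Count and the flag array
# carry placeholder values for what the harness parses from its inputs.
MODE="klquant_infer"
infer_quant_flag=("False" "False")
Count=0

is_quant=${infer_quant_flag[Count]}
if [ ${MODE} = "klquant_infer" ]; then
    is_quant="True"
fi
echo "is_quant=${is_quant}"    # True whenever MODE is klquant_infer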