Commit 08166b83
Authored Aug 19, 2021 by MissPenguin

add cpp_infer for cice

Parent: 25de5bec
Showing 4 changed files with 232 additions and 18 deletions (+232, −18):
deploy/cpp_infer/src/main.cpp   +15 −14
tests/ocr_det_params.txt        +14 −0
tests/prepare.sh                +89 −3
tests/test.sh                   +114 −1
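The new mode plugs into the existing two-step test entry points, which both take the params file as $1 and the mode as $2 (see tests/prepare.sh and tests/test.sh below). Assuming the repository root as the working directory, a typical invocation of the new path would be:

    # download data/models and build the C++ demo, then run the C++ inference tests
    bash tests/prepare.sh tests/ocr_det_params.txt cpp_infer
    bash tests/test.sh tests/ocr_det_params.txt cpp_infer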
deploy/cpp_infer/src/main.cpp
@@ -39,8 +39,8 @@
 DEFINE_bool(use_gpu, false, "Infering with GPU or CPU.");
 DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
 DEFINE_int32(gpu_mem, 4000, "GPU id when infering with GPU.");
-DEFINE_int32(cpu_math_library_num_threads, 10, "Num of threads with CPU.");
-DEFINE_bool(use_mkldnn, false, "Whether use mkldnn with CPU.");
+DEFINE_int32(cpu_threads, 10, "Num of threads with CPU.");
+DEFINE_bool(enable_mkldnn, false, "Whether use mkldnn with CPU.");
 DEFINE_bool(use_tensorrt, false, "Whether use tensorrt.");
 DEFINE_string(precision, "fp32", "Precision be one of fp32/fp16/int8");
 DEFINE_bool(benchmark, true, "Whether use benchmark.");
@@ -60,6 +60,7 @@ DEFINE_string(cls_model_dir, "", "Path of cls inference model.");
 DEFINE_double(cls_thresh, 0.9, "Threshold of cls_thresh.");
 // recognition related
 DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
+DEFINE_int32(rec_batch_num, 1, "rec_batch_num.");
 DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");
@@ -78,8 +79,8 @@ void PrintBenchmarkLog(std::string model_name,
   LOG(INFO) << "ir_optim: " << "True";
   LOG(INFO) << "enable_memory_optim: " << "True";
   LOG(INFO) << "enable_tensorrt: " << FLAGS_use_tensorrt;
-  LOG(INFO) << "enable_mkldnn: " << (FLAGS_use_mkldnn ? "True" : "False");
-  LOG(INFO) << "cpu_math_library_num_threads: " << FLAGS_cpu_math_library_num_threads;
+  LOG(INFO) << "enable_mkldnn: " << (FLAGS_enable_mkldnn ? "True" : "False");
+  LOG(INFO) << "cpu_math_library_num_threads: " << FLAGS_cpu_threads;
   LOG(INFO) << "----------------------- Data info -----------------------";
   LOG(INFO) << "batch_size: " << batch_size;
   LOG(INFO) << "input_shape: " << input_shape;
@@ -110,8 +111,8 @@ static bool PathExists(const std::string& path){
 int main_det(std::vector<cv::String> cv_all_img_names) {
   std::vector<double> time_info = {0, 0, 0};
   DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
-                 FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
-                 FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
+                 FLAGS_gpu_mem, FLAGS_cpu_threads,
+                 FLAGS_enable_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
                  FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
                  FLAGS_use_polygon_score, FLAGS_visualize,
                  FLAGS_use_tensorrt, FLAGS_precision);
@@ -144,8 +145,8 @@ int main_det(std::vector<cv::String> cv_all_img_names) {
 int main_rec(std::vector<cv::String> cv_all_img_names) {
   std::vector<double> time_info = {0, 0, 0};
   CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
-                     FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
-                     FLAGS_use_mkldnn, FLAGS_char_list_file,
+                     FLAGS_gpu_mem, FLAGS_cpu_threads,
+                     FLAGS_enable_mkldnn, FLAGS_char_list_file,
                      FLAGS_use_tensorrt, FLAGS_precision);

   for (int i = 0; i < cv_all_img_names.size(); ++i) {
@@ -175,8 +176,8 @@ int main_rec(std::vector<cv::String> cv_all_img_names) {
 int main_system(std::vector<cv::String> cv_all_img_names) {
   DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
-                 FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
-                 FLAGS_use_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
+                 FLAGS_gpu_mem, FLAGS_cpu_threads,
+                 FLAGS_enable_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
                  FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
                  FLAGS_use_polygon_score, FLAGS_visualize,
                  FLAGS_use_tensorrt, FLAGS_precision);

@@ -184,14 +185,14 @@ int main_system(std::vector<cv::String> cv_all_img_names) {
   Classifier *cls = nullptr;
   if (FLAGS_use_angle_cls) {
     cls = new Classifier(FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
-                         FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
-                         FLAGS_use_mkldnn, FLAGS_cls_thresh,
+                         FLAGS_gpu_mem, FLAGS_cpu_threads,
+                         FLAGS_enable_mkldnn, FLAGS_cls_thresh,
                          FLAGS_use_tensorrt, FLAGS_precision);
   }

   CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
-                     FLAGS_gpu_mem, FLAGS_cpu_math_library_num_threads,
-                     FLAGS_use_mkldnn, FLAGS_char_list_file,
+                     FLAGS_gpu_mem, FLAGS_cpu_threads,
+                     FLAGS_enable_mkldnn, FLAGS_char_list_file,
                      FLAGS_use_tensorrt, FLAGS_precision);

   auto start = std::chrono::system_clock::now();
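The net effect in main.cpp is a rename of the two CPU-related flags (cpu_math_library_num_threads becomes cpu_threads, use_mkldnn becomes enable_mkldnn) plus a new rec_batch_num flag, bringing the binary's interface in line with the keys used in tests/ocr_det_params.txt below. An illustrative invocation with the renamed flags (paths taken from that params file; adjust to your setup):

    ./deploy/cpp_infer/build/ppocr det \
        --use_gpu=False \
        --enable_mkldnn=True \
        --cpu_threads=6 \
        --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
        --image_dir=./inference/ch_det_data_50/all-sum-510/ \
        --benchmark=True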
tests/ocr_det_params.txt
@@ -49,4 +49,18 @@ inference:tools/infer/predict_det.py
 --save_log_path:null
 --benchmark:True
 null:null
+===========================cpp_infer_params===========================
+infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
+infer_quant:False
+inference:./deploy/cpp_infer/build/ppocr det
+--use_gpu:True|False
+--enable_mkldnn:True|False
+--cpu_threads:1|6
+--rec_batch_num:1
+--use_tensorrt:False|True
+--precision:fp32|fp16
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+--save_log_path:null
+--benchmark:True
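Each entry follows the key:value convention of the test harness, and values containing "|" are candidate lists to sweep, so --cpu_threads:1|6 runs with both 1 and 6 threads. For reference, the parser helpers already defined in tests/test.sh behave roughly like the sketch below (an assumption about the surrounding harness, not part of this diff):

    function func_parser_key(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[0]}   # the text before the first ':'
    }
    function func_parser_value(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[1]}   # the text after the first ':'
    }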
tests/prepare.sh
 #!/bin/bash
 FILENAME=$1
-# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer']
+# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer']
 MODE=$2
 dataline=$(cat ${FILENAME})
@@ -58,11 +58,11 @@ elif [ ${MODE} = "whole_infer" ];then
     cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar
     ln -s ./icdar2015_infer ./icdar2015
     cd ../
-else
+elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then
     if [ ${model_name} = "ocr_det" ]; then
         eval_model_name="ch_ppocr_mobile_v2.0_det_infer"
         rm -rf ./train_data/icdar2015
-        wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
         cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
     else
@@ -74,3 +74,89 @@ else
     fi
 fi
+
+if [ ${MODE} = "cpp_infer" ];then
+    ################### build opencv ###################
+    cd deploy/cpp_infer
+    rm -rf 3.4.7.tar.gz opencv-3.4.7/
+    wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
+    tar -xf 3.4.7.tar.gz
+    cd opencv-3.4.7/
+    install_path=$(pwd)/opencv-3.4.7/opencv3
+    rm -rf build
+    mkdir build
+    cd build
+    cmake .. \
+        -DCMAKE_INSTALL_PREFIX=${install_path} \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DBUILD_SHARED_LIBS=OFF \
+        -DWITH_IPP=OFF \
+        -DBUILD_IPP_IW=OFF \
+        -DWITH_LAPACK=OFF \
+        -DWITH_EIGEN=OFF \
+        -DCMAKE_INSTALL_LIBDIR=lib64 \
+        -DWITH_ZLIB=ON \
+        -DBUILD_ZLIB=ON \
+        -DWITH_JPEG=ON \
+        -DBUILD_JPEG=ON \
+        -DWITH_PNG=ON \
+        -DBUILD_PNG=ON \
+        -DWITH_TIFF=ON \
+        -DBUILD_TIFF=ON
+    make -j
+    make install
+    cd ../
+    ################### build opencv finished ###################
+
+    # ################### build paddle inference ###################
+    # rm -rf Paddle
+    # git clone https://github.com/PaddlePaddle/Paddle.git
+    # cd Paddle
+    # rm -rf build
+    # mkdir build
+    # cd build
+    # cmake .. \
+    #     -DWITH_CONTRIB=OFF \
+    #     -DWITH_MKL=ON \
+    #     -DWITH_MKLDNN=ON \
+    #     -DWITH_TESTING=OFF \
+    #     -DCMAKE_BUILD_TYPE=Release \
+    #     -DWITH_INFERENCE_API_TEST=OFF \
+    #     -DON_INFER=ON \
+    #     -DWITH_PYTHON=ON
+    # make -j
+    # make inference_lib_dist
+    # cd ../
+    # ################### build paddle inference finished ###################
+
+    ################### build PaddleOCR demo ###################
+    OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
+    LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
+    CUDA_LIB_DIR=/usr/local/cuda/lib64/
+    CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/
+
+    BUILD_DIR=build
+    rm -rf ${BUILD_DIR}
+    mkdir ${BUILD_DIR}
+    cd ${BUILD_DIR}
+    cmake .. \
+        -DPADDLE_LIB=${LIB_DIR} \
+        -DWITH_MKL=ON \
+        -DWITH_GPU=OFF \
+        -DWITH_STATIC_LIB=OFF \
+        -DWITH_TENSORRT=OFF \
+        -DOPENCV_DIR=${OPENCV_DIR} \
+        -DCUDNN_LIB=${CUDNN_LIB_DIR} \
+        -DCUDA_LIB=${CUDA_LIB_DIR} \
+        -DTENSORRT_DIR=${TENSORRT_DIR}
+    make -j
+    ################### build PaddleOCR demo finished ###################
+fi
\ No newline at end of file
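Note that the Paddle-from-source section is committed commented out, so the demo build assumes a Paddle tree already present at ./Paddle/build/paddle_inference_install_dir/. A common alternative, sketched below under the assumption that a prebuilt Paddle Inference package has been downloaded manually (the tarball name here is hypothetical), is to unpack it and point LIB_DIR at it instead:

    # sketch, not part of this commit: reuse a prebuilt Paddle Inference package
    # matching your CUDA/MKL configuration
    tar -xf paddle_inference.tgz
    # then edit tests/prepare.sh so that:
    # LIB_DIR=$(pwd)/paddle_inference/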
tests/test.sh
 #!/bin/bash
 FILENAME=$1
-# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer']
+# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer']
 MODE=$2
 dataline=$(cat ${FILENAME})
@@ -145,6 +145,35 @@ benchmark_value=$(func_parser_value "${lines[49]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")
+# parser cpp inference model
+cpp_infer_model_dir_list=$(func_parser_value "${lines[52]}")
+cpp_infer_is_quant=$(func_parser_value "${lines[53]}")
+# parser cpp inference
+inference_cmd=$(func_parser_value "${lines[54]}")
+cpp_use_gpu_key=$(func_parser_key "${lines[55]}")
+cpp_use_gpu_list=$(func_parser_value "${lines[55]}")
+cpp_use_mkldnn_key=$(func_parser_key "${lines[56]}")
+cpp_use_mkldnn_list=$(func_parser_value "${lines[56]}")
+cpp_cpu_threads_key=$(func_parser_key "${lines[57]}")
+cpp_cpu_threads_list=$(func_parser_value "${lines[57]}")
+cpp_batch_size_key=$(func_parser_key "${lines[58]}")
+cpp_batch_size_list=$(func_parser_value "${lines[58]}")
+cpp_use_trt_key=$(func_parser_key "${lines[59]}")
+cpp_use_trt_list=$(func_parser_value "${lines[59]}")
+cpp_precision_key=$(func_parser_key "${lines[60]}")
+cpp_precision_list=$(func_parser_value "${lines[60]}")
+cpp_infer_model_key=$(func_parser_key "${lines[61]}")
+cpp_image_dir_key=$(func_parser_key "${lines[62]}")
+cpp_infer_img_dir=$(func_parser_value "${lines[62]}")
+cpp_save_log_key=$(func_parser_key "${lines[63]}")
+cpp_benchmark_key=$(func_parser_key "${lines[64]}")
+cpp_benchmark_value=$(func_parser_value "${lines[64]}")
+
+echo $inference_cmd
+echo $cpp_cpu_threads_key $cpp_cpu_threads_list
+echo $cpp_precision_key $cpp_precision_list
+echo $cpp_benchmark_key $cpp_benchmark_value

 LOG_PATH="./tests/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results.log"
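The loops in func_cpp_inference below rely on func_set_params to turn a (key, value) pair into a "--key=value" fragment, emitting nothing useful when either side is null so that optional flags drop out of the assembled command line. A minimal sketch of that helper, assuming the behavior it has elsewhere in tests/test.sh (it is not part of this diff):

    function func_set_params(){
        key=$1
        value=$2
        # emit a blank when the key or value is null/empty, else "--key=value"
        if [ ${key} = "null" ]; then
            echo " "
        elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ]; then
            echo " "
        else
            echo "${key}=${value}"
        fi
    }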
@@ -218,6 +247,71 @@ function func_inference(){
     done
 }
+
+function func_cpp_inference(){
+    IFS='|'
+    _script=$1
+    _model_dir=$2
+    _log_path=$3
+    _img_dir=$4
+    _flag_quant=$5
+    # inference
+    for use_gpu in ${cpp_use_gpu_list[*]}; do
+        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
+                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
+                    continue
+                fi
+                for threads in ${cpp_cpu_threads_list[*]}; do
+                    for batch_size in ${cpp_batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
+                        set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
+                        set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
+                        set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
+                        set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
+                        set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
+                        command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}"
+                    done
+                done
+            done
+        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
+            for use_trt in ${cpp_use_trt_list[*]}; do
+                for precision in ${cpp_precision_list[*]}; do
+                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                        continue
+                    fi
+                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                        continue
+                    fi
+                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
+                        continue
+                    fi
+                    for batch_size in ${cpp_batch_size_list[*]}; do
+                        _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                        set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
+                        set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
+                        set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
+                        set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}")
+                        set_precision=$(func_set_params "${cpp_precision_key}" "${precision}")
+                        set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
+                        command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
+                        eval $command
+                        last_status=${PIPESTATUS[0]}
+                        eval "cat ${_save_log_path}"
+                        status_check $last_status "${command}" "${status_log}"
+                    done
+                done
+            done
+        else
+            echo "Does not support hardware other than CPU and GPU Currently!"
+        fi
+    done
+}

 if [ ${MODE} = "infer" ]; then
     GPUID=$3
     if [ ${#GPUID} -le 0 ];then
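For one concrete CPU combination from tests/ocr_det_params.txt (use_gpu=False, enable_mkldnn=True, cpu_threads=6, batch_size=1), the command string assembled by func_cpp_inference above expands to roughly:

    ./deploy/cpp_infer/build/ppocr det --use_gpu=False --enable_mkldnn=True \
        --cpu_threads=6 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
        --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ \
        --benchmark=True \
        > ./tests/output/cpp_infer_cpu_usemkldnn_True_threads_6_batchsize_1.log 2>&1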
@@ -252,6 +346,25 @@ if [ ${MODE} = "infer" ]; then
         Count=$(($Count + 1))
     done
+elif [ ${MODE} = "cpp_infer" ]; then
+    GPUID=$3
+    if [ ${#GPUID} -le 0 ];then
+        env=" "
+    else
+        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+    fi
+    # set CUDA_VISIBLE_DEVICES
+    eval $env
+    export Count=0
+    IFS="|"
+    infer_quant_flag=(${cpp_infer_is_quant})
+    for infer_model in ${cpp_infer_model_dir_list[*]}; do
+        # run inference
+        is_quant=${infer_quant_flag[Count]}
+        func_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_infer_img_dir}" ${is_quant}
+        Count=$(($Count + 1))
+    done
+
 else
     IFS="|"
     export Count=0