Commit 3c7f6e4d
Authored Aug 31, 2021 by dongshuilong

add cpp whole_chain test

Parent: ec5e07da
Showing 22 changed files with 250 additions and 24 deletions (+250 -24):
deploy/cpp/CMakeLists.txt                                   +5   -1
deploy/cpp/external-cmake/auto-log.cmake                    +12  -0
deploy/cpp/include/cls.h                                    +1   -1
deploy/cpp/include/cls_config.h                             +6   -4
deploy/cpp/src/cls.cpp                                      +20  -10
deploy/cpp/src/main.cpp                                     +16  -3
deploy/cpp/tools/build.sh                                   +2   -2
deploy/cpp/tools/config.txt                                 +4   -1
tests/config/DarkNet53.txt                                  +0   -0
tests/config/HRNet_W18_C.txt                                +0   -0
tests/config/LeViT_128S.txt                                 +0   -0
tests/config/MobileNetV1.txt                                +0   -0
tests/config/MobileNetV2.txt                                +0   -0
tests/config/MobileNetV3_large_x1_0.txt                     +0   -0
tests/config/ResNeXt101_vd_64x4d.txt                        +0   -0
tests/config/ResNet50_vd.txt                                +7   -0
tests/config/ShuffleNetV2_x1_0.txt                          +0   -0
tests/config/SwinTransformer_tiny_patch4_window7_224.txt    +0   -0
tests/config/cpp_config.txt                                 +19  -0
tests/cpp_config.txt                                        +24  -0
tests/prepare.sh                                            +61  -1
tests/test.sh                                               +73  -1
deploy/cpp/CMakeLists.txt

```diff
 project(clas_system CXX C)
+cmake_minimum_required(VERSION 3.14)
 option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
 option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
 ...
@@ -13,7 +14,6 @@ SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
-set(DEMO_NAME "clas_system")
 macro(safe_set_static_flag)
     foreach(flag_var
         CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
 ...
@@ -198,6 +198,10 @@ endif()
 set(DEPS ${DEPS} ${OpenCV_LIBS})
+include(FetchContent)
+include(external-cmake/auto-log.cmake)
+include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src)
 AUX_SOURCE_DIRECTORY(./src SRCS)
 add_executable(${DEMO_NAME} ${SRCS})
 ...
```
deploy/cpp/external-cmake/auto-log.cmake (new file, mode 100644)

```cmake
find_package(Git REQUIRED)
include(FetchContent)

set(FETCHCONTENT_BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}/third-party")

FetchContent_Declare(
    extern_Autolog
    PREFIX         autolog
    GIT_REPOSITORY https://github.com/LDOUBLEV/AutoLog.git
    GIT_TAG        main
)
FetchContent_MakeAvailable(extern_Autolog)
```
deploy/cpp/include/cls.h

```diff
@@ -61,7 +61,7 @@ public:
   void LoadModel(const std::string &model_path, const std::string &params_path);

   // Run predictor
-  double Run(cv::Mat &img);
+  double Run(cv::Mat &img, std::vector<double> *times);

 private:
   std::shared_ptr<Predictor> predictor_;
 ...
```
deploy/cpp/include/cls_config.h

```diff
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 ...
@@ -36,8 +36,7 @@ public:
     this->gpu_mem = stoi(config_map_["gpu_mem"]);
-    this->cpu_math_library_num_threads =
-        stoi(config_map_["cpu_math_library_num_threads"]);
+    this->cpu_threads = stoi(config_map_["cpu_threads"]);
     this->use_mkldnn = bool(stoi(config_map_["use_mkldnn"]));
 ...
@@ -51,6 +50,8 @@ public:
     this->resize_short_size = stoi(config_map_["resize_short_size"]);
     this->crop_size = stoi(config_map_["crop_size"]);
+    this->benchmark = bool(stoi(config_map_["benchmark"]));
   }

   bool use_gpu = false;
 ...
@@ -59,12 +60,13 @@ public:
   int gpu_mem = 4000;
-  int cpu_math_library_num_threads = 1;
+  int cpu_threads = 1;
   bool use_mkldnn = false;
   bool use_tensorrt = false;
   bool use_fp16 = false;
+  bool benchmark = false;
   std::string cls_model_path;
 ...
```
deploy/cpp/src/cls.cpp

```diff
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <chrono>
 #include <include/cls.h>

 namespace PaddleClas {
 ...
@@ -53,11 +52,12 @@ void Classifier::LoadModel(const std::string &model_path,
   this->predictor_ = CreatePredictor(config);
 }

-double Classifier::Run(cv::Mat &img) {
+double Classifier::Run(cv::Mat &img, std::vector<double> *times) {
   cv::Mat srcimg;
   cv::Mat resize_img;
   img.copyTo(srcimg);
+  auto preprocess_start = std::chrono::steady_clock::now();

   this->resize_op_.Run(img, resize_img, this->resize_short_size_);
   this->crop_op_.Run(resize_img, this->crop_size_);
 ...
@@ -70,7 +70,9 @@ double Classifier::Run(cv::Mat &img) {
   auto input_names = this->predictor_->GetInputNames();
   auto input_t = this->predictor_->GetInputHandle(input_names[0]);
   input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
-  auto start = std::chrono::system_clock::now();
+  auto preprocess_end = std::chrono::system_clock::now();
+  auto infer_start = std::chrono::system_clock::now();
   input_t->CopyFromCpu(input.data());
   this->predictor_->Run();
 ...
@@ -83,21 +85,29 @@ double Classifier::Run(cv::Mat &img) {
   out_data.resize(out_num);
   output_t->CopyToCpu(out_data.data());
-  auto end = std::chrono::system_clock::now();
-  auto duration =
-      std::chrono::duration_cast<std::chrono::microseconds>(end - start);
-  double cost_time = double(duration.count()) *
-                     std::chrono::microseconds::period::num /
-                     std::chrono::microseconds::period::den;
+  auto infer_end = std::chrono::system_clock::now();

+  auto postprocess_start = std::chrono::system_clock::now();
   int maxPosition =
       max_element(out_data.begin(), out_data.end()) - out_data.begin();
+  auto postprocess_end = std::chrono::system_clock::now();

+  // std::chrono::duration<float> preprocess_diff = preprocess_end -
+  // preprocess_start;
+  // times->push_back(double(preprocess_diff.count() * 1000));
+  std::chrono::duration<float> inference_diff = infer_end - infer_start;
+  double inference_cost_time = double(inference_diff.count() * 1000);
+  times->push_back(inference_cost_time);
+  std::chrono::duration<float> postprocess_diff =
+      postprocess_end - postprocess_start;
+  times->push_back(double(postprocess_diff.count() * 1000));

   std::cout << "result: " << std::endl;
   std::cout << "\tclass id: " << maxPosition << std::endl;
   std::cout << std::fixed << std::setprecision(10)
             << "\tscore: " << double(out_data[maxPosition]) << std::endl;
-  return cost_time;
+  return inference_cost_time;
 }

 } // namespace PaddleClas
```
deploy/cpp/src/main.cpp

```diff
-// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 ...
@@ -26,6 +26,7 @@
 #include <fstream>
 #include <numeric>

+#include <auto_log/autolog.h>
 #include <include/cls.h>
 #include <include/cls_config.h>
 ...
@@ -61,11 +62,12 @@ int main(int argc, char **argv) {
   Classifier classifier(config.cls_model_path, config.cls_params_path,
                         config.use_gpu, config.gpu_id, config.gpu_mem,
-                        config.cpu_math_library_num_threads, config.use_mkldnn,
+                        config.cpu_threads, config.use_mkldnn,
                         config.use_tensorrt, config.use_fp16,
                         config.resize_short_size, config.crop_size);

   double elapsed_time = 0.0;
+  std::vector<double> cls_times;
   int warmup_iter = img_files_list.size() > 5 ? 5 : 0;
   for (int idx = 0; idx < img_files_list.size(); ++idx) {
     std::string img_path = img_files_list[idx];
 ...
@@ -78,7 +80,7 @@ int main(int argc, char **argv) {
     cv::cvtColor(srcimg, srcimg, cv::COLOR_BGR2RGB);

-    double run_time = classifier.Run(srcimg);
+    double run_time = classifier.Run(srcimg, &cls_times);
     if (idx >= warmup_iter) {
       elapsed_time += run_time;
       std::cout << "Current image path: " << img_path << std::endl;
 ...
@@ -90,5 +92,16 @@ int main(int argc, char **argv) {
     }
   }

+  std::string presion = "fp32";
+  if (config.use_fp16)
+    presion = "fp16";
+  if (config.benchmark) {
+    AutoLogger autolog("Classification", config.use_gpu, config.use_tensorrt,
+                       config.use_mkldnn, config.cpu_threads, 1,
+                       "1, 3, 224, 224", presion, cls_times,
+                       img_files_list.size());
+    autolog.report();
+  }
   return 0;
 }
```
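For orientation, here is a hedged sketch of how the updated demo binary is driven. The binary takes a config file and an image directory, matching the invocation used by the test driver below; the paths are placeholders, not part of the commit:

```bash
# Run the C++ classification demo over a directory of images.
# tools/config.txt supplies model paths and runtime switches; with
# "benchmark 1" set, the new AutoLogger block prints a timing/env report.
cd deploy/cpp
./build/clas_system tools/config.txt /path/to/images
```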
deploy/cpp/tools/build.sh

```diff
-OPENCV_DIR=/PaddleClas/opencv-3.4.7/opencv3/
-LIB_DIR=/PaddleClas/fluid_inference/
+OPENCV_DIR=/work/project/project/cpp_infer/opencv-3.4.7/opencv3
+LIB_DIR=/work/project/project/cpp_infer/paddle_inference/
 CUDA_LIB_DIR=/usr/local/cuda/lib64
 CUDNN_LIB_DIR=/usr/lib/x86_64-linux-gnu/
 ...
```
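These paths are machine-specific, so the assumed workflow (not part of the commit) is to point them at local installs before building:

```bash
# Hypothetical build run: first edit deploy/cpp/tools/build.sh so that
# OPENCV_DIR points at an OpenCV install tree and LIB_DIR at an extracted
# paddle_inference package, then build the clas_system demo.
cd deploy/cpp
sh tools/build.sh
```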
deploy/cpp/tools/config.txt

```diff
@@ -2,7 +2,7 @@
 use_gpu 0
 gpu_id 0
 gpu_mem 4000
-cpu_math_library_num_threads 10
+cpu_threads 10
 use_mkldnn 1
 use_tensorrt 0
 use_fp16 0
 ...
@@ -12,3 +12,6 @@ cls_model_path /PaddleClas/inference/cls_infer.pdmodel
 cls_params_path /PaddleClas/inference/cls_infer.pdiparams
 resize_short_size 256
 crop_size 224
+
+# for log env info
+benchmark 0
```
Files moved:

tests/DarkNet53.txt → tests/config/DarkNet53.txt
tests/HRNet_W18_C.txt → tests/config/HRNet_W18_C.txt
tests/LeViT_128S.txt → tests/config/LeViT_128S.txt
tests/MobileNetV1.txt → tests/config/MobileNetV1.txt
tests/MobileNetV2.txt → tests/config/MobileNetV2.txt
tests/MobileNetV3_large_x1_0.txt → tests/config/MobileNetV3_large_x1_0.txt
tests/ResNeXt101_vd_64x4d.txt → tests/config/ResNeXt101_vd_64x4d.txt
tests/ResNet50_vd.txt → tests/config/ResNet50_vd.txt (moved and modified)

```diff
@@ -49,3 +49,10 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
 -o Global.save_log_path:null
 -o Global.benchmark:True
 null:null
+null:null
+===========================cpp_infer_params===========================
+use_gpu:0|1
+cpu_threads:1|6
+use_mkldnn:0|1
+use_tensorrt:0|1
+use_fp16:0|1
```
tests/ShuffleNetV2_x1_0.txt → tests/config/ShuffleNetV2_x1_0.txt (moved)
tests/SwinTransformer_tiny_patch4_window7_224.txt → tests/config/SwinTransformer_tiny_patch4_window7_224.txt (moved)
tests/config/cpp_config.txt (new file, mode 100755)

```
# model load config
gpu_id 0
gpu_mem 2000

# whole chain test will add following config
# use_gpu 0
# cpu_threads 10
# use_mkldnn 1
# use_tensorrt 0
# use_fp16 0

# cls config
cls_model_path inference/inference.pdmodel
cls_params_path inference/inference.pdiparams
resize_short_size 256
crop_size 224

# for log env info
benchmark 1
```
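The commented-out keys are filled in at test time: the whole-chain driver copies this base file and echoes the swept values onto the end. A hedged illustration of one CPU sweep point (the values are examples taken from the cpp_infer_params grid, not fixed settings):

```bash
# Compose a per-run config the way tests/test.sh's func_cpp_inference does:
# start from the base file, then append one value from each sweep list.
cp tests/config/cpp_config.txt cpp_config.txt
echo "use_gpu 0"      >> cpp_config.txt
echo "cpu_threads 6"  >> cpp_config.txt
echo "use_mkldnn 1"   >> cpp_config.txt
echo "use_tensorrt 0" >> cpp_config.txt
```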
tests/cpp_config.txt (new file, mode 100755)

```
# model load config
gpu_id 0
gpu_mem 2000

# whole chain test will add following config
# use_gpu 0
# cpu_threads 10
# use_mkldnn 1
# use_tensorrt 0
# use_fp16 0

# cls config
cls_model_path inference/inference.pdmodel
cls_params_path inference/inference.pdiparams
resize_short_size 256
crop_size 224

# for log env info
benchmark 1

eval "$cpp_use_gpu_key $use_gpu"
eval "$cpp_use_gpu_key" "$use_gpu"
${cpp_use_gpu_key} ${use_gpu}
1 2
1 2
```
tests/prepare.sh

```diff
@@ -33,7 +33,7 @@ if [ ${MODE} = "lite_train_infer" ] || [ ${MODE} = "whole_infer" ];then
     mv train.txt train_list.txt
     mv val.txt val_list.txt
     cd ../../
-elif [ ${MODE} = "infer" ];then
+elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then
     # download data
     cd dataset
     rm -rf ILSVRC2012
 ...
@@ -58,3 +58,63 @@ elif [ ${MODE} = "whole_train_infer" ];then
     mv val.txt val_list.txt
     cd ../../
 fi
+
+if [ ${MODE} = "cpp_infer" ];then
+    cd deploy/cpp
+    echo "################### build opencv ###################"
+    rm -rf 3.4.7.tar.gz opencv-3.4.7/
+    wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
+    tar -xf 3.4.7.tar.gz
+    install_path=$(pwd)/opencv-3.4.7/opencv3
+    cd opencv-3.4.7/
+    rm -rf build
+    mkdir build
+    cd build
+    cmake .. \
+        -DCMAKE_INSTALL_PREFIX=${install_path} \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DBUILD_SHARED_LIBS=OFF \
+        -DWITH_IPP=OFF \
+        -DBUILD_IPP_IW=OFF \
+        -DWITH_LAPACK=OFF \
+        -DWITH_EIGEN=OFF \
+        -DCMAKE_INSTALL_LIBDIR=lib64 \
+        -DWITH_ZLIB=ON \
+        -DBUILD_ZLIB=ON \
+        -DWITH_JPEG=ON \
+        -DBUILD_JPEG=ON \
+        -DWITH_PNG=ON \
+        -DBUILD_PNG=ON \
+        -DWITH_TIFF=ON \
+        -DBUILD_TIFF=ON
+    make -j
+    make install
+    cd ../../
+    echo "################### build opencv finished ###################"
+    echo "################### build PaddleClas demo ####################"
+    OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
+    LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
+    CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
+    CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
+    BUILD_DIR=build
+    rm -rf ${BUILD_DIR}
+    mkdir ${BUILD_DIR}
+    cd ${BUILD_DIR}
+    cmake .. \
+        -DPADDLE_LIB=${LIB_DIR} \
+        -DWITH_MKL=ON \
+        -DDEMO_NAME=clas_system \
+        -DWITH_GPU=OFF \
+        -DWITH_STATIC_LIB=OFF \
+        -DWITH_TENSORRT=OFF \
+        -DTENSORRT_DIR=${TENSORRT_DIR} \
+        -DOPENCV_DIR=${OPENCV_DIR} \
+        -DCUDNN_LIB=${CUDNN_LIB_DIR} \
+        -DCUDA_LIB=${CUDA_LIB_DIR} \
+    make -j
+    echo "################### build PaddleClas demo finished ###################"
+fi
```
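As a sketch, the new branch would be exercised like this, assuming prepare.sh follows the same FILENAME/MODE argument convention as tests/test.sh (the model config is illustrative):

```bash
# Download the eval data, build OpenCV 3.4.7 from source, and build the
# clas_system demo — all gated behind the new cpp_infer mode.
bash tests/prepare.sh tests/config/ResNet50_vd.txt cpp_infer
```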
tests/test.sh

```diff
 #!/bin/bash
 FILENAME=$1
-# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer']
+# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer']
 MODE=$2

 dataline=$(cat ${FILENAME})
 ...
@@ -145,10 +145,78 @@ benchmark_value=$(func_parser_value "${lines[49]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")

+if [ ${MODE} = "cpp_infer" ]; then
+    cpp_use_gpu_key=$(func_parser_key "${lines[53]}")
+    cpp_use_gpu_list=$(func_parser_value "${lines[53]}")
+    cpp_cpu_threads_key=$(func_parser_key "${lines[54]}")
+    cpp_cpu_threads_list=$(func_parser_value "${lines[54]}")
+    cpp_use_mkldnn_key=$(func_parser_key "${lines[55]}")
+    cpp_use_mkldnn_list=$(func_parser_value "${lines[55]}")
+    cpp_use_tensorrt_key=$(func_parser_key "${lines[56]}")
+    cpp_use_tensorrt_list=$(func_parser_value "${lines[56]}")
+    cpp_use_fp16_key=$(func_parser_key "${lines[57]}")
+    cpp_use_fp16_list=$(func_parser_value "${lines[57]}")
+fi

 LOG_PATH="./tests/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results.log"

+function func_cpp_inference(){
+    IFS='|'
+    _script=$1
+    _log_path=$2
+    _img_dir=$3
+    # inference
+    for use_gpu in ${cpp_use_gpu_list[*]}; do
+        if [ ${use_gpu} = "0" ] || [ ${use_gpu} = "cpu" ]; then
+            for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
+                if [ ${use_mkldnn} = "0" ] && [ ${_flag_quant} = "True" ]; then
+                    continue
+                fi
+                for threads in ${cpp_cpu_threads_list[*]}; do
+                    _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}.log"
+                    set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
+                    cp ../tests/config/cpp_config.txt cpp_config.txt
+                    echo "${cpp_use_gpu_key} ${use_gpu}" >> cpp_config.txt
+                    echo "${cpp_cpu_threads_key} ${threads}" >> cpp_config.txt
+                    echo "${cpp_use_mkldnn_key} ${use_mkldnn}" >> cpp_config.txt
+                    echo "${cpp_use_tensorrt_key} 0" >> cpp_config.txt
+                    command="${_script} cpp_config.txt ${_img_dir} > ${_save_log_path} 2>&1 "
+                    eval $command
+                    last_status=${PIPESTATUS[0]}
+                    eval "cat ${_save_log_path}"
+                    status_check $last_status "${command}" "${status_log}"
+                done
+            done
+        elif [ ${use_gpu} = "1" ] || [ ${use_gpu} = "gpu" ]; then
+            for use_trt in ${cpp_use_tensorrt_list[*]}; do
+                for precision in ${cpp_use_fp16_list[*]}; do
+                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                        continue
+                    fi
+                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
+                        continue
+                    fi
+                    _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                    cp ../tests/config/cpp_config.txt cpp_config.txt
+                    echo "${cpp_use_gpu_key} ${use_gpu}" >> cpp_config.txt
+                    echo "${cpp_cpu_threads_key} ${threads}" >> cpp_config.txt
+                    echo "${cpp_use_mkldnn_key} ${use_mkldnn}" >> cpp_config.txt
+                    echo "${cpp_use_tensorrt_key} ${precision}" >> cpp_config.txt
+                    command="${_script} cpp_config.txt ${_img_dir} > ${_save_log_path} 2>&1 "
+                    eval $command
+                    last_status=${PIPESTATUS[0]}
+                    eval "cat ${_save_log_path}"
+                    status_check $last_status "${command}" "${status_log}"
+                done
+            done
+        else
+            echo "Does not support hardware other than CPU and GPU Currently!"
+        fi
+    done
+}
+
 function func_inference(){
     IFS='|'
 ...
@@ -247,6 +315,10 @@ if [ ${MODE} = "infer" ]; then
         Count=$(($Count + 1))
     done
     cd ..
+elif [ ${MODE} = "cpp_infer" ]; then
+    cd deploy
+    func_cpp_inference "./cpp/build/clas_system" "../${LOG_PATH}" "${infer_img_dir}"
+    cd ..
 else
     IFS="|"
 ...
```
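Putting the chain together, a hedged end-to-end example; the model config and the image directory it names are illustrative:

```bash
# Whole-chain C++ inference test for one model: prepare data and binaries,
# then sweep the cpp_infer_params grid (CPU: mkldnn x threads; GPU: trt x
# precision). Per-run logs and results.log land in ./tests/output.
bash tests/prepare.sh tests/config/ResNet50_vd.txt cpp_infer
bash tests/test.sh tests/config/ResNet50_vd.txt cpp_infer
```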