Commit 87873447 authored by D dongshuilong

add c++(cls, shitu) chain for tipc

Parent ded24add
@@ -18,102 +18,102 @@
namespace Feature {
void FeatureExtracter::LoadModel(const std::string &model_path,
const std::string &params_path) {
paddle_infer::Config config;
config.SetModel(model_path, params_path);
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 20, 1, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, false);
}
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
config.EnableMKLDNN();
// cache 10 different shapes for mkldnn to avoid memory leak
config.SetMkldnnCacheCapacity(10);
}
config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
}
config.SwitchUseFeedFetchOps(false);
// true for multiple input
config.SwitchSpecifyInputNames(true);
config.SwitchIrOptim(true);
config.EnableMemoryOptim();
config.DisableGlogInfo();
this->predictor_ = CreatePredictor(config);
}
void FeatureExtracter::Run(cv::Mat &img, std::vector<float> &out_data,
std::vector<double> &times) {
cv::Mat resize_img;
std::vector<double> time;
auto preprocess_start = std::chrono::system_clock::now();
this->resize_op_.Run(img, resize_img, this->resize_short_,
this->resize_size_);
this->normalize_op_.Run(&resize_img, this->mean_, this->std_, this->scale_);
std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
this->permute_op_.Run(&resize_img, input.data());
auto input_names = this->predictor_->GetInputNames();
auto input_t = this->predictor_->GetInputHandle(input_names[0]);
input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
auto preprocess_end = std::chrono::system_clock::now();
auto infer_start = std::chrono::system_clock::now();
input_t->CopyFromCpu(input.data());
this->predictor_->Run();
auto output_names = this->predictor_->GetOutputNames();
auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
out_data.resize(out_num);
output_t->CopyToCpu(out_data.data());
auto infer_end = std::chrono::system_clock::now();
auto postprocess_start = std::chrono::system_clock::now();
if (this->feature_norm)
FeatureNorm(out_data);
auto postprocess_end = std::chrono::system_clock::now();
std::chrono::duration<float> preprocess_diff =
preprocess_end - preprocess_start;
time.push_back(double(preprocess_diff.count()));
std::chrono::duration<float> inference_diff = infer_end - infer_start;
double inference_cost_time = double(inference_diff.count());
time.push_back(inference_cost_time);
// std::chrono::duration<float> postprocess_diff =
// postprocess_end - postprocess_start;
time.push_back(0);
// std::cout << "result: " << std::endl;
// std::cout << "\tclass id: " << maxPosition << std::endl;
// std::cout << std::fixed << std::setprecision(10)
// << "\tscore: " << double(out_data[maxPosition]) << std::endl;
times[0] += time[0];
times[1] += time[1];
times[2] += time[2];
}
void FeatureExtracter::FeatureNorm(std::vector<float> &featuer) {
float featuer_sqrt = std::sqrt(std::inner_product(
featuer.begin(), featuer.end(), featuer.begin(), 0.0f));
for (int i = 0; i < featuer.size(); ++i)
featuer[i] /= featuer_sqrt;
}
// The functions above are the pre-commit versions; this commit replaces them
// with the versions below, which switch to steady_clock and report durations
// in milliseconds.
void FeatureExtracter::LoadModel(const std::string &model_path,
const std::string &params_path) {
paddle_infer::Config config;
config.SetModel(model_path, params_path);
if (this->use_gpu_) {
config.EnableUseGpu(this->gpu_mem_, this->gpu_id_);
if (this->use_tensorrt_) {
config.EnableTensorRtEngine(
1 << 20, 1, 3,
this->use_fp16_ ? paddle_infer::Config::Precision::kHalf
: paddle_infer::Config::Precision::kFloat32,
false, false);
}
} else {
config.DisableGpu();
if (this->use_mkldnn_) {
config.EnableMKLDNN();
// cache 10 different shapes for mkldnn to avoid memory leak
config.SetMkldnnCacheCapacity(10);
}
config.SetCpuMathLibraryNumThreads(this->cpu_math_library_num_threads_);
}
config.SwitchUseFeedFetchOps(false);
// true for multiple input
config.SwitchSpecifyInputNames(true);
config.SwitchIrOptim(true);
config.EnableMemoryOptim();
config.DisableGlogInfo();
this->predictor_ = CreatePredictor(config);
}
void FeatureExtracter::Run(cv::Mat &img, std::vector<float> &out_data,
std::vector<double> &times) {
cv::Mat resize_img;
std::vector<double> time;
auto preprocess_start = std::chrono::steady_clock::now();
this->resize_op_.Run(img, resize_img, this->resize_short_,
this->resize_size_);
this->normalize_op_.Run(&resize_img, this->mean_, this->std_, this->scale_);
std::vector<float> input(1 * 3 * resize_img.rows * resize_img.cols, 0.0f);
this->permute_op_.Run(&resize_img, input.data());
auto input_names = this->predictor_->GetInputNames();
auto input_t = this->predictor_->GetInputHandle(input_names[0]);
input_t->Reshape({1, 3, resize_img.rows, resize_img.cols});
auto preprocess_end = std::chrono::steady_clock::now();
auto infer_start = std::chrono::steady_clock::now();
input_t->CopyFromCpu(input.data());
this->predictor_->Run();
auto output_names = this->predictor_->GetOutputNames();
auto output_t = this->predictor_->GetOutputHandle(output_names[0]);
std::vector<int> output_shape = output_t->shape();
int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,
std::multiplies<int>());
out_data.resize(out_num);
output_t->CopyToCpu(out_data.data());
auto infer_end = std::chrono::steady_clock::now();
auto postprocess_start = std::chrono::steady_clock::now();
if (this->feature_norm)
FeatureNorm(out_data);
auto postprocess_end = std::chrono::steady_clock::now();
std::chrono::duration<float> preprocess_diff =
preprocess_end - preprocess_start;
time.push_back(double(preprocess_diff.count()) * 1000);
std::chrono::duration<float> inference_diff = infer_end - infer_start;
double inference_cost_time = double(inference_diff.count() * 1000);
time.push_back(inference_cost_time);
// std::chrono::duration<float> postprocess_diff =
// postprocess_end - postprocess_start;
time.push_back(0);
// std::cout << "result: " << std::endl;
// std::cout << "\tclass id: " << maxPosition << std::endl;
// std::cout << std::fixed << std::setprecision(10)
// << "\tscore: " << double(out_data[maxPosition]) << std::endl;
times[0] += time[0];
times[1] += time[1];
times[2] += time[2];
}
void FeatureExtracter::FeatureNorm(std::vector<float> &featuer) {
float featuer_sqrt = std::sqrt(std::inner_product(
featuer.begin(), featuer.end(), featuer.begin(), 0.0f));
for (int i = 0; i < featuer.size(); ++i)
featuer[i] /= featuer_sqrt;
}
} // namespace Feature
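FeatureNorm above L2-normalizes the extracted feature vector in place: with f the raw output, each element is rescaled so the vector has unit Euclidean length, i.e. \hat{f}_i = f_i / \sqrt{\sum_j f_j^2}, which is exactly what the std::inner_product / std::sqrt / divide sequence computes in both the old and the new version of the function.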
This diff is collapsed.
@@ -14,20 +14,21 @@ def parse_args():
def main():
args = parse_args()
with open(args.config)as fd:
config = yaml.load(fd)
with open(args.config) as fd:
config = yaml.load(fd.read(), yaml.FullLoader)
index_dir = ""
try:
index_dir = config["IndexProcess"]["index_dir"]
except Exception as e:
print("The IndexProcess.index_dir in config_file dose not exist")
print("The IndexProcess.index_dir in config_file dose not exist")
exit(1)
id_map_path = os.path.join(index_dir, "id_map.pkl")
assert os.path.exists(id_map_path), "The id_map file dose not exist: {}".format(id_map_path)
assert os.path.exists(
id_map_path), "The id_map file dose not exist: {}".format(id_map_path)
with open(id_map_path, "rb")as fd:
with open(id_map_path, "rb") as fd:
ids = pickle.load(fd)
with open(os.path.join(index_dir, "id_map.txt"), "w")as fd:
with open(os.path.join(index_dir, "id_map.txt"), "w") as fd:
for k, v in ids.items():
v = v.split("\t")[1]
fd.write(str(k) + " " + v + "\n")
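Each value in id_map.pkl is assumed here to be a tab-separated string whose second field is the label, so the loop above writes one "<id> <label>" pair per line into id_map.txt. A quick sanity check after the script has run (the index path is only an example; it matches the drink_dataset_v1.0 index used by the shitu chain below):

head -n 3 ./dataset/drink_dataset_v1.0/index/id_map.txt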
===========================cpp_infer_params===========================
model_name:PPShiTu
cpp_infer_type:shitu
feature_inference_model_dir:./feature_inference/
det_inference_model_dir:./det_inference
cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
det_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
infer_quant:False
inference_cmd:./deploy/cpp_shitu/build/pp_shitu -c inference_drink.yaml
use_gpu:True|False
enable_mkldnn:True|False
cpu_threads:1|6
batch_size:1
use_tensorrt:False|True
precision:fp32|fp16
data_dir:./dataset/drink_dataset_v1.0
benchmark:True
===========================cpp_infer_params===========================
model_name:ResNet50_vd
cpp_infer_type:cls
cls_inference_model_dir:./cls_inference/
det_inference_model_dir:
cls_inference_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/ResNet50_vd_inference.tar
det_inference_url:
infer_quant:False
inference_cmd:./deploy/cpp/build/clas_system -c inference_cls.yaml
use_gpu:True|False
enable_mkldnn:True|False
cpu_threads:1|6
batch_size:1
use_tensorrt:False|True
precision:fp32|fp16
image_dir:./dataset/ILSVRC2012/val
benchmark:True
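Both config files are lists of colon-separated key:value lines, with '|' separating the alternatives that the test script loops over (e.g. use_gpu:True|False). The shell helpers used below (func_parser_key / func_parser_value) consume exactly this format; the following is only a minimal sketch of that kind of parsing, not the actual implementation from test_tipc/common_func.sh:

# split "use_gpu:True|False" into a key and a list of values (sketch)
line="use_gpu:True|False"
key=${line%%:*}    # text before the first ':'  -> use_gpu
value=${line#*:}   # text after the first ':', so URLs containing '://' stay intact
IFS='|' read -ra opts <<< "${value}"
for opt in "${opts[@]}"; do
    echo "${key} = ${opt}"    # use_gpu = True, then use_gpu = False
done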
import os
import yaml
import argparse
def str2bool(v):
if v.lower() == 'true':
return True
else:
return False
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--type', required=True, choices=["cls", "shitu"])
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--mkldnn', type=str2bool, default=True)
parser.add_argument('--gpu', type=str2bool, default=False)
parser.add_argument('--cpu_thread', type=int, default=1)
parser.add_argument('--tensorrt', type=str2bool, default=False)
parser.add_argument('--precision', type=str, choices=["fp32", "fp16"])
parser.add_argument('--benchmark', type=str2bool, default=True)
parser.add_argument(
'--cls_yaml_path',
type=str,
default="deploy/configs/inference_cls.yaml")
parser.add_argument(
'--shitu_yaml_path',
type=str,
default="deploy/configs/inference_drink.yaml")
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--save_path', type=str, default='./')
parser.add_argument('--cls_model_dir', type=str)
parser.add_argument('--det_model_dir', type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.type == "cls":
save_path = os.path.join(args.save_path,
os.path.basename(args.cls_yaml_path))
fd = open(args.cls_yaml_path)
else:
save_path = os.path.join(args.save_path,
os.path.basename(args.shitu_yaml_path))
fd = open(args.shitu_yaml_path)
config = yaml.load(fd, yaml.FullLoader)
fd.close()
config["Global"]["batch_size"] = args.batch_size
config["Global"]["use_gpu"] = args.gpu
config["Global"]["enable_mkldnn"] = args.mkldnn
config["Global"]["benchmark"] = args.benchmark
config["Global"]["use_tensorrt"] = args.tensorrt
config["Global"]["use_fp16"] = True if args.precision == "fp16" else False
if args.type == "cls":
config["Global"]["infer_imgs"] = args.data_dir
assert args.cls_model_dir
config["Global"]["inference_model_dir"] = args.cls_model_dir
else:
config["Global"]["infer_imgs"] = os.path.join(args.data_dir,
"test_images")
config["IndexProcess"]["index_dir"] = os.path.join(args.data_dir,
"index")
assert args.cls_model_dir
assert args.det_model_dir
config["Global"]["det_inference_model_dir"] = args.det_model_dir
config["Global"]["rec_inference_model_dir"] = args.cls_model_dir
with open(save_path, 'w') as fd:
yaml.dump(config, fd)
print("Generate new yaml done")
if __name__ == "__main__":
main()
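generate_cpp_yaml.py is what the test driver calls (see generate_yaml_cmd in test_inference_cpp.sh below) to rewrite the deploy config before each C++ run. A hypothetical standalone invocation for the cls chain, reusing paths from the ResNet50_vd config above:

python3 test_tipc/generate_cpp_yaml.py --type cls --batch_size 1 --gpu False --mkldnn True --cpu_thread 6 --tensorrt False --precision fp32 --benchmark True --data_dir ./dataset/ILSVRC2012/val --cls_model_dir ./cls_inference/ --save_path ./

With these arguments the script loads deploy/configs/inference_cls.yaml, overrides the Global keys listed above, and writes ./inference_cls.yaml, which the clas_system binary then picks up via -c inference_cls.yaml.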
@@ -33,6 +33,59 @@ function func_parser_value(){
fi
}
function func_get_url_file_name(){
strs=$1
IFS="/"
array=(${strs})
tmp=${array[${#array[@]}-1]}
echo ${tmp}
}
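# func_get_url_file_name splits its argument on '/' and echoes the last path
# component; the shitu branch below uses it to recover the tar file name from a
# download URL and then strips the extension to get the model directory name.
# Example (URL taken from the PPShiTu config above):
#   tar_name=$(func_get_url_file_name "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar")
#   echo ${tar_name}     # general_PPLCNet_x2_5_lite_v1.0_infer.tar
#   echo ${tar_name%.*}  # general_PPLCNet_x2_5_lite_v1.0_infer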
model_name=$(func_parser_value "${lines[1]}")
if [ ${MODE} = "cpp_infer" ];then
if [[ $FILENAME == *infer_cpp_linux_gpu_cpu.txt ]];then
cpp_type=$(func_parser_value "${lines[2]}")
cls_inference_model_dir=$(func_parser_value "${lines[3]}")
det_inference_model_dir=$(func_parser_value "${lines[4]}")
cls_inference_url=$(func_parser_value "${lines[5]}")
det_inference_url=$(func_parser_value "${lines[6]}")
if [[ $cpp_type == "cls" ]];then
eval "wget -nc $cls_inference_url"
tar xf "${model_name}_inference.tar"
eval "mv inference $cls_inference_model_dir"
cd dataset
rm -rf ILSVRC2012
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar
tar xf whole_chain_infer.tar
ln -s whole_chain_infer ILSVRC2012
cd ..
elif [[ $cpp_type == "shitu" ]];then
eval "wget -nc $cls_inference_url"
tar_name=$(func_get_url_file_name "$cls_inference_url")
model_dir=${tar_name%.*}
eval "tar xf ${tar_name}"
eval "mv ${model_dir} ${cls_inference_model_dir}"
eval "wget -nc $det_inference_url"
tar_name=$(func_get_url_file_name "$det_inference_url")
model_dir=${tar_name%.*}
eval "tar xf ${tar_name}"
eval "mv ${model_dir} ${det_inference_model_dir}"
cd dataset
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar
tar -xf drink_dataset_v1.0.tar
else
echo "Wrong cpp type in config file in line 3. only support cls, shitu"
fi
exit 0
else
echo "use wrong config file"
exit 1
fi
fi
model_name=$(func_parser_value "${lines[1]}")
model_url_value=$(func_parser_value "${lines[35]}")
model_url_key=$(func_parser_key "${lines[35]}")
@@ -114,63 +167,3 @@ if [ ${MODE} = "serving_infer" ];then
cd ./deploy/paddleserving
wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
fi
if [ ${MODE} = "cpp_infer" ];then
cd deploy/cpp
echo "################### build opencv ###################"
rm -rf 3.4.7.tar.gz opencv-3.4.7/
wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
tar -xf 3.4.7.tar.gz
install_path=$(pwd)/opencv-3.4.7/opencv3
cd opencv-3.4.7/
rm -rf build
mkdir build
cd build
cmake .. \
-DCMAKE_INSTALL_PREFIX=${install_path} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DWITH_IPP=OFF \
-DBUILD_IPP_IW=OFF \
-DWITH_LAPACK=OFF \
-DWITH_EIGEN=OFF \
-DCMAKE_INSTALL_LIBDIR=lib64 \
-DWITH_ZLIB=ON \
-DBUILD_ZLIB=ON \
-DWITH_JPEG=ON \
-DBUILD_JPEG=ON \
-DWITH_PNG=ON \
-DBUILD_PNG=ON \
-DWITH_TIFF=ON \
-DBUILD_TIFF=ON
make -j
make install
cd ../../
echo "################### build opencv finished ###################"
echo "################### build PaddleClas demo ####################"
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DDEMO_NAME=clas_system \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
make -j
echo "################### build PaddleClas demo finished ###################"
fi
@@ -10,37 +10,27 @@ lines=(${dataline})
# parser cpp inference model
model_name=$(func_parser_value "${lines[1]}")
use_opencv=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir_list=$(func_parser_value "${lines[3]}")
cpp_infer_is_quant=$(func_parser_value "${lines[4]}")
cpp_infer_type=$(func_parser_value "${lines[2]}")
cpp_infer_model_dir=$(func_parser_value "${lines[3]}")
cpp_det_infer_model_dir=$(func_parser_value "${lines[4]}")
cpp_infer_is_quant=$(func_parser_value "${lines[7]}")
# parser cpp inference
inference_cmd=$(func_parser_value "${lines[5]}")
cpp_use_gpu_key=$(func_parser_key "${lines[6]}")
cpp_use_gpu_list=$(func_parser_value "${lines[6]}")
cpp_use_mkldnn_key=$(func_parser_key "${lines[7]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[7]}")
cpp_cpu_threads_key=$(func_parser_key "${lines[8]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[8]}")
cpp_batch_size_key=$(func_parser_key "${lines[9]}")
cpp_batch_size_list=$(func_parser_value "${lines[9]}")
cpp_use_trt_key=$(func_parser_key "${lines[10]}")
cpp_use_trt_list=$(func_parser_value "${lines[10]}")
cpp_precision_key=$(func_parser_key "${lines[11]}")
cpp_precision_list=$(func_parser_value "${lines[11]}")
cpp_infer_model_key=$(func_parser_key "${lines[12]}")
cpp_image_dir_key=$(func_parser_key "${lines[13]}")
cpp_infer_img_dir=$(func_parser_value "${lines[13]}")
cpp_infer_key1=$(func_parser_key "${lines[14]}")
cpp_infer_value1=$(func_parser_value "${lines[14]}")
cpp_benchmark_key=$(func_parser_key "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[15]}")
inference_cmd=$(func_parser_value "${lines[8]}")
cpp_use_gpu_list=$(func_parser_value "${lines[9]}")
cpp_use_mkldnn_list=$(func_parser_value "${lines[10]}")
cpp_cpu_threads_list=$(func_parser_value "${lines[11]}")
cpp_batch_size_list=$(func_parser_value "${lines[12]}")
cpp_use_trt_list=$(func_parser_value "${lines[13]}")
cpp_precision_list=$(func_parser_value "${lines[14]}")
cpp_image_dir_value=$(func_parser_value "${lines[15]}")
cpp_benchmark_value=$(func_parser_value "${lines[16]}")
LOG_PATH="./test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"
generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
function func_cpp_inference(){
function func_shitu_cpp_inference(){
IFS='|'
_script=$1
_model_dir=$2
@@ -48,6 +38,7 @@ function func_cpp_inference(){
_img_dir=$4
_flag_quant=$5
# inference
for use_gpu in ${cpp_use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
@@ -60,17 +51,13 @@
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
precison="int8"
fi
_save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
_save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir}"
eval $command
command="${_script} 2>&1|tee ${_save_log_path}"
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
@@ -88,20 +75,74 @@
continue
fi
for batch_size in ${cpp_batch_size_list[*]}; do
_save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
set_tensorrt=$(func_set_params "${cpp_use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${cpp_precision_key}" "${precision}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
_save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir}"
eval $command
command="${_script} 2>&1|tee ${_save_log_path}"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${_script}" "${status_log}"
done
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
function func_cls_cpp_inference(){
IFS='|'
_script=$1
_model_dir=$2
_log_path=$3
_img_dir=$4
_flag_quant=$5
# inference
for use_gpu in ${cpp_use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${cpp_use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
continue
fi
for threads in ${cpp_cpu_threads_list[*]}; do
for batch_size in ${cpp_batch_size_list[*]}; do
precision="fp32"
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
precison="int8"
fi
_save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir}"
eval $command
command1="${_script} 2>&1|tee ${_save_log_path}"
eval ${command1}
last_status=${PIPESTATUS[0]}
status_check $last_status "${command1}" "${status_log}"
done
done
done
elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
for use_trt in ${cpp_use_trt_list[*]}; do
for precision in ${cpp_precision_list[*]}; do
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
continue
fi
if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
continue
fi
if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
continue
fi
for batch_size in ${cpp_batch_size_list[*]}; do
_save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir}"
eval $command
command="${_script} 2>&1|tee ${_save_log_path}"
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}"
done
done
done
@@ -111,25 +152,55 @@ function func_cpp_inference(){
done
}
if [[ $cpp_infer_type == "cls" ]]; then
cd deploy/cpp
elif [[ $cpp_infer_type == "shitu" ]]; then
cd deploy/cpp_shitu
else
echo "Only support cls and shitu"
exit 0
fi
cd deploy/cpp_infer
if [ ${use_opencv} = "True" ]; then
if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
echo "################### build opencv skipped ###################"
else
echo "################### build opencv ###################"
rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
tar -xf opencv-3.4.7.tar.gz
if [[ $cpp_infer_type == "shitu" ]]; then
echo "################### update cmake ###################"
wget -nc https://github.com/Kitware/CMake/releases/download/v3.22.0/cmake-3.22.0.tar.gz
tar xf cmake-3.22.0.tar.gz
cd ./cmake-3.22.0
export root_path=$PWD
export install_path=${root_path}/cmake
eval "./bootstrap --prefix=${install_path}"
make -j
make install
export PATH=${install_path}/bin:$PATH
cd ..
echo "################### update cmake done ###################"
echo "################### build faiss ###################"
apt-get install -y libopenblas-dev
git clone https://github.com/facebookresearch/faiss.git
cd faiss
export faiss_install_path=$PWD/faiss_install
eval "cmake -B build . -DFAISS_ENABLE_PYTHON=OFF -DCMAKE_INSTALL_PREFIX=${faiss_install_path}"
make -C build -j faiss
make -C build install
fi
if [ -d "opencv-3.4.7/opencv3/" ] && [ $(md5sum opencv-3.4.7.tar.gz | awk -F ' ' '{print $1}') = "faa2b5950f8bee3f03118e600c74746a" ];then
echo "################### build opencv skipped ###################"
else
echo "################### build opencv ###################"
rm -rf opencv-3.4.7.tar.gz opencv-3.4.7/
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/opencv-3.4.7.tar.gz
tar -xf opencv-3.4.7.tar.gz
cd opencv-3.4.7/
install_path=$(pwd)/opencv3
rm -rf build
mkdir build
cd build
cmake .. \
-DCMAKE_INSTALL_PREFIX=${install_path} \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
@@ -147,21 +218,17 @@ if [ ${use_opencv} = "True" ]; then
-DWITH_TIFF=ON \
-DBUILD_TIFF=ON
make -j
make install
cd ../
echo "################### build opencv finished ###################"
fi
echo "################### build PaddleOCR demo ####################"
if [ ${use_opencv} = "True" ]; then
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
else
OPENCV_DIR=''
fi
LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
echo "################### build PaddleClas demo ####################"
OPENCV_DIR=$(pwd)/opencv-3.4.7/opencv3/
LIB_DIR=/work/project/project/test/paddle_inference/
# LIB_DIR=$(pwd)/Paddle/build/paddle_inference_install_dir/
CUDA_LIB_DIR=$(dirname `find /usr -name libcudart.so`)
CUDNN_LIB_DIR=$(dirname `find /usr -name libcudnn.so`)
@@ -169,20 +236,36 @@ BUILD_DIR=build
rm -rf ${BUILD_DIR}
mkdir ${BUILD_DIR}
cd ${BUILD_DIR}
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=OFF \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR} \
if [[ $cpp_infer_type == cls ]]; then
cmake .. \
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=ON \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR}
echo "---------------------------"
else
cmake ..\
-DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=ON \
-DWITH_GPU=ON \
-DWITH_STATIC_LIB=OFF \
-DWITH_TENSORRT=OFF \
-DOPENCV_DIR=${OPENCV_DIR} \
-DCUDNN_LIB=${CUDNN_LIB_DIR} \
-DCUDA_LIB=${CUDA_LIB_DIR} \
-DTENSORRT_DIR=${TENSORRT_DIR} \
-DFAISS_DIR=${FAISS_DIR} \
-DFAISS_WITH_MKL=OFF
fi
make -j
cd ../../../
echo "################### build PaddleOCR demo finished ###################"
# cd ../../
echo "################### build PaddleClas demo finished ###################"
# set cuda device
@@ -200,9 +283,13 @@ echo "################### run test ###################"
export Count=0
IFS="|"
infer_quant_flag=(${cpp_infer_is_quant})
for infer_model in ${cpp_infer_model_dir_list[*]}; do
for infer_model in ${cpp_infer_model_dir[*]}; do
#run inference
is_quant=${infer_quant_flag[Count]}
func_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_infer_img_dir}" ${is_quant}
if [[ $cpp_infer_type == "cls" ]]; then
func_cls_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
else
func_shitu_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_image_dir_value}" ${is_quant}
fi
Count=$(($Count + 1))
done
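Taken together, the new chain is driven like the other TIPC modes: prepare.sh downloads the inference models and datasets for MODE=cpp_infer, and test_inference_cpp.sh builds the demo (plus cmake and faiss for shitu) and then sweeps the use_gpu / mkldnn / threads / batch_size / TensorRT / precision lists from the config file. A hypothetical end-to-end run, assuming the usual TIPC convention of passing the config txt as the first argument and the mode as the second (the exact location of the *_infer_cpp_linux_gpu_cpu.txt file is not part of this diff):

config_txt=test_tipc/config/ResNet50_vd_infer_cpp_linux_gpu_cpu.txt   # hypothetical path
bash test_tipc/prepare.sh ${config_txt} cpp_infer
bash test_tipc/test_inference_cpp.sh ${config_txt}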