diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md
index db1e27d9a8c19d5879b3eedf78f823514e9fa367..5432ea454fb55c4a8d121af3f7136090ac38f23f 100644
--- a/test_tipc/docs/test_inference_cpp.md
+++ b/test_tipc/docs/test_inference_cpp.md
@@ -248,20 +248,20 @@ bash test_tipc/prepare.sh test_tipc/config/ResNet/ResNet50_linux_gpu_normal_norm
 The test method is shown below; to test a different model, simply substitute your own parameter configuration file.
 
 ```shell
-bash test_tipc/test_inference_cpp.sh ${your_params_file}
+bash test_tipc/test_inference_cpp.sh ${your_params_file} cpp_infer
 ```
 
 Taking the `Linux GPU/CPU C++ inference test` of `ResNet50` as an example, the command is as follows.
 
 ```shell
-bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
+bash test_tipc/test_inference_cpp.sh test_tipc/config/ResNet/ResNet50_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt cpp_infer
 ```
 
 Output like the following indicates that the command ran successfully.
 
 ```shell
-Run successfully with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log 2>&1!
-Run successfully with command - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cls_cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log 2>&1!
+Run successfully with command - ResNet50 - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cpp_infer/cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log 2>&1!
+Run successfully with command - ResNet50 - ./deploy/cpp/build/clas_system -c inference_cls.yaml > ./test_tipc/output/ResNet50/cpp_infer/cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log 2>&1!
 ```
 
 The results are printed in the final log, as shown below.
@@ -312,6 +312,6 @@ Current total inferen time cost: 5449.39 ms.
 Top5: class_id: 265, score: 0.0420, label: toy poodle
 ```
 
-The detailed logs are located in `./test_tipc/output/ResNet50/cls_cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log` and `./test_tipc/output/ResNet50/cls_cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log`.
+The detailed logs are located in `./test_tipc/output/ResNet50/cpp_infer/cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log` and `./test_tipc/output/ResNet50/cpp_infer/cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log`.
 
 If the run fails, the failure log and the corresponding command are also printed to the terminal; that command can be used to analyze the cause of the failure.
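The doc hunks above follow from the new `LOG_PATH="./test_tipc/output/${model_name}/${MODE}"` layout introduced in `test_tipc/test_inference_cpp.sh` below. A quick way to sanity-check the documented paths after a successful run, assuming the `ResNet50` config used in the example above:

```shell
# List the per-mode log directory created by the updated runner.
# Expected contents, per the docs above and the script changes below:
#   cpp_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log
#   cpp_infer_cpu_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log
#   results_cpp.log   (the aggregated status log)
ls ./test_tipc/output/ResNet50/cpp_infer/
```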
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 9b78c0ff1e6a726b1d329bfa13e1cdd6cd86842f..aa1d44fda2bb71ca06f0cdd27bb4be18909f6b8b 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -85,7 +85,12 @@ if [[ ${MODE} = "cpp_infer" ]]; then
     fi
     if [[ ! -d "./deploy/cpp/paddle_inference/" ]]; then
         pushd ./deploy/cpp/
-        wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz
+        PADDLEInfer=$3
+        if [ "" = "$PADDLEInfer" ];then
+            wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate
+        else
+            wget -nc ${PADDLEInfer} --no-check-certificate
+        fi
         tar xf paddle_inference.tgz
         popd
     fi
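The `prepare.sh` hunk makes the Paddle Inference library URL overridable: an optional third argument is used when present, and the pinned 2.2.2 CUDA 10.1 build is fetched otherwise. A sketch of both invocations; `${your_params_file}` and `${custom_paddle_inference_url}` are placeholders, not values from this patch:

```shell
# Default: fetch the pinned 2.2.2 GPU build of paddle_inference.tgz.
bash test_tipc/prepare.sh ${your_params_file} cpp_infer

# Override: fetch a custom prebuilt inference library instead.
bash test_tipc/prepare.sh ${your_params_file} cpp_infer ${custom_paddle_inference_url}
```

Note that the unchanged `tar xf paddle_inference.tgz` still expects the downloaded archive to be named `paddle_inference.tgz`, so a custom URL must point at a tarball with that exact filename.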
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 6f67f479cf2fad49eeb85badea64f4d90e2a3964..255e0839a5d2541cd01018b4a09922af677edf80 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -2,10 +2,17 @@ source test_tipc/common_func.sh
 
 FILENAME=$1
-GPUID=$2
+MODE=$2
+
+# set cuda device
+GPUID=$3
 if [[ ! $GPUID ]];then
     GPUID=0
 fi
+env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+set CUDA_VISIBLE_DEVICES
+eval $env
+
 dataline=$(awk 'NR==1, NR==19{print}' $FILENAME)
 
 # parser params
 
@@ -30,7 +37,7 @@ cpp_benchmark_value=$(func_parser_value "${lines[16]}")
 generate_yaml_cmd=$(func_parser_value "${lines[17]}")
 transform_index_cmd=$(func_parser_value "${lines[18]}")
 
-LOG_PATH="./test_tipc/output/${model_name}"
+LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_cpp.log"
 # generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
@@ -56,7 +63,7 @@ function func_shitu_cpp_inference(){
             if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                 precison="int8"
             fi
-            _save_log_path="${_log_path}/shitu_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+            _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
             eval $transform_index_cmd
             command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
             eval $command
@@ -80,7 +87,7 @@ function func_shitu_cpp_inference(){
                     continue
                 fi
                 for batch_size in ${cpp_batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/shitu_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                     eval $transform_index_cmd
                     command="${generate_yaml_cmd} --type shitu --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --det_model_dir ${cpp_det_infer_model_dir} --gpu_id ${GPUID}"
                     eval $command
@@ -118,7 +125,7 @@ function func_cls_cpp_inference(){
             if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                 precison="int8"
             fi
-            _save_log_path="${_log_path}/cls_cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+            _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
             command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
             eval $command
 
@@ -142,7 +149,7 @@ function func_cls_cpp_inference(){
                     continue
                 fi
                 for batch_size in ${cpp_batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/cls_cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/cpp_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                     command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn False --gpu ${use_gpu} --cpu_thread 1 --tensorrt ${use_trt} --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
                     eval $command
                     command="${_script} > ${_save_log_path} 2>&1"
@@ -235,18 +242,6 @@
 cd ../../../
 # cd ../../
 echo "################### build PaddleClas demo finished ###################"
-
-# set cuda device
-GPUID=$3
-if [ ${#GPUID} -le 0 ];then
-    env="export CUDA_VISIBLE_DEVICES=0"
-else
-    env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-fi
-set CUDA_VISIBLE_DEVICES
-eval $env
-
-
 echo "################### run test ###################"
 export Count=0
 IFS="|"
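With the device setup moved to the top of the script, `test_inference_cpp.sh` now takes the config file as `$1`, the mode as `$2`, and an optional GPU id as `$3`, falling back to device 0 when `$3` is omitted. For example (`${your_params_file}` is a placeholder):

```shell
# Run on the default device (CUDA_VISIBLE_DEVICES=0).
bash test_tipc/test_inference_cpp.sh ${your_params_file} cpp_infer

# Pin the test to GPU 1 instead.
bash test_tipc/test_inference_cpp.sh ${your_params_file} cpp_infer 1
```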