Commit f1d5d539 authored by andyjpaddle

fix tipc infer log

Parent c1f9e807
@@ -101,6 +101,7 @@ function func_inference(){
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
+    _gpu=$7
     # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
@@ -119,7 +120,7 @@ function func_inference(){
                 fi # skip when quant model inference but precision is not int8
                 set_precision=$(func_set_params "${precision_key}" "${precision}")
-                _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+                _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                 set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                 set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -150,7 +151,7 @@ function func_inference(){
                 continue
             fi
             for batch_size in ${batch_size_list[*]}; do
-                _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                 set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                 set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -184,6 +185,7 @@ if [ ${MODE} = "whole_infer" ]; then
     # set CUDA_VISIBLE_DEVICES
     eval $env
     export Count=0
+    gpu=0
     IFS="|"
     infer_run_exports=(${infer_export_list})
     infer_quant_flag=(${infer_is_quant})
@@ -205,7 +207,7 @@ if [ ${MODE} = "whole_infer" ]; then
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
-        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "${gpu}"
         Count=$(($Count + 1))
     done
 else
@@ -328,7 +330,7 @@ else
         else
             infer_model_dir=${save_infer_path}
         fi
-        func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
+        func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "${gpu}"
         eval "unset CUDA_VISIBLE_DEVICES"
     fi
...
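
From the call sites above, func_inference takes positional arguments $1=python, $2=inference_py, $3=model dir, $4=log path, $5=image dir, $6=quant flag, and now $7=gpu id. For illustration, a minimal sketch of how the updated CPU log path resolves with the new gpus_${_gpu} segment; the variable values below are hypothetical, only the path template comes from this commit:

    # Hypothetical values for illustration; the template string is the one added in this change.
    _log_path="./test_tipc/output"
    _gpu=0
    use_mkldnn="False"
    threads=1
    precision="fp32"
    batch_size=1
    _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
    echo "${_save_log_path}"
    # prints: ./test_tipc/output/python_infer_cpu_gpus_0_usemkldnn_False_threads_1_precision_fp32_batchsize_1.log

With the GPU id embedded in the file name, logs written by runs on different devices no longer overwrite each other.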