Unverified commit 3ecc2a22, authored by Double_V, committed by GitHub

fix test.sh

Parent 6ea53b07
@@ -174,9 +174,11 @@ function func_inference(){
                     set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
                     set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
                     set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-                    command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} 2>&1 | tee ${_save_log_path} "
+                    command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
                     eval $command
-                    status_check $? "${command}" "${status_log}"
+                    last_status=${PIPESTATUS[0]}
+                    eval "cat ${_save_log_path}"
+                    status_check $last_status "${command}" "${status_log}"
                 done
             done
         done
@@ -197,9 +199,12 @@ function func_inference(){
                 set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
                 set_precision=$(func_set_params "${precision_key}" "${precision}")
                 set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
-                command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} 2>&1 | tee ${_save_log_path}"
+                command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
                 eval $command
-                status_check $? "${command}" "${status_log}"
+                last_status=${PIPESTATUS[0]}
+                eval "cat ${_save_log_path}"
+                status_check $last_status "${command}" "${status_log}"
             done
         done
     done
@@ -348,4 +353,3 @@ else
     done # done with: for autocast in ${autocast_list[*]}; do
 done # done with: for gpu in ${gpu_list[*]}; do
 fi  # end if [ ${MODE} = "infer" ]; then
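For context (not part of the commit): with the old `cmd 2>&1 | tee ${_save_log_path}` pattern, `$?` reports the exit status of `tee`, so a failing inference command could still pass `status_check`. The new pattern redirects output to the log file, reads the command's own status from `${PIPESTATUS[0]}`, and then cats the log so it still appears on the console. A minimal bash sketch contrasting the two patterns is below; `false` stands in for the inference command, and `old.log`/`new.log` are placeholder names, not from test.sh.

# Minimal sketch (bash), illustrating the exit-status difference only.

# Old pattern: the pipeline's status is tee's, so the failure is masked.
false 2>&1 | tee old.log
echo "old pattern sees status: $?"        # prints 0, even though the command failed

# New pattern: redirect to the log, read the command's own status, then show the log.
false > new.log 2>&1
last_status=${PIPESTATUS[0]}              # exit status of the command itself (1 here)
cat new.log                               # the log still reaches the console
echo "new pattern sees status: ${last_status}"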