From 25e858cb34e28494a4b3af19e472fd90d64a6243 Mon Sep 17 00:00:00 2001
From: zhengya01 <43601548+zhengya01@users.noreply.github.com>
Date: Wed, 17 Aug 2022 14:14:27 +0800
Subject: [PATCH] add log_path in result.log (#6668)

---
 test_tipc/test_inference_cpp.sh          |  6 +++---
 test_tipc/test_paddle2onnx.sh            |  6 +++---
 test_tipc/test_ptq_inference_python.sh   |  6 +++---
 test_tipc/test_serving_infer_cpp.sh      |  6 +++---
 test_tipc/test_serving_infer_python.sh   |  6 +++---
 test_tipc/test_train_inference_python.sh | 22 ++++++++++++----------
 test_tipc/utils_func.sh                  |  5 +++--
 7 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 999bcbb31..270ee7039 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -86,7 +86,7 @@ function func_cpp_inference(){
                         eval $command
                         last_status=${PIPESTATUS[0]}
                         eval "cat ${_save_log_path}"
-                        status_check $last_status "${command}" "${status_log}" "${model_name}"
+                        status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                     done
                 done
             done
@@ -112,7 +112,7 @@ function func_cpp_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         else
@@ -209,7 +209,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi

     #run inference
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 39897af0d..df4e7a0dc 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -81,7 +81,7 @@ function func_paddle2onnx_inference(){
     eval "${trans_model_cmd} > ${trans_log_path} 2>&1"
     last_status=${PIPESTATUS[0]}
     cat ${trans_log_path}
-    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log_path}"

     # python inference
     echo "################### run onnx infer ###################"
@@ -94,7 +94,7 @@ function func_paddle2onnx_inference(){
     eval "${infer_model_cmd} > ${_save_log_path} 2>&1"
     last_status=${PIPESTATUS[0]}
     cat ${_save_log_path}
-    status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+    status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
 }

 export Count=0
@@ -120,7 +120,7 @@ for infer_mode in ${infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi

     #run inference
diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh
index d8c811f87..6371d2ab3 100644
--- a/test_tipc/test_ptq_inference_python.sh
+++ b/test_tipc/test_ptq_inference_python.sh
@@ -72,7 +72,7 @@ function func_ptq_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -87,7 +87,7 @@ function func_ptq_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         fi
     done
@@ -108,7 +108,7 @@ echo $ptq_cmd
 eval "${ptq_cmd} > ${export_log_path} 2>&1"
 status_export=$?
 cat ${export_log_path}
-status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}" "${export_log_path}"

 #run inference
 set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index 3aa7aff04..4be299c16 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -80,14 +80,14 @@ function func_serving_inference(){
         eval $web_service_cmd
         last_status=${PIPESTATUS[0]}
         cat ${server_log_path}
-        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
         sleep 5s
         # run http client
         http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
         eval $http_client_cmd
         last_status=${PIPESTATUS[0]}
         cat ${client_log_path}
-        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
         ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
         sleep 2s
     done
@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi

     #run inference
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 300edbf65..fd7cc07b1 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -71,14 +71,14 @@ function func_serving_inference(){
         eval $web_service_cmd
         last_status=${PIPESTATUS[0]}
         cat ${server_log_path}
-        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
         sleep 5s
         # run http client
         http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
         eval $http_client_cmd
         last_status=${PIPESTATUS[0]}
         cat ${client_log_path}
-        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
         ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
         sleep 2s
     done
@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi

     #run inference
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 6da0b030f..852ad4eac 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -105,6 +105,7 @@ function func_inference(){
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
+    _gpu=$7
     # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
@@ -114,7 +115,7 @@ function func_inference(){
             fi
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/python_infer_cpu_gpus_${gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                     set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                     set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -125,7 +126,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -140,7 +141,7 @@ function func_inference(){
                 fi
             fi
             for batch_size in ${batch_size_list[*]}; do
-                _save_log_path="${_log_path}/python_infer_gpu_mode_${precision}_batchsize_${batch_size}.log"
+                _save_log_path="${_log_path}/python_infer_gpu_gpus_${gpu}_mode_${precision}_batchsize_${batch_size}.log"
                 set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                 set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -151,7 +152,7 @@ function func_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     else
@@ -171,6 +172,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
     eval $env

     Count=0
+    gpu=0
     IFS="|"
     infer_quant_flag=(${infer_is_quant_list})
     for infer_mode in ${infer_mode_list[*]}; do
@@ -198,12 +200,12 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
             export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
             echo $export_cmd
             eval $export_cmd
-            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
         fi
         #run inference
         save_export_model_dir="${save_export_value}/${model_name}"
         is_quant=${infer_quant_flag[Count]}
-        func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "{gpu}"
         Count=$((${Count} + 1))
     done
 else
@@ -304,7 +306,7 @@ else
             eval "${cmd} > ${train_log_path} 2>&1"
             last_status=$?
             cat ${train_log_path}
-            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}" "${train_log_path}"

             set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
             # run eval
@@ -315,7 +317,7 @@ else
                 eval "${eval_cmd} > ${eval_log_path} 2>&1"
                 last_status=$?
                 cat ${eval_log_path}
-                status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
             fi
             # run export model
             if [ ${run_export} != "null" ]; then
@@ -336,7 +338,7 @@ else
                 eval "${export_cmd} > ${export_log_path} 2>&1"
                 last_status=$?
                 cat ${export_log_path}
-                status_check $last_status "${export_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"

                 #run inference
                 if [ ${export_onnx_key} != "export_onnx" ]; then
@@ -344,7 +346,7 @@ else
                     eval "cp ${save_export_model_dir}/* ${save_log}/"
                 fi
                 eval $env
-                func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
+                func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "{gpu}"

                 eval "unset CUDA_VISIBLE_DEVICES"
             fi
diff --git a/test_tipc/utils_func.sh b/test_tipc/utils_func.sh
index 023bdf5bf..4f52f34cc 100644
--- a/test_tipc/utils_func.sh
+++ b/test_tipc/utils_func.sh
@@ -51,9 +51,10 @@ function status_check(){
     run_command=$2
     run_log=$3
     model_name=$4
+    log_path=$5
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     fi
 }
--
GitLab