From 9863be35f8b7ed7b8d5819fe988bed657db67563 Mon Sep 17 00:00:00 2001
From: zhengya01
Date: Tue, 16 Aug 2022 15:18:05 +0800
Subject: [PATCH] add log_path in result.log

---
 test_tipc/common_func.sh                 |  5 +++--
 test_tipc/test_inference_cpp.sh          |  4 ++--
 test_tipc/test_inference_python.sh       |  9 +++++----
 test_tipc/test_paddle2onnx.sh            | 14 +++++++-------
 test_tipc/test_ptq_inference_python.sh   |  6 +++---
 test_tipc/test_serving_infer_cpp.sh      |  8 ++++----
 test_tipc/test_serving_infer_python.sh   | 16 ++++++++--------
 test_tipc/test_train_inference_python.sh | 12 ++++++------
 8 files changed, 38 insertions(+), 36 deletions(-)

diff --git a/test_tipc/common_func.sh b/test_tipc/common_func.sh
index f7d8a1e0..1bbf8291 100644
--- a/test_tipc/common_func.sh
+++ b/test_tipc/common_func.sh
@@ -58,10 +58,11 @@ function status_check(){
     run_command=$2
     run_log=$3
     model_name=$4
+    log_path=$5
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     fi
 }

diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index c0c7c18a..aadaa8b0 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -84,7 +84,7 @@ function func_cpp_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -117,7 +117,7 @@ function func_cpp_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
            done
        done

diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh
index 2a31a468..e9908df1 100644
--- a/test_tipc/test_inference_python.sh
+++ b/test_tipc/test_inference_python.sh
@@ -88,7 +88,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -119,7 +119,7 @@ function func_inference(){
                eval $command
                last_status=${PIPESTATUS[0]}
                eval "cat ${_save_log_path}"
-               status_check $last_status "${command}" "${status_log}" "${model_name}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
            done
        done
@@ -146,14 +146,15 @@ if [ ${MODE} = "whole_infer" ]; then
    for infer_model in ${infer_model_dir_list[*]}; do
        # run export
        if [ ${infer_run_exports[Count]} != "null" ];then
+           _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}_infermodel_${infer_model}.log"
            save_infer_dir=$(dirname $infer_model)
            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
-           export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
+           export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${_save_log_path} 2>&1 "
            echo ${infer_run_exports[Count]}
            eval $export_cmd
            status_export=$?
-           status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+           status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
        else
            save_infer_dir=${infer_model}
        fi
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 78d79d0b..bace6b2d 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -66,7 +66,7 @@ function func_paddle2onnx(){
        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_det_log} 2>&1 "
        eval $trans_model_cmd
        last_status=${PIPESTATUS[0]}
-       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"
        # trans rec
        set_dirname=$(func_set_params "--model_dir" "${rec_infer_model_dir_value}")
        set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
@@ -78,7 +78,7 @@ function func_paddle2onnx(){
        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_rec_log} 2>&1 "
        eval $trans_model_cmd
        last_status=${PIPESTATUS[0]}
-       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_rec_log}"
    elif [[ ${model_name} =~ "det" ]]; then
        # trans det
        set_dirname=$(func_set_params "--model_dir" "${det_infer_model_dir_value}")
@@ -91,7 +91,7 @@ function func_paddle2onnx(){
        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_det_log} 2>&1 "
        eval $trans_model_cmd
        last_status=${PIPESTATUS[0]}
-       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"
    elif [[ ${model_name} =~ "rec" ]]; then
        # trans rec
        set_dirname=$(func_set_params "--model_dir" "${rec_infer_model_dir_value}")
@@ -104,7 +104,7 @@ function func_paddle2onnx(){
        trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} > ${trans_rec_log} 2>&1 "
        eval $trans_model_cmd
        last_status=${PIPESTATUS[0]}
-       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+       status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_rec_log}"
    fi

    # python inference
            eval $infer_model_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+           status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            _save_log_path="${LOG_PATH}/paddle2onnx_infer_gpu.log"
            set_gpu=$(func_set_params "${use_gpu_key}" "${use_gpu}")
            eval $infer_model_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+           status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
        else
            echo "Does not support hardware other than CPU and GPU Currently!"
        fi
@@ -158,4 +158,4 @@ echo "################### run test ###################"

 export Count=0
 IFS="|"
-func_paddle2onnx
\ No newline at end of file
+func_paddle2onnx
diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh
index e2939fd5..caf3d506 100644
--- a/test_tipc/test_ptq_inference_python.sh
+++ b/test_tipc/test_ptq_inference_python.sh
@@ -84,7 +84,7 @@ function func_inference(){
                    eval $command
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                   status_check $last_status "${command}" "${status_log}" "${model_name}"
+                   status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                done
            done
        done
@@ -109,7 +109,7 @@ function func_inference(){
                eval $command
                last_status=${PIPESTATUS[0]}
                eval "cat ${_save_log_path}"
-               status_check $last_status "${command}" "${status_log}" "${model_name}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
            done
        done
@@ -145,7 +145,7 @@ if [ ${MODE} = "whole_infer" ]; then
            echo $export_cmd
            eval $export_cmd
            status_export=$?
-           status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+           status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
        else
            save_infer_dir=${infer_model}
        fi
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index 0be6a45a..10ddecf3 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -83,7 +83,7 @@ function func_serving(){
    trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_rec_log} 2>&1 "
    eval $trans_model_cmd
    last_status=${PIPESTATUS[0]}
-   status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+   status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_rec_log}"
    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
    python_list=(${python_list})
    cd ${serving_dir_value}
            web_service_cpp_cmd="nohup ${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} > ${server_log_path} 2>&1 &"
            eval $web_service_cpp_cmd
            last_status=${PIPESTATUS[0]}
-           status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+           status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
            sleep 5s
            _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
            cpp_client_cmd="${python_list[0]} ${cpp_client_py} ${det_client_value} ${rec_client_value} > ${_save_log_path} 2>&1"
            eval $cpp_client_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
+           status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
        else
            server_log_path="${LOG_PATH}/cpp_server_gpu.log"
            eval $cpp_client_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
+           status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
        fi
    done
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 4b7dfcf7..c7d305d5 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -126,19 +126,19 @@ function func_serving(){
                web_service_cmd="nohup ${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_det_model_config} ${set_rec_model_config} > ${server_log_path} 2>&1 &"
                eval $web_service_cmd
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+               status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
            elif [[ ${model_name} =~ "det" ]]; then
                set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}")
                web_service_cmd="nohup ${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_det_model_config} > ${server_log_path} 2>&1 &"
                eval $web_service_cmd
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+               status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
            elif [[ ${model_name} =~ "rec" ]]; then
                set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}")
                web_service_cmd="nohup ${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_rec_model_config} > ${server_log_path} 2>&1 &"
                eval $web_service_cmd
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+               status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
            fi
            sleep 2s
            for pipeline in ${pipeline_py[*]}; do
@@ -147,7 +147,7 @@ function func_serving(){
                eval $pipeline_cmd
                last_status=${PIPESTATUS[0]}
                eval "cat ${_save_log_path}"
-               status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+               status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                sleep 2s
            done
            ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
@@ -177,19 +177,19 @@ function func_serving(){
                    web_service_cmd="nohup ${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_det_model_config} ${set_rec_model_config} > ${server_log_path} 2>&1 &"
                    eval $web_service_cmd
                    last_status=${PIPESTATUS[0]}
-                   status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+                   status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
                elif [[ ${model_name} =~ "det" ]]; then
                    set_det_model_config=$(func_set_params "${det_server_key}" "${det_server_value}")
                    web_service_cmd="nohup ${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_det_model_config} > ${server_log_path} 2>&1 &"
                    eval $web_service_cmd
                    last_status=${PIPESTATUS[0]}
-                   status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+                   status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
                elif [[ ${model_name} =~ "rec" ]]; then
                    set_rec_model_config=$(func_set_params "${rec_server_key}" "${rec_server_value}")
                    web_service_cmd="nohup ${python} ${web_service_py} ${set_tensorrt} ${set_precision} ${set_rec_model_config} > ${server_log_path} 2>&1 &"
                    eval $web_service_cmd
                    last_status=${PIPESTATUS[0]}
-                   status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+                   status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
                fi
                sleep 2s
                for pipeline in ${pipeline_py[*]}; do
@@ -198,7 +198,7 @@ function func_serving(){
                    eval $pipeline_cmd
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                   status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                   status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                    sleep 2s
                done
                ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 545cdbba..e182fa57 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -133,7 +133,7 @@ function func_inference(){
                    eval $command
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                   status_check $last_status "${command}" "${status_log}" "${model_name}"
+                   status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                done
            done
        done
@@ -164,7 +164,7 @@ function func_inference(){
                eval $command
                last_status=${PIPESTATUS[0]}
                eval "cat ${_save_log_path}"
-               status_check $last_status "${command}" "${status_log}" "${model_name}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
            done
        done
@@ -201,7 +201,7 @@ if [ ${MODE} = "whole_infer" ]; then
            echo $export_cmd
            eval $export_cmd
            status_export=$?
-           status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+           status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
        else
            save_infer_dir=${infer_model}
        fi
@@ -298,7 +298,7 @@ else
            # run train
            eval $cmd
            eval "cat ${save_log}/train.log >> ${save_log}.log"
-           status_check $? "${cmd}" "${status_log}" "${model_name}"
+           status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"

            set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
@@ -309,7 +309,7 @@ else
                eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1 "
                eval $eval_cmd
-               status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
+               status_check $? "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
            fi
            # run export model
            if [ ${run_export} != "null" ]; then
@@ -320,7 +320,7 @@ else
                set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
                export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 "
                eval $export_cmd
-               status_check $? "${export_cmd}" "${status_log}" "${model_name}"
+               status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
                #run inference
                eval $env
--
GitLab
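
Note (outside the patch itself): after this change, status_check takes a fifth argument, the per-run log path, and echoes it into the result log next to the command, so a failed run can be traced straight to its detailed log file. Below is a minimal sketch of the updated calling convention; the model name, command, and file paths are hypothetical placeholders, and only the five-argument status_check order (status, command, status log, model name, log path) comes from the patch.

    #!/bin/bash
    source test_tipc/common_func.sh

    # Hypothetical example values -- only the status_check argument order
    # is taken from the patch; nothing below names a real test config.
    model_name="demo_model"
    status_log="test_tipc/output/results.log"
    _save_log_path="test_tipc/output/demo_infer_cpu.log"

    cmd="python3 tools/infer.py > ${_save_log_path} 2>&1"   # hypothetical command
    eval $cmd
    last_status=${PIPESTATUS[0]}

    # The fifth argument is the new part: results.log now records which
    # detailed log file belongs to this run.
    status_check $last_status "${cmd}" "${status_log}" "${model_name}" "${_save_log_path}"

On failure this appends a line of the form "Run failed with command - demo_model - <command> - test_tipc/output/demo_infer_cpu.log" to results.log, matching the updated echo in common_func.sh.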