diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh
index 793b89476fb829034687b442c517546f5d8a4cfc..5c4d4112ad691569914ccf9b84480db9b76fa024 100644
--- a/test_tipc/benchmark_train.sh
+++ b/test_tipc/benchmark_train.sh
@@ -225,7 +225,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         else
             IFS=";"
             unset_env=`unset CUDA_VISIBLE_DEVICES`
@@ -261,7 +261,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         fi
     done
 done
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 0821751c2d615418cbc71b0cc57caa5749231205..abba66503f0ceede4cc17f0d3fb93811a4f50a11 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -63,7 +63,7 @@ function func_shitu_cpp_inference(){
                     command="${_script} > ${_save_log_path} 2>&1"
                     eval $command
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -87,7 +87,7 @@ function func_shitu_cpp_inference(){
                     command="${_script} > ${_save_log_path} 2>&1"
                     eval $command
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -125,7 +125,7 @@ function func_cls_cpp_inference(){
                     command1="${_script} > ${_save_log_path} 2>&1"
                     eval ${command1}
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command1}" "${status_log}"
+                    status_check $last_status "${command1}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -148,7 +148,7 @@ function func_cls_cpp_inference(){
                     command="${_script} > ${_save_log_path} 2>&1"
                     eval $command
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
diff --git a/test_tipc/test_inference_jeston.sh b/test_tipc/test_inference_jeston.sh
index 2fd76e1e9e7e8c7b52d0b6838cd15840a59fe5c4..56845003908c1a9cc8ac1b76e40ec108d33e8478 100644
--- a/test_tipc/test_inference_jeston.sh
+++ b/test_tipc/test_inference_jeston.sh
@@ -71,7 +71,7 @@ if [ ${MODE} = "whole_infer" ]; then
         echo $export_cmd
         eval $export_cmd
         status_export=$?
-        status_check $status_export "${export_cmd}" "${status_log}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
     else
         save_infer_dir=${infer_model}
     fi
diff --git a/test_tipc/test_lite_arm_cpu_cpp.sh b/test_tipc/test_lite_arm_cpu_cpp.sh
index 86c340060296019d0aef798aacd95580a438e0ff..919226eea5ce38b82fad6c2130a7c6467b6ee041 100644
--- a/test_tipc/test_lite_arm_cpu_cpp.sh
+++ b/test_tipc/test_lite_arm_cpu_cpp.sh
@@ -67,7 +67,7 @@ function func_test_tipc(){
                 eval ${command1}
                 command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
                 eval ${command2}
-                status_check $? "${command2}" "${status_log}"
+                status_check $? "${command2}" "${status_log}" "${model_name}"
             done
         done
     done
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index c869f8f3bf9df900d779dcf98355ca56eeece207..45eb9387922aa0e3a4de82d6f6245d178c8ec6fc 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -55,7 +55,7 @@ function func_paddle2onnx(){
     trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
     eval $trans_model_cmd
     last_status=${PIPESTATUS[0]}
-    status_check $last_status "${trans_model_cmd}" "${status_log}"
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
 
     # python inference
     set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
@@ -64,7 +64,7 @@ function func_paddle2onnx(){
     set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
     infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
     eval $infer_model_cmd
-    status_check $last_status "${infer_model_cmd}" "${status_log}"
+    status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
 
 }
diff --git a/test_tipc/test_serving_infer.sh b/test_tipc/test_serving_infer.sh
index 3eef6b1c7a18cad56561ea4e352ca624aedc2dc6..2c677be8bdbc4c2720baa2a3dd0589030c5e4fec 100644
--- a/test_tipc/test_serving_infer.sh
+++ b/test_tipc/test_serving_infer.sh
@@ -88,7 +88,7 @@ function func_serving_cls(){
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
         pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
         eval $pipeline_cmd
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     else
@@ -98,7 +98,7 @@ function func_serving_cls(){
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
         pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
         eval $pipeline_cmd
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     fi
@@ -124,7 +124,7 @@ function func_serving_cls(){
             eval $pipeline_cmd
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
         done
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -156,7 +156,7 @@ function func_serving_cls(){
             eval $pipeline_cmd
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
         done
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -250,7 +250,7 @@ function func_serving_rec(){
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
         pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
         eval $pipeline_cmd
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     else
@@ -260,7 +260,7 @@ function func_serving_rec(){
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
         pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
         eval $pipeline_cmd
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     fi
@@ -286,7 +286,7 @@ function func_serving_rec(){
             eval $pipeline_cmd
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
         done
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -318,7 +318,7 @@ function func_serving_rec(){
             eval $pipeline_cmd
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 10s
         done
         ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 2f2b3b65a7a1b3756eca6caf49bca9f284e5e574..427005cf0601e192d01264e39ed82ed77ab57d0d 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -126,7 +126,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}"
+                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
                 done
             done
         done
@@ -151,7 +151,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}"
+                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
                 done
             done
         done
@@ -198,7 +198,7 @@ elif [[ ${MODE} = "klquant_whole_infer" ]]; then
     command="${python} ${kl_quant_cmd_value}"
     eval $command
     last_status=${PIPESTATUS[0]}
-    status_check $last_status "${command}" "${status_log}"
+    status_check $last_status "${command}" "${status_log}" "${model_name}"
     cd inference/quant_post_static_model
     ln -s __model__ inference.pdmodel
     ln -s __params__ inference.pdiparams
@@ -301,7 +301,7 @@ else
         # export FLAGS_cudnn_deterministic=True
         sleep 5
         eval $cmd
-        status_check $? "${cmd}" "${status_log}"
+        status_check $? "${cmd}" "${status_log}" "${model_name}"
         sleep 5
 
         if [[ $FILENAME == *GeneralRecognition* ]]; then
@@ -318,7 +318,7 @@ else
             set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
             eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
             eval $eval_cmd
-            status_check $? "${eval_cmd}" "${status_log}"
+            status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
             sleep 5
         fi
         # run export model
@@ -333,7 +333,7 @@ else
             set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
             export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
             eval $export_cmd
-            status_check $? "${export_cmd}" "${status_log}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
 
             #run inference
             eval $env