From 797d5be141af2c97ccb382a2fe0964a7117c881e Mon Sep 17 00:00:00 2001
From: Birdylx <29754889+Birdylx@users.noreply.github.com>
Date: Thu, 15 Sep 2022 17:24:41 +0800
Subject: [PATCH] update status check (#691)

---
 test_tipc/common_func.sh                      |  9 ++++----
 test_tipc/configs/edvr/train_infer_python.txt |  2 +-
 test_tipc/test_train_inference_python.sh      | 21 +++++++++++--------
 3 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/test_tipc/common_func.sh b/test_tipc/common_func.sh
index 3f0fa66..7e349f0 100644
--- a/test_tipc/common_func.sh
+++ b/test_tipc/common_func.sh
@@ -23,7 +23,7 @@ function func_set_params(){
         echo " "
     elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then
         echo " "
-    else 
+    else
         echo "${key}=${value}"
     fi
 }
@@ -56,10 +56,11 @@ function status_check(){
     last_status=$1   # the exit code
     run_command=$2
     run_log=$3
+    model_name=$4
+    log_path=$5
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     fi
 }
-
diff --git a/test_tipc/configs/edvr/train_infer_python.txt b/test_tipc/configs/edvr/train_infer_python.txt
index 9aaa593..658cbf2 100644
--- a/test_tipc/configs/edvr/train_infer_python.txt
+++ b/test_tipc/configs/edvr/train_infer_python.txt
@@ -8,7 +8,7 @@ total_iters:lite_train_lite_infer=100
 output_dir:./output/
 dataset.train.batch_size:lite_train_lite_infer=4
 pretrained_model:null
-train_model_name:basicvsr_reds*/*checkpoint.pdparams
+train_model_name:edvr_m_wo_tsa*/*checkpoint.pdparams
 train_infer_img_dir:./data/basicvsr_reds/test
 null:null
 ##
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 544c3f0..d10e9db 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -108,7 +108,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -138,7 +138,7 @@ function func_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
@@ -168,12 +168,13 @@ if [ ${MODE} = "whole_infer" ]; then
             save_infer_dir=$(dirname $infer_model)
             set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
             set_save_infer_key="${save_infer_key} ${save_infer_dir}"
-            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
+            export_log_path="${LOG_PATH}_export_${Count}.log"
+            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1"
             echo ${infer_run_exports[Count]}
             echo $export_cmd
             eval $export_cmd
             status_export=$?
-            status_check $status_export "${export_cmd}" "${status_log}"
+            status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
         else
             save_infer_dir=${infer_model}
         fi
@@ -254,7 +255,7 @@ else
         log_name=${train_model_name/checkpoint.pdparams/.txt}
         train_log_path=$( echo "${save_log}/${log_name}")
         eval "cat ${train_log_path} >> ${save_log}.log"
-        status_check $? "${cmd}" "${status_log}"
+        status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"

         set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
         # save norm trained models to set pretrain for pact training and fpgm training
@@ -262,9 +263,10 @@ else
         # run eval
         if [ ${eval_py} != "null" ]; then
             set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
-            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
+            eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
+            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1"
             eval $eval_cmd
-            status_check $? "${eval_cmd}" "${status_log}"
+            status_check $? "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
         fi
         # run export model
         if [ ${run_export} != "null" ]; then
@@ -273,9 +275,10 @@ else
             set_export_weight="${save_log}/${train_model_name}"
             set_export_weight_path=$( echo ${set_export_weight})
             set_save_infer_key="${save_infer_key} ${save_infer_path}"
-            export_cmd="${python} ${run_export} ${set_export_weight_path} ${set_save_infer_key} > ${save_log}_export.log 2>&1"
+            export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
+            export_cmd="${python} ${run_export} ${set_export_weight_path} ${set_save_infer_key} > ${export_log_path} 2>&1"
             eval "$export_cmd"
-            status_check $? "${export_cmd}" "${status_log}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"

             #run inference
             eval $env
-- 
GitLab
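For reference, below is a minimal sketch of the call pattern this patch standardizes on: status_check now takes the model name and the per-step log path as two extra arguments. The function body is reproduced from the diff above; the command, model name, and log paths in the example call are placeholders for illustration, not values taken from the patch.

#!/usr/bin/env bash
# Sketch of the updated status_check interface introduced by this patch.

function status_check(){
    last_status=$1   # the exit code of the command under test
    run_command=$2   # the command string that was executed
    run_log=$3       # aggregate status log, appended to via tee
    model_name=$4    # new in this patch: model identifier included in the report line
    log_path=$5      # new in this patch: path of the per-step log for the command
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
    fi
}

# Example call site, mirroring the pattern used in test_train_inference_python.sh
# after this change (command and paths below are illustrative placeholders):
mkdir -p ./output
cmd="echo 'pretend training run'"
eval "$cmd > ./output/edvr_train.log 2>&1"
status_check $? "${cmd}" "./output/results.log" "edvr_m_wo_tsa" "./output/edvr_train.log"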