Unverified commit 25e858cb authored by zhengya01 and committed by GitHub

add log_path in result.log (#6668)

上级 28199de7
...@@ -86,7 +86,7 @@ function func_cpp_inference(){ ...@@ -86,7 +86,7 @@ function func_cpp_inference(){
eval $command eval $command
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}" eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done done
done done
done done
...@@ -112,7 +112,7 @@ function func_cpp_inference(){ ...@@ -112,7 +112,7 @@ function func_cpp_inference(){
eval $command eval $command
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}" eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done done
done done
else else
...@@ -209,7 +209,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do ...@@ -209,7 +209,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1" eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$? status_export=$?
cat ${export_log_path} cat ${export_log_path}
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi fi
#run inference #run inference
......
...@@ -81,7 +81,7 @@ function func_paddle2onnx_inference(){ ...@@ -81,7 +81,7 @@ function func_paddle2onnx_inference(){
eval "${trans_model_cmd} > ${trans_log_path} 2>&1" eval "${trans_model_cmd} > ${trans_log_path} 2>&1"
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
cat ${trans_log_path} cat ${trans_log_path}
status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log_path}"
# python inference # python inference
echo "################### run onnx infer ###################" echo "################### run onnx infer ###################"
...@@ -94,7 +94,7 @@ function func_paddle2onnx_inference(){ ...@@ -94,7 +94,7 @@ function func_paddle2onnx_inference(){
eval "${infer_model_cmd} > ${_save_log_path} 2>&1" eval "${infer_model_cmd} > ${_save_log_path} 2>&1"
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
cat ${_save_log_path} cat ${_save_log_path}
status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
} }
export Count=0 export Count=0
...@@ -120,7 +120,7 @@ for infer_mode in ${infer_mode_list[*]}; do ...@@ -120,7 +120,7 @@ for infer_mode in ${infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1" eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$? status_export=$?
cat ${export_log_path} cat ${export_log_path}
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi fi
#run inference #run inference
......
...@@ -72,7 +72,7 @@ function func_ptq_inference(){ ...@@ -72,7 +72,7 @@ function func_ptq_inference(){
eval $command eval $command
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}" eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done done
done done
done done
...@@ -87,7 +87,7 @@ function func_ptq_inference(){ ...@@ -87,7 +87,7 @@ function func_ptq_inference(){
eval $command eval $command
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}" eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done done
fi fi
done done
...@@ -108,7 +108,7 @@ echo $ptq_cmd ...@@ -108,7 +108,7 @@ echo $ptq_cmd
eval "${ptq_cmd} > ${export_log_path} 2>&1" eval "${ptq_cmd} > ${export_log_path} 2>&1"
status_export=$? status_export=$?
cat ${export_log_path} cat ${export_log_path}
status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}" status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference #run inference
set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}") set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
......
...@@ -80,14 +80,14 @@ function func_serving_inference(){ ...@@ -80,14 +80,14 @@ function func_serving_inference(){
eval $web_service_cmd eval $web_service_cmd
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
cat ${server_log_path} cat ${server_log_path}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s sleep 5s
# run http client # run http client
http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1" http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
eval $http_client_cmd eval $http_client_cmd
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
cat ${client_log_path} cat ${client_log_path}
status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9 ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
sleep 2s sleep 2s
done done
...@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do ...@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1" eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$? status_export=$?
cat ${export_log_path} cat ${export_log_path}
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi fi
#run inference #run inference
......
...@@ -71,14 +71,14 @@ function func_serving_inference(){ ...@@ -71,14 +71,14 @@ function func_serving_inference(){
eval $web_service_cmd eval $web_service_cmd
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
cat ${server_log_path} cat ${server_log_path}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s sleep 5s
# run http client # run http client
http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1" http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
eval $http_client_cmd eval $http_client_cmd
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
cat ${client_log_path} cat ${client_log_path}
status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9 ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
sleep 2s sleep 2s
done done
...@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do ...@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1" eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$? status_export=$?
cat ${export_log_path} cat ${export_log_path}
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi fi
#run inference #run inference
......
...@@ -105,6 +105,7 @@ function func_inference(){ ...@@ -105,6 +105,7 @@ function func_inference(){
_log_path=$4 _log_path=$4
_img_dir=$5 _img_dir=$5
_flag_quant=$6 _flag_quant=$6
_gpu=$7
# inference # inference
for use_gpu in ${use_gpu_list[*]}; do for use_gpu in ${use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
...@@ -114,7 +115,7 @@ function func_inference(){ ...@@ -114,7 +115,7 @@ function func_inference(){
fi fi
for threads in ${cpu_threads_list[*]}; do for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log" _save_log_path="${_log_path}/python_infer_cpu_gpus_${gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
...@@ -125,7 +126,7 @@ function func_inference(){ ...@@ -125,7 +126,7 @@ function func_inference(){
eval $command eval $command
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}" eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done done
done done
done done
...@@ -140,7 +141,7 @@ function func_inference(){ ...@@ -140,7 +141,7 @@ function func_inference(){
fi fi
fi fi
for batch_size in ${batch_size_list[*]}; do for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_gpu_mode_${precision}_batchsize_${batch_size}.log" _save_log_path="${_log_path}/python_infer_gpu_gpus_${gpu}_mode_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
...@@ -151,7 +152,7 @@ function func_inference(){ ...@@ -151,7 +152,7 @@ function func_inference(){
eval $command eval $command
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}" eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done done
done done
else else
...@@ -171,6 +172,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then ...@@ -171,6 +172,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
eval $env eval $env
Count=0 Count=0
gpu=0
IFS="|" IFS="|"
infer_quant_flag=(${infer_is_quant_list}) infer_quant_flag=(${infer_is_quant_list})
for infer_mode in ${infer_mode_list[*]}; do for infer_mode in ${infer_mode_list[*]}; do
...@@ -198,12 +200,12 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then ...@@ -198,12 +200,12 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} " export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
echo $export_cmd echo $export_cmd
eval $export_cmd eval $export_cmd
status_check $? "${export_cmd}" "${status_log}" "${model_name}" status_check $? "${export_cmd}" "${status_log}" "${model_name}"
#run inference #run inference
save_export_model_dir="${save_export_value}/${model_name}" save_export_model_dir="${save_export_value}/${model_name}"
is_quant=${infer_quant_flag[Count]} is_quant=${infer_quant_flag[Count]}
func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "{gpu}"
Count=$((${Count} + 1)) Count=$((${Count} + 1))
done done
else else
...@@ -304,7 +306,7 @@ else ...@@ -304,7 +306,7 @@ else
eval "${cmd} > ${train_log_path} 2>&1" eval "${cmd} > ${train_log_path} 2>&1"
last_status=$? last_status=$?
cat ${train_log_path} cat ${train_log_path}
status_check $last_status "${cmd}" "${status_log}" "${model_name}" status_check $last_status "${cmd}" "${status_log}" "${model_name}" "${train_log_path}"
set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}") set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
# run eval # run eval
...@@ -315,7 +317,7 @@ else ...@@ -315,7 +317,7 @@ else
eval "${eval_cmd} > ${eval_log_path} 2>&1" eval "${eval_cmd} > ${eval_log_path} 2>&1"
last_status=$? last_status=$?
cat ${eval_log_path} cat ${eval_log_path}
status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}" status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
fi fi
# run export model # run export model
if [ ${run_export} != "null" ]; then if [ ${run_export} != "null" ]; then
...@@ -336,7 +338,7 @@ else ...@@ -336,7 +338,7 @@ else
eval "${export_cmd} > ${export_log_path} 2>&1" eval "${export_cmd} > ${export_log_path} 2>&1"
last_status=$? last_status=$?
cat ${export_log_path} cat ${export_log_path}
status_check $last_status "${export_cmd}" "${status_log}" "${model_name}" status_check $last_status "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference #run inference
if [ ${export_onnx_key} != "export_onnx" ]; then if [ ${export_onnx_key} != "export_onnx" ]; then
...@@ -344,7 +346,7 @@ else ...@@ -344,7 +346,7 @@ else
eval "cp ${save_export_model_dir}/* ${save_log}/" eval "cp ${save_export_model_dir}/* ${save_log}/"
fi fi
eval $env eval $env
func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "{gpu}"
eval "unset CUDA_VISIBLE_DEVICES" eval "unset CUDA_VISIBLE_DEVICES"
fi fi
......
...@@ -51,9 +51,10 @@ function status_check(){ ...@@ -51,9 +51,10 @@ function status_check(){
run_command=$2 run_command=$2
run_log=$3 run_log=$3
model_name=$4 model_name=$4
log_path=$5
if [ $last_status -eq 0 ]; then if [ $last_status -eq 0 ]; then
echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log} echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
else else
echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log} echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
fi fi
} }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册