Unverified commit 25e858cb authored by zhengya01, committed by GitHub

add log_path in result.log (#6668)

Parent 28199de7
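Every hunk below applies the same change: the command under test already redirects its output to a per-run log file, and that file's path is now passed to status_check as a new fifth argument so it gets recorded in result.log. A minimal sketch of the pattern, assuming illustrative values throughout (the model name, script, and paths are placeholders, not taken from the diff; status_check is the helper shown in the final hunk):

# All values here are hypothetical stand-ins for what the real
# scripts derive from their config files.
model_name="sample_model"
status_log="./output/results.log"
_save_log_path="./output/python_infer_gpu_batchsize_1.log"

# Run the command, capturing stdout and stderr in the per-run log ...
command="python infer.py > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}

# ... then record the exit status together with where the full log lives.
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"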
@@ -86,7 +86,7 @@ function func_cpp_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}" "${model_name}"
+status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
@@ -112,7 +112,7 @@ function func_cpp_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}" "${model_name}"
+status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
else
@@ -209,7 +209,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
-status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi
#run inference
......
@@ -81,7 +81,7 @@ function func_paddle2onnx_inference(){
eval "${trans_model_cmd} > ${trans_log_path} 2>&1"
last_status=${PIPESTATUS[0]}
cat ${trans_log_path}
-status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log_path}"
# python inference
echo "################### run onnx infer ###################"
@@ -94,7 +94,7 @@ function func_paddle2onnx_inference(){
eval "${infer_model_cmd} > ${_save_log_path} 2>&1"
last_status=${PIPESTATUS[0]}
cat ${_save_log_path}
-status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
}
export Count=0
@@ -120,7 +120,7 @@ for infer_mode in ${infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
-status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi
#run inference
......
@@ -72,7 +72,7 @@ function func_ptq_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}" "${model_name}"
+status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
@@ -87,7 +87,7 @@ function func_ptq_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}" "${model_name}"
+status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
fi
done
@@ -108,7 +108,7 @@ echo $ptq_cmd
eval "${ptq_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
-status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference
set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
......
@@ -80,14 +80,14 @@ function func_serving_inference(){
eval $web_service_cmd
last_status=${PIPESTATUS[0]}
cat ${server_log_path}
-status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
# run http client
http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
eval $http_client_cmd
last_status=${PIPESTATUS[0]}
cat ${client_log_path}
-status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
sleep 2s
done
@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
-status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi
#run inference
......
@@ -71,14 +71,14 @@ function func_serving_inference(){
eval $web_service_cmd
last_status=${PIPESTATUS[0]}
cat ${server_log_path}
-status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
# run http client
http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
eval $http_client_cmd
last_status=${PIPESTATUS[0]}
cat ${client_log_path}
-status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
sleep 2s
done
@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
eval "${export_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
-status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
fi
#run inference
......
@@ -105,6 +105,7 @@ function func_inference(){
_log_path=$4
_img_dir=$5
_flag_quant=$6
+_gpu=$7
# inference
for use_gpu in ${use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
@@ -114,7 +115,7 @@ function func_inference(){
fi
for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do
-_save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
+_save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -125,7 +126,7 @@ function func_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}" "${model_name}"
+status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
@@ -140,7 +141,7 @@ function func_inference(){
fi
fi
for batch_size in ${batch_size_list[*]}; do
-_save_log_path="${_log_path}/python_infer_gpu_mode_${precision}_batchsize_${batch_size}.log"
+_save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_mode_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -151,7 +152,7 @@ function func_inference(){
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
-status_check $last_status "${command}" "${status_log}" "${model_name}"
+status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
else
@@ -171,6 +172,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
eval $env
Count=0
+gpu=0
IFS="|"
infer_quant_flag=(${infer_is_quant_list})
for infer_mode in ${infer_mode_list[*]}; do
@@ -198,12 +200,12 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
echo $export_cmd
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}" "${model_name}"
#run inference
save_export_model_dir="${save_export_value}/${model_name}"
is_quant=${infer_quant_flag[Count]}
-func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "${gpu}"
Count=$((${Count} + 1))
done
else
@@ -304,7 +306,7 @@ else
eval "${cmd} > ${train_log_path} 2>&1"
last_status=$?
cat ${train_log_path}
-status_check $last_status "${cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${cmd}" "${status_log}" "${model_name}" "${train_log_path}"
set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
# run eval
@@ -315,7 +317,7 @@ else
eval "${eval_cmd} > ${eval_log_path} 2>&1"
last_status=$?
cat ${eval_log_path}
-status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
fi
# run export model
if [ ${run_export} != "null" ]; then
@@ -336,7 +338,7 @@ else
eval "${export_cmd} > ${export_log_path} 2>&1"
last_status=$?
cat ${export_log_path}
-status_check $last_status "${export_cmd}" "${status_log}" "${model_name}"
+status_check $last_status "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference
if [ ${export_onnx_key} != "export_onnx" ]; then
@@ -344,7 +346,7 @@ else
eval "cp ${save_export_model_dir}/* ${save_log}/"
fi
eval $env
-func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
+func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "${gpu}"
eval "unset CUDA_VISIBLE_DEVICES"
fi
......
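The second thread running through the diff is the GPU id: callers now forward ${gpu} as a seventh positional argument, func_inference binds it to _gpu, and the value is embedded in each per-run log file name. A stripped-down sketch of that flow, with hypothetical arguments (only the parameter positions mirror the scripts above):

# Reduced, illustrative version of the parameter threading.
function demo_func_inference(){
    _log_path=$4
    _gpu=$7
    # The gpu id becomes part of the log file name, mirroring the
    # python_infer_gpu_gpus_... pattern in the hunks above.
    _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_mode_fp32_batchsize_1.log"
    echo "would write to: ${_save_log_path}"
}

gpu=0
demo_func_inference "python" "infer.py" "model_dir" "./output" "img_dir" "False" "${gpu}"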
@@ -51,9 +51,10 @@ function status_check(){
run_command=$2
run_log=$3
model_name=$4
+log_path=$5
if [ $last_status -eq 0 ]; then
-echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
+echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
else
-echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
+echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
fi
}
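With the extra argument in place, every entry that status_check appends to the result log now ends with the path of the matching log file. A hedged usage example (values illustrative; the ANSI color codes from the echo above are omitted in the shown output):

# Example call using the new fifth argument:
status_check 0 "python infer.py" "results.log" "sample_model" "output/infer_gpu_bs1.log"
# Appends a line like this to results.log:
#   Run successfully with command - sample_model - python infer.py - output/infer_gpu_bs1.log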