Unverified commit ec21470a, authored by cuicheng01 and committed by GitHub

Merge pull request #2258 from TingquanGao/dev/tipc_fix_log_path

tipc: fix log path
@@ -77,9 +77,10 @@ function status_check(){
run_command=$2
run_log=$3
model_name=$4
log_path=$5
if [ $last_status -eq 0 ]; then
echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} ! \033[0m" | tee -a ${run_log}
else
echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} ! \033[0m" | tee -a ${run_log}
fi
}
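The helper now takes the per-run log path as a fifth argument and echoes it alongside the command, so a failing run can be traced straight to its log. A hypothetical call site (the model name and paths below are illustrative, not taken from this patch):
run_cmd="python3 tools/infer.py > ./output/MobileNetV3/infer.log 2>&1"
eval ${run_cmd}
last_status=${PIPESTATUS[0]}
status_check ${last_status} "${run_cmd}" "./output/results.log" "MobileNetV3" "./output/MobileNetV3/infer.log"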
@@ -323,8 +323,8 @@ if [[ ${MODE} = "paddle2onnx_infer" ]]; then
# prepare paddle2onnx env
python_name=$(func_parser_value "${lines[2]}")
inference_model_url=$(func_parser_value "${lines[10]}")
tar_name=${inference_model_url##*/}
tar_name=$(func_get_url_file_name "$inference_model_url")
${python_name} -m pip install onnx
${python_name} -m pip install paddle2onnx
${python_name} -m pip install onnxruntime
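The expansion ${inference_model_url##*/} keeps everything after the last slash, including any ?query suffix a download URL may carry; func_get_url_file_name (defined in common_func.sh, not shown in this diff) centralizes the extraction. A minimal sketch of such a helper, assuming its job is to drop a query suffix and return the basename:
function func_get_url_file_name(){
    local url="${1%%\?*}"    # assumption: strip any ?query suffix first
    echo "${url##*/}"        # then keep the part after the last slash
}
tar_name=$(func_get_url_file_name "https://example.com/models/model_infer.tar?raw=true")
echo ${tar_name}             # model_infer.tar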
@@ -37,7 +37,8 @@ cpp_benchmark_value=$(func_parser_value "${lines[16]}")
generate_yaml_cmd=$(func_parser_value "${lines[17]}")
transform_index_cmd=$(func_parser_value "${lines[18]}")
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_cpp.log"
# generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
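The move from a relative to an absolute LOG_PATH is the core of this fix: several TIPC scripts cd into deploy/ (or push work onto a device) before writing logs, so a path anchored at "./test_tipc/output" silently resolves somewhere else after the cd. A self-contained illustration (directory names are examples):
LOG_PATH="./test_tipc/output"                  # relative: resolved against the current directory
cd deploy
touch "${LOG_PATH}/demo.log"                   # fails: deploy/test_tipc/output does not exist
cd ..
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output"   # absolute: valid from any working directory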
@@ -70,7 +71,7 @@ function func_shitu_cpp_inference(){
command="${_script} > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${command}" "${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
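A note on the status capture used throughout these hunks: ${PIPESTATUS[0]} is the exit code of the first command in the most recent pipeline. With a plain redirection such as cmd > log 2>&1 there is no pipe, so it equals $?, but the array form matters as soon as a pipe is involved:
false | tee /dev/null
status=(${PIPESTATUS[@]})    # snapshot immediately; the next command overwrites it
echo "first: ${status[0]}, last: ${status[1]}"    # first: 1, last: 0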
@@ -94,7 +95,7 @@ function func_shitu_cpp_inference(){
command="${_script} > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${command}" "${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
@@ -126,13 +127,12 @@ function func_cls_cpp_inference(){
precision="int8"
fi
_save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
eval $command
command1="${_script} > ${_save_log_path} 2>&1"
eval ${command1}
last_status=${PIPESTATUS[0]}
status_check $last_status "${command1}" "${status_log}" "${model_name}"
status_check $last_status "${command1}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
@@ -155,7 +155,7 @@ function func_cls_cpp_inference(){
command="${_script} > ${_save_log_path} 2>&1"
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${command}" "${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
@@ -42,7 +42,8 @@ infer_key1=$(func_parser_key "${lines[17]}")
infer_value1=$(func_parser_value "${lines[17]}")
LOG_PATH="./test_tipc/output"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
@@ -71,7 +72,7 @@ if [ ${MODE} = "whole_infer" ]; then
echo $export_cmd
eval $export_cmd
status_export=$?
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" ""
else
save_infer_dir=${infer_model}
fi
#!/bin/bash
source test_tipc/common_func.sh
current_path=$PWD
IFS=$'\n'
@@ -33,7 +32,8 @@ num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
LOG_PATH=${current_path}"/output"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
@@ -65,9 +65,9 @@ function func_test_tipc(){
real_inference_cmd=$(echo ${inference_cmd} | awk -F " " '{print path $1" "path $2" "path $3}' path="$lite_arm_work_path")
command1="adb push ${_basic_config} ${lite_arm_work_path}"
eval ${command1}
command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
eval ${command2}
status_check $? "${command2}" "${status_log}" "${model_name}"
status_check $? "${command2}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
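One caveat with the adb invocation above (general adb behavior, unchanged by this patch): the redirection runs on the host, and older adb versions return adb's own exit status rather than the on-device command's, so the $? passed to status_check may not reflect a device-side failure. A common hardening, shown with placeholder names:
adb shell './clas_infer; echo "DEVICE_EXIT:$?"' > infer.log 2>&1
grep -q "DEVICE_EXIT:0" infer.log    # succeeds only if the on-device binary exited 0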
@@ -2,7 +2,7 @@
source test_tipc/common_func.sh
FILENAME=$1
MODE=$2
MODE="paddle2onnx_infer"
# parser params
dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
@@ -36,7 +36,8 @@ inference_hardware_value=$(func_parser_value "${lines[14]}")
inference_config_key=$(func_parser_key "${lines[15]}")
inference_config_value=$(func_parser_value "${lines[15]}")
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_paddle2onnx.log"
@@ -46,27 +47,29 @@ function func_paddle2onnx(){
_script=$1
# paddle2onnx
_save_log_path=".${LOG_PATH}/paddle2onnx_infer_cpu.log"
set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_save_model=$(func_set_params "${save_file_key}" "${save_file_value}")
set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} --enable_dev_version=False"
trans_log="${LOG_PATH}/trans_model.log"
trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} --enable_dev_version=False > ${trans_log} 2>&1"
eval $trans_model_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
# python inference
if [[ ${inference_py} != "null" ]]; then
_save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}")
set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
eval $infer_model_cmd
last_status=${PIPESTATUS[0]}
status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
fi
}
@@ -94,7 +94,8 @@ if [[ $MODE = 'benchmark_train' ]]; then
epoch_num=1
fi
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
@@ -123,7 +124,7 @@ function func_inference() {
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "../${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}"
done
done
done
@@ -145,7 +146,7 @@ function func_inference() {
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "../${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}"
done
done
done
@@ -168,6 +169,6 @@ if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ];
ln -s __params__ inference.pdiparams
cd ../../deploy
is_quant=True
func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
cd ..
fi
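Note the asymmetry in the updated call: the model directory is still passed as "../${infer_model_dir_list}/quant_post_static_model" because the script has just cd'd into deploy/ and the models live one level up, while LOG_PATH no longer needs a "../" prefix precisely because it is now absolute. Schematically, with illustrative names:
cd deploy
# model dir is relative to deploy/ (one level up); LOG_PATH is absolute, so cd-proof
func_inference "python3" "python/predict_cls.py" "../inference/quant_post_static_model" "${LOG_PATH}" "./images" True
cd ..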
@@ -38,10 +38,10 @@ pipeline_py=$(func_parser_value "${lines[13]}")
function func_serving_cls(){
LOG_PATH="test_tipc/output/${model_name}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/serving_infer"
mkdir -p ${LOG_PATH}
LOG_PATH="../../${LOG_PATH}"
status_log="${LOG_PATH}/results_serving.log"
status_log="${LOG_PATH}/results_cpp_serving.log"
IFS='|'
# pdserving
@@ -53,8 +53,11 @@ function func_serving_cls(){
for python_ in ${python[*]}; do
if [[ ${python_} =~ "python" ]]; then
trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
trans_log="${LOG_PATH}/cpp_trans_model.log"
trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_log} 2>&1"
eval ${trans_model_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
break
fi
done
@@ -102,32 +105,34 @@ function func_serving_cls(){
for use_gpu in ${web_use_gpu_list[*]}; do
if [[ ${use_gpu} = "null" ]]; then
web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
server_log_path="${LOG_PATH}/cpp_server_cpu.log"
web_service_cpp_cmd="nohup ${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 > ${server_log_path} 2>&1 &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
_save_log_path="${LOG_PATH}/cpp_client_cpu.log"
pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
eval "${python_} -m paddle_serving_server.serve stop"
sleep 5s
else
web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
server_log_path="${LOG_PATH}/cpp_server_gpu.log"
web_service_cpp_cmd="nohup ${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} > ${server_log_path} 2>&1 &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 8s
_save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
_save_log_path="${LOG_PATH}/cpp_client_gpu.log"
pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
sleep 5s
eval "${python_} -m paddle_serving_server.serve stop"
fi
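The server launches above all follow one pattern, sketched here with placeholder paths: start the server in the background under nohup so it outlives the shell's job control, capture its stdout/stderr in a dedicated log instead of the terminal, wait for the model to load, run the client, then stop the server explicitly:
server_log="./output/cpp_server_cpu.log"
nohup python3 -m paddle_serving_server.serve --model ./serving_server --port 9292 > ${server_log} 2>&1 &
sleep 5s                                     # give the server time to load the model
python3 test_cpp_serving_client.py > ./output/cpp_client_cpu.log 2>&1
python3 -m paddle_serving_server.serve stop  # shut the background server down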
@@ -136,10 +141,11 @@ function func_serving_cls(){
function func_serving_rec(){
LOG_PATH="test_tipc/output/${model_name}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/serving_infer"
mkdir -p ${LOG_PATH}
LOG_PATH="../../../${LOG_PATH}"
status_log="${LOG_PATH}/results_serving.log"
status_log="${LOG_PATH}/results_cpp_serving.log"
trans_model_py=$(func_parser_value "${lines[5]}")
cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
@@ -181,16 +187,22 @@ function func_serving_rec(){
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
trans_cls_log="${LOG_PATH}/cpp_trans_model_cls.log"
cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
eval ${cls_trans_model_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${cls_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_cls_log}"
set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
trans_det_log="${LOG_PATH}/cpp_trans_model_det.log"
det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1"
eval ${det_trans_model_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${det_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"
OLD_IFS="${IFS}"
IFS='/'
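The OLD_IFS/IFS pair above is the usual save/restore idiom for temporary word-splitting; restoring matters because IFS is global to the shell. In isolation:
OLD_IFS="${IFS}"
IFS='/'
parts=(${det_serving_server_value})   # split a path like "a/b/c" into (a b c)
IFS="${OLD_IFS}"                      # restore, or later splitting misbehaves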
@@ -225,32 +237,34 @@ function func_serving_rec(){
for use_gpu in ${web_use_gpu_list[*]}; do
if [ ${use_gpu} = "null" ]; then
det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
server_log_path="${LOG_PATH}/cpp_server_cpu.log"
web_service_cpp_cmd="nohup ${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 > ${server_log_path} 2>&1 &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
_save_log_path="${LOG_PATH}/cpp_client_cpu.log"
pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
eval "${python_} -m paddle_serving_server.serve stop"
sleep 5s
else
det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
server_log_path="${LOG_PATH}/cpp_server_gpu.log"
web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
eval ${web_service_cpp_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
_save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
_save_log_path="${LOG_PATH}/cpp_client_gpu.log"
pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
eval "${python_} -m paddle_serving_server.serve stop"
sleep 5s
fi
@@ -36,13 +36,16 @@ web_service_py=$(func_parser_value "${lines[11]}")
web_use_gpu_key=$(func_parser_key "${lines[12]}")
web_use_gpu_list=$(func_parser_value "${lines[12]}")
pipeline_py=$(func_parser_value "${lines[13]}")
use_mkldnn="False"
threads="1"
function func_serving_cls(){
LOG_PATH="test_tipc/output/${model_name}/${MODE}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
LOG_PATH="../../${LOG_PATH}"
status_log="${LOG_PATH}/results_serving.log"
IFS='|'
# pdserving
@@ -54,8 +57,11 @@ function func_serving_cls(){
for python_ in ${python[*]}; do
if [[ ${python_} =~ "python" ]]; then
trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
trans_log="${LOG_PATH}/python_trans_model.log"
trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_log} 2>&1"
eval ${trans_model_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
break
fi
done
@@ -96,19 +102,19 @@ function func_serving_cls(){
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python_} ${web_service_py} &"
server_log_path="${LOG_PATH}/python_server_cpu.log"
web_service_cmd="nohup ${python_} ${web_service_py} > ${server_log_path} 2>&1 &"
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
_save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 "
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
sleep 5s
done
eval "${python_} -m paddle_serving_server.serve stop"
@@ -130,19 +136,19 @@ function func_serving_cls(){
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python_} ${web_service_py} & "
server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log"
web_service_cmd="nohup ${python_} ${web_service_py} > ${server_log_path} 2>&1 &"
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
_save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1"
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
sleep 5s
done
eval "${python_} -m paddle_serving_server.serve stop"
@@ -154,10 +160,11 @@ function func_serving_cls(){
function func_serving_rec(){
LOG_PATH="test_tipc/output/${model_name}/${MODE}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
LOG_PATH="../../../${LOG_PATH}"
status_log="${LOG_PATH}/results_serving.log"
trans_model_py=$(func_parser_value "${lines[5]}")
cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
@@ -199,16 +206,22 @@ function func_serving_rec(){
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
trans_cls_log="${LOG_PATH}/python_trans_model_cls.log"
cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
eval ${cls_trans_model_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${cls_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_cls_log}"
set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
trans_det_log="${LOG_PATH}/python_trans_model_det.log"
det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1"
eval ${det_trans_model_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${det_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"
# modify the alias_name of fetch_var to "outputs"
server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
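For readers unfamiliar with the sed address form used here: /fetch_var/,/is_lod_tensor/ restricts the substitution to the lines between those two matches, so only the fetch_var block's alias_name is rewritten. A self-contained sketch:
printf 'fetch_var {\n  alias_name: "prediction"\n  is_lod_tensor: false\n}\n' > demo.prototxt
sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: "features"/' demo.prototxt
grep alias_name demo.prototxt    # alias_name: "features"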
@@ -239,19 +252,19 @@ function func_serving_rec(){
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python} ${web_service_py} &"
server_log_path="${LOG_PATH}/python_server_cpu.log"
web_service_cmd="nohup ${python} ${web_service_py} > ${server_log_path} 2>&1 &"
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 5s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
_save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
sleep 5s
done
eval "${python_} -m paddle_serving_server.serve stop"
@@ -273,19 +286,19 @@ function func_serving_rec(){
devices_line=27
set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
eval ${set_devices_cmd}
web_service_cmd="${python} ${web_service_py} & "
server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log"
web_service_cmd="nohup ${python} ${web_service_py} > ${server_log_path} 2>&1 &"
eval ${web_service_cmd}
last_status=${PIPESTATUS[0]}
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
sleep 10s
for pipeline in ${pipeline_py[*]}; do
_save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
_save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
eval ${pipeline_cmd}
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
sleep 10s
done
eval "${python_} -m paddle_serving_server.serve stop"
@@ -95,7 +95,8 @@ if [[ $MODE = 'benchmark_train' ]]; then
epoch_num=1
fi
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
CLS_ROOT_PATH=$(pwd)
LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
@@ -107,24 +108,27 @@ function func_inference() {
_log_path=$4
_img_dir=$5
_flag_quant=$6
_gpu=$7
# inference
for use_gpu in ${use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${use_mkldnn_list[*]}; do
for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "../${status_log}" "${model_name}"
for precision in ${precision_list[*]}; do
_save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
done
@@ -135,7 +139,7 @@ function func_inference() {
continue
fi
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
_save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -146,7 +150,7 @@ function func_inference() {
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "../${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
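The renamed logs encode the full runtime configuration in the filename itself, presumably so the settings can be recovered without parsing the log body. The pattern, with illustrative values:
# python_infer_<device>_gpus_<ids>_usetrt_<bool>_precision_<prec>_batchsize_<n>.log
_save_log_path="${_log_path}/python_infer_gpu_gpus_0_usetrt_True_precision_fp16_batchsize_1.log"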
@@ -161,17 +165,19 @@ if [[ ${MODE} = "whole_infer" ]]; then
# for kl_quant
if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ]; then
echo "kl_quant"
command="${python} ${kl_quant_cmd_value}"
log_path="${LOG_PATH}/export.log"
command="${python} ${kl_quant_cmd_value} > ${log_path} 2>&1"
echo ${command}
eval $command
last_status=${PIPESTATUS[0]}
status_check $last_status "${command}" "${status_log}" "${model_name}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${log_path}"
cd ${infer_model_dir_list}/quant_post_static_model
ln -s __model__ inference.pdmodel
ln -s __params__ inference.pdiparams
ln -s model.pdmodel inference.pdmodel
ln -s model.pdiparams inference.pdiparams
cd ../../deploy
is_quant=True
func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
gpu=0
func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "${LOG_PATH}" "${infer_img_dir}" "${is_quant}" "${gpu}"
cd ..
fi
else
@@ -240,7 +246,7 @@
if [ ${#ips} -le 15 ]; then
# if length of ips >= 15, then it is seen as multi-machine
# 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_1"
nodes=1
else
IFS=","
@@ -257,7 +263,7 @@
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
if [ ${#gpu} -le 2 ]; then # train with cpu or single gpu
cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
elif [ ${#ips} -le 15 ]; then # train with multi-gpu
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
else # train with multi-machine
@@ -268,7 +274,8 @@
# export FLAGS_cudnn_deterministic=True
sleep 5
eval $cmd
status_check $? "${cmd}" "${status_log}" "${model_name}"
eval "cat ${save_log}/${model_name}/train.log >> ${save_log}.log"
status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"
sleep 5
if [[ $FILENAME == *GeneralRecognition* ]]; then
@@ -283,9 +290,10 @@
# run eval
if [ ${eval_py} != "null" ]; then
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1"
eval $eval_cmd
status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
status_check $? "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
sleep 5
fi
# run export model
@@ -298,15 +306,16 @@
set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
fi
set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1"
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}" "${model_name}"
status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference
# run inference
eval $env
save_infer_path="${save_log}"
cd deploy
func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}"
func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" "${flag_quant}" "${gpu}"
cd ..
fi
eval "unset CUDA_VISIBLE_DEVICES"