diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index 78fc55b603ab6e775514c4ff3b8007df3c01f5f7..c56bc69602819cbbe3e7eeced13841cec0156085 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -37,7 +37,8 @@ cpp_benchmark_value=$(func_parser_value "${lines[16]}")
 generate_yaml_cmd=$(func_parser_value "${lines[17]}")
 transform_index_cmd=$(func_parser_value "${lines[18]}")
 
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_cpp.log"
 # generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"
diff --git a/test_tipc/test_inference_jeston.sh b/test_tipc/test_inference_jeston.sh
index 56845003908c1a9cc8ac1b76e40ec108d33e8478..7fc8adf5b772264237bbd0070b7d98d1aee13027 100644
--- a/test_tipc/test_inference_jeston.sh
+++ b/test_tipc/test_inference_jeston.sh
@@ -42,7 +42,8 @@ infer_key1=$(func_parser_key "${lines[17]}")
 infer_value1=$(func_parser_value "${lines[17]}")
 
 
-LOG_PATH="./test_tipc/output"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
 
@@ -71,7 +72,7 @@ if [ ${MODE} = "whole_infer" ]; then
         echo $export_cmd
         eval $export_cmd
         status_export=$?
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" ""
     else
         save_infer_dir=${infer_model}
     fi
diff --git a/test_tipc/test_lite_arm_cpu_cpp.sh b/test_tipc/test_lite_arm_cpu_cpp.sh
index 919226eea5ce38b82fad6c2130a7c6467b6ee041..07fcbe209de9d8eb88580130e4077e8c0fa6063d 100644
--- a/test_tipc/test_lite_arm_cpu_cpp.sh
+++ b/test_tipc/test_lite_arm_cpu_cpp.sh
@@ -1,6 +1,5 @@
 #!/bin/bash
 source test_tipc/common_func.sh
-current_path=$PWD
 
 IFS=$'\n'
 
@@ -33,7 +32,8 @@ num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
 batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
 precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
 
-LOG_PATH=${current_path}"/output"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results.log"
 
@@ -65,9 +65,9 @@ function func_test_tipc(){
                 real_inference_cmd=$(echo ${inference_cmd} | awk -F " " '{print path $1" "path $2" "path $3}' path="$lite_arm_work_path")
                 command1="adb push ${_basic_config} ${lite_arm_work_path}"
                 eval ${command1}
-                command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"   
+                command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
                 eval ${command2}
-                status_check $? "${command2}" "${status_log}" "${model_name}"
+                status_check $? "${command2}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index bea29db9d461483d7598e28080e9425dcbb36c97..05046d157846cc4f899c6e229c1e266de999ca1b 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -36,7 +36,8 @@ inference_hardware_value=$(func_parser_value "${lines[14]}")
 inference_config_key=$(func_parser_key "${lines[15]}")
 inference_config_value=$(func_parser_value "${lines[15]}")
 
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_paddle2onnx.log"
 
diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh
index 82c9816478f9ea993b2e53f8a685766e8dbf81d7..e6801f640b07df5f4694ef943404974f4fbf07fc 100644
--- a/test_tipc/test_ptq_inference_python.sh
+++ b/test_tipc/test_ptq_inference_python.sh
@@ -94,7 +94,8 @@ if [[ $MODE = 'benchmark_train' ]]; then
     epoch_num=1
 fi
 
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
 
@@ -123,7 +124,7 @@ function func_inference() {
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
     done
@@ -145,7 +146,7 @@ function func_inference() {
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done
@@ -168,6 +169,6 @@ if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ];
         ln -s __params__ inference.pdiparams
         cd ../../deploy
         is_quant=True
-        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         cd ..
 fi
diff --git a/test_tipc/test_serving_infer_cpp.sh b/test_tipc/test_serving_infer_cpp.sh
index 50e8d9ad80105d5ee9a2e76a2f20fd7d252dbe29..a56442f09e4ae34c49856dff77d8d129990e3264 100644
--- a/test_tipc/test_serving_infer_cpp.sh
+++ b/test_tipc/test_serving_infer_cpp.sh
@@ -38,9 +38,9 @@ pipeline_py=$(func_parser_value "${lines[13]}")
 
 
 function func_serving_cls(){
-    LOG_PATH="test_tipc/output/${model_name}/${MODE}/cpp"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}/cpp"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_cpp_serving.log"
 
     IFS='|'
@@ -141,10 +141,11 @@ function func_serving_cls(){
 
 
 function func_serving_rec(){
-    LOG_PATH="test_tipc/output/${model_name}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_cpp_serving.log"
+
     trans_model_py=$(func_parser_value "${lines[5]}")
     cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
     cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
diff --git a/test_tipc/test_serving_infer_python.sh b/test_tipc/test_serving_infer_python.sh
index 8af1bd3ccdc25f759a2a2bf251d7f0d938f587e6..b136bc493f4349470f3f5881e24dabc24b47a417 100644
--- a/test_tipc/test_serving_infer_python.sh
+++ b/test_tipc/test_serving_infer_python.sh
@@ -41,10 +41,11 @@ threads="1"
 
 
 function func_serving_cls(){
-    LOG_PATH="test_tipc/output/${model_name}/${MODE}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_serving.log"
+
     IFS='|'
 
     # pdserving
@@ -159,10 +160,11 @@ function func_serving_cls(){
 
 
 function func_serving_rec(){
-    LOG_PATH="test_tipc/output/${model_name}/${MODE}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_serving.log"
+
     trans_model_py=$(func_parser_value "${lines[5]}")
     cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
     cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 7f4a96d9de8e32a1f3a5502580388d687565f4ad..ba2f26db214b1849b60d9b72e090eb0239c7ac29 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -95,7 +95,8 @@ if [[ $MODE = 'benchmark_train' ]]; then
     epoch_num=1
 fi
 
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
 
@@ -126,7 +127,7 @@ function func_inference() {
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "../${status_log}" "${model_name}" "${_save_log_path}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
            done
        done
    done
@@ -149,7 +150,7 @@ function func_inference() {
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "../${status_log}" "${model_name}" "${_save_log_path}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
            done
        done
    done
@@ -171,12 +172,12 @@ if [[ ${MODE} = "whole_infer" ]]; then
         last_status=${PIPESTATUS[0]}
         status_check $last_status "${command}" "${status_log}" "${model_name}" "${log_path}"
         cd ${infer_model_dir_list}/quant_post_static_model
-        ln -s __model__ inference.pdmodel
-        ln -s __params__ inference.pdiparams
+        ln -s model.pdmodel inference.pdmodel
+        ln -s model.pdiparams inference.pdiparams
         cd ../../deploy
         is_quant=True
         gpu=0
-        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" "${is_quant}" "${gpu}"
+        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "${LOG_PATH}" "${infer_img_dir}" "${is_quant}" "${gpu}"
         cd ..
     fi
 else
@@ -262,7 +263,7 @@ else
 
             set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
             if [ ${#gpu} -le 2 ]; then # train with cpu or single gpu
-                cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
+                cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
             elif [ ${#ips} -le 15 ]; then # train with multi-gpu
                 cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
             else # train with multi-machine
@@ -273,8 +274,8 @@ else
             # export FLAGS_cudnn_deterministic=True
             sleep 5
             eval $cmd
-            eval "cat ${save_log}/train.log >> ${save_log}.log"
-            status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log""
+            eval "cat ${save_log}/${model_name}/train.log >> ${save_log}.log"
+            status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"
             sleep 5
 
             if [[ $FILENAME == *GeneralRecognition* ]]; then
@@ -314,7 +315,7 @@ else
                 eval $env
                 save_infer_path="${save_log}"
                 cd deploy
-                func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}" "${gpu}"
+                func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" "${flag_quant}" "${gpu}"
                 cd ..
             fi
             eval "unset CUDA_VISIBLE_DEVICES"