From 6ecaaba9fb8b4078b185d0f68221d5db967cfc2e Mon Sep 17 00:00:00 2001
From: HydrogenSulfate <490868991@qq.com>
Date: Thu, 23 Jun 2022 07:51:12 +0000
Subject: [PATCH] delete useless script

---
 test_tipc/test_ptq_inference_python.sh | 180 -------------------------
 1 file changed, 180 deletions(-)
 delete mode 100644 test_tipc/test_ptq_inference_python.sh

diff --git a/test_tipc/test_ptq_inference_python.sh b/test_tipc/test_ptq_inference_python.sh
deleted file mode 100644
index 6d4f2065..00000000
--- a/test_tipc/test_ptq_inference_python.sh
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/bin/bash
-FILENAME=$1
-source test_tipc/common_func.sh
-
-# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer']
-MODE=$2
-
-dataline=$(cat ${FILENAME})
-
-# parser params
-IFS=$'\n'
-lines=(${dataline})
-
-# The training params
-model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
-gpu_list=$(func_parser_value "${lines[3]}")
-train_use_gpu_key=$(func_parser_key "${lines[4]}")
-train_use_gpu_value=$(func_parser_value "${lines[4]}")
-autocast_list=$(func_parser_value "${lines[5]}")
-autocast_key=$(func_parser_key "${lines[5]}")
-epoch_key=$(func_parser_key "${lines[6]}")
-epoch_num=$(func_parser_params "${lines[6]}")
-save_model_key=$(func_parser_key "${lines[7]}")
-train_batch_key=$(func_parser_key "${lines[8]}")
-train_batch_value=$(func_parser_value "${lines[8]}")
-pretrain_model_key=$(func_parser_key "${lines[9]}")
-pretrain_model_value=$(func_parser_value "${lines[9]}")
-train_model_name=$(func_parser_value "${lines[10]}")
-train_infer_img_dir=$(func_parser_value "${lines[11]}")
-train_param_key1=$(func_parser_key "${lines[12]}")
-train_param_value1=$(func_parser_value "${lines[12]}")
-
-trainer_list=$(func_parser_value "${lines[14]}")
-
-trainer_norm=$(func_parser_key "${lines[15]}")
-norm_trainer=$(func_parser_value "${lines[15]}")
-pact_key=$(func_parser_key "${lines[16]}")
-pact_trainer=$(func_parser_value "${lines[16]}")
-fpgm_key=$(func_parser_key "${lines[17]}")
-fpgm_trainer=$(func_parser_value "${lines[17]}")
-distill_key=$(func_parser_key "${lines[18]}")
-distill_trainer=$(func_parser_value "${lines[18]}")
-to_static_key=$(func_parser_key "${lines[19]}")
-to_static_trainer=$(func_parser_value "${lines[19]}")
-trainer_key2=$(func_parser_key "${lines[20]}")
-trainer_value2=$(func_parser_value "${lines[20]}")
-
-eval_py=$(func_parser_value "${lines[23]}")
-eval_key1=$(func_parser_key "${lines[24]}")
-eval_value1=$(func_parser_value "${lines[24]}")
-
-save_infer_key=$(func_parser_key "${lines[27]}")
-export_weight=$(func_parser_key "${lines[28]}")
-norm_export=$(func_parser_value "${lines[29]}")
-pact_export=$(func_parser_value "${lines[30]}")
-fpgm_export=$(func_parser_value "${lines[31]}")
-distill_export=$(func_parser_value "${lines[32]}")
-kl_quant_cmd_key=$(func_parser_key "${lines[33]}")
-kl_quant_cmd_value=$(func_parser_value "${lines[33]}")
-export_key2=$(func_parser_key "${lines[34]}")
-export_value2=$(func_parser_value "${lines[34]}")
-
-# parser inference model
-infer_model_dir_list=$(func_parser_value "${lines[36]}")
-infer_export_flag=$(func_parser_value "${lines[37]}")
-infer_is_quant=$(func_parser_value "${lines[38]}")
-
-# parser inference
-inference_py=$(func_parser_value "${lines[39]}")
-use_gpu_key=$(func_parser_key "${lines[40]}")
-use_gpu_list=$(func_parser_value "${lines[40]}")
-use_mkldnn_key=$(func_parser_key "${lines[41]}")
-use_mkldnn_list=$(func_parser_value "${lines[41]}")
-cpu_threads_key=$(func_parser_key "${lines[42]}")
-cpu_threads_list=$(func_parser_value "${lines[42]}")
-batch_size_key=$(func_parser_key "${lines[43]}")
-batch_size_list=$(func_parser_value "${lines[43]}")
-use_trt_key=$(func_parser_key "${lines[44]}")
-use_trt_list=$(func_parser_value "${lines[44]}")
-precision_key=$(func_parser_key "${lines[45]}")
-precision_list=$(func_parser_value "${lines[45]}")
-infer_model_key=$(func_parser_key "${lines[46]}")
-image_dir_key=$(func_parser_key "${lines[47]}")
-infer_img_dir=$(func_parser_value "${lines[47]}")
-save_log_key=$(func_parser_key "${lines[48]}")
-benchmark_key=$(func_parser_key "${lines[49]}")
-benchmark_value=$(func_parser_value "${lines[49]}")
-infer_key1=$(func_parser_key "${lines[50]}")
-infer_value1=$(func_parser_value "${lines[50]}")
-if [ ! $epoch_num ]; then
-    epoch_num=2
-fi
-if [[ $MODE = 'benchmark_train' ]]; then
-    epoch_num=1
-fi
-
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_python.log"
-
-function func_inference() {
-    IFS='|'
-    _python=$1
-    _script=$2
-    _model_dir=$3
-    _log_path=$4
-    _img_dir=$5
-    _flag_quant=$6
-    # inference
-    for use_gpu in ${use_gpu_list[*]}; do
-        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
-            for use_mkldnn in ${use_mkldnn_list[*]}; do
-                for threads in ${cpu_threads_list[*]}; do
-                    for batch_size in ${batch_size_list[*]}; do
-                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
-                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
-                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
-                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
-                        set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
-                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
-                        set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
-                        eval $command
-                        last_status=${PIPESTATUS[0]}
-                        eval "cat ${_save_log_path}"
-                        status_check $last_status "${command}" "../${status_log}" "${model_name}"
-                    done
-                done
-            done
-        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
-            for use_trt in ${use_trt_list[*]}; do
-                for precision in ${precision_list[*]}; do
-                    if [ ${precision} = "True" ] && [ ${use_trt} = "False" ]; then
-                        continue
-                    fi
-                    # if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
-                    #     continue
-                    # fi
-                    for batch_size in ${batch_size_list[*]}; do
-                        _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
-                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
-                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
-                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
-                        set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
-                        set_precision=$(func_set_params "${precision_key}" "${precision}")
-                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
-                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path} 2>&1 "
-                        eval $command
-                        last_status=${PIPESTATUS[0]}
-                        eval "cat ${_save_log_path}"
-                        status_check $last_status "${command}" "../${status_log}" "${model_name}"
-                    done
-                done
-            done
-        else
-            echo "Does not support hardware other than CPU and GPU Currently!"
-        fi
-    done
-}
-
-if [[ ${MODE} = "whole_infer" ]]; then
-    GPUID=$3
-    if [ ${#GPUID} -le 0 ]; then
-        env="export CUDA_VISIBLE_DEVICES=0"
-    else
-        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-    fi
-    # set CUDA_VISIBLE_DEVICES
-    eval $env
-    export Count=0
-    cd deploy
-    for infer_model in ${infer_model_dir_list[*]}; do
-        #run inference
-        is_quant=${infer_quant_flag[Count]}
-        echo "is_quant: ${is_quant}"
-        func_inference "${python}" "${inference_py}" "${infer_model}" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
-        Count=$(($Count + 1))
-    done
-    cd ..
-- 
GitLab