Commit e6cea063, authored by HydrogenSulfate

format test_train_inference_python.sh

Parent: baab3478
@@ -88,17 +88,17 @@ benchmark_value=$(func_parser_value "${lines[49]}")
infer_key1=$(func_parser_key "${lines[50]}")
infer_value1=$(func_parser_value "${lines[50]}")

if [ ! $epoch_num ]; then
    epoch_num=2
fi
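# Note: "[ ! $epoch_num ]" works because the unquoted variable collapses to an
# empty argument list when unset, so the test is true only when epoch_num is
# empty. A sketch of the more explicit, quoting-safe form (an editorial
# suggestion, not part of this change) would be:
#     if [ -z "${epoch_num}" ]; then
#         epoch_num=2
#     fi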
if [[ $MODE = 'benchmark_train' ]]; then
    epoch_num=1
fi
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
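# For illustration only, with a hypothetical model_name of
# "MobileNetV3_large_x1_0" and MODE of "lite_train_lite_infer", the paths above
# would resolve to:
#     LOG_PATH="./test_tipc/output/MobileNetV3_large_x1_0/lite_train_lite_infer"
#     status_log="${LOG_PATH}/results_python.log"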

function func_inference() {
    IFS='|'
    _python=$1
    _script=$2
@@ -216,12 +216,12 @@ else
        train_use_gpu=${USE_GPU_KEY[Count]}
        Count=$(($Count + 1))
        ips=""
        if [ ${gpu} = "-1" ]; then
            env=""
        elif [ ${#gpu} -le 1 ]; then
            env="export CUDA_VISIBLE_DEVICES=${gpu}"
            eval ${env}
        elif [ ${#gpu} -le 15 ]; then
            IFS=","
            array=(${gpu})
            env="export CUDA_VISIBLE_DEVICES=${array[0]}"
@@ -271,7 +271,7 @@ else
            set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
            set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
            set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu_value}")
            if [ ${#ips} -le 15 ]; then
                # if length of ips > 15, it is treated as multi-machine
                # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
                save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
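                # Illustrative values (assumptions, not from this diff): a
                # single-machine run leaves ips="" (length 0 <= 15), while a
                # two-machine run such as ips="192.168.1.10,192.168.1.11" is
                # longer than 15 characters and falls through to the
                # multi-machine branch (not shown in this hunk).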
@@ -290,26 +290,26 @@ else
            # fi
            set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
            if [ ${#gpu} -le 2 ]; then # train with cpu or single gpu
                cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
            elif [ ${#ips} -le 15 ]; then # train with multi-gpu
                cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
            else # train with multi-machine
                cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}"
            fi
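            # For illustration, assuming hypothetical values python=python3.7 and
            # run_train="tools/train.py -c config.yaml", the three branches above
            # produce commands of roughly these shapes:
            #     python3.7 tools/train.py -c config.yaml ...      # cpu / single gpu
            #     python3.7 -m paddle.distributed.launch --gpus=0,1 \
            #         tools/train.py -c config.yaml ...            # single machine, multi gpu
            #     python3.7 -m paddle.distributed.launch --ips=${ips} --gpus=0,1 \
            #         tools/train.py -c config.yaml ...            # multi machine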
            # run train
            eval "unset CUDA_VISIBLE_DEVICES"
            # export FLAGS_cudnn_deterministic=True
            sleep 5
            eval $cmd
            status_check $? "${cmd}" "${status_log}" "${model_name}"
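            # status_check comes from test_tipc/common_func.sh and is not shown
            # in this diff; the assumption here is that it appends a
            # success/failure line for ${cmd} to ${status_log} based on the exit
            # code passed as $1.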
            sleep 5
            if [[ $FILENAME == *GeneralRecognition* ]]; then
                set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}")
            else
                set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${model_name}/${train_model_name}")
            fi
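            # func_set_params is also defined in test_tipc/common_func.sh (not
            # shown here); presumably it echoes "key=value" for a usable pair and
            # an empty string when key or value is "null". A hypothetical call
            #     func_set_params "-o Global.pretrained_model" "./output/model"
            # would then expand to "-o Global.pretrained_model=./output/model".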
            # save norm trained models to set pretrain for pact training and fpgm training
            if [ ${trainer} = ${trainer_norm} ]; then
                load_norm_train_model=${set_eval_pretrain}
@@ -326,11 +326,11 @@ else
            if [ ${run_export} != "null" ]; then
                # run export model
                save_infer_path="${save_log}"
                if [[ $FILENAME == *GeneralRecognition* ]]; then
                    set_export_weight=$(func_set_params "${export_weight}" "${save_log}/RecModel/${train_model_name}")
                else
                    set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
                fi
                set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
                export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
                eval $export_cmd
@@ -339,12 +339,12 @@ else
                # run inference
                eval $env
                save_infer_path="${save_log}"
                cd deploy
                func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}"
                cd ..
            fi
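            # Because of the "cd deploy" in the inference block above,
            # save_infer_path and LOG_PATH are rebased with a "../" prefix so
            # that paths defined relative to the repository root stay valid from
            # inside deploy/.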
eval "unset CUDA_VISIBLE_DEVICES" eval "unset CUDA_VISIBLE_DEVICES"
done # done with: for trainer in ${trainer_list[*]}; do done # done with: for trainer in ${trainer_list[*]}; do
done # done with: for autocast in ${autocast_list[*]}; do done # done with: for autocast in ${autocast_list[*]}; do
done # done with: for gpu in ${gpu_list[*]}; do done # done with: for gpu in ${gpu_list[*]}; do
fi # end if [ ${MODE} = "infer" ]; then fi # end if [ ${MODE} = "infer" ]; then