Commit e6cea063 authored by H HydrogenSulfate

format test_train_inference_python.sh

Parent baab3478
@@ -98,7 +98,7 @@ LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
-function func_inference(){
+function func_inference() {
     IFS='|'
     _python=$1
     _script=$2
@@ -216,12 +216,12 @@ else
     train_use_gpu=${USE_GPU_KEY[Count]}
     Count=$(($Count + 1))
     ips=""
-    if [ ${gpu} = "-1" ];then
+    if [ ${gpu} = "-1" ]; then
         env=""
-    elif [ ${#gpu} -le 1 ];then
+    elif [ ${#gpu} -le 1 ]; then
         env="export CUDA_VISIBLE_DEVICES=${gpu}"
         eval ${env}
-    elif [ ${#gpu} -le 15 ];then
+    elif [ ${#gpu} -le 15 ]; then
         IFS=","
         array=(${gpu})
         env="export CUDA_VISIBLE_DEVICES=${array[0]}"
@@ -271,7 +271,7 @@ else
     set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
     set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
     set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu_value}")
-    if [ ${#ips} -le 15 ];then
+    if [ ${#ips} -le 15 ]; then
         # if length of ips >= 15, then it is seen as multi-machine
         # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
         save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
@@ -290,9 +290,9 @@ else
     # fi
     set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
-    if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
+    if [ ${#gpu} -le 2 ]; then # train with cpu or single gpu
         cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
-    elif [ ${#ips} -le 15 ];then # train with multi-gpu
+    elif [ ${#ips} -le 15 ]; then # train with multi-gpu
         cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
     else # train with multi-machine
         cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}"
......
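For reference, the `;then` to `; then` edits above all sit in the branch that decides how training is launched. Below is a minimal standalone sketch of that selection logic, not code from the commit itself: `python`, `run_train`, `gpu`, and `ips` are hypothetical placeholder values, and the real script assembles the command from many more `func_set_params` results before eval'ing it.

#!/bin/bash
# Hypothetical, standalone sketch of the launch-command selection in
# test_tipc/test_train_inference_python.sh; all values below are placeholders.
python="python3"
run_train="tools/train.py -c config.yaml"   # placeholder training entry point
gpu="0,1"    # "-1" = CPU, "0" = one GPU, "0,1" = multiple GPUs on one machine
ips=""       # "0.0.0.0,0.0.0.0" (>= 15 chars) would mean multi-machine

# Export CUDA_VISIBLE_DEVICES based on the character length of ${gpu},
# mirroring the branches touched by this commit.
if [ ${gpu} = "-1" ]; then
    env=""
elif [ ${#gpu} -le 1 ]; then
    env="export CUDA_VISIBLE_DEVICES=${gpu}"
    eval ${env}
elif [ ${#gpu} -le 15 ]; then
    IFS=","
    array=(${gpu})
    env="export CUDA_VISIBLE_DEVICES=${array[0]}"
    eval ${env}
fi

# Pick the launch command with the same length checks: a short ${gpu} means
# CPU or a single GPU, a short ${ips} means one machine with several GPUs,
# and a longer ${ips} (at least "0.0.0.0,0.0.0.0", 15 chars) means multi-machine.
if [ ${#gpu} -le 2 ]; then
    cmd="${python} ${run_train}"
elif [ ${#ips} -le 15 ]; then
    cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train}"
else
    cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train}"
fi
echo "${cmd}"   # the real script evals ${cmd} and then checks its exit status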