Unverified commit 3e2330fb, authored by shangliang Xu, committed by GitHub

[TIPC] alter some params in train_benchmark txt (#6531)

Parent 06c8cf7e
...@@ -23,6 +23,18 @@ function func_parser_params(){
echo ${tmp}
}
+function set_dynamic_epoch(){
+string=$1
+num=$2
+_str=${string:1:6}
+IFS="C"
+arr=(${_str})
+M=${arr[0]}
+P=${arr[1]}
+ep=`expr $num \* $P`
+echo $ep
+}
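A short, hedged trace of this new helper: it takes a device descriptor of the N<nodes>C<gpus-per-node> form used below plus a base count, and returns that count multiplied by the GPUs per node (the concrete values here are illustrative only, not part of the commit):

# Illustration: set_dynamic_epoch "N1C4" 2
#   ${string:1:6}  -> "1C4"      (drop the leading "N")
#   IFS="C" split  -> M=1, P=4   (M, the node count, is parsed but unused)
#   expr 2 \* 4    -> 8          (base epoch/repeat scaled by GPUs per node)
set_dynamic_epoch "N1C4" 2       # prints 8; the caller resets IFS before the loops below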
function func_sed_params(){
filename=$1
line=$2
...@@ -83,9 +95,8 @@ line_num=`expr $line_num + 1`
fp_items=$(func_parser_value "${lines[line_num]}")
line_num=`expr $line_num + 1`
epoch=$(func_parser_value "${lines[line_num]}")
-eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/coco_detection.yml"
-eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/coco_instance.yml"
-eval "sed -i '10i\ repeat: ${epoch}' configs/datasets/mot.yml"
+line_num=`expr $line_num + 1`
+repeat=$(func_parser_value "${lines[line_num]}")
line_num=`expr $line_num + 1`
profile_option_key=$(func_parser_key "${lines[line_num]}")
...@@ -130,7 +141,8 @@ if [ ! -n "$PARAMS" ] ;then
IFS="|"
batch_size_list=(${batch_size})
fp_items_list=(${fp_items})
-device_num_list=(N1C4)
+device_num="N1C4"
+device_num_list=($device_num)
run_mode="DP"
else
# parser params from input: modeltype_bs${bs_item}_${fp_item}_${run_mode}_${device_num}
...@@ -153,6 +165,16 @@ else
device_num_list=($device_num)
fi
+if [[ ${model_name} =~ "higherhrnet" ]] || [[ ${model_name} =~ "hrnet" ]] || [[ ${model_name} =~ "tinypose" ]];then
+epoch=$(set_dynamic_epoch $device_num $epoch)
+else
+epoch=1
+repeat=$(set_dynamic_epoch $device_num $repeat)
+eval "sed -i '10c\ repeat: ${repeat}' configs/datasets/coco_detection.yml"
+eval "sed -i '10c\ repeat: ${repeat}' configs/datasets/coco_instance.yml"
+eval "sed -i '10c\ repeat: ${repeat}' configs/datasets/mot.yml"
+fi
IFS="|" IFS="|"
for batch_size in ${batch_size_list[*]}; do for batch_size in ${batch_size_list[*]}; do
for precision in ${fp_items_list[*]}; do for precision in ${fp_items_list[*]}; do
...@@ -160,7 +182,7 @@ for batch_size in ${batch_size_list[*]}; do ...@@ -160,7 +182,7 @@ for batch_size in ${batch_size_list[*]}; do
# sed batchsize and precision # sed batchsize and precision
func_sed_params "$FILENAME" "${line_precision}" "$precision" func_sed_params "$FILENAME" "${line_precision}" "$precision"
func_sed_params "$FILENAME" "${line_batchsize}" "$MODE=$batch_size" func_sed_params "$FILENAME" "${line_batchsize}" "$MODE=$batch_size"
func_sed_params "$FILENAME" "${line_epoch}" "$MODE=1" func_sed_params "$FILENAME" "${line_epoch}" "$MODE=$epoch"
gpu_id=$(set_gpu_id $device_num) gpu_id=$(set_gpu_id $device_num)
if [ ${#gpu_id} -le 1 ];then if [ ${#gpu_id} -le 1 ];then
......
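Taken together, the new logic scales the benchmark workload with the card count: keypoint models (higherhrnet / hrnet / tinypose) keep training by epoch and have that epoch multiplied by the GPUs per node, while every other model trains one epoch and scales the dataset-level repeat instead, rewriting line 10 of the dataset ymls in place (the move from sed '10i' insert to '10c' change implies those ymls now carry a default repeat entry on that line). A hedged sketch with illustrative numbers:

# Illustration only, for device_num=N1C4 (P = 4 GPUs per node):
#   keypoint models : epoch  = epoch_from_txt  * 4   (e.g. tinypose: 25 * 4 = 100)
#   other models    : epoch  = 1
#                     repeat = repeat_from_txt * 4   (e.g. 1 * 4 = 4), then:
repeat=4
eval "sed -i '10c\ repeat: ${repeat}' configs/datasets/coco_detection.yml"
# '10c' replaces line 10 on every run, so repeated runs no longer stack extra '10i' insertions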
...@@ -53,5 +53,6 @@ inference:./deploy/python/infer.py
batch_size:2
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -53,6 +53,7 @@ inference:./deploy/python/infer.py
batch_size:2|8
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -53,6 +53,7 @@ inference:./deploy/python/infer.py
batch_size:2|8
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -53,5 +53,6 @@ inference:./deploy/python/infer.py
batch_size:2|8
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -53,6 +53,7 @@ inference:./deploy/python/keypoint_infer.py
batch_size:20|24
fp_items:fp32|fp16
epoch:20
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -53,6 +53,7 @@ null:null
batch_size:64|160
fp_items:fp32|fp16
epoch:40
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -54,6 +54,7 @@ random_infer_input:[{float32,[3,128,96]}]
===========================train_benchmark_params==========================
batch_size:512
fp_items:fp32|fp16
-epoch:1
+epoch:25
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -49,11 +49,5 @@ inference:./deploy/python/infer.py
--save_log_path:null
--run_benchmark:False
--trt_max_shape:1600
-===========================train_benchmark_params==========================
-batch_size:2|4
-fp_items:fp32|fp16
-epoch:1
---profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
-flags:null
===========================infer_benchmark_params===========================
numpy_infer_input:3x800x1344.npy
\ No newline at end of file
...@@ -53,6 +53,7 @@ inference:./deploy/python/infer.py
batch_size:2|4
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -52,7 +52,8 @@ inference:./deploy/pptracking/python/mot_jde_infer.py
===========================train_benchmark_params==========================
batch_size:6|22
fp_items:fp32|fp16
-epoch:2
+epoch:1
+repeat:2
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -53,6 +53,7 @@ inference:./deploy/pptracking/python/mot_jde_infer.py
batch_size:4|14
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -55,5 +55,6 @@ numpy_infer_input:3x416x416_2.npy
batch_size:80
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x320x320_2.npy
batch_size:128
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x320x320_2.npy
batch_size:128
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x320x320.npy
batch_size:24
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x608x608.npy
batch_size:24
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x320x320.npy
batch_size:32
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x640x640.npy
batch_size:12
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -55,5 +55,6 @@ numpy_infer_input:3x640x640.npy
batch_size:32
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
\ No newline at end of file
...@@ -53,6 +53,7 @@ inference:./deploy/python/infer.py
batch_size:2|4
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -53,6 +53,7 @@ null:null
batch_size:8
fp_items:fp32|fp16
epoch:1
+repeat:1
--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile
flags:null
===========================infer_benchmark_params===========================
...
...@@ -83,7 +83,8 @@ elif [ ${MODE} = "cpp_infer" ];then
fi
cd ../../
elif [ ${MODE} = "benchmark_train" ];then
-pip install -U pip Cython
+pip install -U pip
+pip install Cython
pip install -r requirements.txt
# prepare lite benchmark coco data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar --no-check-certificate
...
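For context, a hedged sketch of how these scripts are typically driven in benchmark_train mode; the config path and PARAMS string below are examples, not values taken from this PR:

# Hypothetical invocation, assuming the standard TIPC entry points:
CONFIG=test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_infer_python.txt
bash test_tipc/prepare.sh ${CONFIG} benchmark_train             # installs deps, fetches coco_benchmark.tar
bash test_tipc/benchmark_train.sh ${CONFIG} benchmark_train     # sweeps batch_size x fp_items on the N1C4 default
bash test_tipc/benchmark_train.sh ${CONFIG} benchmark_train dynamic_bs8_fp32_DP_N1C4   # or pin one combination via PARAMS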
...@@ -262,11 +262,6 @@ else
continue
fi
-if [ ${autocast} = "amp" ] || [ ${autocast} = "fp16" ]; then
-set_autocast="--amp"
-else
-set_autocast=" "
-fi
set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
...@@ -274,6 +269,12 @@ else
set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+if [ ${autocast} = "amp" ] || [ ${autocast} = "fp16" ]; then
+set_autocast="--amp"
+set_train_params1="amp_level=O2"
+else
+set_autocast=" "
+fi
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
nodes="1"
...
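Because the block now runs after set_train_params1 is assigned, an amp/fp16 autocast run injects both the --amp switch and an amp_level=O2 override (the override wins over whatever train_params1 held before) into the training command. A rough, hedged sketch of what that amounts to for PaddleDetection's usual entry point; the config path and explicit -o form are illustrative, not lifted from this PR:

# Rough sketch only:
python tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --amp -o amp_level=O2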