diff --git a/test_tipc/configs/det_mv3_db_v2.0/train_benchmark.txt b/test_tipc/configs/det_mv3_db_v2.0/train_benchmark.txt
index cad16c417c48e2baa7153a4a1ab3536e877bb1f9..39b0c8345aff939c104593a1cdc9cc59436401f8 100644
--- a/test_tipc/configs/det_mv3_db_v2.0/train_benchmark.txt
+++ b/test_tipc/configs/det_mv3_db_v2.0/train_benchmark.txt
@@ -10,7 +10,7 @@ Train.loader.batch_size_per_card:benchmark_train=16
 Global.pretrained_model:null
 train_model_name:latest
 train_infer_img_dir:null
-null:null
+--profiler_options:True
 ##
 trainer:norm_train
 norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 637964546ac762eeae35f72fca8829f4428101f9..cdd9f4b6a570b5fe95ca1fa98b4629bd79d8798f 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -5,12 +5,6 @@ FILENAME=$1
 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer']
 MODE=$2
 
-if [ $# -eq 3 ] ; then
-    extra_train_params=$3
-else
-    extra_train_params=""
-fi
-
 dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
 
 # parser params
@@ -336,11 +330,11 @@ else
 
         set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
         if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
-            cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} ${extra_train_params}"
+            cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} "
         elif [ ${#ips} -le 26 ];then # train with multi-gpu
-            cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} ${extra_train_params}"
+            cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
         else # train with multi-machine
-            cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} ${extra_train_params}"
+            cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}"
         fi
         # run train
         eval $cmd
diff --git a/tools/program.py b/tools/program.py
index 743ace090cc608523fc76c4e90864b60a0934e86..4c4ff627073d649d22f27a82af8f00cc0d4df446 100755
--- a/tools/program.py
+++ b/tools/program.py
@@ -46,8 +46,8 @@ class ArgsParser(ArgumentParser):
         self.add_argument(
             '-p',
             '--profiler_options',
-            type=str,
-            default=None,
+            type=bool,
+            default=False,
             help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".'
         )
 
@@ -150,6 +150,10 @@ def train(config,
     print_batch_step = config['Global']['print_batch_step']
     eval_batch_step = config['Global']['eval_batch_step']
     profiler_options = config['profiler_options']
+    if profiler_options is True:
+        profiler_options = "batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile"
+    else:
+        profiler_options = None
 
     global_step = 0
     if 'global_step' in pre_best_model_dict: