Commit b5aa9bde authored by LDOUBLEV

delete env

Parent a4fe159b
@@ -134,12 +134,12 @@ for train_model in ${train_model_list[*]}; do
pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained"
fi
save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}"
command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2"
${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2
command="${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2"
${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2
status_check $? "${trainer}" "${command}" "${status_log}"
command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}"
${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}
command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}"
${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}
status_check $? "${trainer}" "${command}" "${status_log}"
if [ "${model_name}" = "det" ]; then
@@ -173,8 +173,8 @@ for train_model in ${train_model_list[*]}; do
fi
for rec_batch_size in ${rec_batch_size_list[*]}; do
save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log"
command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}"
${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}
status_check $? "${inference}" "${command}" "${status_log}"
done
done
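Each training, export, and inference command above feeds its exit code ($?) into status_check along with the command string and a status log. The diff does not include that helper, so as a point of reference here is a minimal sketch of what such a function typically looks like, assuming the common pattern of appending one pass/fail line per command; the variable names (last_status, run_command, run_log) are illustrative and the actual helper in the repository may differ.

    status_check() {
        local last_status=$1    # exit code of the command just run
        local run_stage=$2      # which stage produced it (trainer/export/inference)
        local run_command=$3    # the full command string, recorded for reproducibility
        local run_log=$4        # file collecting pass/fail results
        if [ "${last_status}" -eq 0 ]; then
            echo "Run successfully with command - ${run_command}" >> "${run_log}"
        else
            echo "Run failed with command - ${run_command}" >> "${run_log}"
        fi
    }

Logging the full command rather than just the stage name means a failed case can be re-run by copying one line out of the status log.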