Unverified commit df522565, authored by shangliang Xu, committed by GitHub

[ce tests] Add trt mode and amp in ce test (#4174)

* [ce tests] add trt_mode in ppyolo

* [ce tests] set amp in tests.sh
Parent bcf18bec
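The CE test configs touched below list each option as a key:value line, with the vertical bar "|" separating alternative values that tests.sh enumerates in turn (for example auto_cast:null|amp and --run_mode:fluid|trt_fp32|trt_fp16|trt_int8). As a rough illustration of that convention only, not the actual parsing code in tests.sh:

# Sketch: split a "key:value1|value2|..." config line and loop over the alternatives.
line="--run_mode:fluid|trt_fp32|trt_fp16|trt_int8"
key=${line%%:*}                          # --run_mode
IFS='|' read -r -a values <<< "${line#*:}"
for v in "${values[@]}"; do
    echo "would run with ${key}=${v}"
done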
@@ -3,7 +3,7 @@ model_name:ppyolo_mbv3_large_coco
 python:python3.7
 gpu_list:0
 use_gpu:True
-auto_cast:null
+auto_cast:null|amp
 epoch:lite_train_infer=1|whole_train_infer=405
 save_dir:./output/
 TrainReader.batch_size:lite_train_infer=2|whole_train_infer=24
@@ -43,7 +43,7 @@ inference:deploy/python/infer.py
 --cpu_threads:1|4
 --batch_size:1|2
 --use_tensorrt:null
---run_mode:fluid
+--run_mode:fluid|trt_fp32|trt_fp16|trt_int8
 --model_dir:
 --image_dir:./dataset/coco/val2017/
 --save_log_path:null
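With --run_mode expanded, the CE pipeline can exercise the exported model in deploy/python/infer.py under the native fluid path as well as TensorRT FP32/FP16/INT8 prediction. A hedged example of one such invocation, using only flags that appear in this config (the model_dir path here is illustrative, since the config leaves it empty):

python3.7 deploy/python/infer.py \
    --model_dir=./output_inference/ppyolo_mbv3_large_coco \
    --image_dir=./dataset/coco/val2017/ \
    --batch_size=1 \
    --run_mode=trt_fp16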
@@ -3,7 +3,7 @@ model_name:ppyolo_r50vd_dcn_1x_coco
 python:python3.7
 gpu_list:0
 use_gpu:True
-auto_cast:null
+auto_cast:null|amp
 epoch:lite_train_infer=1|whole_train_infer=405
 save_dir:./output/
 TrainReader.batch_size:lite_train_infer=2|whole_train_infer=24
@@ -43,7 +43,7 @@ inference:deploy/python/infer.py
 --cpu_threads:1|4
 --batch_size:1|2
 --use_tensorrt:null
---run_mode:fluid
+--run_mode:fluid|trt_fp32|trt_fp16|trt_int8
 --model_dir:
 --image_dir:./dataset/coco/val2017/
 --save_log_path:null
@@ -3,7 +3,7 @@ model_name:ppyolov2_r50vd_dcn_365e_coco
 python:python3.7
 gpu_list:0
 use_gpu:True
-auto_cast:null
+auto_cast:null|amp
 epoch:lite_train_infer=1|whole_train_infer=365
 save_dir:./output/
 TrainReader.batch_size:lite_train_infer=2|whole_train_infer=12
@@ -3,7 +3,7 @@ model_name:yolov3_darknet53_270e_coco
 python:python3.7
 gpu_list:0
 use_gpu:True
-auto_cast:null
+auto_cast:null|amp
 epoch:lite_train_infer=1|whole_train_infer=270
 save_dir:./output/
 TrainReader.batch_size:lite_train_infer=2|whole_train_infer=8
@@ -304,7 +304,11 @@ else
     continue
 fi
+if [ ${autocast} = "amp" ]; then
+    set_autocast="--fp16"
+else
+    set_autocast=$(func_set_params "${autocast_key}" "${autocast}")
+fi
 set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
 set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
 set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
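With this change, selecting amp in a config makes ${set_autocast} expand to the bare --fp16 flag instead of a key=value pair built by func_set_params, so a single-GPU training command assembled by tests.sh ends up looking roughly like the line below (the config path and the -o override are illustrative, not taken from this diff):

python3.7 tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml \
    -o TrainReader.batch_size=2 \
    --fp16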
@@ -319,11 +323,11 @@ else
 set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
 if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
-    cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} "
+    cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_train_params1} ${set_autocast} "
 elif [ ${#gpu} -le 15 ];then # train with multi-gpu
-    cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}"
+    cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_train_params1} ${set_autocast}"
 else # train with multi-machine
-    cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}"
+    cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_batchsize} ${set_train_params1} ${set_autocast}"
 fi
 # run train
 eval "unset CUDA_VISIBLE_DEVICES"