Unverified commit 48863382, authored by WangZhen, committed by GitHub

[TIPC]Support yolov3 dy2st train (#7325)

* Support yolov3 dy2st train

* Fix device_num

* Polish apply_to_static

* Modify config txt to support dy2st tipc
Parent 65542ba1
......@@ -57,6 +57,39 @@ TRT_MIN_SUBGRAPH = {
KEYPOINT_ARCH = ['HigherHRNet', 'TopDownHRNet']
MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT', 'ByteTrack']
TO_STATIC_SPEC = {
    'yolov3_darknet53_270e_coco': [{
        'im_id': paddle.static.InputSpec(
            name='im_id', shape=[-1, 1], dtype='float32'),
        'is_crowd': paddle.static.InputSpec(
            name='is_crowd', shape=[-1, 50], dtype='float32'),
        'gt_bbox': paddle.static.InputSpec(
            name='gt_bbox', shape=[-1, 50, 4], dtype='float32'),
        'curr_iter': paddle.static.InputSpec(
            name='curr_iter', shape=[-1], dtype='float32'),
        'image': paddle.static.InputSpec(
            name='image', shape=[-1, 3, -1, -1], dtype='float32'),
        'im_shape': paddle.static.InputSpec(
            name='im_shape', shape=[-1, 2], dtype='float32'),
        'scale_factor': paddle.static.InputSpec(
            name='scale_factor', shape=[-1, 2], dtype='float32'),
        'target0': paddle.static.InputSpec(
            name='target0', shape=[-1, 3, 86, -1, -1], dtype='float32'),
        'target1': paddle.static.InputSpec(
            name='target1', shape=[-1, 3, 86, -1, -1], dtype='float32'),
        'target2': paddle.static.InputSpec(
            name='target2', shape=[-1, 3, 86, -1, -1], dtype='float32'),
    }],
}


def apply_to_static(config, model):
    filename = config.get('filename', None)
    spec = TO_STATIC_SPEC.get(filename, None)
    model = paddle.jit.to_static(model, input_spec=spec)
    logger.info("Successfully applied @to_static with specs: {}".format(spec))
    return model


def _prune_input_spec(input_spec, program, targets):
    # try to prune static program to figure out pruned input spec
......
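For context, here is a minimal standalone sketch of the dy2st conversion that apply_to_static performs; TinyNet and its single-input spec are illustrative assumptions and not code from this PR:

# Illustrative only: convert a small dygraph Layer to a static graph with an
# explicit InputSpec, the same mechanism apply_to_static uses above.
import paddle

class TinyNet(paddle.nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = paddle.nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

spec = [paddle.static.InputSpec(name='x', shape=[-1, 8], dtype='float32')]
static_net = paddle.jit.to_static(TinyNet(), input_spec=spec)
out = static_net(paddle.rand([4, 8]))  # first call traces and runs the static graph

In the PR itself, spec is the per-model dict from TO_STATIC_SPEC, or None for models without an entry, in which case to_static infers the shapes from the first real batch.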
......@@ -48,7 +48,7 @@ from ppdet.utils import profiler
from ppdet.modeling.post_process import multiclass_nms
from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator, WandbCallback
from .export_utils import _dump_infer_config, _prune_input_spec
from .export_utils import _dump_infer_config, _prune_input_spec, apply_to_static
from paddle.distributed.fleet.utils.hybrid_parallel_util import fused_allreduce_gradients
......@@ -419,6 +419,8 @@ class Trainer(object):
"EvalDataset")()
model = self.model
if self.cfg.get('to_static', False):
model = apply_to_static(self.cfg, model)
sync_bn = (getattr(self.cfg, 'norm_type', None) == 'sync_bn' and
(self.cfg.use_gpu or self.cfg.use_mlu) and self._nranks > 1)
if sync_bn:
......
......@@ -165,6 +165,16 @@ else
device_num_list=($device_num)
fi
# for log name
to_static=""
# parse "to_static" options and modify trainer into "to_static_trainer"
if [[ ${model_type} = "dynamicTostatic" ]];then
to_static="d2sT_"
sed -i 's/trainer:norm_train/trainer:to_static_train/g' $FILENAME
fi
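In other words, when model_type is dynamicTostatic the benchmark both prefixes its log names with d2sT_ and rewrites the trainer entry in the TIPC config from norm_train to to_static_train; the parsing logic in test_train_inference_python.sh (shown further below) then resolves that entry to the normal trainer command plus the --to_static flag.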
if [[ ${model_name} =~ "higherhrnet" ]] || [[ ${model_name} =~ "hrnet" ]] || [[ ${model_name} =~ "tinypose" ]];then
echo "${model_name} run on full coco dataset"
epoch=$(set_dynamic_epoch $device_num $epoch)
......@@ -189,7 +199,7 @@ for batch_size in ${batch_size_list[*]}; do
if [ ${#gpu_id} -le 1 ];then
log_path="$SAVE_LOG/profiling_log"
mkdir -p $log_path
log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_profiling"
log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_${to_static}profiling"
func_sed_params "$FILENAME" "${line_gpuid}" "0" # sed used gpu_id
# set profile_option params
tmp=`sed -i "${line_profile}s/.*/${profile_option}/" "${FILENAME}"`
......@@ -205,8 +215,8 @@ for batch_size in ${batch_size_list[*]}; do
speed_log_path="$SAVE_LOG/index"
mkdir -p $log_path
mkdir -p $speed_log_path
log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_log"
speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_speed"
log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_${to_static}log"
speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_${to_static}speed"
func_sed_params "$FILENAME" "${line_profile}" "null" # sed profile_id as null
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} benchmark_train > ${log_path}/${log_name} 2>&1 "
echo $cmd
......@@ -240,8 +250,8 @@ for batch_size in ${batch_size_list[*]}; do
speed_log_path="$SAVE_LOG/index"
mkdir -p $log_path
mkdir -p $speed_log_path
log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_log"
speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_speed"
log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_${to_static}log"
speed_log_name="${repo_name}_${model_name}_bs${batch_size}_${precision}_${run_mode}_${device_num}_${to_static}speed"
func_sed_params "$FILENAME" "${line_gpuid}" "$gpu_id" # sed used gpu_id
func_sed_params "$FILENAME" "${line_profile}" "null" # sed --profile_option as null
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} benchmark_train > ${log_path}/${log_name} 2>&1 "
......
......@@ -17,7 +17,7 @@ norm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o wo
pact_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o
fpgm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o
distill_train:null
null:null
to_static_train:--to_static
null:null
##
===========================eval_params===========================
......
......@@ -41,8 +41,8 @@ fpgm_key=$(func_parser_key "${lines[17]}")
fpgm_trainer=$(func_parser_value "${lines[17]}")
distill_key=$(func_parser_key "${lines[18]}")
distill_trainer=$(func_parser_value "${lines[18]}")
trainer_key1=$(func_parser_key "${lines[19]}")
trainer_value1=$(func_parser_value "${lines[19]}")
to_static_key=$(func_parser_key "${lines[19]}")
to_static_trainer=$(func_parser_value "${lines[19]}")
trainer_key2=$(func_parser_key "${lines[20]}")
trainer_value2=$(func_parser_value "${lines[20]}")
......@@ -237,6 +237,7 @@ else
for autocast in ${autocast_list[*]}; do
for trainer in ${trainer_list[*]}; do
flag_quant=False
set_to_static=""
if [ ${trainer} = "${norm_key}" ]; then
run_train=${norm_trainer}
run_export=${norm_export}
......@@ -250,9 +251,10 @@ else
elif [ ${trainer} = "${distill_key}" ]; then
run_train=${distill_trainer}
run_export=${distill_export}
elif [ ${trainer} = "${trainer_key1}" ]; then
run_train=${trainer_value1}
run_export=${export_value1}
elif [ ${trainer} = "${to_static_key}" ]; then
run_train=${norm_trainer}
run_export=${norm_export}
set_to_static=${to_static_trainer}
elif [ ${trainer} = "${trainer_key2}" ]; then
run_train=${trainer_value2}
run_export=${export_value2}
......@@ -289,9 +291,9 @@ else
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
nodes="1"
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
cmd="${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_train_params1}"
cmd="${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_to_static} ${set_train_params1}"
elif [ ${#ips} -le 15 ];then # train with multi-gpu
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_train_params1}"
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_to_static} ${set_train_params1}"
else # train with multi-machine
IFS=","
ips_array=(${ips})
......@@ -299,7 +301,7 @@ else
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
IFS="|"
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_train_params1}"
cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_to_static} ${set_train_params1}"
fi
# run train
train_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}.log"
......
......@@ -103,6 +103,11 @@ def parse_args():
        type=str,
        default="sniper/proposals.json",
        help='Train proposals directory')
    parser.add_argument(
        "--to_static",
        action='store_true',
        default=False,
        help="Enable dy2st (dynamic-to-static) training.")
    args = parser.parse_args()
    return args
......
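Taken together, enabling dy2st training from the TIPC config reduces to appending this flag to the normal training command, e.g. python tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --to_static. The flag is presumably merged into the config's to_static switch (that wiring is not part of the hunks shown here), which the Trainer checks in the trainer.py hunk above so that apply_to_static converts the model before the training loop starts.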