未验证 提交 8d31a3df 编写于 作者: C cnn 提交者: GitHub

[dev] add ce for train and eval (#3824)

* add ce for train and eval

* add prepare.sh

* add slim config params
上级 d5786288
===========================train_params===========================
model_name:yolov3_darknet53_270e_coco
python:python3.7
gpu_list:0|0,1
Global.use_gpu:True|True
Global.auto_cast:False
Global.epoch_num:lite_train_infer=2|whole_train_infer=300
Global.save_model_dir:./output/
Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4
Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/coco_ce/
gpu_list:0
use_gpu:True
auto_cast:null
epoch:lite_train_infer=1|whole_train_infer=12
save_dir:./output/
TrainReader.batch_size:lite_train_infer=2|whole_train_infer=4
weights:null
train_model_name:yolov3_darknet53_270e_coco/model_final.pdparams
train_infer_img_dir:./demo1/
null:null
##
trainer:norm_train|pact_train
trainer:norm_train|fpgm_train|pact_train
norm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
pact_train:null
fpgm_train:null
pact_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o
fpgm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o
distill_train:null
null:null
null:null
......@@ -25,8 +25,8 @@ eval:tools/eval.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
null:null
##
===========================infer_params===========================
Global.save_inference_dir:./output/
Global.pretrained_model:
--output_dir:./output/
weights:
norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
quant_export:deploy/slim/quantization/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o
fpgm_export:deploy/slim/prune/export_prune_model.py
......@@ -35,13 +35,13 @@ null:null
null:null
##
inference:deploy/python/infer.py
--device:cpu|gpu
--device:gpu
--enable_mkldnn:False|True
--cpu_threads:1|4
--batch_size:1|2
--use_tensorrt:null
--run_mode:fluid
--model_dir:./output_inference/yolov3_darknet53_270e_coco/
--model_dir:tests/output/norm_train_gpus_0_autocast_null/yolov3_darknet53_270e_coco/
--image_dir:./demo1/
--save_log_path:null
--run_benchmark:True
......
#!/bin/bash
# Prepare the CE (continuous evaluation) dataset for train/eval tests.
# Usage: prepare.sh <config_filename> <mode>
FILENAME=$1
# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer']
MODE=$2

# Every mode currently downloads the same lite COCO subset, so the
# original duplicated if/else branches are collapsed into one path.
# (If a mode ever needs a different dataset, reintroduce a quoted
# comparison: if [ "${MODE}" = "lite_train_infer" ]; then ... fi)
cd dataset/coco || exit 1   # abort rather than download into the wrong directory
wget https://paddledet.bj.bcebos.com/data/coco_ce.tar
tar -xvf coco_ce.tar
mv coco_ce/* .
rm -rf coco_ce*
......@@ -170,9 +170,10 @@ function func_inference(){
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} 2>&1 | tee ${_save_log_path} "
#command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} 2>&1 | tee ${_save_log_path} "
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} "
echo $command
#eval $command
eval $command
#status_check $? "${command}" "${status_log}"
done
done
......@@ -294,6 +295,7 @@ else
fi
# run train
eval "unset CUDA_VISIBLE_DEVICES"
echo $cmd
eval $cmd
status_check $? "${cmd}" "${status_log}"
......@@ -314,13 +316,17 @@ else
# run export model
save_infer_path="${save_log}"
export_cmd="${python} ${run_export} ${export_weight}=${save_log}/${train_model_name} ${save_infer_key}=${save_infer_path}"
echo $export_cmd
sleep 2
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}"
#run inference
eval $env
save_infer_path="${save_log}"
func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
echo "start infer"
#sleep 1000
func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
eval "unset CUDA_VISIBLE_DEVICES"
fi
done # done with: for trainer in ${trainer_list[*]}; do
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册