Unverified commit 48db9a8c authored by: S shangliang Xu, committed by: GitHub

[ce tests] add kl to ce (#4311)

Parent 7d625608
weights: https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams
slim: PTQ
PTQ:
  ptq_config: {
    'activation_quantizer': 'HistQuantizer',
    'upsample_bins': 127,
    'hist_percent': 0.999}
  quant_batch_num: 10
  fuse: True
weights: https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams
slim: PTQ
PTQ:
  ptq_config: {
    'activation_quantizer': 'HistQuantizer',
    'upsample_bins': 127,
    'hist_percent': 0.999}
  quant_batch_num: 10
  fuse: True
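
These two files are the new post-training quantization (PTQ) presets that the CE test configs below wire in. As a quick orientation, the commands that consume them, taken from the kl_quant entries added in the .txt files in this commit (the extra weight/output overrides that test.sh appends are omitted here), would look roughly like this:

# Usage sketch, not part of the diff: KL/Hist post-training quantization
# followed by inference-model export, mirroring the kl_quant entries below.
python tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml -o
python tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o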
@@ -31,11 +31,11 @@ norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -
 quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml -o
 fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml -o
 distill_export:null
-export1:null
+kl_quant:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml -o
 export2:null
 ##
 infer_model:ppyolo_mbv3_large_coco.pdparams|ppyolo_mbv3_large_qat.pdparams|ppyolo_mbv3_large_prune_fpgm.pdparams
-infer_export:./tests/weights|./tests/weights|./tests/weights
+infer_model_dir:./tests/weights
 infer_quant:False|True|False
 inference:deploy/python/infer.py
 --device:gpu|cpu
...
@@ -31,11 +31,11 @@ norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml
 quant_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/quant/ppyolo_r50vd_qat_pact.yml -o
 fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/prune/ppyolo_r50vd_prune_fpgm.yml -o
 distill_export:null
-export1:null
+kl_quant:tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml -o
 export2:null
 ##
 infer_model:ppyolo_r50vd_dcn_1x_coco.pdparams|ppyolo_r50vd_qat_pact.pdparams|ppyolo_r50vd_prune_fpgm.pdparams
-infer_export:./tests/weights|./tests/weights|./tests/weights
+infer_model_dir:./tests/weights
 infer_quant:False|True|False
 inference:deploy/python/infer.py
 --device:gpu|cpu
...
@@ -31,11 +31,11 @@ norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.y
 quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o
 fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o
 distill_export:null
-export1:null
+kl_quant:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o
 export2:null
 ##
 infer_model:yolov3_darknet53_270e_coco.pdparams|yolov3_darknet_coco_qat.pdparams|yolov3_darknet_prune_fpgm.pdparams
-infer_export:./tests/weights|./tests/weights|./tests/weights
+infer_model_dir:./tests/weights
 infer_quant:False|True|False
 inference:deploy/python/infer.py
 --device:gpu|cpu
...
@@ -60,6 +60,7 @@ elif [ ${MODE} = "whole_infer" ];then
     cd ../../ && mkdir -p ./tests/demo/
     cp -u dataset/coco/val2017/* ./tests/demo/
 else
+    mv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/
     # prepare infer data
     wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_ce.tar
     cd ./dataset/coco/ && tar -xvf coco_ce.tar && mv -u coco_ce/* .
...
@@ -119,8 +119,8 @@ export_key2=$(func_parser_key "${lines[34]}")
 export_value2=$(func_parser_value "${lines[34]}")
 # parser inference model
-infer_model_dir_list=$(func_parser_value "${lines[36]}")
-infer_export_list=$(func_parser_value "${lines[37]}")
+infer_model_name_list=$(func_parser_value "${lines[36]}")
+infer_model_dir=$(func_parser_value "${lines[37]}")
 infer_is_quant=$(func_parser_value "${lines[38]}")
 # parser inference
 inference_py=$(func_parser_value "${lines[39]}")
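
To connect this parser hunk with the .txt changes above: assuming `lines` is the zero-indexed array of config entries and that func_parser_key/func_parser_value split each entry on the first `:` (their definitions are outside this excerpt), and assuming the new kl_quant entry lands one slot before the export2 entry parsed from lines[34], the relevant fields map roughly as in this sketch (not code from the commit):

export_key1=$(func_parser_key "${lines[33]}")              # -> "kl_quant" (was "export1")
export_value1=$(func_parser_value "${lines[33]}")          # -> the tools/post_quant.py command
infer_model_name_list=$(func_parser_value "${lines[36]}")  # -> "ppyolo_mbv3_large_coco.pdparams|..."
infer_model_dir=$(func_parser_value "${lines[37]}")        # -> "./tests/weights" (was infer_export)
infer_is_quant=$(func_parser_value "${lines[38]}")         # -> "False|True|False"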
@@ -185,14 +185,13 @@ function func_inference(){
     elif [ ${device} = "True" ] || [ ${device} = "gpu" ]; then
         for use_trt in ${use_trt_list[*]}; do
             for precision in ${precision_list[*]}; do
-                if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                if [[ ${precision} != "fluid" ]]; then
+                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} = "trt_int8" ]]; then
                     continue
                 fi
-                if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                    if [[ ${_flag_quant} = "True" ]] && [[ ${precision} != "trt_int8" ]]; then
                     continue
                 fi
-                if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
-                    continue
                 fi
                 for batch_size in ${batch_size_list[*]}; do
                     _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
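
Read on its own, the rewritten GPU filter above boils down to: the plain `fluid` precision always runs; otherwise a non-quantized model skips `trt_int8`, and a quantized model runs only `trt_int8`. A standalone restatement of just that condition, using the same variable names as func_inference (a sketch, not additional code from the commit):

if [[ ${precision} != "fluid" ]]; then
    # float model: skip the TensorRT int8 path
    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} = "trt_int8" ]]; then
        continue
    fi
    # quantized model: only the TensorRT int8 path is meaningful
    if [[ ${_flag_quant} = "True" ]] && [[ ${precision} != "trt_int8" ]]; then
        continue
    fi
fi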
@@ -229,28 +228,45 @@ if [ ${MODE} = "infer" ]; then
     eval $env
     export Count=0
     IFS="|"
-    infer_run_exports=(${infer_export_list})
     infer_quant_flag=(${infer_is_quant})
     set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
-    for infer_model in ${infer_model_dir_list[*]}; do
+    set_save_infer_key=$(func_set_params "${save_infer_key}" "${infer_model_dir}")
+    infer_model="${infer_model_dir}/${train_param_value1}"
+    for infer_model_name in ${infer_model_name_list[*]}; do
         # run export
-        if [ ${infer_run_exports[Count]} != "null" ];then
-            set_export_weight=$(func_set_params "${export_weight}" "${infer_run_exports[Count]}/${infer_model}")
-            set_save_infer_key=$(func_set_params "${save_infer_key}" "${infer_run_exports[Count]}")
-            export_cmd="${python} ${norm_export} ${set_export_weight} ${set_train_params1} ${set_save_infer_key}"
+        case ${Count} in
+            0) run_export=${norm_export} ;;
+            1) run_export=${pact_export} ;;
+            2) run_export=${fpgm_export} ;;
+            *) echo "Undefined run_export"; exit 1;
+        esac
+        set_export_weight=$(func_set_params "${export_weight}" "${infer_model_dir}/${infer_model_name}")
+        export_cmd="${python} ${run_export} ${set_export_weight} ${set_train_params1} ${set_save_infer_key}"
         eval $export_cmd
         status_export=$?
         if [ ${status_export} = 0 ];then
             status_check $status_export "${export_cmd}" "${status_log}"
         fi
-        fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
-        infer_model="${infer_run_exports[Count]}/${train_param_value1}"
         func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done
+    # kl quant
+    if [ ${export_key1} = "kl_quant" ]; then
+        # run kl quant
+        kl_cmd="${python} ${export_value1} ${set_train_params1} ${set_save_infer_key}"
+        eval $kl_cmd
+        status_export=$?
+        if [ ${status_export} = 0 ];then
+            status_check $status_export "${kl_cmd}" "${status_log}"
+        fi
+        # run inference
+        is_quant=True
+        func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+    fi
 else
     IFS="|"
     export Count=0
...
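
For orientation, the rewritten `infer` branch now proceeds roughly as follows (a summary of the hunk above, not additional code from this commit):

# for each name in infer_model_name_list (Count = 0, 1, 2):
#   1. pick run_export by position: 0 -> norm_export, 1 -> pact_export, 2 -> fpgm_export
#   2. export ${infer_model_dir}/${infer_model_name} into ${infer_model_dir}
#   3. run func_inference on ${infer_model} with is_quant = infer_quant_flag[Count]
# then, if the config declares a kl_quant entry (export_key1 = "kl_quant"):
#   4. run the tools/post_quant.py command stored in export_value1
#   5. run func_inference again with is_quant=True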