diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py
index e9c1a8d31110ef20dd66be28d78b1e866fcd85ae..87d2f333c1923b088d32e5fb420ae7a5021d6b08 100755
--- a/deploy/slim/quantization/export_model.py
+++ b/deploy/slim/quantization/export_model.py
@@ -113,7 +113,7 @@ def main():
     use_srn = config['Architecture']['algorithm'] == "SRN"
     model_type = config['Architecture']['model_type']
     # start eval
-    metirc = program.eval(model, valid_dataloader, post_process_class,
+    metric = program.eval(model, valid_dataloader, post_process_class,
                           eval_class, model_type, use_srn)
     logger.info('metric eval ***************')
diff --git a/test/ocr_det_params.txt b/test/ocr_det_params.txt
index 01ac82d3d7d459ca324ec61cfcaac2386660a211..da7e034bdd1ef1f593020d2dd5e809b472651b54 100644
--- a/test/ocr_det_params.txt
+++ b/test/ocr_det_params.txt
@@ -1,13 +1,12 @@
 model_name:ocr_det
 python:python3.7
 gpu_list:0|0,1
-Global.auto_cast:False
+Global.auto_cast:null
 Global.epoch_num:10
 Global.save_model_dir:./output/
-Global.save_inference_dir:./output/
 Train.loader.batch_size_per_card:
-Global.use_gpu
-Global.pretrained_model
+Global.use_gpu:
+Global.pretrained_model:null
 trainer:norm|pact
 norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
@@ -17,6 +16,8 @@
 distill_train:null
 eval:tools/eval.py -c configs/det/det_mv3_db.yml -o
+Global.save_inference_dir:./output/
+Global.checkpoints:
 norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
 quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o
 fpgm_export:deploy/slim/prune/export_prune_model.py
@@ -29,7 +30,6 @@
 inference:tools/infer/predict_det.py
 --rec_batch_num:1
 --use_tensorrt:True|False
 --precision:fp32|fp16|int8
---det_model_dir
---image_dir
---save_log_path
-
+--det_model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+--save_log_path:./test/output/
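
A note on the parameter file above: every entry is key:value. A key with an empty value (Global.use_gpu:, Global.checkpoints:) is filled in by the test driver at run time, while the literal value null (Global.auto_cast:null, Global.pretrained_model:null) tells the driver to drop the option from the generated command altogether. The scripts below read each line with the helpers func_parser_key and func_parser_value. Their definitions are not part of this patch; a minimal sketch consistent with how they are used, assuming a split on the colon separator, would be:

    # Hypothetical sketch of the line parsers used by prepare.sh and test.sh.
    # The real definitions live in the scripts themselves and may differ.
    function func_parser_key(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[0]}    # the key, i.e. the text before the first ":"
    }
    function func_parser_value(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[1]}    # the value, i.e. the field after the key
    }
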
diff --git a/test/prepare.sh b/test/prepare.sh
index 150682469641a784f641313d361bb921d6d9dfb8..14b62383c2d9fd426bc84d3f58e557f2b3269353 100644
--- a/test/prepare.sh
+++ b/test/prepare.sh
@@ -26,8 +26,10 @@ IFS=$'\n'
 # The training params
 model_name=$(func_parser_value "${lines[0]}")
 train_model_list=$(func_parser_value "${lines[0]}")
+
 trainer_list=$(func_parser_value "${lines[10]}")
+
 # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer']
 MODE=$2
 # prepare pretrained weights and dataset
@@ -62,8 +64,8 @@ else
     rm -rf ./train_data/icdar2015
     wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
     if [ ${model_name} = "ocr_det" ]; then
-        eval_model_name="ch_ppocr_mobile_v2.0_det_train"
-        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
+        eval_model_name="ch_ppocr_mobile_v2.0_det_infer"
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
         cd ./inference && tar xf ${eval_model_name}.tar && cd ../
     else
         eval_model_name="ch_ppocr_mobile_v2.0_rec_train"
@@ -94,7 +96,7 @@ for train_model in ${train_model_list[*]}; do
     # eval
     for slim_trainer in ${trainer_list[*]}; do
         if [ ${slim_trainer} = "norm" ]; then
-            if [ ${model_name} = "ocr_det" ]; then
+            if [ ${model_name} = "det" ]; then
                 eval_model_name="ch_ppocr_mobile_v2.0_det_train"
                 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
@@ -104,7 +106,7 @@ for train_model in ${train_model_list[*]}; do
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
             fi
         elif [ ${slim_trainer} = "pact" ]; then
-            if [ ${model_name} = "ocr_det" ]; then
+            if [ ${model_name} = "det" ]; then
                 eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train"
                 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
@@ -114,7 +116,7 @@ for train_model in ${train_model_list[*]}; do
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
             fi
         elif [ ${slim_trainer} = "distill" ]; then
-            if [ ${model_name} = "ocr_det" ]; then
+            if [ ${model_name} = "det" ]; then
                 eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train"
                 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
@@ -124,7 +126,7 @@ for train_model in ${train_model_list[*]}; do
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
             fi
         elif [ ${slim_trainer} = "fpgm" ]; then
-            if [ ${model_name} = "ocr_det" ]; then
+            if [ ${model_name} = "det" ]; then
                 eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train"
                 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar
                 cd ./inference && tar xf ${eval_model_name}.tar && cd ../
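
With the prepare.sh changes above, the inference-only path downloads the already-exported ch_ppocr_mobile_v2.0_det_infer model instead of the training checkpoint, so prediction can run without an export step, and the per-trainer download branches now match model_name against "det" rather than "ocr_det". For reference, the suite is presumably driven as follows (the params file is $1 and MODE is $2 in both scripts; treat the exact invocation as an assumption, since it is not shown in this patch):

    # MODE is one of: lite_train_infer | whole_infer | whole_train_infer | infer
    bash test/prepare.sh test/ocr_det_params.txt lite_train_infer
    bash test/test.sh test/ocr_det_params.txt lite_train_infer
    # in infer mode, test.sh also accepts a GPU id as $3 (see the hunk near the end)
    bash test/test.sh test/ocr_det_params.txt infer 0
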
diff --git a/test/test.sh b/test/test.sh
index 2a27563ffaa2b1b96b58cbff89546acf7a286210..a75aed426988755e395f27520fc76de10b4676d1 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -41,59 +41,51 @@ gpu_list=$(func_parser_value "${lines[2]}")
 autocast_list=$(func_parser_value "${lines[3]}")
 autocast_key=$(func_parser_key "${lines[3]}")
 epoch_key=$(func_parser_key "${lines[4]}")
+epoch_num=$(func_parser_value "${lines[4]}")
 save_model_key=$(func_parser_key "${lines[5]}")
-save_infer_key=$(func_parser_key "${lines[6]}")
-train_batch_key=$(func_parser_key "${lines[7]}")
-train_use_gpu_key=$(func_parser_key "${lines[8]}")
-pretrain_model_key=$(func_parser_key "${lines[9]}")
-
-trainer_list=$(func_parser_value "${lines[10]}")
-norm_trainer=$(func_parser_value "${lines[11]}")
-pact_trainer=$(func_parser_value "${lines[12]}")
-fpgm_trainer=$(func_parser_value "${lines[13]}")
-distill_trainer=$(func_parser_value "${lines[14]}")
-
-eval_py=$(func_parser_value "${lines[15]}")
-norm_export=$(func_parser_value "${lines[16]}")
-pact_export=$(func_parser_value "${lines[17]}")
-fpgm_export=$(func_parser_value "${lines[18]}")
-distill_export=$(func_parser_value "${lines[19]}")
-
-inference_py=$(func_parser_value "${lines[20]}")
-use_gpu_key=$(func_parser_key "${lines[21]}")
-use_gpu_list=$(func_parser_value "${lines[21]}")
-use_mkldnn_key=$(func_parser_key "${lines[22]}")
-use_mkldnn_list=$(func_parser_value "${lines[22]}")
-cpu_threads_key=$(func_parser_key "${lines[23]}")
-cpu_threads_list=$(func_parser_value "${lines[23]}")
-batch_size_key=$(func_parser_key "${lines[24]}")
-batch_size_list=$(func_parser_value "${lines[24]}")
-use_trt_key=$(func_parser_key "${lines[25]}")
-use_trt_list=$(func_parser_value "${lines[25]}")
-precision_key=$(func_parser_key "${lines[26]}")
-precision_list=$(func_parser_value "${lines[26]}")
-model_dir_key=$(func_parser_key "${lines[27]}")
-image_dir_key=$(func_parser_key "${lines[28]}")
-save_log_key=$(func_parser_key "${lines[29]}")
+train_batch_key=$(func_parser_key "${lines[6]}")
+train_use_gpu_key=$(func_parser_key "${lines[7]}")
+pretrain_model_key=$(func_parser_key "${lines[8]}")
+pretrain_model_value=$(func_parser_value "${lines[8]}")
+
+trainer_list=$(func_parser_value "${lines[9]}")
+norm_trainer=$(func_parser_value "${lines[10]}")
+pact_trainer=$(func_parser_value "${lines[11]}")
+fpgm_trainer=$(func_parser_value "${lines[12]}")
+distill_trainer=$(func_parser_value "${lines[13]}")
+
+eval_py=$(func_parser_value "${lines[14]}")
+
+save_infer_key=$(func_parser_key "${lines[15]}")
+export_weight=$(func_parser_key "${lines[16]}")
+norm_export=$(func_parser_value "${lines[17]}")
+pact_export=$(func_parser_value "${lines[18]}")
+fpgm_export=$(func_parser_value "${lines[19]}")
+distill_export=$(func_parser_value "${lines[20]}")
+
+inference_py=$(func_parser_value "${lines[21]}")
+use_gpu_key=$(func_parser_key "${lines[22]}")
+use_gpu_list=$(func_parser_value "${lines[22]}")
+use_mkldnn_key=$(func_parser_key "${lines[23]}")
+use_mkldnn_list=$(func_parser_value "${lines[23]}")
+cpu_threads_key=$(func_parser_key "${lines[24]}")
+cpu_threads_list=$(func_parser_value "${lines[24]}")
+batch_size_key=$(func_parser_key "${lines[25]}")
+batch_size_list=$(func_parser_value "${lines[25]}")
+use_trt_key=$(func_parser_key "${lines[26]}")
+use_trt_list=$(func_parser_value "${lines[26]}")
+precision_key=$(func_parser_key "${lines[27]}")
+precision_list=$(func_parser_value "${lines[27]}")
+infer_model_key=$(func_parser_key "${lines[28]}")
+infer_model=$(func_parser_value "${lines[28]}")
+image_dir_key=$(func_parser_key "${lines[29]}")
+infer_img_dir=$(func_parser_value "${lines[29]}")
+save_log_key=$(func_parser_key "${lines[30]}")
 
 LOG_PATH="./test/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results.log"
 
-if [ ${MODE} = "lite_train_infer" ]; then
-    export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
-    export epoch_num=10
-elif [ ${MODE} = "whole_infer" ]; then
-    export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
-    export epoch_num=10
-elif [ ${MODE} = "whole_train_infer" ]; then
-    export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
-    export epoch_num=300
-else
-    export infer_img_dir="./inference/ch_det_data_50/all-sum-510"
-    export infer_model_dir="./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy"
-fi
-
 function func_inference(){
     IFS='|'
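
Two things changed in the parsing block above: the hard-coded per-MODE exports (infer_img_dir, epoch_num, infer_model_dir) are gone, since those values now come from the params file (--image_dir, Global.epoch_num, --det_model_dir), and every lines[N] index shifted because Global.save_inference_dir moved below eval and Global.checkpoints was added. The indices must mirror the file's line order exactly, so inserting or deleting a line in ocr_det_params.txt means renumbering every subsequent lines[N] read. The array itself is presumably populated near the top of test.sh along these lines (not shown in this patch):

    # Hypothetical: read the params file passed as $1 into lines[],
    # one array element per line of the file.
    FILENAME=$1
    dataline=$(cat ${FILENAME})
    IFS=$'\n'
    lines=(${dataline})
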
"${command}" "${status_log}" done @@ -124,7 +116,7 @@ function func_inference(){ fi for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" eval $command status_check $? "${command}" "${status_log}" done @@ -138,9 +130,9 @@ if [ ${MODE} != "infer" ]; then IFS="|" for gpu in ${gpu_list[*]}; do - train_use_gpu=True + use_gpu=True if [ ${gpu} = "-1" ];then - train_use_gpu=False + use_gpu=False env="" elif [ ${#gpu} -le 1 ];then env="export CUDA_VISIBLE_DEVICES=${gpu}" @@ -155,6 +147,7 @@ for gpu in ${gpu_list[*]}; do ips=${array[0]} gpu=${array[1]} IFS="|" + env=" " fi for autocast in ${autocast_list[*]}; do for trainer in ${trainer_list[*]}; do @@ -179,13 +172,32 @@ for gpu in ${gpu_list[*]}; do continue fi - save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" - if [ ${#gpu} -le 2 ];then # epoch_num #TODO - cmd="${python} ${run_train} ${train_use_gpu_key}=${train_use_gpu} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log} " - elif [ ${#gpu} -le 15 ];then - cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}" + # not set autocast when autocast is null + if [ ${autocast} = "null" ]; then + set_autocast=" " + else + set_autocast="${autocast_key}=${autocast}" + fi + # not set epoch when whole_train_infer + if [ ${MODE} != "whole_train_infer" ]; then + set_epoch="${epoch_key}=${epoch_num}" + else + set_epoch=" " + fi + # set pretrain + if [ ${pretrain_model_value} != "null" ]; then + set_pretrain="${pretrain_model_key}=${pretrain_model_value}" else - cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}" + set_pretrain=" " + fi + + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" + if [ ${#gpu} -le 2 ];then # train with cpu or single gpu + cmd="${python} ${run_train} ${train_use_gpu_key}=${use_gpu} ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}" + elif [ ${#gpu} -le 15 ];then # train with multi-gpu + cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}" + else # train with multi-machine + cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_pretrain} ${set_epoch} ${set_autocast}" fi # run train eval $cmd @@ -198,11 +210,12 @@ for gpu in ${gpu_list[*]}; do # run export model save_infer_path="${save_log}" - export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest ${save_infer_key}=${save_infer_path}" + export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${export_weight}=${save_log}/latest ${save_infer_key}=${save_infer_path}" eval $export_cmd status_check $? 
"${export_cmd}" "${status_log}" #run inference + echo $env save_infer_path="${save_log}" func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" done @@ -210,12 +223,13 @@ for gpu in ${gpu_list[*]}; do done else - save_infer_path="${LOG_PATH}/${MODE}" - run_export=${norm_export} - export_cmd="${python} ${run_export} ${save_model_key}=${save_infer_path} ${pretrain_model_key}=${infer_model_dir} ${save_infer_key}=${save_infer_path}" - eval $export_cmd - status_check $? "${export_cmd}" "${status_log}" - + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + echo $env #run inference - func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" + func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" fi diff --git a/tools/infer/utility.py b/tools/infer/utility.py index cf14e4abd71f1ac6e2ceec11163e635daef11f4d..e464722f094faa7b4296889f1b1b94c206f929b3 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -37,7 +37,7 @@ def init_args(): parser.add_argument("--use_gpu", type=str2bool, default=True) parser.add_argument("--ir_optim", type=str2bool, default=True) parser.add_argument("--use_tensorrt", type=str2bool, default=False) - parser.add_argument("--min_subgraph_size", type=int, default=3) + parser.add_argument("--min_subgraph_size", type=int, default=10) parser.add_argument("--precision", type=str, default="fp32") parser.add_argument("--gpu_mem", type=int, default=500)