diff --git a/tests/ocr_det_params.txt b/tests/configs/ppocr_det_mobile_params.txt similarity index 84% rename from tests/ocr_det_params.txt rename to tests/configs/ppocr_det_mobile_params.txt index 6fd22e409a5219574b2f29285ff5ee5d2e1cf7ca..5edb14cdbf8eef87b5b5558cbd8d1a2ff54ae919 100644 --- a/tests/ocr_det_params.txt +++ b/tests/configs/ppocr_det_mobile_params.txt @@ -40,13 +40,13 @@ infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False --enable_mkldnn:True|False ---cpu_threads:6 +--cpu_threads:1|6 --rec_batch_num:1 --use_tensorrt:False|True --precision:fp32|fp16|int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ ---save_log_path:null +null:null --benchmark:True null:null ===========================cpp_infer_params=========================== @@ -79,4 +79,20 @@ op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.precision:fp32|fp16|int8 pipline:pipeline_http_client.py --image_dir=../../doc/imgs - +===========================kl_quant_params=========================== +infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null +null:null \ No newline at end of file diff --git a/tests/ocr_det_server_params.txt b/tests/configs/ppocr_det_server_params.txt similarity index 77% rename from tests/ocr_det_server_params.txt rename to tests/configs/ppocr_det_server_params.txt index 4a17fa683439fdc4716b4ed6b067a572fa3a5057..b3df1735e50d941b34eeb274c28eb4ce50d79292 100644 --- a/tests/ocr_det_server_params.txt +++ b/tests/configs/ppocr_det_server_params.txt @@ -12,10 +12,10 @@ 
train_model_name:latest train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ null:null ## -trainer:norm_train|pact_train -norm_train:tools/train.py -c tests/configs/det_r50_vd_db.yml -o Global.pretrained_model="" -pact_train:null -fpgm_train:null +trainer:norm_train|pact_train|fpgm_export +norm_train:tools/train.py -c tests/configs/det_r50_vd_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c tests/configs/det_r50_vd_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py -c tests/configs/det_r50_vd_db.yml -o distill_train:null null:null null:null @@ -34,8 +34,8 @@ distill_export:null export1:null export2:null ## -infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ -infer_export:null +train_model:./inference/ch_ppocr_server_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml -o infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False diff --git a/tests/ocr_rec_params.txt b/tests/configs/ppocr_rec_mobile_params.txt similarity index 100% rename from tests/ocr_rec_params.txt rename to tests/configs/ppocr_rec_mobile_params.txt diff --git a/tests/ocr_rec_server_params.txt b/tests/configs/ppocr_rec_server_params.txt similarity index 100% rename from tests/ocr_rec_server_params.txt rename to tests/configs/ppocr_rec_server_params.txt diff --git a/tests/ocr_ppocr_mobile_params.txt b/tests/configs/ppocr_sys_mobile_params.txt similarity index 100% rename from tests/ocr_ppocr_mobile_params.txt rename to tests/configs/ppocr_sys_mobile_params.txt diff --git a/tests/ocr_ppocr_server_params.txt b/tests/configs/ppocr_sys_server_params.txt similarity index 100% rename from tests/ocr_ppocr_server_params.txt rename to tests/configs/ppocr_sys_server_params.txt diff --git a/tests/ocr_kl_quant_params.txt b/tests/ocr_kl_quant_params.txt deleted file mode 100644 index c6ee97dca49bb7d942a339783af44053e6c79b00..0000000000000000000000000000000000000000 --- 
a/tests/ocr_kl_quant_params.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:ocr_system -python:python3.7 -gpu_list:null -Global.use_gpu:null -Global.auto_cast:null -Global.epoch_num:null -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:null -Global.pretrained_model:null -train_model_name:null -train_infer_img_dir:null -null:null -## -trainer: -norm_train:null -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:null -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -## -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -kl_quant:deploy/slim/quantization/quant_kl.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:True -inference:tools/infer/predict_det.py ---use_gpu:TrueFalse ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---save_log_path:null ---benchmark:True -null:null diff --git a/tests/prepare.sh b/tests/prepare.sh index ef021fa385f16ae5c9c996bfcb607f73b4129f49..f43ddb56fcd615050f110fb0d05bb178b1621da0 100644 --- a/tests/prepare.sh +++ b/tests/prepare.sh @@ -1,7 +1,9 @@ #!/bin/bash FILENAME=$1 -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer', 'serving_infer'] +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', +# 'cpp_infer', 'serving_infer', 'klquant_infer'] + MODE=$2 dataline=$(cat ${FILENAME}) @@ -72,9 +74,9 @@ elif [ ${MODE} = "infer" ];then wget -nc -P ./inference 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ elif [ ${model_name} = "ocr_server_det" ]; then - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar - cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + cd ./inference && tar xf ch_ppocr_server_v2.0_det_train.tar && tar xf ch_det_data_50.tar && cd ../ elif [ ${model_name} = "ocr_system_mobile" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar @@ -98,6 +100,12 @@ elif [ ${MODE} = "infer" ];then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../ fi +elif [ ${MODE} = "klquant_infer" ];then + if [ ${model_name} = "ocr_det" ]; then + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar + cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ + fi elif [ ${MODE} = "cpp_infer" ];then if [ ${model_name} = "ocr_det" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar diff --git a/tests/results/det_results_gpu_trt_fp16.txt b/tests/results/ppocr_det_mobile_results_fp16.txt similarity index 100% rename from tests/results/det_results_gpu_trt_fp16.txt rename to 
tests/results/ppocr_det_mobile_results_fp16.txt diff --git a/tests/results/det_results_gpu_trt_fp16_cpp.txt b/tests/results/ppocr_det_mobile_results_fp16_cpp.txt similarity index 100% rename from tests/results/det_results_gpu_trt_fp16_cpp.txt rename to tests/results/ppocr_det_mobile_results_fp16_cpp.txt diff --git a/tests/results/det_results_gpu_fp32.txt b/tests/results/ppocr_det_mobile_results_fp32.txt similarity index 100% rename from tests/results/det_results_gpu_fp32.txt rename to tests/results/ppocr_det_mobile_results_fp32.txt diff --git a/tests/results/det_results_gpu_trt_fp32_cpp.txt b/tests/results/ppocr_det_mobile_results_fp32_cpp.txt similarity index 100% rename from tests/results/det_results_gpu_trt_fp32_cpp.txt rename to tests/results/ppocr_det_mobile_results_fp32_cpp.txt diff --git a/tests/test.sh b/tests/test.sh index 5649e344b76cf4485db533eee4035e1cbdd5adae..3df0d52cc5cfa6fd8d7259d47178d8c26d2952fb 100644 --- a/tests/test.sh +++ b/tests/test.sh @@ -1,9 +1,16 @@ #!/bin/bash FILENAME=$1 -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer'] +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'cpp_infer', 'serving_infer', 'klquant_infer'] MODE=$2 - -dataline=$(cat ${FILENAME}) +if [ ${MODE} = "cpp_infer" ]; then + dataline=$(awk 'NR==67, NR==81{print}' $FILENAME) +elif [ ${MODE} = "serving_infer" ]; then + dataline=$(awk 'NR==52, NR==66{print}' $FILENAME) +elif [ ${MODE} = "klquant_infer" ]; then + dataline=$(awk 'NR==82, NR==98{print}' $FILENAME) +else + dataline=$(awk 'NR==1, NR==51{print}' $FILENAME) +fi # parser params IFS=$'\n' @@ -144,61 +151,93 @@ benchmark_key=$(func_parser_key "${lines[49]}") benchmark_value=$(func_parser_value "${lines[49]}") infer_key1=$(func_parser_key "${lines[50]}") infer_value1=$(func_parser_value "${lines[50]}") -# parser serving -trans_model_py=$(func_parser_value "${lines[67]}") -infer_model_dir_key=$(func_parser_key "${lines[68]}") 
-infer_model_dir_value=$(func_parser_value "${lines[68]}") -model_filename_key=$(func_parser_key "${lines[69]}") -model_filename_value=$(func_parser_value "${lines[69]}") -params_filename_key=$(func_parser_key "${lines[70]}") -params_filename_value=$(func_parser_value "${lines[70]}") -serving_server_key=$(func_parser_key "${lines[71]}") -serving_server_value=$(func_parser_value "${lines[71]}") -serving_client_key=$(func_parser_key "${lines[72]}") -serving_client_value=$(func_parser_value "${lines[72]}") -serving_dir_value=$(func_parser_value "${lines[73]}") -web_service_py=$(func_parser_value "${lines[74]}") -web_use_gpu_key=$(func_parser_key "${lines[75]}") -web_use_gpu_list=$(func_parser_value "${lines[75]}") -web_use_mkldnn_key=$(func_parser_key "${lines[76]}") -web_use_mkldnn_list=$(func_parser_value "${lines[76]}") -web_cpu_threads_key=$(func_parser_key "${lines[77]}") -web_cpu_threads_list=$(func_parser_value "${lines[77]}") -web_use_trt_key=$(func_parser_key "${lines[78]}") -web_use_trt_list=$(func_parser_value "${lines[78]}") -web_precision_key=$(func_parser_key "${lines[79]}") -web_precision_list=$(func_parser_value "${lines[79]}") -pipeline_py=$(func_parser_value "${lines[80]}") +# parser serving +if [ ${MODE} = "klquant_infer" ]; then + # parser inference model + infer_model_dir_list=$(func_parser_value "${lines[1]}") + infer_export_list=$(func_parser_value "${lines[2]}") + infer_is_quant=$(func_parser_value "${lines[3]}") + # parser inference + inference_py=$(func_parser_value "${lines[4]}") + use_gpu_key=$(func_parser_key "${lines[5]}") + use_gpu_list=$(func_parser_value "${lines[5]}") + use_mkldnn_key=$(func_parser_key "${lines[6]}") + use_mkldnn_list=$(func_parser_value "${lines[6]}") + cpu_threads_key=$(func_parser_key "${lines[7]}") + cpu_threads_list=$(func_parser_value "${lines[7]}") + batch_size_key=$(func_parser_key "${lines[8]}") + batch_size_list=$(func_parser_value "${lines[8]}") + use_trt_key=$(func_parser_key "${lines[9]}") + 
use_trt_list=$(func_parser_value "${lines[9]}") + precision_key=$(func_parser_key "${lines[10]}") + precision_list=$(func_parser_value "${lines[10]}") + infer_model_key=$(func_parser_key "${lines[11]}") + image_dir_key=$(func_parser_key "${lines[12]}") + infer_img_dir=$(func_parser_value "${lines[12]}") + save_log_key=$(func_parser_key "${lines[13]}") + benchmark_key=$(func_parser_key "${lines[14]}") + benchmark_value=$(func_parser_value "${lines[14]}") + infer_key1=$(func_parser_key "${lines[15]}") + infer_value1=$(func_parser_value "${lines[15]}") +fi +# parser serving +if [ ${MODE} = "serving_infer" ]; then + trans_model_py=$(func_parser_value "${lines[1]}") + infer_model_dir_key=$(func_parser_key "${lines[2]}") + infer_model_dir_value=$(func_parser_value "${lines[2]}") + model_filename_key=$(func_parser_key "${lines[3]}") + model_filename_value=$(func_parser_value "${lines[3]}") + params_filename_key=$(func_parser_key "${lines[4]}") + params_filename_value=$(func_parser_value "${lines[4]}") + serving_server_key=$(func_parser_key "${lines[5]}") + serving_server_value=$(func_parser_value "${lines[5]}") + serving_client_key=$(func_parser_key "${lines[6]}") + serving_client_value=$(func_parser_value "${lines[6]}") + serving_dir_value=$(func_parser_value "${lines[7]}") + web_service_py=$(func_parser_value "${lines[8]}") + web_use_gpu_key=$(func_parser_key "${lines[9]}") + web_use_gpu_list=$(func_parser_value "${lines[9]}") + web_use_mkldnn_key=$(func_parser_key "${lines[10]}") + web_use_mkldnn_list=$(func_parser_value "${lines[10]}") + web_cpu_threads_key=$(func_parser_key "${lines[11]}") + web_cpu_threads_list=$(func_parser_value "${lines[11]}") + web_use_trt_key=$(func_parser_key "${lines[12]}") + web_use_trt_list=$(func_parser_value "${lines[12]}") + web_precision_key=$(func_parser_key "${lines[13]}") + web_precision_list=$(func_parser_value "${lines[13]}") + pipeline_py=$(func_parser_value "${lines[14]}") +fi if [ ${MODE} = "cpp_infer" ]; then # parser cpp 
inference model - cpp_infer_model_dir_list=$(func_parser_value "${lines[53]}") - cpp_infer_is_quant=$(func_parser_value "${lines[54]}") + cpp_infer_model_dir_list=$(func_parser_value "${lines[1]}") + cpp_infer_is_quant=$(func_parser_value "${lines[2]}") # parser cpp inference - inference_cmd=$(func_parser_value "${lines[55]}") - cpp_use_gpu_key=$(func_parser_key "${lines[56]}") - cpp_use_gpu_list=$(func_parser_value "${lines[56]}") - cpp_use_mkldnn_key=$(func_parser_key "${lines[57]}") - cpp_use_mkldnn_list=$(func_parser_value "${lines[57]}") - cpp_cpu_threads_key=$(func_parser_key "${lines[58]}") - cpp_cpu_threads_list=$(func_parser_value "${lines[58]}") - cpp_batch_size_key=$(func_parser_key "${lines[59]}") - cpp_batch_size_list=$(func_parser_value "${lines[59]}") - cpp_use_trt_key=$(func_parser_key "${lines[60]}") - cpp_use_trt_list=$(func_parser_value "${lines[60]}") - cpp_precision_key=$(func_parser_key "${lines[61]}") - cpp_precision_list=$(func_parser_value "${lines[61]}") - cpp_infer_model_key=$(func_parser_key "${lines[62]}") - cpp_image_dir_key=$(func_parser_key "${lines[63]}") - cpp_infer_img_dir=$(func_parser_value "${lines[63]}") - cpp_infer_key1=$(func_parser_key "${lines[64]}") - cpp_infer_value1=$(func_parser_value "${lines[64]}") - cpp_benchmark_key=$(func_parser_key "${lines[65]}") - cpp_benchmark_value=$(func_parser_value "${lines[65]}") + inference_cmd=$(func_parser_value "${lines[3]}") + cpp_use_gpu_key=$(func_parser_key "${lines[4]}") + cpp_use_gpu_list=$(func_parser_value "${lines[4]}") + cpp_use_mkldnn_key=$(func_parser_key "${lines[5]}") + cpp_use_mkldnn_list=$(func_parser_value "${lines[5]}") + cpp_cpu_threads_key=$(func_parser_key "${lines[6]}") + cpp_cpu_threads_list=$(func_parser_value "${lines[6]}") + cpp_batch_size_key=$(func_parser_key "${lines[7]}") + cpp_batch_size_list=$(func_parser_value "${lines[7]}") + cpp_use_trt_key=$(func_parser_key "${lines[8]}") + cpp_use_trt_list=$(func_parser_value "${lines[8]}") + 
cpp_precision_key=$(func_parser_key "${lines[9]}") + cpp_precision_list=$(func_parser_value "${lines[9]}") + cpp_infer_model_key=$(func_parser_key "${lines[10]}") + cpp_image_dir_key=$(func_parser_key "${lines[11]}") + cpp_infer_img_dir=$(func_parser_value "${lines[11]}") + cpp_infer_key1=$(func_parser_key "${lines[13]}") + cpp_infer_value1=$(func_parser_value "${lines[13]}") + cpp_benchmark_key=$(func_parser_key "${lines[14]}") + cpp_benchmark_value=$(func_parser_value "${lines[14]}") fi + LOG_PATH="./tests/output" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results.log" @@ -414,7 +453,7 @@ function func_cpp_inference(){ done } -if [ ${MODE} = "infer" ]; then +if [ ${MODE} = "infer" ] || [ ${MODE} = "klquant_infer" ]; then GPUID=$3 if [ ${#GPUID} -le 0 ];then env=" " @@ -447,7 +486,6 @@ if [ ${MODE} = "infer" ]; then func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} Count=$(($Count + 1)) done - elif [ ${MODE} = "cpp_infer" ]; then GPUID=$3 if [ ${#GPUID} -le 0 ];then @@ -481,6 +519,8 @@ elif [ ${MODE} = "serving_infer" ]; then #run serving func_serving "${web_service_cmd}" + + else IFS="|" export Count=0