diff --git a/tests/configs/ppocr_det_mobile_params.txt b/tests/configs/ppocr_det_mobile_params.txt
index 5edb14cdbf8eef87b5b5558cbd8d1a2ff54ae919..7b65895856684c1921deaf746997536b14c6c46c 100644
--- a/tests/configs/ppocr_det_mobile_params.txt
+++ b/tests/configs/ppocr_det_mobile_params.txt
@@ -82,17 +82,17 @@ pipline:pipeline_http_client.py --image_dir=../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
-infer_quant:False
+infer_quant:True
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
 --enable_mkldnn:True|False
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16|int8
+--precision:int8
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
 --benchmark:True
 null:null
-null:null
\ No newline at end of file
+null:null
diff --git a/tests/test_cpp.sh b/tests/test_cpp.sh
index ac7e79603e73b43e5258c1d81f9d671bbe150f39..124bdacb7dad04bdea07a62ba9c86b248be5a06d 100644
--- a/tests/test_cpp.sh
+++ b/tests/test_cpp.sh
@@ -57,7 +57,7 @@ function func_cpp_inference(){
                 for threads in ${cpp_cpu_threads_list[*]}; do
                     for batch_size in ${cpp_batch_size_list[*]}; do
                         precision="fp32"
-                        if [ ${_flag_quant} = "True" ]; then
+                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                             precison="int8"
                         fi
                         _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
diff --git a/tests/test_python.sh b/tests/test_python.sh
index 3a5718d03e40291a7c963c936500ebd1b968657f..00e98986e015863d5a77ee376acdef96946d5421 100644
--- a/tests/test_python.sh
+++ b/tests/test_python.sh
@@ -5,11 +5,7 @@ FILENAME=$1
 # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'klquant_infer']
 MODE=$2
 
-if [ ${MODE} = "klquant_infer" ]; then
-    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
-else
-    dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
-fi
+dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
 
 # parser params
 IFS=$'\n'
@@ -93,6 +89,8 @@ infer_value1=$(func_parser_value "${lines[50]}")
 
 # parser klquant_infer
 if [ ${MODE} = "klquant_infer" ]; then
+    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+    lines=(${dataline})
     # parser inference model
     infer_model_dir_list=$(func_parser_value "${lines[1]}")
     infer_export_list=$(func_parser_value "${lines[2]}")
@@ -144,7 +142,7 @@ function func_inference(){
                 for threads in ${cpu_threads_list[*]}; do
                     for batch_size in ${batch_size_list[*]}; do
                         precison="fp32"
-                        if [ ${_flag_quant} = "True" ]; then
+                        if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                             precision="int8"
                         fi
                         _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
@@ -228,6 +226,9 @@ if [ ${MODE} = "infer" ] || [ ${MODE} = "klquant_infer" ]; then
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
+        if [ ${MODE} = "klquant_infer" ]; then
+            is_quant="True"
+        fi
         func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done