diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index cf4d3fde0221b2a0edaf3d0f9bde5d8ff02991da..fcac6e3984cf3fd45fec9f7b736f794289278b25 100644
--- a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -1,16 +1,16 @@
-===========================ch_ppocr_mobile_v2.0===========================
+===========================ch_PP-OCRv2===========================
 model_name:ch_PP-OCRv2
 python:python3.7
 infer_model:./inference/ch_PP-OCRv2_det_infer/
 infer_export:null
-infer_quant:True
+infer_quant:False
 inference:tools/infer/predict_system.py
 --use_gpu:False|True
 --enable_mkldnn:False|True
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --rec_model_dir:./inference/ch_PP-OCRv2_rec_infer/
diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index 6a023951713f2993cbec448d88cd4029919d5860..4a46f0cf09dcf2bb812910f0cf322dda0749b87c 100644
--- a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -3,14 +3,14 @@ model_name:ch_ppocr_mobile_v2.0
 python:python3.7
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:null
-infer_quant:True
+infer_quant:False
 inference:tools/infer/predict_system.py
 --use_gpu:False|True
 --enable_mkldnn:False|True
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
index 5a93571a76366de191d2fb1736aa3ff4c71b1737..92d7031e884d10df3a5c98bf675d64d63b3cb335 100644
--- a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
+++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt
@@ -1,16 +1,16 @@
-===========================ch_ppocr_mobile_v2.0===========================
+===========================ch_ppocr_server_v2.0===========================
 model_name:ch_ppocr_server_v2.0
 python:python3.7
 infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
 infer_export:null
 infer_quant:True
 inference:tools/infer/predict_system.py
---use_gpu:False
---enable_mkldnn:False
+--use_gpu:False|True
+--enable_mkldnn:False|True
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False
---precision:int8
+--precision:fp32
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/
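The three config files above all use the TIPC `key:value` layout: each line is `name:value`, and a `|` inside the value enumerates the variants the harness sweeps over (`--use_gpu:False|True` yields one run with the GPU off and one with it on). A simplified Python model of that convention (the helper names `func_parser_key`/`func_parser_value` come from `test_tipc/common_func.sh`; this rewrite is illustrative, not a port):

```python
def parse_tipc_line(line: str):
    """Split a TIPC config line into its key and the list of values to sweep."""
    key, _, value = line.partition(":")  # split at the first colon only
    return key, value.split("|")

key, values = parse_tipc_line("--precision:fp32|fp16")
assert key == "--precision" and values == ["fp32", "fp16"]
```

Dropping `fp16` from `--precision` and setting `infer_quant:False` therefore shrinks the sweep to the fp32 combinations these float models actually support.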
diff --git a/test_tipc/configs/en_server_pgnetA/train_infer_python.txt b/test_tipc/configs/en_server_pgnetA/train_infer_python.txt
index c7b2d1b0a712693b666cd0b40cff4a8871084aa6..d70776998c4e326905920586e90f2833fe42e89b 100644
--- a/test_tipc/configs/en_server_pgnetA/train_infer_python.txt
+++ b/test_tipc/configs/en_server_pgnetA/train_infer_python.txt
@@ -44,7 +44,7 @@ inference:tools/infer/predict_e2e.py
 --rec_batch_num:1
 --use_tensorrt:False|True
 --precision:fp32|fp16|int8
---det_model_dir:
+--e2e_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
 --benchmark:True
diff --git a/test_tipc/docs/jeston_test_train_inference_python.md b/test_tipc/docs/jeston_test_train_inference_python.md
index e23aa7651da8b57c9f5e92338bb21dbde2ccda05..d96505985ea8a291b3579acb2aaee1b3d66c1baa 100644
--- a/test_tipc/docs/jeston_test_train_inference_python.md
+++ b/test_tipc/docs/jeston_test_train_inference_python.md
@@ -1,6 +1,6 @@
 # Basic training and inference functional test on Jetson
 
-The main entry point for the basic training and inference functional test on Jetson is `test_train_inference_python.sh`. Since the CPU on Jetson devices is weak, Jetson only needs to cover the GPU and TensorRT parts of TIPC inference testing.
+The main entry point for the basic training and inference functional test on Jetson is `test_inference_python.sh`. Since the CPU on Jetson devices is weak, Jetson only needs to cover the GPU and TensorRT parts of TIPC inference testing.
 
 ## 1. Summary of test conclusions
 
@@ -40,21 +40,21 @@ The main entry point for the basic training and inference functional test on Je
 ### 2.2 Functional test
 
-First run `prepare.sh` to download the data and models, then run `test_train_inference_python.sh` for the test; logs named `python_infer_*.log` are written to the `test_tipc/output` directory.
+First run `prepare.sh` to download the data and models, then run `test_inference_python.sh` for the test; logs named `python_infer_*.log` are written to the `test_tipc/output` directory.
 
-`test_train_inference_python.sh` supports 5 [run modes](./test_train_inference_python.md); on Jetson, only the inference mode needs to be tested:
+`test_inference_python.sh` has a single mode, `whole_infer`; on Jetson, only the inference mode needs to be tested:
 
 ```
 - whole_infer: no training; inference over the full dataset; walks through open-source model evaluation and dynamic-to-static export, and checks the inference model's prediction time and accuracy;
 ```shell
 bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 1:
-bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
+bash test_tipc/test_inference_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # Usage 2: run on a specific GPU; the third argument is the GPU id
-bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
+bash test_tipc/test_inference_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
 ```
 
-After running the commands, the logs are saved automatically under `test_tipc/output`. For example, `lite_train_lite_infer` mode runs the training + inference chain, so `test_tipc/output` contains the following files:
+After running the commands, the logs are saved automatically under `test_tipc/output`. For example, `whole_infer` mode runs the export + inference chain (no training), so `test_tipc/output` contains the following files:
 ```
 test_tipc/output/
 |- results_python.log    # log of each command's exit status
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index b721f2c39ff55af52c5748a9d26b9f2723568b96..d152ef29d0a2983e656f9868147158a3b7e66aa5 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -45,7 +45,7 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
         wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate
         wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate
         cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../
-        cd ./train_data && tar xf total_text_lite.tar && ln -s total_text && cd ../
+        cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../
     fi
     if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ] || [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then
         wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
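The one-line `prepare.sh` fix deserves a note: with a single argument, `ln -s total_text` creates a link named `total_text` that points at itself, so the `total_text` path the training configs expect never resolves. The corrected two-argument form is `ln -s <target> <link_name>`. A small Python sketch of the fixed behaviour (assuming the tar has already been extracted into `./train_data/total_text_lite`):

```python
import os

os.chdir("./train_data")
# Two-argument form: target first, link name second,
# mirroring the fixed `ln -s total_text_lite total_text`.
if not os.path.lexists("total_text"):
    os.symlink("total_text_lite", "total_text")
assert os.path.isdir("total_text")  # the link now resolves to real data
```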
diff --git a/test_tipc/test_inference_jeston.sh b/test_tipc/test_inference_jeston.sh
deleted file mode 100644
index 2fd76e1e9e7e8c7b52d0b6838cd15840a59fe5c4..0000000000000000000000000000000000000000
--- a/test_tipc/test_inference_jeston.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-source test_tipc/common_func.sh
-source test_tipc/test_train_inference_python.sh
-
-FILENAME=$1
-# MODE be one of ['whole_infer']
-MODE=$2
-
-dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
-
-# parser params
-IFS=$'\n'
-lines=(${dataline})
-
-model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
-
-infer_model_dir_list=$(func_parser_value "${lines[3]}")
-infer_export_list=$(func_parser_value "${lines[4]}")
-infer_is_quant=$(func_parser_value "${lines[5]}")
-# parser inference
-inference_py=$(func_parser_value "${lines[6]}")
-use_gpu_key=$(func_parser_key "${lines[7]}")
-use_gpu_list=$(func_parser_value "${lines[7]}")
-use_mkldnn_key=$(func_parser_key "${lines[8]}")
-use_mkldnn_list=$(func_parser_value "${lines[8]}")
-cpu_threads_key=$(func_parser_key "${lines[9]}")
-cpu_threads_list=$(func_parser_value "${lines[9]}")
-batch_size_key=$(func_parser_key "${lines[10]}")
-batch_size_list=$(func_parser_value "${lines[10]}")
-use_trt_key=$(func_parser_key "${lines[11]}")
-use_trt_list=$(func_parser_value "${lines[11]}")
-precision_key=$(func_parser_key "${lines[12]}")
-precision_list=$(func_parser_value "${lines[12]}")
-infer_model_key=$(func_parser_key "${lines[13]}")
-image_dir_key=$(func_parser_key "${lines[14]}")
-infer_img_dir=$(func_parser_value "${lines[14]}")
-save_log_key=$(func_parser_key "${lines[15]}")
-benchmark_key=$(func_parser_key "${lines[16]}")
-benchmark_value=$(func_parser_value "${lines[16]}")
-infer_key1=$(func_parser_key "${lines[17]}")
-infer_value1=$(func_parser_value "${lines[17]}")
-
-
-LOG_PATH="./test_tipc/output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_python.log"
-
-
-if [ ${MODE} = "whole_infer" ]; then
-    GPUID=$3
-    if [ ${#GPUID} -le 0 ];then
-        env=" "
-    else
-        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
-    fi
-    # set CUDA_VISIBLE_DEVICES
-    eval $env
-    export Count=0
-    IFS="|"
-    infer_run_exports=(${infer_export_list})
-    infer_quant_flag=(${infer_is_quant})
-    for infer_model in ${infer_model_dir_list[*]}; do
-        # run export
-        if [ ${infer_run_exports[Count]} != "null" ];then
-            save_infer_dir=$(dirname $infer_model)
-            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
-            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
-            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
-            echo ${infer_run_exports[Count]}
-            echo $export_cmd
-            eval $export_cmd
-            status_export=$?
-            status_check $status_export "${export_cmd}" "${status_log}"
-        else
-            save_infer_dir=${infer_model}
-        fi
-        #run inference
-        is_quant=${infer_quant_flag[Count]}
-        if [ ${MODE} = "klquant_infer" ]; then
-            is_quant="True"
-        fi
-        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
-        Count=$(($Count + 1))
-    done
-fi
-
diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh
new file mode 100644
index 0000000000000000000000000000000000000000..72516e044ed8a23c660a4c4f486d19f22a584fb0
--- /dev/null
+++ b/test_tipc/test_inference_python.sh
@@ -0,0 +1,169 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+#source test_tipc/test_train_inference_python.sh
+
+FILENAME=$1
+# MODE be one of ['whole_infer']
+MODE=$2
+
+dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+
+infer_model_dir_list=$(func_parser_value "${lines[3]}")
+infer_export_list=$(func_parser_value "${lines[4]}")
+infer_is_quant=$(func_parser_value "${lines[5]}")
+# parser inference
+inference_py=$(func_parser_value "${lines[6]}")
+use_gpu_key=$(func_parser_key "${lines[7]}")
+use_gpu_list=$(func_parser_value "${lines[7]}")
+use_mkldnn_key=$(func_parser_key "${lines[8]}")
+use_mkldnn_list=$(func_parser_value "${lines[8]}")
+cpu_threads_key=$(func_parser_key "${lines[9]}")
+cpu_threads_list=$(func_parser_value "${lines[9]}")
+batch_size_key=$(func_parser_key "${lines[10]}")
+batch_size_list=$(func_parser_value "${lines[10]}")
+use_trt_key=$(func_parser_key "${lines[11]}")
+use_trt_list=$(func_parser_value "${lines[11]}")
+precision_key=$(func_parser_key "${lines[12]}")
+precision_list=$(func_parser_value "${lines[12]}")
+infer_model_key=$(func_parser_key "${lines[13]}")
+image_dir_key=$(func_parser_key "${lines[14]}")
+infer_img_dir=$(func_parser_value "${lines[14]}")
+rec_model_key=$(func_parser_key "${lines[15]}")
+rec_model_value=$(func_parser_value "${lines[15]}")
+benchmark_key=$(func_parser_key "${lines[16]}")
+benchmark_value=$(func_parser_value "${lines[16]}")
+infer_key1=$(func_parser_key "${lines[17]}")
+infer_value1=$(func_parser_value "${lines[17]}")
+
+
+
+LOG_PATH="./test_tipc/output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_python.log"
+
+
+function func_inference(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    _log_path=$4
+    _img_dir=$5
+    _flag_quant=$6
+    # inference
+    for use_gpu in ${use_gpu_list[*]}; do
+        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+            for use_mkldnn in ${use_mkldnn_list[*]}; do
+                if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
+                    continue
+                fi
+                for threads in ${cpu_threads_list[*]}; do
+                    for batch_size in ${batch_size_list[*]}; do
+                        for precision in ${precision_list[*]}; do
+                            if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
+                                continue
+                            fi # skip when fp16 is enabled but mkldnn is disabled
+                            if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+                                continue
+                            fi # skip quant model inference when precision is not int8
+                            set_precision=$(func_set_params "${precision_key}" "${precision}")
+
+                            _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+                            set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+                            set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+                            set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
"${batch_size_key}" "${batch_size}") + set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + done + done + done + done + elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${precision_key}" "${precision}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + + done + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" + fi + done +} + +if [ ${MODE} = "whole_infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir=$(dirname $infer_model) + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" + echo ${infer_run_exports[Count]} + eval $export_cmd + status_export=$? 
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 2a563022aa66c80bd2c9b6c2f822bee5ac3f3c18..0b0a4e4a75f5e978f64404b27a5f26594dbd484e 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -118,6 +118,7 @@ if [ ${MODE} = "klquant_whole_infer" ]; then
     image_dir_key=$(func_parser_key "${lines[16]}")
     infer_img_dir=$(func_parser_value "${lines[16]}")
     save_log_key=$(func_parser_key "${lines[17]}")
+    save_log_value=$(func_parser_value "${lines[17]}")
     benchmark_key=$(func_parser_key "${lines[18]}")
     benchmark_value=$(func_parser_value "${lines[18]}")
     infer_key1=$(func_parser_key "${lines[19]}")
@@ -161,8 +162,9 @@ function func_inference(){
                             set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                             set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
                             set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                            set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                             set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
                             eval $command
                             last_status=${PIPESTATUS[0]}
                             eval "cat ${_save_log_path}"
@@ -191,8 +193,9 @@ function func_inference(){
                         set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
                         set_precision=$(func_set_params "${precision_key}" "${precision}")
                         set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                        set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                         set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
-                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
                         eval $command
                         last_status=${PIPESTATUS[0]}
                         eval "cat ${_save_log_path}"
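Both hunks above lean on `func_set_params` from `test_tipc/common_func.sh` to turn a parsed key/value pair into a CLI argument, emitting nothing when the value is `null` so the flag is omitted entirely. A rough Python equivalent of that contract (an assumption inferred from how the generated `command` strings are assembled, not a line-for-line port of the bash helper):

```python
def func_set_params(key: str, value: str) -> str:
    """Return 'key=value' for the command line, or '' to omit the flag."""
    if not value or value == "null":
        return ""
    return f"{key}={value}"

# e.g. the new save_log wiring:
assert func_set_params("--save_log_path", "null") == ""
assert func_set_params("--save_log_path", "./log/") == "--save_log_path=./log/"
```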
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index bd9e14a65749f4223eeb6cf79a37546909854d17..f437056ec7b10e28e626d2028b6401cebc647bb1 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -211,7 +211,7 @@ def create_predictor(args, mode, logger):
                     "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
                 }
                 max_input_shape = {
-                    "x": [1, 3, 1280, 1280],
+                    "x": [1, 3, 1536, 1536],
                     "conv2d_92.tmp_0": [1, 120, 400, 400],
                     "conv2d_91.tmp_0": [1, 24, 200, 200],
                     "conv2d_59.tmp_0": [1, 96, 400, 400],
@@ -261,7 +261,7 @@ def create_predictor(args, mode, logger):
                 opt_input_shape.update(opt_pact_shape)
             elif mode == "rec":
                 min_input_shape = {"x": [1, 3, 32, 10]}
-                max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]}
+                max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1536]}
                 opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]}
             elif mode == "cls":
                 min_input_shape = {"x": [1, 3, 48, 10]}
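The enlarged shapes in `utility.py` feed Paddle Inference's TensorRT dynamic-shape API: `create_predictor` registers a [min, max, opt] band per tensor, and an input resized beyond the max is rejected at run time, which is why the det cap grows to 1536x1536 and the rec width cap to 1536. A trimmed sketch of how such shapes are registered (paths and shapes are illustrative, and the real det config also registers intermediate tensors such as `conv2d_92.tmp_0`; the calls are the standard `paddle.inference` API used by `create_predictor`):

```python
from paddle.inference import Config, PrecisionType, create_predictor

config = Config("inference/ch_PP-OCRv2_det_infer/inference.pdmodel",
                "inference/ch_PP-OCRv2_det_infer/inference.pdiparams")
config.enable_use_gpu(500, 0)  # 500 MB initial GPU memory pool, device 0
config.enable_tensorrt_engine(
    workspace_size=1 << 30,
    max_batch_size=1,
    min_subgraph_size=15,
    precision_mode=PrecisionType.Float32,
    use_static=False,
    use_calib_mode=False)
# One [min, max, opt] shape per input tensor; images resized beyond the
# max (here the raised 1536x1536 cap) fail inside TensorRT.
config.set_trt_dynamic_shape_info(
    {"x": [1, 3, 50, 50]},       # min_input_shape
    {"x": [1, 3, 1536, 1536]},   # max_input_shape (was 1280x1280)
    {"x": [1, 3, 640, 640]})     # opt_input_shape
predictor = create_predictor(config)
```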