diff --git a/configs/det/det_r50_vd_sast_icdar15.yml b/configs/det/det_r50_vd_sast_icdar15.yml
index dbfcefca964e73d42298fbbbc1e654b3bd809c77..674de7fbf250bd712b46e04d42ff0022029fd095 100755
--- a/configs/det/det_r50_vd_sast_icdar15.yml
+++ b/configs/det/det_r50_vd_sast_icdar15.yml
@@ -8,7 +8,7 @@ Global:
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [4000, 5000]
   cal_metric_during_train: False
-  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/
+  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained
   checkpoints:
   save_inference_dir:
   use_visualdl: False
diff --git a/configs/det/det_r50_vd_sast_totaltext.yml b/configs/det/det_r50_vd_sast_totaltext.yml
index 88dd31f3c21b184d956ad718dae808bb6054532e..44a0766b1bf2790a9633602bc637932529046e34 100755
--- a/configs/det/det_r50_vd_sast_totaltext.yml
+++ b/configs/det/det_r50_vd_sast_totaltext.yml
@@ -8,7 +8,7 @@ Global:
   # evaluation is run every 5000 iterations after the 4000th iteration
   eval_batch_step: [4000, 5000]
   cal_metric_during_train: False
-  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/
+  pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained
   checkpoints:
   save_inference_dir:
   use_visualdl: False
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_mac.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_mac.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_metal_arm_gpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_metal_arm_gpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_opencl_arm_gpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_java_opencl_arm_gpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d1a59920af20472b2006e6fd70f60e3ba1844d7
--- /dev/null
+++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,12 @@
+===========================paddle2onnx_params===========================
+2onnx: paddle2onnx
+--model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./inference/det_mobile_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a19c8ee3355b010b55d1dbf16aa0e21940ba546c
--- /dev/null
+++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:ocr_det_mobile
+python:python3.7|cpp
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
+op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.use_mkldnn:True|False
+op.det.local_service_conf.thread_num:1|6
+op.det.local_service_conf.use_trt:False|True
+op.det.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_rpc_client.py|pipeline_http_client.py
+--image_dir:../../doc/imgs
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_det_mobile/train_infer_python.txt b/test_tipc/configs/ppocr_det_mobile/train_infer_python.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/train_linux_dcu_normal_normal_infer_python_dcu.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_dcu_normal_normal_infer_python_dcu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..919736085bc9032debbae6cd432729995f224fe8
--- /dev/null
+++ b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,12 @@
+===========================paddle2onnx_params===========================
+2onnx: paddle2onnx
+--model_dir:./inference/ch_ppocr_server_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./inference/det_server_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--det_model_dir:
+--image_dir:./inference/det_inference
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..09b7ab750408a54fa292f1168d8de01bd962ca43
--- /dev/null
+++ b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:ocr_det_server
+python:python3.7|cpp
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_server_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_det_server_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
+op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.use_mkldnn:True|False
+op.det.local_service_conf.thread_num:1|6
+op.det.local_service_conf.use_trt:False|True
+op.det.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_rpc_client.py|pipeline_http_client.py
+--image_dir:../../doc/imgs_words_en
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..69bd018623ec323ac979729447e0b2f96643c4c0
--- /dev/null
+++ b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,12 @@
+===========================paddle2onnx_params===========================
+2onnx: paddle2onnx
+--model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./inference/rec_mobile_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_rec.py
+--use_gpu:True|False
+--rec_model_dir:
+--image_dir:./inference/rec_inference
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7351e5bd6d5d8ffc5d49b313ad662b1e2fd55bd2
--- /dev/null
+++ b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:ocr_rec_mobile
+python:python3.7|cpp
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
+op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.use_mkldnn:True|False
+op.rec.local_service_conf.thread_num:1|6
+op.rec.local_service_conf.use_trt:False|True
+op.rec.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_rpc_client.py|pipeline_http_client.py
+--image_dir:../../doc/imgs_words_en
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..16411aca520bf2e62775871da19eaa6b91602770
--- /dev/null
+++ b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
@@ -0,0 +1,12 @@
+===========================paddle2onnx_params===========================
+2onnx: paddle2onnx
+--model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--save_file:./inference/rec_server_onnx/model.onnx
+--opset_version:10
+--enable_onnx_checker:True
+inference:tools/infer/predict_rec.py
+--use_gpu:True|False
+--rec_model_dir:
+--image_dir:./inference/rec_inference
\ No newline at end of file
diff --git a/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
new file mode 100644
index 0000000000000000000000000000000000000000..24e7a8f3e0364f2a0a14c74a27da7372508cd414
--- /dev/null
+++ b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -0,0 +1,18 @@
+===========================serving_params===========================
+model_name:ocr_rec_server
+python:python3.7
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
+op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.use_mkldnn:True|False
+op.rec.local_service_conf.thread_num:1|6
+op.rec.local_service_conf.use_trt:False|True
+op.rec.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_rpc_client.py|pipeline_http_client.py
+--image_dir:../../doc/imgs_words_en
\ No newline at end of file
diff --git a/test_tipc/docs/test_paddle2onnx.md b/test_tipc/docs/test_paddle2onnx.md
index 5d784c5e93c3a93d00c256004de582dcbf357c45..df2734771e9252a40811c42ead03abbff1b7a1a3 100644
--- a/test_tipc/docs/test_paddle2onnx.md
+++ b/test_tipc/docs/test_paddle2onnx.md
@@ -18,10 +18,10 @@ The main program of the PaddleServing prediction function test is `test_paddle2onnx.sh`, which can test
 First run `prepare.sh` to prepare the data and models, then run `test_paddle2onnx.sh` for testing; log files with the `paddle2onnx_infer_*.log` suffix are generated under the ```test_tipc/output``` directory.
 
 ```shell
-bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "paddle2onnx_infer"
+bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt "paddle2onnx_infer"
 # Usage:
-bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ppocr_det_mobile_params.txt
+bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
 ```
 
 #### Run results
diff --git a/test_tipc/docs/test_serving.md b/test_tipc/docs/test_serving.md
index f63d6c7107ce92807c53d81a22a582b09178a712..1eded6f5821a5ebd9180cc4d89a1fecac61ad63d 100644
--- a/test_tipc/docs/test_serving.md
+++ b/test_tipc/docs/test_serving.md
@@ -20,10 +20,10 @@ The main program of the PaddleServing prediction function test is `test_serving.sh`, which can test
 First run `prepare.sh` to prepare the data and models, then run `test_serving.sh` for testing; log files with the `serving_infer_*.log` suffix are generated under the ```test_tipc/output``` directory.
 
 ```shell
-bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "serving_infer"
+bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer"
 # Usage:
-bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile_params.txt
+bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 ```
 
 #### Run results
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index f3ad242538a9471af237c804eae343da06e2b9dd..9b63bf5b20cd08b4ab08c17d7fd84f53feb93967 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -87,7 +87,8 @@ elif [ ${MODE} = "whole_infer" ];then
         rm -rf ./train_data/icdar2015
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar --no-check-certificate
-        cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate
+        cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && cd ../
     elif [ ${model_name} = "ocr_server_det" ]; then
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate
diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh
index 5dc6e65ec81e6b8674877fc686c8b3650ce93a59..cd3cfc6120be631a21944a49ba8ffa6f92b5fc38 100644
--- a/test_tipc/test_paddle2onnx.sh
+++ b/test_tipc/test_paddle2onnx.sh
@@ -11,7 +11,7 @@ python=$(func_parser_value "${lines[2]}")
 
 # parser params
-dataline=$(awk 'NR==111, NR==123{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==12{print}' $FILENAME)
 IFS=$'\n'
 lines=(${dataline})
 
diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh
index 9b1e90ed6116f32e232657e30277a747a70904c7..c36935a60fecacea672fd932773a8fb0bdcd619b 100644
--- a/test_tipc/test_serving.sh
+++ b/test_tipc/test_serving.sh
@@ -2,7 +2,7 @@
 source test_tipc/common_func.sh
 
 FILENAME=$1
-dataline=$(awk 'NR==67, NR==84{print}' $FILENAME)
+dataline=$(awk 'NR==1, NR==18{print}' $FILENAME)
 
 # parser params
 IFS=$'\n'
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 0d4e182b2832f65cec08beffe99055603b90982b..c62b6274f8dcbc84d08900c5d228d78fd3c0de1a 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -244,7 +244,7 @@ else
     export Count=0
     USE_GPU_KEY=(${train_use_gpu_value})
     for gpu in ${gpu_list[*]}; do
-        use_gpu=${USE_GPU_KEY[Count]}
+        train_use_gpu=${USE_GPU_KEY[Count]}
         Count=$(($Count + 1))
         ips=""
         if [ ${gpu} = "-1" ];then
@@ -302,11 +302,20 @@ else
                 set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
                 set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
                 set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
-                set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
-                save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
-
+                set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}")
+                if [ ${#ips} -le 26 ];then
+                    save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+                    nodes=1
+                else
+                    IFS=","
+                    ips_array=(${ips})
+                    IFS="|"
+                    nodes=${#ips_array[@]}
+                    save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
+                fi
+
                 # load pretrain from norm training if current trainer is pact or fpgm trainer
-                if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then
+                if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then
                     set_pretrain="${load_norm_train_model}"
                 fi
@@ -325,7 +334,7 @@ else
                 set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
                 # save norm trained models to set pretrain for pact training and fpgm training
-                if [ ${trainer} = ${trainer_norm} ]; then
+                if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then
                     load_norm_train_model=${set_eval_pretrain}
                 fi
                 # run eval
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 58170e393cdc9d8441408a89c84aa6f88d683db3..98bf0362f6842cb70490e6817ef53fe39109f406 100755
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -205,7 +205,7 @@
                 "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
             }
             max_input_shape = {
-                "x": [1, 3, 2000, 2000],
+                "x": [1, 3, 1280, 1280],
                 "conv2d_92.tmp_0": [1, 120, 400, 400],
                 "conv2d_91.tmp_0": [1, 24, 200, 200],
                 "conv2d_59.tmp_0": [1, 96, 400, 400],
@@ -255,16 +255,16 @@
                 opt_input_shape.update(opt_pact_shape)
         elif mode == "rec":
             min_input_shape = {"x": [1, 3, 32, 10]}
-            max_input_shape = {"x": [args.rec_batch_num, 3, 32, 2000]}
+            max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]}
             opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]}
         elif mode == "cls":
             min_input_shape = {"x": [1, 3, 48, 10]}
-            max_input_shape = {"x": [args.rec_batch_num, 3, 48, 2000]}
+            max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]}
             opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]}
         else:
             min_input_shape = {"x": [1, 3, 10, 10]}
-            max_input_shape = {"x": [1, 3, 1000, 1000]}
-            opt_input_shape = {"x": [1, 3, 500, 500]}
+            max_input_shape = {"x": [1, 3, 512, 512]}
+            opt_input_shape = {"x": [1, 3, 256, 256]}
         config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,
                                           opt_input_shape)