diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py
index 34cf80f5e5566707a08d15ddeaaa51348dcd9acf..bbd291c3347929bf394d7859e277286cb4932042 100755
--- a/deploy/slim/quantization/export_model.py
+++ b/deploy/slim/quantization/export_model.py
@@ -127,6 +127,7 @@ def main():
     arch_config = config["Architecture"]
     if arch_config["algorithm"] in ["Distillation", ]:  # distillation model
         for idx, name in enumerate(model.model_name_list):
+            model.model_list[idx].eval()
             sub_model_save_path = os.path.join(save_path, name, "inference")
             export_single_model(quanter, model.model_list[idx], infer_shape,
                                 sub_model_save_path, logger)
diff --git a/ppocr/modeling/heads/rec_ctc_head.py b/ppocr/modeling/heads/rec_ctc_head.py
index 35d33d5f56b3b378286565cbfa9755f43343b278..5cc4f9d8a4cf8da71e114d70890d6951345658ed 100755
--- a/ppocr/modeling/heads/rec_ctc_head.py
+++ b/ppocr/modeling/heads/rec_ctc_head.py
@@ -80,8 +80,9 @@ class CTCHead(nn.Layer):
             result = (x, predicts)
         else:
             result = predicts
-
+        print("self.training: ", self.training)
         if not self.training:
+            print("self.training, ", self.training)
             predicts = F.softmax(predicts, axis=2)
             result = predicts
 
diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt
index 03d749f55765b2ea9e82d538cb4e6fb3d29e0b9f..cfe0947fe010f1545288e3e55922f980e89980d2 100644
--- a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt
+++ b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt
@@ -1,13 +1,13 @@
 ===========================train_params===========================
-model_name:PPOCRv2_ocr_rec_pact
+model_name:ch_PPOCRv2_rec_PACT
 python:python3.7
-gpu_list:0|0,1
+gpu_list:6
 Global.use_gpu:True|True
 Global.auto_cast:fp32
-Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=300
+Global.epoch_num:lite_train_lite_infer=6|whole_train_whole_infer=300
 Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128
-Global.pretrained_model:null
+Global.pretrained_model:pretrain_models/ch_PP-OCRv2_rec_train/best_accuracy
 train_model_name:latest
 train_infer_img_dir:./inference/rec_inference
 null:null
diff --git a/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt b/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt
index 0603fa10a640fd6d7b71582a92b92f026b4d1d51..5634297973bafbdad6c168e369d15520db09aba3 100644
--- a/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt
+++ b/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt
@@ -1,13 +1,13 @@
 ===========================train_params===========================
 model_name:det_mv3_east_v2.0
 python:python3.7
-gpu_list:0
+gpu_list:0|0,1
 Global.use_gpu:True|True
 Global.auto_cast:fp32
 Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500
 Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4
-Global.pretrained_model:null
+Global.pretrained_model:./pretrain_models/det_mv3_east_v2.0_train/best_accuracy
 train_model_name:latest
 train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
 null:null
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 31433884a69da24fc55dee4f0853e5cee1ea8edb..6a8983009e527b8a59b41c1d9b950e8e3f349ef2 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -64,6 +64,10 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
         wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate
         cd ./pretrain_models/ && tar xf ch_ppocr_server_v2.0_det_train.tar && cd ../
     fi
+    if [ ${model_name} == "ch_PPOCRv2_rec" ] || [ ${model_name} == "ch_PPOCRv2_rec_PACT" ]; then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_train.tar --no-check-certificate
+        cd ./pretrain_models/ && tar xf ch_PP-OCRv2_rec_train.tar && cd ../
+    fi
     if [ ${model_name} == "det_r18_db_v2_0" ]; then
         wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet18_vd_pretrained.pdparams --no-check-certificate
     fi
@@ -91,6 +95,10 @@ if [ ${MODE} = "lite_train_lite_infer" ];then
         wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar --no-check-certificate
         cd ./pretrain_models/ && tar xf ch_ppocr_mobile_v2.0_rec_train.tar && cd ../
     fi
+    if [ ${model_name} == "det_mv3_east_v2.0" ]; then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar --no-check-certificate
+        cd ./pretrain_models/ && tar xf det_mv3_east_v2.0_train.tar && cd ../
+    fi
 elif [ ${MODE} = "whole_train_whole_infer" ];then
     wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
 
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 80abba67b293e3412afa6c1ea8da0291331ef8de..1b282a0112a96a74f1f0b539c29ca8142191564e 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -312,12 +312,28 @@ def create_predictor(args, mode, logger):
     input_names = predictor.get_input_names()
     for name in input_names:
         input_tensor = predictor.get_input_handle(name)
-    output_names = predictor.get_output_names()
-    output_tensors = []
+    output_tensors = get_output_tensors(args, mode, predictor)
+    #output_names = predictor.get_output_names()
+    #output_tensors = []
+    #print("output names", output_names)
+    #for output_name in output_names:
+    #    output_tensor = predictor.get_output_handle(output_name)
+    #    output_tensors.append(output_tensor)
+    return predictor, input_tensor, output_tensors, config
+
+
+def get_output_tensors(args, mode, predictor):
+    output_names = predictor.get_output_names()
+    output_tensors = []
+    if mode == "rec" and args.rec_algorithm == "CRNN":
+        output_name = 'softmax_0.tmp_0'
+        if output_name in output_names:
+            return [predictor.get_output_handle(output_name)]
+    else:
     for output_name in output_names:
         output_tensor = predictor.get_output_handle(output_name)
         output_tensors.append(output_tensor)
-    return predictor, input_tensor, output_tensors, config
+    return output_tensors
 
 
 def get_infer_gpuid():