diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py
index 0cb86108d2275dc6ee1a74e118c27b94131975d3..79d95d04ebf04c927d15f64a0a934082d298460f 100755
--- a/deploy/slim/quantization/export_model.py
+++ b/deploy/slim/quantization/export_model.py
@@ -127,6 +127,7 @@ def main():
     arch_config = config["Architecture"]
     if arch_config["algorithm"] in ["Distillation", ]:  # distillation model
         for idx, name in enumerate(model.model_name_list):
+            model.model_list[idx].eval()
             sub_model_save_path = os.path.join(save_path, name, "inference")
             export_single_model(quanter, model.model_list[idx], infer_shape,
                                 sub_model_save_path, logger)
diff --git a/ppocr/postprocess/rec_postprocess.py b/ppocr/postprocess/rec_postprocess.py
index caaa2948522cb6ea7ed74b8ab79a3d0b465059a3..cddc263a659f30925e257dc1f1a28256f3b31eee 100644
--- a/ppocr/postprocess/rec_postprocess.py
+++ b/ppocr/postprocess/rec_postprocess.py
@@ -87,7 +87,7 @@ class CTCLabelDecode(BaseRecLabelDecode):
                                              use_space_char)
 
     def __call__(self, preds, label=None, *args, **kwargs):
-        if isinstance(preds, tuple):
+        if isinstance(preds, (tuple, list)):
             preds = preds[-1]
         if isinstance(preds, paddle.Tensor):
             preds = preds.numpy()
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index 7b7b81e3cd22c12561b30e6705eded1c92ec7761..37122f0aa0858281676b43d8c525a113677b2375 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -305,12 +305,22 @@ def create_predictor(args, mode, logger):
     input_names = predictor.get_input_names()
     for name in input_names:
         input_tensor = predictor.get_input_handle(name)
-    output_names = predictor.get_output_names()
-    output_tensors = []
-    for output_name in output_names:
-        output_tensor = predictor.get_output_handle(output_name)
-        output_tensors.append(output_tensor)
-    return predictor, input_tensor, output_tensors, config
+    output_tensors = get_output_tensors(args, mode, predictor)
+    return predictor, input_tensor, output_tensors, config
+
+
+def get_output_tensors(args, mode, predictor):
+    output_names = predictor.get_output_names()
+    output_tensors = []
+    if mode == "rec" and args.rec_algorithm == "CRNN":
+        output_name = 'softmax_0.tmp_0'
+        if output_name in output_names:
+            return [predictor.get_output_handle(output_name)]
+    else:
+        for output_name in output_names:
+            output_tensor = predictor.get_output_handle(output_name)
+            output_tensors.append(output_tensor)
+    return output_tensors
 
 
 def get_infer_gpuid():