# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys

__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.insert(0, os.path.abspath(os.path.join(__dir__, '../..')))

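# 'auto_growth' lets Paddle allocate device memory on demand instead of
# pre-allocating a large fraction of GPU memory up front.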
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'

import cv2
import json
import numpy as np
import time

import tools.infer.utility as utility
from ppocr.data import create_operators, transform
from ppocr.postprocess import build_post_process
from ppocr.utils.logging import get_logger
from ppocr.utils.visual import draw_ser_results
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppstructure.utility import parse_args

from paddleocr import PaddleOCR

logger = get_logger()


class SerPredictor(object):
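    """Semantic entity recognition (SER) predictor.

    Runs PaddleOCR to detect and recognize text, encodes the OCR results for a
    LayoutLM-style token classification model, and decodes the model output
    back into labeled OCR regions.
    """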
    def __init__(self, args):
        self.ocr_engine = PaddleOCR(use_angle_cls=False, show_log=False)

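        # Pre-processing pipeline: run OCR and encode tokens/boxes, pad and
        # chunk the token sequence, resize and normalize the image, then keep
        # only the fields the SER model consumes.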
        pre_process_list = [{
            'VQATokenLabelEncode': {
                'algorithm': args.vqa_algorithm,
                'class_path': args.ser_dict_path,
                'contains_re': False,
                'ocr_engine': self.ocr_engine
            }
        }, {
            'VQATokenPad': {
                'max_seq_len': 512,
                'return_attention_mask': True
            }
        }, {
            'VQASerTokenChunk': {
                'max_seq_len': 512,
                'return_attention_mask': True
            }
        }, {
            'Resize': {
                'size': [224, 224]
            }
        }, {
            'NormalizeImage': {
                'std': [58.395, 57.12, 57.375],
                'mean': [123.675, 116.28, 103.53],
                'scale': '1',
                'order': 'hwc'
            }
        }, {
            'ToCHWImage': None
        }, {
            'KeepKeys': {
                'keep_keys': [
                    'input_ids', 'bbox', 'attention_mask', 'token_type_ids',
                    'image', 'labels', 'segment_offset_id', 'ocr_info',
                    'entities'
                ]
            }
        }]
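        # Post-processing maps per-token predictions back to entity labels,
        # using the same label dictionary (ser_dict_path).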
        postprocess_params = {
            'name': 'VQASerTokenLayoutLMPostProcess',
            "class_path": args.ser_dict_path,
        }

        self.preprocess_op = create_operators(pre_process_list,
                                              {'infer_mode': True})
        self.postprocess_op = build_post_process(postprocess_params)
        self.predictor, self.input_tensor, self.output_tensors, self.config = \
            utility.create_predictor(args, 'ser', logger)

    def __call__(self, img):
        data = {'image': img}
        data = transform(data, self.preprocess_op)
        if data[0] is None:
            return None, 0
        starttime = time.time()

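        # Feed each preprocessed field to the predictor in the same order as
        # 'keep_keys'; every field gets a leading batch dimension.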
        for idx in range(len(self.input_tensor)):
            expand_input = np.expand_dims(data[idx], axis=0)
            self.input_tensor[idx].copy_from_cpu(expand_input)

        self.predictor.run()

        outputs = []
        for output_tensor in self.output_tensors:
            output = output_tensor.copy_to_cpu()
            outputs.append(output)
        preds = outputs[0]

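        # data[6] and data[7] are 'segment_offset_id' and 'ocr_info' from the
        # 'keep_keys' list above; each is wrapped in a list to form a batch of 1.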
        post_result = self.postprocess_op(
            preds, segment_offset_ids=[data[6]], ocr_infos=[data[7]])
        elapse = time.time() - starttime
        return post_result, elapse


def main(args):
    image_file_list = get_image_file_list(args.image_dir)
    ser_predictor = SerPredictor(args)
    count = 0
    total_time = 0

    os.makedirs(args.output, exist_ok=True)
    with open(
            os.path.join(args.output, 'infer.txt'), mode='w',
            encoding='utf-8') as f_w:
        for image_file in image_file_list:
            img, flag = check_and_read_gif(image_file)
            if not flag:
                img = cv2.imread(image_file)
                if img is not None:
                    # cv2.imread returns BGR; the SER pipeline expects RGB.
                    img = img[:, :, ::-1]
            if img is None:
                logger.info("error in loading image:{}".format(image_file))
                continue
            ser_res, elapse = ser_predictor(img)
            if ser_res is None:
                continue
            ser_res = ser_res[0]

            res_str = '{}\t{}\n'.format(
                image_file,
                json.dumps(
                    {
                        "ocr_info": ser_res,
                    }, ensure_ascii=False))
            f_w.write(res_str)

            img_res = draw_ser_results(
                image_file,
                ser_res,
                font_path=args.vis_font_path, )

            img_save_path = os.path.join(args.output,
                                         os.path.basename(image_file))
            cv2.imwrite(img_save_path, img_res)
            logger.info("save vis result to {}".format(img_save_path))
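            # The first image is excluded from total_time so that one-off
            # warm-up cost is not counted.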
            if count > 0:
                total_time += elapse
            count += 1
            logger.info("Predict time of {}: {}".format(image_file, elapse))


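# Example invocation (hypothetical paths; flag names are inferred from the
# args attributes referenced above, see parse_args in ppstructure/utility.py):
#   python3 predict_vqa_token_ser.py --image_dir=path/to/images \
#       --ser_dict_path=path/to/class_list.txt \
#       --vis_font_path=path/to/simfang.ttf --output=./output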
if __name__ == "__main__":
    main(parse_args())