# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

# make ppocr and tools importable regardless of the working directory
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, "..")))

import paddle
from paddle.jit import to_static

from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.utils.save_load import init_model
from ppocr.utils.logging import get_logger
from tools.program import load_config, merge_config, ArgsParser


def export_single_model(model, arch_config, save_path, logger):
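    """Convert a trained dynamic-graph model to a static graph and save it
    as an inference model under ``save_path``.

    SRN consumes several auxiliary inputs besides the image, so it gets an
    explicit multi-input spec; every other architecture is exported with a
    single image input whose -1 dimensions stay variable at inference time.
    """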
    if arch_config["algorithm"] == "SRN":
        max_text_length = arch_config["Head"]["max_text_length"]
T
tink2123 已提交
37
        other_shape = [
T
tink2123 已提交
38
            paddle.static.InputSpec(
39
                shape=[None, 1, 64, 256], dtype="float32"), [
T
tink2123 已提交
40 41 42
                    paddle.static.InputSpec(
                        shape=[None, 256, 1],
                        dtype="int64"), paddle.static.InputSpec(
W
WenmuZhou 已提交
43
                            shape=[None, max_text_length, 1], dtype="int64"),
T
tink2123 已提交
44
                    paddle.static.InputSpec(
W
WenmuZhou 已提交
45 46 47 48
                        shape=[None, 8, max_text_length, max_text_length],
                        dtype="int64"), paddle.static.InputSpec(
                            shape=[None, 8, max_text_length, max_text_length],
                            dtype="int64")
T
tink2123 已提交
49 50 51 52
                ]
        ]
        model = to_static(model, input_spec=other_shape)
    else:
        infer_shape = [3, -1, -1]  # [C, H, W]; -1 marks a variable dimension
        if arch_config["model_type"] == "rec":
            infer_shape = [3, 32, -1]  # for rec models, H must be 32
            if arch_config.get("Transform") is not None and \
                    arch_config["Transform"]["name"] == "TPS":
                logger.info(
                    "When TPS is present in the network, variable-length "
                    "input is not supported; the input size must match the "
                    "one used during training.")
                infer_shape[-1] = 100
            if arch_config["algorithm"] == "NRTR":
                infer_shape = [1, 32, 100]
        elif arch_config["model_type"] == "table":
            infer_shape = [3, 488, 488]
        model = to_static(
            model,
            input_spec=[
                paddle.static.InputSpec(
                    shape=[None] + infer_shape, dtype="float32")
            ])

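    # paddle.jit.save writes <save_path>.pdmodel and <save_path>.pdiparams,
    # which the PaddleOCR inference predictors load directly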
    paddle.jit.save(model, save_path)
    logger.info("inference model is saved to {}".format(save_path))


def main():
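    """Build the model from a YAML config, load weights, and export it.

    Typical usage (assuming the config points at trained weights via
    Global.checkpoints or Global.pretrained_model):

        python3 tools/export_model.py \
            -c configs/rec/rec_mv3_none_bilstm_ctc.yml \
            -o Global.save_inference_dir=./inference/rec_crnn/
    """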
    FLAGS = ArgsParser().parse_args()
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    logger = get_logger()
    # build post-process class (it also determines the character set for rec)
    post_process_class = build_post_process(config["PostProcess"],
                                            config["Global"])

    # build model
    # for rec algorithms, size the head by the character dict
    if hasattr(post_process_class, "character"):
        char_num = len(getattr(post_process_class, "character"))
        if config["Architecture"]["algorithm"] in ["Distillation",
                                                   ]:  # distillation model
            for key in config["Architecture"]["Models"]:
                config["Architecture"]["Models"][key]["Head"][
                    "out_channels"] = char_num
98 99 100
                # just one final tensor needs to to exported for inference
                config["Architecture"]["Models"][key][
                    "return_all_feats"] = False
101 102 103
        else:  # base rec model
            config["Architecture"]["Head"]["out_channels"] = char_num
    model = build_model(config["Architecture"])
    init_model(config, model)
    model.eval()

    save_path = config["Global"]["save_inference_dir"]

    arch_config = config["Architecture"]

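    # a distillation model bundles several sub-models; export each one into
    # its own sub-directory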
    if arch_config["algorithm"] in ["Distillation", ]:  # distillation model
        archs = list(arch_config["Models"].values())
        for idx, name in enumerate(model.model_name_list):
            sub_model_save_path = os.path.join(save_path, name, "inference")
            export_single_model(model.model_list[idx], archs[idx],
                                sub_model_save_path, logger)
    else:
        save_path = os.path.join(save_path, "inference")
        export_single_model(model, arch_config, save_path, logger)


if __name__ == "__main__":
    main()