# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
    sys.path.append(parent_path)
import paddle
from paddle import fluid

from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.export_utils import save_infer_model, dump_infer_config
from ppdet.utils.check import check_config, check_version, check_py_func
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)


def main():
    """Export a trained detection model as a deployable inference model.

    Loads the config named by ``FLAGS.config`` (with ``FLAGS.opt``
    overrides merged in), builds the architecture's test program on CPU,
    loads the trained parameters from ``cfg.weights``, then writes the
    inference config and the pruned inference program + parameters via
    ``dump_infer_config`` / ``save_infer_model``.

    Relies on the module-level ``FLAGS`` set in the ``__main__`` block.
    """
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)

    check_version()

    main_arch = cfg.architecture

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            # Build plain feed variables (no DataLoader) so the exported
            # program has explicit input placeholders for deployment.
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            # postprocess not need in exclude_nms, exclude NMS in exclude_nms mode
            test_fetches = model.test(feed_vars, exclude_nms=FLAGS.exclude_nms)
    # Clone with for_test=True to prune training-only ops from the program.
    infer_prog = infer_prog.clone(True)
    # py_func ops cannot be serialized into an inference model; fail early.
    check_py_func(infer_prog)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)

    dump_infer_config(FLAGS, cfg)
    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)
W
wangguanzhong 已提交
72 73 74


if __name__ == '__main__':
    # Exporting requires the static-graph mode of Paddle 2.x.
    paddle.enable_static()
    parser = ArgsParser()
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output model files.")
    parser.add_argument(
        "--exclude_nms",
        action='store_true',
        default=False,
        help="Whether prune NMS for benchmark")

    FLAGS = parser.parse_args()
    main()