# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
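
"""Export a trained PaddleDetection model as a Paddle inference model
(a serialized program plus a combined __params__ file), together with
an infer_cfg.yml that records the preprocessing pipeline and label
list for deployment.

Example usage (config and weight paths are illustrative):

    python tools/export_model.py \
        -c configs/yolov3_darknet.yml \
        -o weights=output/yolov3_darknet/model_final \
        --output_dir=inference_model
"""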

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
if parent_path not in sys.path:
    sys.path.append(parent_path)

from paddle import fluid

from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.check import check_config, check_version, check_py_func
import yaml
import logging
from collections import OrderedDict
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)

# minimum TensorRT subgraph size required for each supported architecture
TRT_MIN_SUBGRAPH = {
    'YOLO': 3,
    'SSD': 3,
    'RCNN': 40,
    'RetinaNet': 40,
    'EfficientDet': 40,
    'Face': 3,
    'TTFNet': 3,
    'FCOS': 3,
    'SOLOv2': 3,
}
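# architectures whose Resize step keeps the aspect ratio: the short side is
# scaled to target_size and the long side is capped at max_size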
RESIZE_SCALE_SET = {
    'RCNN',
    'RetinaNet',
    'FCOS',
    'SOLOv2',
}


def parse_reader(reader_cfg, metric, arch):
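    """Translate the TestReader config section into the pieces recorded
    in infer_cfg.yml: the with_background flag, the preprocessing op
    list, and the label list."""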
    preprocess_list = []

    image_shape = reader_cfg['inputs_def'].get('image_shape', [3, None, None])
    has_shape_def = None not in image_shape

    dataset = reader_cfg['dataset']
    anno_file = dataset.get_anno()
    with_background = dataset.with_background
    use_default_label = dataset.use_default_label

    if metric == 'COCO':
        from ppdet.utils.coco_eval import get_category_info
    elif metric == "VOC":
        from ppdet.utils.voc_eval import get_category_info
    elif metric == "WIDERFACE":
        from ppdet.utils.widerface_eval_utils import get_category_info
    else:
        raise ValueError(
            "metric only supports COCO, VOC, WIDERFACE, but received {}".format(
                metric))
    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    label_list = [str(cat) for cat in catid2name.values()]

    sample_transforms = reader_cfg['sample_transforms']
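    # skip the first sample transform (typically image decoding), which
    # is handled by the deployment code itself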
    for st in sample_transforms[1:]:
        method = st.__class__.__name__
        p = {'type': method.replace('Image', '')}
        params = st.__dict__
        params.pop('_id')
        if p['type'] == 'Resize' and has_shape_def:
            if arch in RESIZE_SCALE_SET:
                params['target_size'] = min(image_shape[1:])
                params['max_size'] = max(image_shape[1:])
            else:
                params['target_size'] = image_shape[1]
                params['max_size'] = 0
            params['image_shape'] = image_shape[1:]
            if 'target_dim' in params:
                params.pop('target_dim')
        if p['type'] == 'ResizeAndPad':
            assert has_shape_def, \
                "ResizeAndPad requires a fully specified image_shape"
            p['type'] = 'Resize'
            p['target_size'] = params['target_dim']
            p['max_size'] = params['target_dim']
            p['interp'] = params['interp']
            p['image_shape'] = image_shape[1:]
            preprocess_list.append(p)
            continue
        p.update(params)
        preprocess_list.append(p)
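    # a PadBatch batch transform maps to the deployment-side PadStride op,
    # padding each spatial side to a multiple of pad_to_stride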
    batch_transforms = reader_cfg.get('batch_transforms', None)
    if batch_transforms:
        methods = [bt.__class__.__name__ for bt in batch_transforms]
        for bt in batch_transforms:
            method = bt.__class__.__name__
            if method == 'PadBatch':
                preprocess_list.append({'type': 'PadStride'})
                params = bt.__dict__
                preprocess_list[-1].update({'stride': params['pad_to_stride']})
                break

    return with_background, preprocess_list, label_list


def dump_infer_config(FLAGS, config):
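    """Write the deployment config infer_cfg.yml next to the exported
    model."""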
    arch_state = False
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    from ppdet.core.config.yaml_helpers import setup_orderdict
    setup_orderdict()
    infer_cfg = OrderedDict({
        'use_python_inference': False,
        'mode': 'fluid',
        'draw_threshold': 0.5,
        'metric': config['metric']
    })
    infer_arch = config['architecture']

    for arch, min_subgraph_size in TRT_MIN_SUBGRAPH.items():
        if arch in infer_arch:
            infer_cfg['arch'] = arch
            infer_cfg['min_subgraph_size'] = min_subgraph_size
            arch_state = True
            break
    if not arch_state:
        logger.error('Architecture {} is not yet supported for '
                     'export'.format(infer_arch))
        sys.exit(1)

    if 'Mask' in config['architecture']:
        infer_cfg['mask_resolution'] = config['MaskHead']['resolution']
    infer_cfg['with_background'], infer_cfg['Preprocess'], infer_cfg[
        'label_list'] = parse_reader(config['TestReader'], config['metric'],
                                     infer_cfg['arch'])

    infer_cfg_file = os.path.join(save_dir, 'infer_cfg.yml')
    with open(infer_cfg_file, 'w') as f:
        yaml.dump(infer_cfg, f)
    logger.info("Export inference config file to {}".format(infer_cfg_file))


def prune_feed_vars(feeded_var_names, target_vars, prog):
    """
    Filter out feed variables which are not in program,
    pruned feed variables are only used in post processing
    on model output, which are not used in program, such
    as im_id to identify image order, im_shape to clip bbox
    in image.
    """
    exist_var_names = []
    prog = prog.clone()
    prog = prog._prune(targets=target_vars)
    global_block = prog.global_block()
    for name in feeded_var_names:
        try:
            v = global_block.var(name)
            exist_var_names.append(str(v.name))
        except Exception:
            logger.info('save_inference_model pruned unused feed '
                        'variable {}'.format(name))
    return exist_var_names


def save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog):
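    """Prune unused feed variables and save the inference program with a
    single combined parameter file (__params__)."""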
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)
    feed_var_names = [var.name for var in feed_vars.values()]
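    # sort fetches by name so the exported model's output order is
    # deterministic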
    fetch_list = sorted(test_fetches.items(), key=lambda i: i[0])
    target_vars = [var[1] for var in fetch_list]
    feed_var_names = prune_feed_vars(feed_var_names, target_vars, infer_prog)
    logger.info("Export inference model to {}, input: {}, output: "
                "{}...".format(save_dir, feed_var_names,
                               [str(var.name) for var in target_vars]))
    fluid.io.save_inference_model(
        save_dir,
        feeded_var_names=feed_var_names,
        target_vars=target_vars,
        executor=exe,
        main_program=infer_prog,
        params_filename="__params__")


def main():
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)

    check_version()

    main_arch = cfg.architecture

    # Use CPU for exporting inference model instead of GPU
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['TestReader']['inputs_def']
            inputs_def['use_dataloader'] = False
            feed_vars, _ = model.build_inputs(**inputs_def)
            # with --exclude_nms, NMS and its post-processing are pruned
            # from the exported model (used for benchmarking)
            test_fetches = model.test(feed_vars, exclude_nms=FLAGS.exclude_nms)
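    # clone with for_test=True so that training-only ops are removed
    # from the exported graph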
    infer_prog = infer_prog.clone(True)
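    # programs containing py_func ops cannot be exported for inference;
    # check and fail early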
    check_py_func(infer_prog)

    exe.run(startup_prog)
    checkpoint.load_params(exe, infer_prog, cfg.weights)

    dump_infer_config(FLAGS, cfg)
    save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)


if __name__ == '__main__':
    parser = ArgsParser()
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output model files.")
    parser.add_argument(
        "--exclude_nms",
        action='store_true',
        default=False,
        help="Whether prune NMS for benchmark")

    FLAGS = parser.parse_args()
    main()