# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import yaml
import numpy as np
from collections import OrderedDict

import logging
logger = logging.getLogger(__name__)

import paddle.fluid as fluid

__all__ = ['dump_infer_config', 'save_infer_model']

# Global dictionary
# Per-architecture-family value exported as 'min_subgraph_size' in the
# inference config (see dump_infer_config): presumably the minimum number of
# ops a TensorRT subgraph must contain before Paddle Inference fuses it —
# confirm against Paddle Inference TensorRT docs.  Keys are matched by
# substring against config['architecture'].
TRT_MIN_SUBGRAPH = {
    'YOLO': 3,
    'SSD': 3,
    'RCNN': 40,
    'RetinaNet': 40,
    'EfficientDet': 40,
    'Face': 3,
    'TTFNet': 3,
    'FCOS': 3,
    'SOLOv2': 60,
}


def parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
    """Translate the training-time reader config into inference preprocessing.

    Args:
        reader_cfg (dict): TestReader section of the config; must contain
            'with_background' and 'sample_transforms', may contain
            'batch_transforms'.
        dataset_cfg: dataset config object providing ``get_anno()`` and
            ``use_default_label``.
        metric (str): evaluation metric name; only 'COCO' is supported.
        arch (str): architecture name (unused here; kept for interface
            compatibility with callers).
        image_shape (list): [C, H, W]; H/W may be None for variable size.

    Returns:
        tuple: (with_background, preprocess_list, label_list, image_shape),
        where image_shape may have been squared up for keep_ratio resizing.

    Raises:
        ValueError: if ``metric`` is not 'COCO'.
    """
    preprocess_list = []

    anno_file = dataset_cfg.get_anno()
    with_background = reader_cfg['with_background']
    use_default_label = dataset_cfg.use_default_label

    if metric == 'COCO':
        from ppdet.utils.coco_eval import get_category_info
    else:
        raise ValueError("metric only supports COCO, but received {}".format(
            metric))
    # Only the category names are needed for the exported label list.
    _, catid2name = get_category_info(anno_file, with_background,
                                      use_default_label)
    label_list = [str(cat) for cat in catid2name.values()]

    # Skip the first sample transform (the decode op), which is not part of
    # inference-time preprocessing.
    sample_transforms = reader_cfg['sample_transforms']
    for st in sample_transforms[1:]:
        for key, value in st.items():
            p = {'type': key}
            if key == 'ResizeOp':
                # keep_ratio resizing with a fixed input shape: square the
                # shape up to the larger side so any aspect ratio fits.
                if value.get('keep_ratio',
                             False) and image_shape[1] is not None:
                    max_size = max(image_shape[1:])
                    image_shape = [3, max_size, max_size]
                    value['target_size'] = image_shape[1:]
            p.update(value)
            preprocess_list.append(p)

    batch_transforms = reader_cfg.get('batch_transforms', None)
    if batch_transforms:
        for bt in batch_transforms:
            for key, value in bt.items():
                # Training-time PadBatchOp maps to the PadStride op at
                # inference time.
                if key == 'PadBatchOp':
                    preprocess_list.append({
                        'type': 'PadStride',
                        'stride': value['pad_to_stride']
                    })
                    break

    return with_background, preprocess_list, label_list, image_shape


def dump_infer_config(config, path, image_shape, model):
    """Dump the YAML inference config that accompanies an exported model.

    Args:
        config (dict): full training config; uses the 'metric',
            'architecture', 'TestReader' and 'TestDataset' entries.
        path (str): output file path for the YAML config.
        image_shape (list): [C, H, W] input shape; H/W may be None.
        model: exported model object; when it has a ``mask_post_process``
            attribute, its ``mask_resolution`` is exported too.

    Returns:
        list: the (possibly adjusted) image_shape from parse_reader.
    """
    arch_state = False
    from ppdet.core.config.yaml_helpers import setup_orderdict
    setup_orderdict()
    infer_cfg = OrderedDict({
        'mode': 'fluid',
        'draw_threshold': 0.5,
        'metric': config['metric'],
        'image_shape': image_shape
    })
    infer_arch = config['architecture']

    # Map the concrete architecture name onto a known family (substring
    # match) to pick the TensorRT min_subgraph_size.
    for arch, min_subgraph_size in TRT_MIN_SUBGRAPH.items():
        if arch in infer_arch:
            infer_cfg['arch'] = arch
            infer_cfg['min_subgraph_size'] = min_subgraph_size
            arch_state = True
            break
    if not arch_state:
        logger.error(
            'Architecture: {} is not supported for exporting model now'.format(
                infer_arch))
        # NOTE(review): exiting with status 0 on failure looks wrong, but is
        # kept for backward compatibility with existing export scripts.
        os._exit(0)

    # Fix: the original `getattr(model.__dict__, ...)` probed attributes of
    # the dict object itself and therefore always returned None, so
    # mask_resolution was never exported.  Probe the model instead.
    if getattr(model, 'mask_post_process', None):
        infer_cfg['mask_resolution'] = model.mask_post_process.mask_resolution

    infer_cfg['with_background'], infer_cfg['Preprocess'], infer_cfg[
        'label_list'], image_shape = parse_reader(
            config['TestReader'], config['TestDataset'], config['metric'],
            infer_cfg['arch'], image_shape)

    # Use a context manager so the file handle is closed deterministically
    # (the original `yaml.dump(infer_cfg, open(path, 'w'))` leaked it).
    with open(path, 'w') as f:
        yaml.dump(infer_cfg, f)
    # os.path.join(path) was a no-op; the logged message is unchanged.
    logger.info("Export inference config file to {}".format(path))
    return image_shape