# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import glob

import numpy as np
from PIL import Image


def set_paddle_flags(**kwargs):
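    """
    Set the given Paddle flags via environment variables, without
    overriding values that are already set in the environment.
    """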
    for key, value in kwargs.items():
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)


# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
)

from paddle import fluid

from tools.configure import print_total_cfg
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.modeling.model_input import create_feed
from ppdet.data.data_feed import create_reader

from ppdet.utils.eval_utils import parse_fetches
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu
from ppdet.utils.visualizer import visualize_results
import ppdet.utils.checkpoint as checkpoint

import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)


def get_save_image_name(output_dir, image_path):
    """
    Get save image name from source image path.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    image_name = os.path.split(image_path)[-1]
    name, ext = os.path.splitext(image_name)
    return os.path.join(output_dir, name + ext)


def get_test_images(infer_dir, infer_img):
    """
    Get image path list in TEST mode
    """
    assert infer_img is not None or infer_dir is not None, \
        "--infer_img or --infer_dir should be set"
    assert infer_img is None or os.path.isfile(infer_img), \
            "{} is not a file".format(infer_img)
    assert infer_dir is None or os.path.isdir(infer_dir), \
            "{} is not a directory".format(infer_dir)
    images = []

    # infer_img has a higher priority
    if infer_img and os.path.isfile(infer_img):
        images.append(infer_img)
        return images

    infer_dir = os.path.abspath(infer_dir)
    assert os.path.isdir(infer_dir), \
        "infer_dir {} is not a directory".format(infer_dir)
    exts = ['jpg', 'jpeg', 'png', 'bmp']
    exts += [ext.upper() for ext in exts]
    for ext in exts:
        images.extend(glob.glob('{}/*.{}'.format(infer_dir, ext)))

    assert len(images) > 0, "no image found in {}".format(infer_dir)
    logger.info("Found {} inference images in total.".format(len(images)))

    return images


def prune_feed_vars(feeded_var_names, target_vars, prog):
    """
    Filter out feed variables that are not present in the program.
    The pruned feed variables are only used for post processing of the
    model output (e.g. im_id to identify image order, im_shape to clip
    bboxes) and are not used inside the program itself.
    """
    exist_var_names = []
    prog = prog.clone()
    prog = prog._prune(feeded_var_names, targets=target_vars)
    global_block = prog.global_block()
    for name in feeded_var_names:
        try:
            v = global_block.var(name)
            exist_var_names.append(v.name.encode('utf-8'))
        except Exception:
            logger.info('save_inference_model pruned unused feed '
                        'variable {}'.format(name))
    return exist_var_names


def save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog):
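    """
    Prune feed variables that are unused in the program and save the
    inference model under FLAGS.output_dir.
    """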
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)
    feeded_var_names = [var.name for var in feed_vars.values()]
    target_vars = list(test_fetches.values())
    feeded_var_names = prune_feed_vars(feeded_var_names, target_vars,
                                       infer_prog)
    logger.info("Save inference model to {}, input: {}, output: "
                "{}...".format(save_dir, feeded_var_names, [
                    var.name.encode('utf-8') for var in target_vars
                ]))
    fluid.io.save_inference_model(
        save_dir,
        feeded_var_names=feeded_var_names,
        target_vars=target_vars,
        executor=exe,
        main_program=infer_prog,
        params_filename="__params__")


def main():
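    """
    Run inference on the images given by --infer_img/--infer_dir,
    visualize the detections, and optionally export an inference model.
    """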
    cfg = load_config(FLAGS.config)

    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # check that use_gpu=True is not set with a CPU-only build of PaddlePaddle
    check_gpu(cfg.use_gpu)
    print_total_cfg(cfg)

    if 'test_feed' not in cfg:
        test_feed = create(main_arch + 'TestFeed')
    else:
        test_feed = create(cfg.test_feed)

    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    test_feed.dataset.add_images(test_images)

    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    model = create(main_arch)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
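    # define feed variables and the model's test fetches inside
    # program/unique_name guards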
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            _, feed_vars = create_feed(test_feed, use_pyreader=False)
            test_fetches = model.test(feed_vars)
    infer_prog = infer_prog.clone(True)

    reader = create_reader(test_feed)
    feeder = fluid.DataFeeder(place=place, feed_list=feed_vars.values())

    exe.run(startup_prog)
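    # load the weights specified in the config, if any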
    if cfg.weights:
        checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)

    if FLAGS.save_inference_model:
        save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)

    # parse infer fetches
    assert cfg.metric in ['COCO', 'VOC'], \
            "unknown metric type {}".format(cfg.metric)
    extra_keys = []
    if cfg['metric'] == 'COCO':
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg['metric'] == 'VOC':
        extra_keys = ['im_id', 'im_shape']
    keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)

    # parse dataset category
    if cfg.metric == 'COCO':
        from ppdet.utils.coco_eval import bbox2out, mask2out, get_category_info
    if cfg.metric == "VOC":
        from ppdet.utils.voc_eval import bbox2out, get_category_info

    anno_file = getattr(test_feed.dataset, 'annotation', None)
    with_background = getattr(test_feed, 'with_background', True)
    use_default_label = getattr(test_feed, 'use_default_label', False)
    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    # use tb-paddle to log image
    if FLAGS.use_tb:
        from tb_paddle import SummaryWriter
        tb_writer = SummaryWriter(FLAGS.tb_log_dir)
        tb_image_step = 0
        tb_image_frame = 0  # each frame can display at most ten images

    imid2path = reader.imid2path
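    # run inference batch by batch; fetched LoDTensors are converted to
    # (ndarray, lod) pairs keyed by fetch name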
    for iter_id, data in enumerate(reader()):
        outs = exe.run(infer_prog,
                       feed=feeder.feed(data),
                       fetch_list=values,
                       return_numpy=False)
        res = {
            k: (np.array(v), v.recursive_sequence_lengths())
            for k, v in zip(keys, outs)
        }
        logger.info('Infer iter {}'.format(iter_id))

        bbox_results = None
        mask_results = None
        if 'bbox' in res:
            bbox_results = bbox2out([res], clsid2catid, is_bbox_normalized)
        if 'mask' in res:
            mask_results = mask2out([res], clsid2catid,
                                    model.mask_head.resolution)

        # visualize result
        im_ids = res['im_id'][0]
        for im_id in im_ids:
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')

            # use tb-paddle to log the original image
            if FLAGS.use_tb:
                original_image_np = np.array(image)
                tb_writer.add_image(
                    "original/frame_{}".format(tb_image_frame),
                    original_image_np,
                    tb_image_step,
                    dataformats='HWC')

            image = visualize_results(image,
                                      int(im_id), catid2name,
                                      FLAGS.draw_threshold, bbox_results,
                                      mask_results)

            # use tb-paddle to log image with bbox
            if FLAGS.use_tb:
                infer_image_np = np.array(image)
                tb_writer.add_image(
                    "bbox/frame_{}".format(tb_image_frame),
                    infer_image_np,
                    tb_image_step,
                    dataformats='HWC')
                tb_image_step += 1
                if tb_image_step % 10 == 0:
                    tb_image_step = 0
                    tb_image_frame += 1

            save_name = get_save_image_name(FLAGS.output_dir, image_path)
            logger.info("Detection bbox results saved in {}".format(save_name))
            image.save(save_name, quality=95)


if __name__ == '__main__':
    parser = ArgsParser()
    parser.add_argument(
        "--infer_dir",
        type=str,
        default=None,
        help="Directory for images to perform inference on.")
    parser.add_argument(
        "--infer_img",
        type=str,
        default=None,
        help="Image path; it takes priority over --infer_dir.")
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="Directory for storing the output visualization files.")
    parser.add_argument(
        "--draw_threshold",
        type=float,
        default=0.5,
        help="Confidence threshold for drawing detection results in visualization.")
    parser.add_argument(
        "--save_inference_model",
        action='store_true',
        default=False,
        help="Save inference model in output_dir if True.")
    parser.add_argument(
        "--use_tb",
        action='store_true',
        default=False,
        help="Whether to log data to Tensorboard.")
    parser.add_argument(
        '--tb_log_dir',
        type=str,
        default="tb_log_dir/image",
        help='Tensorboard logging directory for image.')
    FLAGS = parser.parse_args()
    main()