#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import Variable
import tensor
import ops
import nn
import math

__all__ = [
    'detection_output',
    'prior_box',
    'multi_box_head',
]


def detection_output(scores,
                     loc,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0):
    """
    **Detection Output Layer**

    This layer applies NMS (non-maximum suppression) to the output of the
    network and computes the predicted bounding box locations. The number
    of output boxes can be zero if there is no valid bounding box.

    Args:
        scores(Variable): A 3-D Tensor with shape [N, C, M] represents the
            predicted confidence predictions. N is the batch size, C is the
            class number, M is number of bounding boxes. For each category
            there are M scores corresponding to the M bounding boxes.
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of the M bounding boxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
            of variance.
        background_label(int): The index of the background label,
            the background label will be ignored. If set to -1, then all
            categories will be considered.
        nms_threshold(float): The threshold to be used in NMS.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
        nms_eta(float): The parameter for adaptive NMS.

    Returns:
        The detected bounding boxes, which are returned as a Variable.

    Examples:
        .. code-block:: python

            pb = layers.data(name='prior_box', shape=[10, 4],
                             append_batch_size=False, dtype='float32')
            pbv = layers.data(name='prior_box_var', shape=[10, 4],
                              append_batch_size=False, dtype='float32')
            loc = layers.data(name='target_box', shape=[21, 4],
                              append_batch_size=False, dtype='float32')
            scores = layers.data(name='scores', shape=[2, 21, 10],
                                 append_batch_size=False, dtype='float32')
            nmsed_outs = fluid.layers.detection_output(scores=scores,
                                                       loc=loc,
                                                       prior_box=pb,
                                                       prior_box_var=pbv)
    """

    helper = LayerHelper("detection_output", **locals())
    decoded_box = helper.create_tmp_variable(dtype=loc.dtype)
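    # Decode the predicted location offsets against the prior boxes
    # (center-size decoding) to obtain [xmin, ymin, xmax, ymax] boxes.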
    helper.append_op(
        type="box_coder",
        inputs={
            'PriorBox': prior_box,
            'PriorBoxVar': prior_box_var,
            'TargetBox': loc
        },
        outputs={'OutputBox': decoded_box},
        attrs={'code_type': 'decode_center_size'})
    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)

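    # Run multi-class NMS on the decoded boxes: boxes scoring below
    # score_threshold are dropped, at most nms_top_k candidates per class
    # enter NMS, and at most keep_top_k boxes per image are kept afterwards.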
    helper.append_op(
        type="multiclass_nms",
        inputs={'Scores': scores,
                'BBoxes': decoded_box},
        outputs={'Out': nmsed_outs},
        attrs={
            'background_label': background_label,
            'nms_threshold': nms_threshold,
            'nms_top_k': nms_top_k,
            'keep_top_k': keep_top_k,
            'score_threshold': score_threshold,
            'nms_eta': nms_eta
        })
    return nmsed_outs


def prior_box(inputs,
              image,
              min_ratio,
              max_ratio,
              aspect_ratios,
              base_size,
              steps=None,
              step_w=None,
              step_h=None,
              offset=0.5,
              variance=[0.1, 0.1, 0.1, 0.1],
              flip=False,
              clip=False,
              min_sizes=None,
              max_sizes=None,
              name=None):
    """
    **Prior_boxes**

    Generate prior boxes for the SSD (Single Shot MultiBox Detector)
    algorithm. For details of this algorithm, please refer to section 2.2
    of the SSD paper `SSD: Single Shot MultiBox Detector
    <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs(list|tuple): The list of input Variables, the format
            of all Variables is NCHW.
       image(Variable): The input image data of PriorBoxOp,
            the layout is NCHW.
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       aspect_ratios(list|tuple): the aspect ratios of generated prior
            boxes. The length of inputs and aspect_ratios must be equal.
       base_size(int): the base_size is used to get min_size
            and max_size according to min_ratio and max_ratio.
       steps(list|tuple|None): Prior boxes step across width and height.
            If steps is set, it is used as both step_w and step_h.
       step_w(list|tuple|None): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically calculated.
       step_h(list|tuple|None): Prior boxes step
            across height. If step_h[i] == 0.0, the prior boxes
            step across height of the inputs[i] will be automatically calculated.
       offset(float, optional, default=0.5): Prior boxes center offset.
       variance(list|tuple|[0.1, 0.1, 0.1, 0.1]): the variances
            to be encoded in prior boxes.
       flip(bool|False): Whether to flip
            aspect ratios.
       clip(bool, optional, default=False): Whether to clip
            out-of-boundary boxes.
       min_sizes(list|tuple|None): If `len(inputs) <= 2`,
            min_sizes must be set up, and the length of min_sizes
            should be equal to the length of inputs.
       max_sizes(list|tuple|None): If `len(inputs) <= 2`,
            max_sizes must be set up, and the length of max_sizes
            should be equal to the length of inputs.
       name(str|None): Name of the prior box layer.

    Returns:
        boxes(Variable): the output prior boxes of PriorBox.
             The layout is [num_priors, 4]. num_priors is the total
             box count of each position of inputs.
        Variances(Variable): the expanded variances of PriorBox.
             The layout is [num_priors, 4]. num_priors is the total
             box count of each position of inputs.

    Examples:
        .. code-block:: python

          prior_box(
             inputs = [conv1, conv2, conv3, conv4, conv5, conv6],
             image = data,
             min_ratio = 20, # 0.20
             max_ratio = 90, # 0.90
             offset = 0.5,
             base_size = 300,
             variance = [0.1,0.1,0.1,0.1],
             aspect_ratios = [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
             flip=True,
             clip=True)
    """

    def _prior_box_(input,
                    image,
                    min_sizes,
                    max_sizes,
                    aspect_ratios,
                    variance,
                    flip=False,
                    clip=False,
                    step_w=0.0,
                    step_h=0.0,
                    offset=0.5,
                    name=None):
        helper = LayerHelper("prior_box", **locals())
        dtype = helper.input_dtype()

        box = helper.create_tmp_variable(dtype)
        var = helper.create_tmp_variable(dtype)
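        # The prior_box op emits Boxes and Variances for this feature map;
        # their layout is assumed to be [H, W, num_priors, 4], which is why
        # they are flattened with _reshape_with_axis_(..., axis=3) below.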
        helper.append_op(
            type="prior_box",
            inputs={"Input": input,
                    "Image": image},
            outputs={"Boxes": box,
                     "Variances": var},
            attrs={
                'min_sizes': min_sizes,
                'max_sizes': max_sizes,
                'aspect_ratios': aspect_ratios,
                'variances': variance,
                'flip': flip,
                'clip': clip,
                'step_w': step_w,
                'step_h': step_h,
                'offset': offset
            })
        return box, var

    def _reshape_with_axis_(input, axis=1):
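        # Reshape `input` to [-1, prod(input.shape[axis:])]; e.g. a
        # [H, W, num_priors, 4] tensor reshaped with axis=3 becomes [-1, 4].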
        if not (axis > 0 and axis < len(input.shape)):
            raise ValueError("The axis should be smaller than "
                             "the arity of input and bigger than 0.")
        new_shape = [
            -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)])
        ]
        out = ops.reshape(x=input, shape=new_shape)
        return out

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    else:
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in xrange(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes
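        # Worked example (matching the docstring): base_size=300,
        # min_ratio=20, max_ratio=90 and six inputs give step=17, so
        # min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and
        # max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].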

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios, num_layer,
            'aspect_ratios should be list and the length of inputs '
            'and aspect_ratios should be the same.')
    if step_h:
        _is_list_or_tuple_and_equal(
            step_h, num_layer,
            'step_h should be list and the length of inputs and '
            'step_h should be the same.')
    if step_w:
        _is_list_or_tuple_and_equal(
            step_w, num_layer,
            'step_w should be list and the length of inputs and '
            'step_w should be the same.')
    if steps:
        _is_list_or_tuple_and_equal(
            steps, num_layer,
            'steps should be list and the length of inputs and '
            'steps should be the same.')
        step_w = steps
        step_h = steps

    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]
        aspect_ratio = []
        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]
        if aspect_ratios:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]

        box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio,
                               variance, flip, clip, step_w[i]
                               if step_w else 0.0, step_h[i]
                               if step_h else 0.0, offset)

        box_results.append(box)
        var_results.append(var)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)

    return box, var


def multi_box_head(inputs,
                   num_classes,
                   min_sizes=None,
                   max_sizes=None,
                   min_ratio=None,
                   max_ratio=None,
                   aspect_ratios=None,
                   flip=False,
                   share_location=True,
                   kernel_size=1,
                   pad=1,
                   stride=1,
                   use_batchnorm=False,
                   base_size=None):
    """
    **Multi Box Head**

    Generate prior boxes' location and confidence for the SSD (Single
    Shot MultiBox Detector) algorithm. For details of this algorithm,
    please refer to section 2.1 of the SSD paper `SSD: Single Shot
    MultiBox Detector <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs(list|tuple): The list of input Variables, the format
            of all Variables is NCHW.
       num_classes(int): The number of classes.
       min_sizes(list|tuple|None): The number of
            min_sizes is used to compute the number of predicted boxes.
            If min_sizes is None, it will be computed according
            to min_ratio and max_ratio.
       max_sizes(list|tuple|None): The number of max_sizes
            is used to compute the number of predicted boxes.
       min_ratio(int|None): If min_sizes is None, min_ratio and max_ratio
            will be used to compute the min_sizes and max_sizes.
       max_ratio(int|None): If min_sizes is None, max_ratio and min_ratio
            will be used to compute the min_sizes and max_sizes.
       aspect_ratios(list|tuple): The number of the aspect ratios is used to
            compute the number of prior boxes.
       base_size(int): the base_size is used to get min_size
            and max_size according to min_ratio and max_ratio.
       flip(bool|False): Whether to flip
            aspect ratios.
       share_location(bool): Whether the box regression is shared among
            all classes. Default True.
       kernel_size(int): The kernel size of the convolution applied to
            each input. Default 1.
       pad(int): The padding of the convolution. Default 1.
       stride(int): The stride of the convolution. Default 1.

    Returns:

        mbox_loc(list): The predicted boxes' location of the inputs.
             The layout of each element is [N, H, W, Priors]. Priors
             is the number of predicted boxes of each position of each input.
        mbox_conf(list): The predicted boxes' confidence of the inputs.
             The layout of each element is [N, H, W, Priors]. Priors
             is the number of predicted boxes of each position of each input.

    Examples:
        .. code-block:: python

            mbox_locs, mbox_confs = detection.multi_box_head(
                inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
                num_classes=21,
                min_ratio=20,
                max_ratio=90,
                aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
                base_size=300,
                flip=True)
    """

    def _is_equal_(len1, len2, err_info):
        if not (len1 == len2):
            raise ValueError(err_info)

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    if min_sizes is not None:
        _is_equal_(
            len(inputs),
            len(min_sizes), 'the length of min_sizes '
            'and inputs should be equal.')

    if max_sizes is not None:
        _is_equal_(
            len(inputs),
            len(max_sizes), 'the length of max_sizes '
            'and inputs should be equal.')

    if aspect_ratios is not None:
        _is_equal_(
            len(inputs),
            len(aspect_ratios), 'the length of aspect_ratios '
            'and inputs should be equal.')

    if min_sizes is None:
        # If min_sizes is None, min_sizes and max_sizes
        #  will be set according to max_ratio and min_ratio.
        num_layer = len(inputs)
        assert max_ratio is not None and min_ratio is not None,\
            'max_ratio and min_ratio must be not None.'
        assert num_layer >= 3, 'The length of inputs must be at least three.'
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in xrange(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes
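        # Same ratio-to-size derivation as in prior_box above; the docstring's
        # example settings yield the standard SSD300 box sizes.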

    mbox_locs = []
    mbox_confs = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]

        max_size = []
        if max_sizes is not None:
            max_size = max_sizes[i]
            if not _is_list_or_tuple_(max_size):
                max_size = [max_size]
            _is_equal_(
                len(max_size),
                len(min_size),
                'the length of max_size and min_size should be equal.')

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]

        # get the number of prior boxes for each location
        num_priors_per_location = 0
        if max_sizes is not None:
            num_priors_per_location = len(min_size) + \
                                      len(aspect_ratio) * len(min_size) +\
                                      len(max_size)
        else:
            num_priors_per_location = len(min_size) +\
                                      len(aspect_ratio) * len(min_size)
        if flip:
            num_priors_per_location += len(aspect_ratio) * len(min_size)
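        # Example: one min_size, one max_size and aspect_ratios [2., 3.] with
        # flip=True give 1 + 2*1 + 1 + 2*1 = 6 prior boxes per location.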

        # get mbox_loc
        num_loc_output = num_priors_per_location * 4
        # Each class needs its own box regression only when location
        # prediction is not shared across classes.
        if not share_location:
            num_loc_output *= num_classes

        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)

        mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
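        # mbox_loc is now in NHWC layout: [N, H, W, num_loc_output].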
        mbox_locs.append(mbox_loc)

        # get conf_loc
        num_conf_output = num_priors_per_location * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)
        conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
        mbox_confs.append(conf_loc)

    return mbox_locs, mbox_confs