#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

from __future__ import print_function

from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable, in_dygraph_mode
from .. import core
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

__all__ = [
    'prior_box',
    'density_prior_box',
    'multi_box_head',
    'bipartite_match',
    'target_assign',
    'detection_output',
    'ssd_loss',
    'rpn_target_assign',
    'retinanet_target_assign',
    'sigmoid_focal_loss',
    'anchor_generator',
    'roi_perspective_transform',
    'generate_proposal_labels',
    'generate_proposals',
    'generate_mask_labels',
    'iou_similarity',
    'box_coder',
    'polygon_box_transform',
    'yolov3_loss',
    'yolo_box',
    'box_clip',
    'multiclass_nms',
    'locality_aware_nms',
    'matrix_nms',
    'retinanet_detection_output',
    'distribute_fpn_proposals',
    'box_decoder_and_assign',
    'collect_fpn_proposals',
]


def retinanet_target_assign(bbox_pred,
                            cls_logits,
                            anchor_box,
                            anchor_var,
                            gt_boxes,
                            gt_labels,
                            is_crowd,
                            im_info,
                            num_classes=1,
                            positive_overlap=0.5,
                            negative_overlap=0.4):
    """
    **Target Assign Layer for the detector RetinaNet.**

    This OP finds positive and negative samples among all anchors
    for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
    assigns each sample a target label for classification and a target location for
    regression, and then takes out the parts belonging to the positive and
    negative samples from the category prediction ( :attr:`cls_logits` ) and location
    prediction ( :attr:`bbox_pred` ) over all anchors.

    The principles for searching positive and negative samples are as follows
    (a NumPy sketch of these rules is given after the list):

    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.

    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.

    3. An anchor is assigned to the background when its IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.

    4. Anchors which do not meet the above conditions do not participate in
    the training process.

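    A hypothetical NumPy sketch of these assignment rules for a single image
    (an illustration of the logic only, not the kernel used by this OP; the
    helper name and its arguments are made up for this example):

    .. code-block:: python

        import numpy as np

        def assign_anchor_labels(iou, positive_overlap=0.5, negative_overlap=0.4):
            # iou: [num_anchors, num_gt] IoU matrix for one image.
            # Returns 1 (positive), 0 (negative) or -1 (ignored) for each anchor.
            labels = np.full(iou.shape[0], -1, dtype=np.int32)
            max_iou = iou.max(axis=1)
            labels[max_iou < negative_overlap] = 0    # rule 3: background
            labels[max_iou >= positive_overlap] = 1   # rule 2: high overlap
            labels[iou.argmax(axis=0)] = 1            # rule 1: best anchor per gt box
            return labels
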
    Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
    regression for each anchor, hence the target label for each positive(or negative)
    sample is a :math:`C`-vector and the target location for each positive sample
    is a 4-vector. As for a positive sample, if the category of its assigned
    ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0; its box
    regression targets are computed as the offsets between itself and its assigned
    ground-truth box. As for a negative sample, all entries in its length :math:`C`
    label vector are set to 0 and box regression targets are omitted because
    negative samples do not participate in the training process of location
    regression.

    After the assignment, the part belonging to positive and negative samples is
    taken out from category prediction( :attr:`cls_logits` ), and the part
    belonging to positive samples is taken out from location
    prediction( :attr:`bbox_pred` ).

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
            the predicted locations of all anchors. :math:`N` is the batch size( the
            number of images in a mini-batch), :math:`M` is the number of all anchors
            of one image, and each anchor has 4 coordinate values. The data type of
            :attr:`bbox_pred` is float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
            the predicted categories of all anchors. :math:`N` is the batch size,
            :math:`M` is the number of all anchors of one image, and :math:`C` is
            the number of categories (**Notice: excluding background**). The data type
            of :attr:`cls_logits` is float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
            the locations of all anchors. :math:`M` is the number of all anchors of
            one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
            :math:`[xmin, ymin]` is the left top coordinate of the anchor box,
            :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
            The data type of :attr:`anchor_box` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator` 
            for the generation of :attr:`anchor_box`.
        anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded 
            factors of anchor locations used in loss function. :math:`M` is number of
            all anchors of one image, each anchor possesses a 4-vector expanded factor.
            The data type of :attr:`anchor_var` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator`
            for the generation of :attr:`anchor_var`.
        gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
            locations of all ground-truth boxes. :math:`G` is the total number of
            all ground-truth boxes in a mini-batch, and each ground-truth box has 4
            coordinate values. The data type of :attr:`gt_boxes` is float32 or
            float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
            categories of all ground-truth boxes, and the values are in the range of
            :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
            in a mini-batch, and each ground-truth box has one category. The data type
            of :attr:`gt_labels` is int32.
        is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
            indicates whether a ground-truth box is a crowd. If the value is 1, the
            corresponding box is a crowd and it is ignored during training. :math:`G` is
            the total number of all ground-truth boxes in a mini-batch. The data type
            of :attr:`is_crowd` is int32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
            information of input images. :math:`N` is the batch size, and the size
            information of each image is a 3-vector: the height and width
            of the network input along with the factor scaling the original image to
            the network input. The data type of :attr:`im_info` is float32.
        num_classes(int32): The number of categories for classification, the default
            value is 1.
        positive_overlap(float32): Minimum overlap required between an anchor
            and ground-truth box for the anchor to be a positive sample, the default
            value is 0.5.
        negative_overlap(float32): Maximum overlap allowed between an anchor
            and a ground-truth box for the anchor to be a negative sample, the default
            value is 0.4. :attr:`negative_overlap` should be less than or equal to
            :attr:`positive_overlap`; if not, the actual value of
            :attr:`positive_overlap` used is :attr:`negative_overlap`.

    Returns:
        A tuple with 6 Variables:
        
        **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
        category prediction belonging to positive and negative samples. :math:`F`
        is the number of positive samples in a mini-batch, :math:`B` is the number
        of negative samples, and :math:`C` is the number of categories
        (**Notice: excluding background**). The data type of :attr:`predict_scores`
        is float32 or float64.

        **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each sample has 4 coordinate values. The data type
        of :attr:`predict_location` is float32 or float64.

        **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
        target labels for classification belonging to positive and negative
        samples. :math:`F` is the number of positive samples, :math:`B` is the
        number of negative samples, and each sample has one target category. The data type
        of :attr:`target_label` is int32.

        **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        target locations for box regression belonging to positive samples.
        :math:`F` is the number of positive samples, and each sample has 4
        coordinate values. The data type of :attr:`target_bbox` is float32 or
        float64.

        **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive; if a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
        of total positive samples in a mini-batch, and each sample has 4
        coordinate values. The data type of :attr:`bbox_inside_weight` is float32
        or float64.

        **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
        of positive samples. :math:`N` is the batch size. **Notice: The number
        of positive samples is used as the denominator of a later loss function;
        to avoid the denominator being zero, this OP has added 1
        to the actual number of positive samples of each image.** The data type of
        :attr:`fg_num` is int32.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
                            dtype='float32')
          cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
                            dtype='float32')
          anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
                            dtype='float32')
          anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
                            dtype='float32')
          gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
                            dtype='float32')
          gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
                            dtype='int32')
          is_crowd = fluid.data(name='is_crowd', shape=[1],
                            dtype='int32')
          im_info = fluid.data(name='im_info', shape=[1, 3],
                            dtype='float32')
          score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
                fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
                anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)

    """

    check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
                             'retinanet_target_assign')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'retinanet_target_assign')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'retinanet_target_assign')

    helper = LayerHelper('retinanet_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    fg_num = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="retinanet_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'GtLabels': gt_labels,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
            'ForegroundNumber': fg_num
        },
        attrs={
            'positive_overlap': positive_overlap,
            'negative_overlap': negative_overlap
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True
    fg_num.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num


def rpn_target_assign(bbox_pred,
                      cls_logits,
                      anchor_box,
                      anchor_var,
                      gt_boxes,
                      is_crowd,
                      im_info,
                      rpn_batch_size_per_im=256,
                      rpn_straddle_thresh=0.0,
                      rpn_fg_fraction=0.5,
                      rpn_positive_overlap=0.7,
                      rpn_negative_overlap=0.3,
                      use_random=True):
    """
    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**

    Given the Intersection-over-Union (IoU) overlap between anchors and ground-truth
    boxes, this layer assigns classification and regression targets to each anchor;
    these targets are used to train the RPN. The classification target is a binary
    class label (of being an object or not). Following the Faster-RCNN paper,
    positive labels are assigned to two kinds of anchors: (i) the anchor/anchors with
    the highest IoU overlap with a ground-truth box, or (ii) an anchor that has an
    IoU overlap higher than rpn_positive_overlap (0.7) with any ground-truth box. Note
    that a single ground-truth box may assign positive labels to multiple
    anchors. A negative label is assigned to an anchor whose IoU ratio is lower than
    rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
    neither positive nor negative do not contribute to the training objective.
    The regression targets are the encoded ground-truth boxes associated with
    the positive anchors.

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence predictions. N is the batch size, 1 is the
            foreground and background sigmoid, M is the number of bounding boxes.
            The data type can be float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is the image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box. The data type can be float32 or float64.
        anchor_var(Variable): A 2-D Tensor with shape [M, 4] holds expanded
            variances of anchors. The data type can be float32 or float64.
        gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2-D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of a mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth
            box is a crowd. The data type must be int32.
        im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
            3 is the height, width and scale.
        rpn_batch_size_per_im(int): Total number of RPN examples per image.
            The data type must be int32.
        rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
            by straddle_thresh pixels. The data type must be float32.
        rpn_fg_fraction(float): Target fraction of the RoI minibatch that is labeled
            foreground (i.e. class > 0); the 0-th class is background. The data type must be float32.
        rpn_positive_overlap(float): Minimum overlap required between an anchor
            and a ground-truth box for the (anchor, gt box) pair to be a positive
            example. The data type must be float32.
        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
            and a ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.

    Returns:
        tuple:
        A tuple(predicted_scores, predicted_location, target_label,
        target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN.
        The target_label and target_bbox are the corresponding ground truths.
        The predicted_location is a 2-D Tensor with shape
        [F, 4], and the shape of target_bbox is the same as the shape of
        the predicted_location; F is the number of the foreground
        anchors. The predicted_scores is a 2-D Tensor with shape
        [F + B, 1], and the shape of target_label is the same as the shape
        of the predicted_scores; B is the number of the background
        anchors. F and B depend on the input of this operator.
        bbox_inside_weight represents whether the predicted location is a fake
        foreground (fake_fg) or not, and its shape is [F, 4].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
            cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
            anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
            anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
                bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)

    """

    helper = LayerHelper('rpn_target_assign', **locals())

    check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'rpn_target_assign')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'rpn_target_assign')

    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    helper.append_op(
        type="rpn_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight
        },
        attrs={
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight


def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
    """
	:alias_main: paddle.nn.functional.sigmoid_focal_loss
	:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
	:old_api: paddle.fluid.layers.sigmoid_focal_loss

    **Sigmoid Focal Loss Operator.**

    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which the focal loss is
    measured between the sigmoid value and the target label.

    The focal loss is given as follows:

    .. math::
  
        \\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
        \\begin{array}{rcl}
        - \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
        - \\frac{1}{fg\_num} * (1 - \\alpha) * {\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
        \\end{array} \\right.


    We know that
    
    .. math::
        \\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}


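    A hypothetical NumPy sketch of the element-wise formula above (illustration
    only; the helper below is made up for this example and is not the kernel
    used by this OP):

    .. code-block:: python

        import numpy as np

        def sigmoid_focal_loss_ref(x, label, fg_num, gamma=2.0, alpha=0.25):
            # x: [N, C] logits, label: [N, 1] where 0 means background.
            sigma = 1.0 / (1.0 + np.exp(-x))
            N, C = x.shape
            # one_hot[i, j] is True iff (j + 1) == label[i, 0]
            one_hot = np.arange(1, C + 1)[None, :] == label.reshape(N, 1)
            pos = -alpha * (1.0 - sigma) ** gamma * np.log(sigma)
            neg = -(1.0 - alpha) * sigma ** gamma * np.log(1.0 - sigma)
            return np.where(one_hot, pos, neg) / fg_num
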
    Args:
        x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
            all samples. :math:`N` is the number of all samples responsible for optimization in
            a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
            is the total number of positive and negative samples in a mini-batch; Samples are images
            for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
            is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
            float32 or float64.
        label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
            classification. :math:`N` is the number of all samples responsible for optimization in a
            mini-batch, each sample has one target category. The values for positive samples are in the
            range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
            is int32.
        fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
            mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
        gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
            set to 2.0.
        alpha(int|float): Hyper-parameter to balance the positive and negative examples. Default value
            is set to 0.25.

    Returns:
        Variable(the data type is float32 or float64): 
            A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
            tensor :attr:`x`.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid
            
            num_classes = 10  # exclude background
            image_width = 16
            image_height = 16
            batch_size = 32
            max_iter = 20
            
            
            def gen_train_data():
                x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
                                                    image_width)).astype('float64')
                label_data = np.random.randint(0, num_classes,
                                               (batch_size, 1)).astype('int32')
                return {"x": x_data, "label": label_data}
            
            
            def get_focal_loss(pred, label, fg_num, num_classes):
                pred = fluid.layers.reshape(pred, [-1, num_classes])
                label = fluid.layers.reshape(label, [-1, 1])
                label.stop_gradient = True
                loss = fluid.layers.sigmoid_focal_loss(
                    pred, label, fg_num, gamma=2.0, alpha=0.25)
                loss = fluid.layers.reduce_sum(loss)
                return loss
            
            
            def build_model(mode='train'):
                x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
                output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
                output = fluid.layers.fc(
                    input=output,
                    size=num_classes,
                    # Notice: size is set to be the number of target classes (excluding background)
                    # because sigmoid activation will be done in the sigmoid_focal_loss op.
                    act=None)
                if mode == 'train':
                    label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
                    # Obtain the fg_num needed by the sigmoid_focal_loss op:
                    # 0 in label represents background, >=1 in label represents foreground,
                    # find the elements in label which are greater than or equal to 1, then
                    # compute the number of these elements.
                    data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
                    fg_label = fluid.layers.greater_equal(label, data)
                    fg_label = fluid.layers.cast(fg_label, dtype='int32')
                    fg_num = fluid.layers.reduce_sum(fg_label)
                    fg_num.stop_gradient = True
                    avg_loss = get_focal_loss(output, label, fg_num, num_classes)
                    return avg_loss
                else:
                    # During evaluating or testing phase,
                    # output of the final fc layer should be connected to a sigmoid layer.
                    pred = fluid.layers.sigmoid(output)
                    return pred
            
            
            loss = build_model('train')
            moment_optimizer = fluid.optimizer.MomentumOptimizer(
                learning_rate=0.001, momentum=0.9)
            moment_optimizer.minimize(loss)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for i in range(max_iter):
                outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
                print(outs)
    """

    check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                             'sigmoid_focal_loss')
    check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
    check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')

    helper = LayerHelper("sigmoid_focal_loss", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="sigmoid_focal_loss",
        inputs={"X": x,
                "Label": label,
                "FgNum": fg_num},
        attrs={"gamma": gamma,
               'alpha': alpha},
        outputs={"Out": out})
    return out


def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0,
                     return_index=False):
    """

    Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing the following steps:

    1. Decode input bounding box predictions according to the prior boxes and
       regression locations.
    2. Get the final detection results by applying multi-class non maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes. Data type should be
            float32 or float64. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
Y
Yuan Gao 已提交
650
        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence predictions. Data type should be float32
            or float64. N is the batch size, C is the
            class number, M is number of bounding boxes.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax]. Data type
            should be float32 or float64.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
            of variance. Data type should be float32 or float64.
        background_label(int): The index of background label,
            the background label will be ignored. If set to -1, then all
            categories will be considered. Default: 0.
        nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold and before NMS. Default: 400.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
            Default: 0.01.
        nms_eta(float): The parameter for adaptive NMS. It works only when the
            value is less than 1.0. Default: 1.0.
        return_index(bool): Whether to return the selected index. Default: False.

    Returns:

        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, a tuple with one Variable(Out) is returned. 

        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
        Data type is the same as input (loc). Each row has six values:
        [label, confidence, xmin, ymin, xmax, ymax]. `No` is
        the total number of detections in this mini-batch. For each instance,
        the offsets in the first dimension are called LoD, the offset number is
        N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
        detected results; if it is 0, the i-th image has no detected results.

        Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
        with shape [No, 1] represents the selected index, whose type is integer.
        The index is the absolute value across batches. No is the same number
        as in Out. If the index is used to gather other attributes such as age,
        one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
        N is the batch size and M is the number of boxes.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()

            pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
            loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
            nmsed_outs, index = fluid.layers.detection_output(scores=scores,
                                       loc=loc,
                                       prior_box=pb,
711 712
                                       prior_box_var=pbv,
                                       return_index=True)
    """
    helper = LayerHelper("detection_output", **locals())
715 716 717 718 719
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size')
    scores = nn.softmax(input=scores)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_variable_for_type_inference(
        dtype=decoded_box.dtype)
    if return_index:
        index = helper.create_variable_for_type_inference(dtype='int')
        helper.append_op(
            type="multiclass_nms2",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs,
                     'Index': index},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
        index.stop_gradient = True
    else:
        helper.append_op(
            type="multiclass_nms",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
    nmsed_outs.stop_gradient = True
    if return_index:
        return nmsed_outs, index
    return nmsed_outs


@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
    """
	:alias_main: paddle.nn.functional.iou_similarity
	:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
	:old_api: paddle.fluid.layers.iou_similarity

    ${comment}

    Args:
        x (Variable): ${x_comment}. The data type is float32 or float64.
        y (Variable): ${y_comment}. The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set true by default.
    Returns:
        Variable: ${out_comment}. The data type is the same as that of x.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)

            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)

            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)

            [out_iou] = exe.run(test_program,
                    fetch_list=iou,
                    feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
                                         [0., 0., 1.0, 1.0]]).astype('float32'),
                          'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
            # out_iou is [[0.2857143],
            #             [0.       ]] with shape: [2, 1]
    """
    helper = LayerHelper("iou_similarity", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="iou_similarity",
        inputs={"X": x,
                "Y": y},
        attrs={"box_normalized": box_normalized},
        outputs={"Out": out})
    return out


@templatedoc()
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
822 823
              name=None,
              axis=0):
    """

    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.
    
    The Encoding schema described below:

    .. math::

        ox = (tx - px) / pw / pxv

        oy = (ty - py) / ph / pyv

        ow = \log(\abs(tw / pw)) / pwv 

        oh = \log(\abs(th / ph)) / phv 

    The Decoding schema described below:
    
    .. math::
  
        ox = (pw * pxv * tx + px) - tw / 2

        oy = (ph * pyv * ty + py) - th / 2

        ow = \exp(pwv * tw) * pw + tw / 2

        oh = \exp(phv * th) * ph + th / 2   

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, 
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote 
    the priorbox's (anchor) center coordinates, width and height. `pxv`, 
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, 
    `ow`, `oh` denote the encoded/decoded coordinates, width and height. 

    During Box Decoding, two modes for broadcast are supported. Say target 
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or 
    [M, 4]. Then prior box will broadcast to target box along the 
    assigned axis. 
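
    A hypothetical NumPy sketch of the encoding schema above (illustration only;
    inputs are assumed to already be in center form, while the OP itself consumes
    [xmin, ymin, xmax, ymax] tensors, and the helper name is made up):

    .. code-block:: python

        import numpy as np

        def encode_center_size_ref(target, prior, variance):
            tx, ty, tw, th = target        # target box center x/y, width, height
            px, py, pw, ph = prior         # prior box center x/y, width, height
            pxv, pyv, pwv, phv = variance  # prior box variances
            ox = (tx - px) / pw / pxv
            oy = (ty - py) / ph / pyv
            ow = np.log(np.abs(tw / pw)) / pwv
            oh = np.log(np.abs(th / ph)) / phv
            return ox, oy, ow, oh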

    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape 
            [M, 4] holds M boxes and data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the 
            left top coordinate of the anchor box, if the input is image feature
            map, they are close to the origin of the coordinate system. 
            [xmax, ymax] is the right bottom coordinate of the anchor box.       
        prior_box_var(List|Variable|None): prior_box_var supports three types
            of input. One is a Variable with shape [M, 4] which holds M groups of
            variances and whose data type is float32 or float64. The second is a
            list consisting of 4 elements shared by all boxes, with data type
            float32 or float64. The third is None, in which case the variance is
            not involved in the calculation.
        target_box(Variable): This input can be a 2-D LoDTensor with shape 
            [N, 4] when code_type is 'encode_center_size'. This input also can 
            be a 3-D Tensor with shape [N, M, 4] when code_type is 
            'decode_center_size'. Each box is represented as 
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64. 
            This tensor can contain LoD information to represent a batch of inputs. 
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size` 
            by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set true by default.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 
891
        axis(int): Which axis in PriorBox to broadcast for box decode, 
W
wangguanzhong 已提交
892 893 894 895
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and 
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is 
            `decode_center_size`. Set 0 by default. 
X
    Returns:
W
wangguanzhong 已提交
898 899
        Variable:

        output_box(Variable): When code_type is 'encode_center_size', the 
        output tensor of box_coder_op with shape [N, M, 4] representing the 
        result of N target boxes encoded with M Prior boxes and variances. 
        When code_type is 'decode_center_size', N represents the batch size 
        and M represents the number of decoded boxes.

    Examples:
 
        .. code-block:: python
 
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # For encode
            prior_box_encode = fluid.data(name='prior_box_encode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_encode = fluid.data(name='target_box_encode',
                                   shape=[81, 4],
                                   dtype='float32')
            output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_encode,
                                    code_type="encode_center_size")
            # For decode
            prior_box_decode = fluid.data(name='prior_box_decode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                   shape=[512, 81, 4],
                                   dtype='float32')
            output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_decode,
                                    code_type="decode_center_size",
                                    box_normalized=False,
                                    axis=1)
    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_coder')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_coder')
    helper = LayerHelper("box_coder", **locals())

944 945
    output_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
X
947 948 949 950 951 952 953 954 955 956 957 958
    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis
    }
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input variance of box_coder must be Variable or list")
    helper.append_op(
        type="box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box


@templatedoc()
def polygon_box_transform(input, name=None):
    """
    ${comment}

    Args:
        input(Variable): The input with shape [batch_size, geometry_channels, height, width].
                         A Tensor with type float32, float64.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Variable: The output with the same shape as input. A Tensor with type float32, float64.

    Examples:
        .. code-block:: python
            
            import paddle.fluid as fluid
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
            out = fluid.layers.polygon_box_transform(input)
    """
    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'polygon_box_transform')
    helper = LayerHelper("polygon_box_transform", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type="polygon_box_transform",
        inputs={"Input": input},
        attrs={},
        outputs={"Output": output})
    return output


@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
                gt_box,
                gt_label,
                anchors,
                anchor_mask,
                class_num,
                ignore_thresh,
                downsample_ratio,
                gt_score=None,
                use_label_smooth=True,
                name=None,
                scale_x_y=1.):
    """

    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored. 
                          x,y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by 
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in 
                          an image. The data type is float32 or float64.
        gt_label (Variable): class id of ground truth boxes, should be in shape
                            of [N, B]. The data type is int32.
        anchors (list|tuple): ${anchors_comment}
        anchor_mask (list|tuple): ${anchor_mask_comment}
        class_num (int): ${class_num_comment}
        ignore_thresh (float): ${ignore_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): The default value is None.  Normally there is no need 
                       for user to set this property.  For more information, 
                       please refer to :ref:`api_guide_Name`
        gt_score (Variable): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): ${use_label_smooth_comment}
        scale_x_y (float): ${scale_x_y_comment}

    Returns:
1043
        Variable: A 1-D tensor with shape [N], the value of yolov3 loss
D
dengkaipeng 已提交
1044 1045 1046

    Raises:
        TypeError: Input x of yolov3_loss must be Variable
        TypeError: Input gtbox of yolov3_loss must be Variable
        TypeError: Input gtlabel of yolov3_loss must be Variable
        TypeError: Input gtscore of yolov3_loss must be None or Variable
        TypeError: Attr anchors of yolov3_loss must be list or tuple
        TypeError: Attr class_num of yolov3_loss must be an integer
        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
        TypeError: Attr use_label_smooth of yolov3_loss must be a bool value

    Examples:
      .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          paddle.enable_static()
          x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
          gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
          gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
          gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
          anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
          anchor_mask = [0, 1, 2]
          loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
                                          gt_score=gt_score, anchors=anchors, 
                                          anchor_mask=anchor_mask, class_num=80,
                                          ignore_thresh=0.7, downsample_ratio=32)
D
dengkaipeng 已提交
1071 1072 1073 1074 1075
    """
    helper = LayerHelper('yolov3_loss', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolov3_loss must be Variable")
    if not isinstance(gt_box, Variable):
        raise TypeError("Input gtbox of yolov3_loss must be Variable")
    if not isinstance(gt_label, Variable):
        raise TypeError("Input gtlabel of yolov3_loss must be Variable")
    if gt_score is not None and not isinstance(gt_score, Variable):
        raise TypeError("Input gtscore of yolov3_loss must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
    if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
        raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolov3_loss must be an integer")
    if not isinstance(ignore_thresh, float):
        raise TypeError(
            "Attr ignore_thresh of yolov3_loss must be a float number")
    if not isinstance(use_label_smooth, bool):
        raise TypeError(
            "Attr use_label_smooth of yolov3_loss must be a bool value")

    loss = helper.create_variable_for_type_inference(dtype=x.dtype)

    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {
        "X": x,
        "GTBox": gt_box,
        "GTLabel": gt_label,
    }
    if gt_score is not None:
        inputs["GTScore"] = gt_score

    attrs = {
        "anchors": anchors,
        "anchor_mask": anchor_mask,
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
        "downsample_ratio": downsample_ratio,
        "use_label_smooth": use_label_smooth,
        "scale_x_y": scale_x_y,
    }

    helper.append_op(
        type='yolov3_loss',
        inputs=inputs,
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask
        },
        attrs=attrs)
    return loss


@templatedoc(op_type="yolo_box")
def yolo_box(x,
             img_size,
             anchors,
             class_num,
             conf_thresh,
             downsample_ratio,
             clip_bbox=True,
             name=None,
             scale_x_y=1.):
    """

    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        img_size (Variable): ${img_size_comment} The data type is int32.
        anchors (list|tuple): ${anchors_comment}
        class_num (int): ${class_num_comment}
        conf_thresh (float): ${conf_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        clip_bbox (bool): ${clip_bbox_comment}
        scale_x_y (float): ${scale_x_y_comment}
        name (string): The default value is None.  Normally there is no need
                       for user to set this property.  For more information,
                       please refer to :ref:`api_guide_Name`

    Returns:
        Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
        scores of boxes.

    Raises:
        TypeError: Input x of yolo_box must be Variable
        TypeError: Attr anchors of yolo_box must be list or tuple
        TypeError: Attr class_num of yolo_box must be an integer
        TypeError: Attr conf_thresh of yolo_box must be a float number

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import paddle
        paddle.enable_static()
        x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
        img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
        anchors = [10, 13, 16, 30, 33, 23]
        boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
                                        conf_thresh=0.01, downsample_ratio=32)
    """
    helper = LayerHelper('yolo_box', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolo_box must be Variable")
    if not isinstance(img_size, Variable):
        raise TypeError("Input img_size of yolo_box must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolo_box must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolo_box must be an integer")
    if not isinstance(conf_thresh, float):
        raise TypeError("Attr conf_thresh of yolo_box must be a float number")

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
        "clip_bbox": clip_bbox,
        "scale_x_y": scale_x_y,
    }

    helper.append_op(
        type='yolo_box',
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs)
    return boxes, scores


@templatedoc()
def detection_map(detect_res,
                  label,
                  class_num,
                  background_label=0,
                  overlap_threshold=0.3,
                  evaluate_difficult=True,
                  has_state=None,
                  input_states=None,
                  out_states=None,
                  ap_version='integral'):
    """
    ${comment}

    Args:
        detect_res: ${detect_res_comment}
        label:  ${label_comment}
        class_num: ${class_num_comment}
        background_label: ${background_label_comment}
        overlap_threshold: ${overlap_threshold_comment}
        evaluate_difficult: ${evaluate_difficult_comment}
        has_state: ${has_state_comment}
        input_states: (tuple|None) If not None, it contains 3 elements:
            (1) pos_count ${pos_count_comment}.
            (2) true_pos ${true_pos_comment}.
            (3) false_pos ${false_pos_comment}.
        out_states: (tuple|None) If not None, it contains 3 elements.
            (1) accum_pos_count ${accum_pos_count_comment}.
            (2) accum_true_pos ${accum_true_pos_comment}.
            (3) accum_false_pos ${accum_false_pos_comment}.
        ap_version: ${ap_type_comment}

    Returns:
        ${map_comment}


    Examples:
          .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers import detection
            detect_res = fluid.data(
                name='detect_res',
                shape=[10, 6],
                dtype='float32')
            label = fluid.data(
                name='label',
                shape=[10, 6],
                dtype='float32')

            map_out = detection.detection_map(detect_res, label, 21)
    """
    helper = LayerHelper("detection_map", **locals())

    def __create_var(type):
        return helper.create_variable_for_type_inference(dtype=type)

    map_out = __create_var('float32')
    accum_pos_count_out = out_states[
        0] if out_states is not None else __create_var('int32')
    accum_true_pos_out = out_states[
        1] if out_states is not None else __create_var('float32')
    accum_false_pos_out = out_states[
        2] if out_states is not None else __create_var('float32')

    pos_count = input_states[0] if input_states is not None else None
    true_pos = input_states[1] if input_states is not None else None
    false_pos = input_states[2] if input_states is not None else None

    helper.append_op(
        type="detection_map",
        inputs={
            'Label': label,
            'DetectRes': detect_res,
            'HasState': has_state,
            'PosCount': pos_count,
            'TruePos': true_pos,
            'FalsePos': false_pos
        },
        outputs={
            'MAP': map_out,
            'AccumPosCount': accum_pos_count_out,
            'AccumTruePos': accum_true_pos_out,
            'AccumFalsePos': accum_false_pos_out
        },
        attrs={
            'overlap_threshold': overlap_threshold,
            'evaluate_difficult': evaluate_difficult,
            'ap_type': ap_version,
            'class_num': class_num,
        })
    return map_out


def bipartite_match(dist_matrix,
                    match_type=None,
                    dist_threshold=None,
                    name=None):
    """

    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
    distance matrix. For an input 2D matrix, the bipartite matching algorithm can
    find the matched column for each row (matched means the largest distance),
    and can also find the matched row for each column. This operator only
    calculates matched indices from column to row. For each instance,
    the number of matched indices is the column number of the input distance
    matrix. **The OP only supports CPU**.

    There are two outputs, matched indices and distance.
    In simple terms, this algorithm matches the best (maximum distance)
    row entity to each column entity, and the matched indices are not duplicated
    in each row of ColToRowMatchIndices. If a column entity is not matched to
    any row entity, -1 is set in ColToRowMatchIndices.
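
    A small worked example of the greedy rule above (an illustration only,
    with made-up values): for one instance with distance matrix
    [[0.6, 0.2, 0.9], [0.4, 0.8, 0.3]] (2 row entities, 3 column entities),
    the largest entry 0.9 matches column 2 to row 0, the next largest
    remaining entry 0.8 matches column 1 to row 1, and no row is left for
    column 0, so ColToRowMatchIndices is [[-1, 1, 0]] and the matched
    distances for columns 1 and 2 are 0.8 and 0.9.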

    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
    If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
    If Tensor, the height of ColToRowMatchIndices is 1.

    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider to use :code:`ssd_loss` instead.

    Args:
        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
            [K, M]. The data type is float32 or float64. It is pair-wise
            distance matrix between the entities represented by each row and
            each column. For example, assumed one entity is A with shape [K],
            another entity is B with shape [M]. The dist_matrix[i][j] is the
            distance between A[i] and B[j]. The bigger the distance is, the
            better matching the pairs are. NOTE: This tensor can contain LoD
            information to represent a batch of inputs. One instance of this
            batch can contain different numbers of entities.
        match_type(str, optional): The type of matching method, should be
           'bipartite' or 'per_prediction'. None ('bipartite') by default.
        dist_threshold(float32, optional): If `match_type` is 'per_prediction',
            this threshold is to determine the extra matching bboxes based
            on the maximum distance, 0.5 by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set and
            is None by default.

    Returns:
        Tuple:

        matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
        type is int32. N is the batch size. If match_indices[i][j] is -1, it
        means B[j] does not match any entity in i-th instance.
        Otherwise, it means B[j] is matched to row
        match_indices[i][j] in i-th instance. The row number of
        i-th instance is saved in match_indices[i][j].

        matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
        type is float32. N is batch size. If match_indices[i][j] is -1,
        match_distance[i][j] is also -1.0. Otherwise, assumed
        match_distance[i][j] = d, and the row offsets of each instance
        are called LoD. Then match_distance[i][j] =
        dist_matrix[d+LoD[i]][j].

    Examples:

        >>> import paddle.fluid as fluid
        >>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
        >>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
    """
    helper = LayerHelper('bipartite_match', **locals())
    match_indices = helper.create_variable_for_type_inference(dtype='int32')
    match_distance = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype)
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
        outputs={
            'ColToRowMatchIndices': match_indices,
            'ColToRowMatchDist': match_distance
        })
    return match_indices, match_distance


def target_assign(input,
                  matched_indices,
                  negative_indices=None,
                  mismatch_value=None,
                  name=None):
    """

    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights to each prediction. The weights are used to specify which
    predictions do not contribute to the training loss.

    For each instance, the outputs `out` and `out_weight` are assigned based on
    `match_indices` and `negative_indices`.
    Assumed that the row offset for each instance in `input` is called lod,
    this operator assigns classification/regression targets by performing the
    following steps:

    1. Assigning all outputs based on `match_indices`:

    .. code-block:: text

        If id = match_indices[i][j] > 0,

            out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
            out_weight[i][j] = 1.

        Otherwise,

            out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][j] = 0.

    2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:

    Assumed that i-th instance in `neg_indices` is called `neg_indice`,
    for i-th instance:

    .. code-block:: text

        for id in neg_indice:
            out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][id] = 1.0
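
    A small worked illustration of the two steps above (made-up values): if
    mismatch_value is 0 and match_indices for one instance is [2, -1], the
    first prediction is assigned the entity in row 2 of that instance of
    `input` with weight 1., the second prediction is filled with 0 with
    weight 0., and every index listed in `negative_indices` is afterwards
    reset to 0 with weight 1.0.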

    Args:
       input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
           Data type should be int32 or float32.
       matched_indices (Variable): The input matched indices
           is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
           the j-th entity of column is not matched to any entity of row in
           i-th instance.
       negative_indices (Variable, optional): The input negative example indices
           are an optional input with shape [Neg, 1] and int32 type, where Neg is
           the total number of negative example indices.
       mismatch_value (float32, optional): Fill this value to the mismatched
           location.
       name (string): The default value is None.  Normally there is no need for
           user to set this property.  For more information, please refer
           to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple(out, out_weight) is returned.

        out (Variable): a 3D Tensor with shape [N, P, K] and same data type
        with `input`, N and P is the same as they are in `matched_indices`,
        K is the same as it in input of X.

        out_weight (Variable): the weight for output with the shape of [N, P, 1].
        Data type is float32.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            x = fluid.data(
                name='x',
                shape=[4, 20, 4],
                dtype='float',
                lod_level=1)
            matched_id = fluid.data(
                name='indices',
                shape=[8, 20],
                dtype='int32')
            trg, trg_weight = fluid.layers.target_assign(
                x,
                matched_id,
                mismatch_value=0)
    """
    helper = LayerHelper('target_assign', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_weight = helper.create_variable_for_type_inference(dtype='float32')
    helper.append_op(
        type='target_assign',
        inputs={
            'X': input,
            'MatchIndices': matched_indices,
            'NegIndices': negative_indices
        },
        outputs={'Out': out,
                 'OutWeight': out_weight},
        attrs={'mismatch_value': mismatch_value})
    return out, out_weight


def ssd_loss(location,
             confidence,
             gt_box,
             gt_label,
             prior_box,
             prior_box_var=None,
             background_label=0,
             overlap_threshold=0.5,
             neg_pos_ratio=3.0,
             neg_overlap=0.5,
             loc_loss_weight=1.0,
             conf_loss_weight=1.0,
             match_type='per_prediction',
             mining_type='max_negative',
             normalize=True,
             sample_size=None):
    """
    :alias_main: paddle.nn.functional.ssd_loss
    :alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
    :old_api: paddle.fluid.layers.ssd_loss

    **Multi-box loss layer for object detection algorithm of SSD**

    This layer is to compute detection loss for SSD given the location offset
    predictions, confidence predictions, prior boxes and ground-truth bounding
    boxes and labels, and the type of hard example mining. The returned loss
    is a weighted sum of the localization loss (or regression loss) and
    confidence loss (or classification loss) by performing the following steps:

    1. Find matched bounding box by bipartite matching algorithm.

      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.

      1.2 Compute matched bounding box by bipartite matching algorithm.

    2. Compute confidence for mining hard examples

      2.1. Get the target label based on matched indices.

      2.2. Compute confidence loss.

    3. Apply hard example mining to get the negative example indices and update
       the matched indices.

    4. Assign classification and regression targets

      4.1. Encode bbox according to the prior boxes.

      4.2. Assign regression targets.

      4.3. Assign classification targets.

    5. Compute the overall objective loss.

      5.1 Compute confidence loss.

      5.2 Compute localization loss.

      5.3 Compute the overall weighted loss.
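
    The overall objective computed below is
    `conf_loss_weight * conf_loss + loc_loss_weight * loc_loss`; when
    `normalize` is True it is divided by the sum of the localization target
    weights (roughly the number of matched prior boxes).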

    Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
            the layout is [xmin, ymin, xmax, ymax]. The data type is float32 or
            float64.
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
            `location`, C is the class number. The data type is float32 or
            float64.
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type is float32 or float64.
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
            with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
            mini-batch input, 1 is the number of class. The data type is float32
            or float64.
        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
            Np and 4 are the same as they are in `location`. The data type is
            float32 or float64.
        prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
            with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
        background_label (int): The index of background label, 0 by default.
        overlap_threshold (float): If match_type is 'per_prediction', use
            'overlap_threshold' to determine the extra matching bboxes when finding \
            matched boxes. 0.5 by default.
        neg_pos_ratio (float): The ratio of the negative boxes to the positive
            boxes, used only when mining_type is 'max_negative', 3.0 by default.
        neg_overlap (float): The negative overlap upper bound for the unmatched
            predictions. Use only when mining_type is 'max_negative',
            0.5 by default.
        loc_loss_weight (float): Weight for localization loss, 1.0 by default.
        conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
        match_type (str): The type of matching method during training, should
            be 'bipartite' or 'per_prediction', 'per_prediction' by default.
        mining_type (str): The hard example mining type, should be 'hard_example'
            or 'max_negative', now only support `max_negative`.
        normalize (bool): Whether to normalize the SSD loss by the total number
            of output locations, True by default.
        sample_size (int): The max sample size of negative box, used only when
            mining_type is 'hard_example'.

    Returns:
        Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
        with shape [N * Np, 1], N and Np are the same as they are in
        `location`. The data type is float32 or float64.

    Raises:
        ValueError: If mining_type is 'hard_example', now only support mining \
        type of `max_negative`.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                           name='prior_box',
                           shape=[10, 4],
                           dtype='float32')
            pbv = fluid.data(
                           name='prior_box_var',
                           shape=[10, 4],
                           dtype='float32')
            loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
            gt_box = fluid.data(
                 name='gt_box', shape=[4], lod_level=1, dtype='float32')
            gt_label = fluid.data(
                 name='gt_label', shape=[1], lod_level=1, dtype='float32')
            loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
    """

    helper = LayerHelper('ssd_loss', **locals())
    if mining_type != 'max_negative':
        raise ValueError("Only support mining_type == max_negative now.")

    num, num_prior, num_class = confidence.shape
    conf_shape = nn.shape(confidence)

    def __reshape_to_2d(var):
        return nn.flatten(x=var, axis=2)

    # 1. Find matched bounding box by prior box.
    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
    iou = iou_similarity(x=gt_box, y=prior_box)
    #   1.2 Compute matched bounding box by bipartite matching algorithm.
    matched_indices, matched_dist = bipartite_match(iou, match_type,
                                                    overlap_threshold)

    # 2. Compute confidence for mining hard examples
    # 2.1. Get the target label based on matched indices
    gt_label = nn.reshape(
        x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
    gt_label.stop_gradient = True
    target_label, _ = target_assign(
        gt_label, matched_indices, mismatch_value=background_label)
    # 2.2. Compute confidence loss.
    # Reshape confidence to 2D tensor.
    confidence = __reshape_to_2d(confidence)
    target_label = tensor.cast(x=target_label, dtype='int64')
    target_label = __reshape_to_2d(target_label)
    target_label.stop_gradient = True
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    # 3. Mining hard examples
    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
    actual_shape.stop_gradient = True
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    conf_loss = nn.reshape(
        x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
    conf_loss.stop_gradient = True
    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
    dtype = matched_indices.dtype
    updated_matched_indices = helper.create_variable_for_type_inference(
        dtype=dtype)
    helper.append_op(
        type='mine_hard_examples',
        inputs={
            'ClsLoss': conf_loss,
            'LocLoss': None,
            'MatchIndices': matched_indices,
            'MatchDist': matched_dist,
        },
        outputs={
            'NegIndices': neg_indices,
            'UpdatedMatchIndices': updated_matched_indices
        },
        attrs={
            'neg_pos_ratio': neg_pos_ratio,
            'neg_dist_threshold': neg_overlap,
            'mining_type': mining_type,
            'sample_size': sample_size,
        })

    # 4. Assign classification and regression targets
    # 4.1. Encoded bbox according to the prior boxes.
    encoded_bbox = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size')
    # 4.2. Assign regression targets
    target_bbox, target_loc_weight = target_assign(
        encoded_bbox, updated_matched_indices, mismatch_value=background_label)
    # 4.3. Assign classification targets
    target_label, target_conf_weight = target_assign(
        gt_label,
        updated_matched_indices,
        negative_indices=neg_indices,
        mismatch_value=background_label)

    # 5. Compute loss.
    # 5.1 Compute confidence loss.
    target_label = __reshape_to_2d(target_label)
    target_label = tensor.cast(x=target_label, dtype='int64')

    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    target_conf_weight = __reshape_to_2d(target_conf_weight)
    conf_loss = conf_loss * target_conf_weight

    # the target_label and target_conf_weight do not have gradient.
    target_label.stop_gradient = True
    target_conf_weight.stop_gradient = True

    # 5.2 Compute regression loss.
    location = __reshape_to_2d(location)
    target_bbox = __reshape_to_2d(target_bbox)

    loc_loss = nn.smooth_l1(location, target_bbox)
    target_loc_weight = __reshape_to_2d(target_loc_weight)
    loc_loss = loc_loss * target_loc_weight

    # the target_bbox and target_loc_weight do not have gradient.
    target_bbox.stop_gradient = True
    target_loc_weight.stop_gradient = True

    # 5.3 Compute overall weighted loss.
    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
    # reshape to [N, Np], N is the batch size and Np is the prior box number.
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
    loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
    if normalize:
        normalizer = nn.reduce_sum(target_loc_weight)
        loss = loss / normalizer

    return loss


def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              name=None,
              min_max_aspect_ratios_order=False):
    """

    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of the
    box is in range(min_size, max_size) interval, which is generated in
    sequence according to the aspect_ratios.
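
    For example, with a single min size, no max size and the default aspect
    ratio [1.], each position produces one prior box, which is why the
    example below prints boxes of shape (6, 9, 1, 4) for a 6 x 9 input
    feature map.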

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes(list|tuple|float): the min sizes of generated prior boxes.
       max_sizes(list|tuple|None): the max sizes of generated prior boxes.
            Default: None.
       aspect_ratios(list|tuple|float): the aspect ratios of generated
            prior boxes. Default: [1.].
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       step(list|tuple): Prior boxes step across width and height. If
            step[0] equals to 0.0 or step[1] equals to 0.0, the prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes(Variable): the output prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.

        variances(Variable): the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.

    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()
            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.prior_box(
                 input=input,
                 image=image,
                 min_sizes=[100.],
                 clip=True,
                 flip=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(fluid.default_main_program(),
                feed={"input":input_data,"image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (6, 9, 1, 4)
            # print(var_out.shape)
            # (6, 9, 1, 4)

            # imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.prior_box(
                    input=input,
                    image=image,
                    min_sizes=[100.],
                    clip=True,
                    flip=True)
                # print(box.shape)
                # [6L, 9L, 1L, 4L]
                # print(var.shape)
                # [6L, 9L, 1L, 4L]

    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(
        input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    attrs = {
        'min_sizes': min_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'flip': flip,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'min_max_aspect_ratios_order': min_max_aspect_ratios_order
    }
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        attrs['max_sizes'] = max_sizes

    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def density_prior_box(input,
                      image,
                      densities=None,
                      fixed_sizes=None,
                      fixed_ratios=None,
                      variance=[0.1, 0.1, 0.2, 0.2],
                      clip=False,
                      steps=[0.0, 0.0],
                      offset=0.5,
                      flatten_to_2d=False,
                      name=None):
    """

    This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
    algorithm. Each position of the input produces N prior boxes, N is
    determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator, the grid points are determined by densities, and
    the count of density prior boxes is determined by fixed_sizes and fixed_ratios.
    Obviously, the number of fixed_sizes is equal to the number of densities.

    For densities_i in densities:

    .. math::

        N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)

    N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
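
    For example, with densities = [4, 2, 1] and a single fixed ratio, each
    input position produces 4^2 + 2^2 + 1^2 = 21 density prior boxes, which
    matches the [6, 9, 21, 4] box shape printed in the imperative example
    below.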

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
            the layout is NCHW.
       densities(list|tuple|None): The densities of generated density prior
            boxes, this attribute should be a list or tuple of integers.
            Default: None.
       fixed_sizes(list|tuple|None): The fixed sizes of generated density
            prior boxes, this attribute should be a list or tuple of same
            length with :attr:`densities`. Default: None.
       fixed_ratios(list|tuple|None): The fixed ratios of generated density
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
       variance(list|tuple): The variances to be encoded in density prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       clip(bool): Whether to clip out of boundary boxes. Default: False.
       step(list|tuple): Prior boxes step across width and height. If
            step[0] equals 0.0 or step[1] equals 0.0, the density prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       flatten_to_2d(bool): Whether to flatten output prior boxes and variance
           to 2D shape, the second dim is 4. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes: the output density prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.

        variances: the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.


    Examples:

        .. code-block:: python

            #declarative mode

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.density_prior_box(
                 input=input,
                 image=image,
                 densities=[4, 2, 1],
                 fixed_sizes=[32.0, 64.0, 128.0],
                 fixed_ratios=[1.],
                 clip=True,
                 flatten_to_2d=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(
                fluid.default_main_program(),
                feed={"input":input_data,
                      "image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (1134, 4)
            # print(var_out.shape)
            # (1134, 4)


            #imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.density_prior_box(
                    input=input,
                    image=image,
                    densities=[4, 2, 1],
                    fixed_sizes=[32.0, 64.0, 128.0],
                    fixed_ratios=[1.],
                    clip=True)

                # print(box.shape)
                # [6L, 9L, 21L, 4L]
                # print(var.shape)
                # [6L, 9L, 21L, 4L]

    """
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'density_prior_box')

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    check_type(densities, 'densities', (list, tuple), 'density_prior_box')
    check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
    check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
    if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')

    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
    }
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def multi_box_head(inputs,
                   image,
                   base_size,
                   num_classes,
                   aspect_ratios,
                   min_ratio=None,
                   max_ratio=None,
                   min_sizes=None,
                   max_sizes=None,
                   steps=None,
                   step_w=None,
                   step_h=None,
                   offset=0.5,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   flip=True,
                   clip=False,
                   kernel_size=1,
                   pad=0,
                   stride=1,
                   name=None,
                   min_max_aspect_ratios_order=False):
    """
    :api_attr: Static Graph

    Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
    regression location and classification confidence on multiple input feature
    maps, then output the concatenated results. For details of this algorithm,
    please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
    <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs (list(Variable)|tuple(Variable)): The list of input variables,
           the format of all Variables are 4-D Tensor, layout is NCHW.
           Data type should be float32 or float64.
       image (Variable): The input image, layout is NCHW. Data type should be
           the same as inputs.
       base_size(int): the base_size is input image size. When len(inputs) > 2
           and `min_size` and `max_size` are None, the `min_size` and `max_size`
           are calculated by `base_size`, 'min_ratio' and `max_ratio`. The
           formula is as follows:

              .. code-block:: text

                  min_sizes = []
                  max_sizes = []
                  step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
                  for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
                      min_sizes.append(base_size * ratio / 100.)
                      max_sizes.append(base_size * (ratio + step) / 100.)
                  min_sizes = [base_size * .10] + min_sizes
                  max_sizes = [base_size * .20] + max_sizes

C
chengduoZH 已提交
2154
       num_classes(int): The number of classes.
Q
qingqing01 已提交
2155 2156
       aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
           prior boxes. The length of input and aspect_ratios must be equal.
C
chengduoZH 已提交
2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       min_sizes(list|tuple|None): If `len(inputs) <=2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       max_sizes(list|tuple|None): If `len(inputs) <=2`,
            max_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
       step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
       step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
       offset(float): Prior boxes center offset. Default: 0.5
       variance(list|tuple): the variances to be encoded in prior boxes.
2176
            Default:[0.1, 0.1, 0.2, 0.2].
C
chengduoZH 已提交
2177 2178 2179 2180 2181
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       kernel_size(int): The kernel size of conv2d. Default: 1.
       pad(int|list|tuple): The padding of conv2d. Default:0.
       stride(int|list|tuple): The stride of conv2d. Default:1,
Q
qingqing01 已提交
2182 2183 2184
       name(str): The default value is None.  Normally there is no need
           for user to set this property.  For more information, please
           refer to :ref:`api_guide_Name`.
2185
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
M
minqiyang 已提交
2186
            in order of [min, max, aspect_ratios], which is consistent with
2187
            Caffe. Please note, this order affects the weights order of
T
tianshuo78520a 已提交
2188
            convolution layer followed by and does not affect the final
2189
            detection results. Default: False.
C
chengduoZH 已提交
2190 2191

    Returns:
Q
update  
qiaolongfei 已提交
2192 2193
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

Q
qingqing01 已提交
2194 2195 2196
        mbox_loc (Variable): The predicted boxes' location of the inputs. The
        layout is [N, num_priors, 4], where N is batch size, ``num_priors``
        is the number of prior boxes. Data type is the same as input.
Q
update  
qiaolongfei 已提交
2197

Q
qingqing01 已提交
2198 2199 2200 2201
        mbox_conf (Variable): The predicted boxes' confidence of the inputs.
        The layout is [N, num_priors, C], where ``N`` and ``num_priors`` 
        has the same meaning as above. C is the number of Classes.
        Data type is the same as input.
Q
update  
qiaolongfei 已提交
2202

Q
qingqing01 已提交
2203 2204 2205
        boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
        The meaning of num_priors is the same as above.
        Data type is the same as input.
C
chengduoZH 已提交
2206

Q
qingqing01 已提交
2207 2208
        variances (Variable): the expanded variances for prior boxes.
        The layout is [num_priors, 4]. Data type is the same as input.
C
chengduoZH 已提交
2209

Q
qingqing01 已提交
2210
    Examples 1: set min_ratio and max_ratio:
C
chengduoZH 已提交
2211
        .. code-block:: python
Q
update  
qiaolongfei 已提交
2212

2213 2214
          import paddle.fluid as fluid

Q
qingqing01 已提交
2215 2216 2217 2218 2219 2220 2221
          images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
2222

Q
update  
qiaolongfei 已提交
2223
          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
2224
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
C
chengduoZH 已提交
2225 2226 2227 2228 2229 2230 2231 2232 2233
            image=images,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)
Q
qingqing01 已提交
2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259

    Examples 2: set min_sizes and max_sizes:
        .. code-block:: python

          import paddle.fluid as fluid

          images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
            max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    """

    def _reshape_with_axis_(input, axis=1):
        out = nn.flatten(x=input, axis=axis)
        return out

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    elif min_sizes is None and max_sizes is None:
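        # Derive per-layer min/max box sizes from min_ratio/max_ratio: the ratio
        # range is spread evenly over the upper feature maps, and the first feature
        # map uses 10% / 20% of base_size (the usual SSD heuristic).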
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios, num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.')
    if step_h is not None:
        _is_list_or_tuple_and_equal(
            step_h, num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.')
    if step_w is not None:
        _is_list_or_tuple_and_equal(
            step_w, num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
    if steps is not None:
        _is_list_or_tuple_and_equal(
            steps, num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'steps should be the same.')
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]

        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
                             variance, flip, clip, step, offset, None,
                             min_max_aspect_ratios_order)

        box_results.append(box)
        var_results.append(var)

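        # prior_box lays boxes out as [H, W, num_priors, 4], so shape[2] is the
        # number of prior boxes generated at every spatial location.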
        num_boxes = box.shape[2]

        # get loc
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)

        mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
        mbox_locs.append(mbox_loc_flatten)

        # get conf
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)
        conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
        mbox_confs.append(conf_loc_flatten)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = nn.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes])

    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var


def anchor_generator(input,
                     anchor_sizes=None,
                     aspect_ratios=None,
                     variance=[0.1, 0.1, 0.2, 0.2],
                     stride=None,
                     offset=0.5,
                     name=None):
    """

    **Anchor generator operator**

    Generate anchors for the Faster RCNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
    by looping over aspect_ratios first and then anchor_sizes.

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, the anchor size of 64 means the area of this anchor 
          equals to 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios 
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box 
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by 
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer 
           to :ref:`api_guide_Name`. Usually name is no need to set and None 
           by default. 

    Returns:
        Tuple:

        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.

        Variances(Variable): The expanded variances of anchors
        with a layout of [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each variance is in (xcenter, ycenter, w, h) format.


    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
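            # Each position yields len(anchor_sizes) * len(aspect_ratios) = 4 * 3 = 12
            # anchors here, so anchor has the layout [16, 16, 12, 4].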
    """
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple ',
                         'with length 2, (stride_width, stride_height).')

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor,
                 "Variances": var},
        attrs=attrs, )
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var


def roi_perspective_transform(input,
                              rois,
                              transformed_height,
                              transformed_width,
                              spatial_scale=1.0,
                              name=None):
    """
    **The** `rois` **of this op should be a LoDTensor.**

    ROI perspective transform op applies perspective transform to map each roi into a
    rectangular region. Perspective transform is a type of transformation in linear algebra.

    Parameters:
        input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of 
                          input tensor is NCHW. Where N is batch size, C is the
                          number of input channels, H is the height of the feature,
                          and W is the width of the feature. The data type is float32.
        rois (Variable):  2-D LoDTensor, ROIs (Regions of Interest) to be transformed. 
                          It should be a 2-D LoDTensor of shape (num_rois, 8). Given as 
                          [[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the 
                          top left coordinates, and (x2, y2) is the top right 
                          coordinates, and (x3, y3) is the bottom right coordinates, 
                          and (x4, y4) is the bottom left coordinates. The data type is the
                          same as `input`.
        transformed_height (int): The height of transformed output.
        transformed_width (int): The width of transformed output.
        spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
        name(str, optional): The default value is None.  
                             Normally there is no need for user to set this property.  
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
            A tuple with three Variables. (out, mask, transform_matrix)

            out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`.

            mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, 1, transformed_h, transformed_w). The data type is int32.

            transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
            a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`.

    Return Type:
        tuple

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
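            # Per the Returns section above: out is (num_rois, 256, 7, 7),
            # mask is (num_rois, 1, 7, 7) and transform_matrix is (num_rois, 9).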
    """
    check_variable_and_dtype(input, 'input', ['float32'],
                             'roi_perspective_transform')
    check_variable_and_dtype(rois, 'rois', ['float32'],
                             'roi_perspective_transform')
    check_type(transformed_height, 'transformed_height', int,
               'roi_perspective_transform')
    check_type(transformed_width, 'transformed_width', int,
               'roi_perspective_transform')
    check_type(spatial_scale, 'spatial_scale', float,
               'roi_perspective_transform')

    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input,
                "ROIs": rois},
        outputs={
            "Out": out,
            "Out2InIdx": out2in_idx,
            "Out2InWeights": out2in_w,
            "Mask": mask,
            "TransformMatrix": transform_matrix
        },
        attrs={
            "transformed_height": transformed_height,
            "transformed_width": transformed_width,
            "spatial_scale": spatial_scale
        })
    return out, mask, transform_matrix


def generate_proposal_labels(rpn_rois,
                             gt_classes,
                             is_crowd,
                             gt_boxes,
                             im_info,
                             batch_size_per_im=256,
                             fg_fraction=0.25,
                             fg_thresh=0.25,
                             bg_thresh_hi=0.5,
                             bg_thresh_lo=0.0,
                             bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                             class_nums=None,
                             use_random=True,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False):
    """

    **Generate Proposal Labels of Faster-RCNN**

    Given the output bounding boxes of GenerateProposalOp and the groundtruth,
    this operator samples foreground and background boxes and computes the loss targets.

    RpnRois is the output boxes of RPN and was processed by generate_proposal_op; these boxes
    are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
    If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
    If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
    it is considered a background sample.
    After all foreground and background boxes are chosen (the so-called Rois),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.

    For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
    Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.
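    For example, with the default batch_size_per_im=256 and fg_fraction=0.25, at most
    256 * 0.25 = 64 RoIs per image are labeled as foreground.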

    Args:
        rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
        is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
        gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
        im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.

        batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
        fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
        bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
        class_nums(int): Class number. The data type must be int32.
        use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): bbox regression uses class-agnostic targets, which only represent foreground and background boxes.
        is_cascade_rcnn(bool): if set True, boxes crossing the image boundary will be filtered out.

    Returns:
        tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.

        - **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
        - **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
        - **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
        - **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
        - **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
                           rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
                           class_nums=10)

    """

    helper = LayerHelper('generate_proposal_labels', **locals())

    check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
                             'generate_proposal_labels')
    check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
                             'generate_proposal_labels')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'generate_proposal_labels')

    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
    labels_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    bbox_targets = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_inside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_outside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)

    helper.append_op(
        type="generate_proposal_labels",
        inputs={
            'RpnRois': rpn_rois,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtBoxes': gt_boxes,
            'ImInfo': im_info
        },
        outputs={
            'Rois': rois,
            'LabelsInt32': labels_int32,
            'BboxTargets': bbox_targets,
            'BboxInsideWeights': bbox_inside_weights,
            'BboxOutsideWeights': bbox_outside_weights
        },
        attrs={
            'batch_size_per_im': batch_size_per_im,
            'fg_fraction': fg_fraction,
            'fg_thresh': fg_thresh,
            'bg_thresh_hi': bg_thresh_hi,
            'bg_thresh_lo': bg_thresh_lo,
            'bbox_reg_weights': bbox_reg_weights,
            'class_nums': class_nums,
            'use_random': use_random,
            'is_cls_agnostic': is_cls_agnostic,
            'is_cascade_rcnn': is_cascade_rcnn
        })

    rois.stop_gradient = True
    labels_int32.stop_gradient = True
    bbox_targets.stop_gradient = True
    bbox_inside_weights.stop_gradient = True
    bbox_outside_weights.stop_gradient = True

    return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights


def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                         labels_int32, num_classes, resolution):
    """

    **Generate Mask Labels for Mask-RCNN**

    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math: `K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.

    Please note the data format of the ground-truth segmentation, assuming the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.

        .. code-block:: python

            #[
            #  [[[229.14, 370.9, 229.14, 370.9, ...]],
            #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
            #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
            #]

            batch_masks = []
            for semgs in batch_semgs:
                gt_masks = []
                for semg in semgs:
                    gt_segm = []
                    for polys in semg:
                        gt_segm.append(np.array(polys).reshape(-1, 2))
                    gt_masks.append(gt_segm)
                batch_masks.append(gt_masks)
            
            
            place = fluid.CPUPlace()
            feeder = fluid.DataFeeder(place=place, feed_list=feeds)
            feeder.feed(batch_masks)

    Args:
        im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
            data type. N is the batch size, each element is
            [height, width, scale] of image. Image scale is
            target_size / original_size, target_size is the size after resize,
            original_size is the original image size.
        gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
            should be int. M is the total number of ground-truth, each
            element is a class label.
        is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
            as gt_classes, each element is a flag indicating whether a
            groundtruth is crowd.
        gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type, its LoD level is 3.
            Usually users do not need to understand LoD;
            the reader should return the correct data format.
            LoD[0] represents the ground-truth objects number of
            each instance. LoD[1] represents the segmentation counts of each
            object. LoD[2] represents the polygons number of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
            R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of the original image.
        labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
            of int32. R is the same as in `rois`. Each element represents
            a class label of a RoI.
        num_classes (int): Class number.
        resolution (int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
        type as `rois`. P is the total number of sampled RoIs. Each element
        is a bounding box with [xmin, ymin, xmax, ymax] format in range of
        original image size.

        mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
        and int data type, each element represents the output mask RoI
        index with regard to input RoIs.

        mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
        data type, K is the classes number and M is the resolution of mask
        predictions. Each element represents the binary mask targets.
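        For example, with num_classes=81 and resolution=14 as in the example below,
        each row of mask_int32 has 81 * 14 * 14 = 15876 elements.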

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          im_info = fluid.data(name="im_info", shape=[None, 3],
              dtype="float32")
          gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
              dtype="float32", lod_level=1)
          is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
              dtype="float32", lod_level=1)
          gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
              dtype="float32", lod_level=3)
          # rois, roi_labels can be the output of
          # fluid.layers.generate_proposal_labels.
          rois = fluid.data(name="rois", shape=[None, 4],
              dtype="float32", lod_level=1)
          roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
              dtype="int32", lod_level=1)
          mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
              im_info=im_info,
              gt_classes=gt_classes,
              is_crowd=is_crowd,
              gt_segms=gt_masks,
              rois=rois,
              labels_int32=roi_labels,
              num_classes=81,
              resolution=14)
    """

    helper = LayerHelper('generate_mask_labels', **locals())

    mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_has_mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)

    helper.append_op(
        type="generate_mask_labels",
        inputs={
            'ImInfo': im_info,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtSegms': gt_segms,
            'Rois': rois,
            'LabelsInt32': labels_int32
        },
        outputs={
            'MaskRois': mask_rois,
            'RoiHasMaskInt32': roi_has_mask_int32,
            'MaskInt32': mask_int32
        },
        attrs={'num_classes': num_classes,
               'resolution': resolution})

    mask_rois.stop_gradient = True
    roi_has_mask_int32.stop_gradient = True
    mask_int32.stop_gradient = True

    return mask_rois, roi_has_mask_int32, mask_int32


def generate_proposals(scores,
                       bbox_deltas,
                       im_info,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       return_rois_num=False,
                       name=None):
    """

    **Generate proposal Faster-RCNN**

    This operation proposes RoIs according to each box's
    probability of being a foreground object; the
    boxes are computed from the anchors. bbox_deltas and the scores
    of being an object are the output of RPN. The final proposals
    could be used to train detection net.

    For generating proposals, this operation performs following steps:

    1. Transposes and resizes scores and bbox_deltas in size of
       (H*W*A, 1) and (H*W*A, 4)
    2. Calculate box locations as proposals candidates. 
    3. Clip boxes to image
    4. Remove predicted boxes with small area. 
    5. Apply NMS to get final proposals as output.

    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes
            and scale is the ratio of network input size and original size.
            The data type can be float32 or float64.
        anchors(Variable): A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(float): Number of total bboxes to be kept per
            image before NMS. The data type must be float32. `6000` by default.
        post_nms_top_n(float): Number of total bboxes to be kept per
            image after NMS. The data type must be float32. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When setting True, it will return a 1D Tensor with shape [N, ] that includes Rois's
            num of each image in one batch. The N is the image's num. For example, the tensor has values [4,5] that represents
            the first image has 4 Rois, the second image has 5 Rois. It is only used in rcnn model.
            'False' by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                         im_info, anchors, variances)
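            # rois has shape [M, 4] and roi_probs has shape [M, 1], where M is the
            # number of proposals kept after NMS (at most post_nms_top_n per image).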

    """
    if in_dygraph_mode():
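        # Imperative (dygraph) fast path: call the C++ op directly with flattened
        # attributes instead of building a static graph through LayerHelper.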
        assert return_rois_num, "return_rois_num should be True in dygraph mode."
        attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
                 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta)
        rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals(
            scores, bbox_deltas, im_info, anchors, variances, *attrs)
        return rpn_rois, rpn_roi_probs, rpn_rois_num

    helper = LayerHelper('generate_proposals', **locals())

    check_variable_and_dtype(scores, 'scores', ['float32'],
                             'generate_proposals')
    check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
                             'generate_proposals')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'generate_proposals')
    check_variable_and_dtype(anchors, 'anchors', ['float32'],
                             'generate_proposals')
    check_variable_and_dtype(variances, 'variances', ['float32'],
                             'generate_proposals')

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype)
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype)
    outputs = {
        'RpnRois': rpn_rois,
        'RpnRoiProbs': rpn_roi_probs,
    }
    if return_rois_num:
        rpn_rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rpn_rois_num.stop_gradient = True
        outputs['RpnRoisNum'] = rpn_rois_num

    helper.append_op(
        type="generate_proposals",
        inputs={
            'Scores': scores,
            'BboxDeltas': bbox_deltas,
            'ImInfo': im_info,
            'Anchors': anchors,
            'Variances': variances
        },
        attrs={
            'pre_nms_topN': pre_nms_top_n,
            'post_nms_topN': post_nms_top_n,
            'nms_thresh': nms_thresh,
            'min_size': min_size,
            'eta': eta
        },
        outputs=outputs)
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True

    if return_rois_num:
        return rpn_rois, rpn_roi_probs, rpn_rois_num
    else:
        return rpn_rois, rpn_roi_probs


def box_clip(input, im_info, name=None):
    """
	
    Clip the box into the size given by im_info
    For each input box, the formula is given as follows:
        
    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0) 
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)
    
    where im_w and im_h are computed from im_info:
 
    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(width / scale)

    Args:
        input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
            the last dimension is 4 and data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout 
            (height, width, scale) representing the information of image. 
            Height and width are the input sizes and scale is the ratio of network input
            size and original size. The data type is float32 or float64.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 
    
    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or float64. 
        The shape is same as input.

        
    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[-1, 3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
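            # A worked instance of the formula above: if im_info holds
            # (height=800., width=600., scale=2.), then im_h = round(800 / 2) = 400,
            # im_w = round(600 / 2) = 300, and box coordinates are clipped to
            # 0 <= x <= 299, 0 <= y <= 399.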
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'box_clip')

    helper = LayerHelper("box_clip", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)
    inputs = {"Input": input, "ImInfo": im_info}
    helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})

    return output


def retinanet_detection_output(bboxes,
                               scores,
                               anchors,
                               im_info,
                               score_threshold=0.05,
                               nms_top_k=1000,
                               keep_top_k=100,
                               nms_threshold=0.3,
                               nms_eta=1.0):
    """
    **Detection Output Layer for the detector RetinaNet.**

    In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many 
    `FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions; this OP gets the detection results by
    performing the following steps:

    1. For each FPN level, decode box predictions according to the anchor
       boxes from at most :attr:`nms_top_k` top-scoring predictions after
       thresholding detector confidence at :attr:`score_threshold`.
    2. Merge top predictions from all levels and apply multi-class non 
       maximum suppression (NMS) on them to get the final detections.

    Args:
        bboxes(List): A list of Tensors from multiple FPN levels represents
            the location prediction for all anchor boxes. Each element is
            a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
            batch size, :math:`Mi` is the number of bounding boxes from
            :math:`i`-th FPN level and each bounding box has four coordinate
            values and the layout is [xmin, ymin, xmax, ymax]. The data type
            of each element is float32 or float64.
        scores(List): A list of Tensors from multiple FPN levels represents
            the category prediction for all anchor boxes. Each element is a
            3-D Tensor with shape :math:`[N, Mi, C]`,  :math:`N` is the batch
            size, :math:`C` is the class number (**excluding background**),
            :math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
            level. The data type of each element is float32 or float64.
        anchors(List): A list of Tensors from multiple FPN levels represents
            the locations of all anchor boxes. Each element is a 2-D Tensor
            with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
            boxes from :math:`i`-th FPN level, and each bounding box has four
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
            The data type of each element is float32 or float64.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector which are the height and width
            of the network input along with the factor scaling the origin image to
            the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score before NMS, default value is set to 0.05.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS, default value is set to
            1000.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. Default value is set to 100, -1 means keeping all bounding
            boxes after NMS step.
        nms_threshold(float): The Intersection-over-Union(IoU) threshold used to 
            filter out boxes in NMS.
        nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
            Default value is set to 1., which represents the value of
            :attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
            to be lower than 1. and the value of :attr:`nms_threshold` is set to
            be higher than 0.5, everytime a bounding box is filtered out,
            the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
            = :attr:`nms_threshold` * :attr:`nms_eta`  will not be stopped until
            the actual value of :attr:`nms_threshold` is lower than or equal to
            0.5.

    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.

    Returns:
        Variable(The data type is float32 or float64):
            The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            :math:`No` is the total number of detections in this mini-batch.
            The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
            results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
            has no detected results. If all images have no detected results,
            LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           bboxes_low = fluid.data(
               name='bboxes_low', shape=[1, 44, 4], dtype='float32')
           bboxes_high = fluid.data(
               name='bboxes_high', shape=[1, 11, 4], dtype='float32')
           scores_low = fluid.data(
               name='scores_low', shape=[1, 44, 10], dtype='float32')
           scores_high = fluid.data(
               name='scores_high', shape=[1, 11, 10], dtype='float32')
           anchors_low = fluid.data(
               name='anchors_low', shape=[44, 4], dtype='float32')
           anchors_high = fluid.data(
               name='anchors_high', shape=[11, 4], dtype='float32')
           im_info = fluid.data(
               name="im_info", shape=[1, 3], dtype='float32')
           nmsed_outs = fluid.layers.retinanet_detection_output(
               bboxes=[bboxes_low, bboxes_high],
               scores=[scores_low, scores_high],
               anchors=[anchors_low, anchors_high],
               im_info=im_info,
               score_threshold=0.05,
               nms_top_k=1000,
               keep_top_k=100,
               nms_threshold=0.45,
               nms_eta=1.0)
    """

    check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
    for i, bbox in enumerate(bboxes):
        check_variable_and_dtype(bbox, 'bbox{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_type(scores, 'scores', (list), 'retinanet_detection_output')
    for i, score in enumerate(scores):
        check_variable_and_dtype(score, 'score{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
    for i, anchor in enumerate(anchors):
        check_variable_and_dtype(anchor, 'anchor{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'retinanet_detection_output')

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores'))
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
        },
        outputs={'Out': output})
    output.stop_gradient = True
    return output


def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=0,
                   name=None):
    """

    **Multiclass NMS**
    
    This operator is to do multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes that have high scores larger than score_threshold, if providing this
    threshold, then selects the largest nms_top_k confidences scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have high IOU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on parameters of nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k number of total bboxes are to be kept
    per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4)  which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)

            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.score = (0.3, 0.3, 0.1)

            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0


        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],    
                         [2, 0.4, 2.0, 3.0, 7.0, 5.0]]
                         
            Out format is (label, confidence, xmin, ymin, xmax, ymax)
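
    (In this example the overlap of box1 and box2 is the region (3.0, 4.0, 7.0, 5.0)
    with area 4; the union area is 10 + 5 - 4 = 11, hence iou = 4/11 > nms_threshold.)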
    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is 
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the 
                           class number. The data type is float32 or float64.   
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is 
                           number of bounding boxes. For each category there 
                           are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
X
xiaoting 已提交
3310
                           case with shape [M, C, 4].The data type is float32 or float64. 
        background_label (int): The index of background label, the background 
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided, 
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS: if nms_eta is less than
                         1.0, the NMS threshold shrinks by this factor after each
                         box is selected. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax],
             or a 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If no boxes are detected for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1})

    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'multiclass_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
    check_type(normalized, 'normalized', bool, 'multiclass_nms')
    check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
    check_type(background_label, 'background_label', int, 'multiclass_nms')

    helper = LayerHelper('multiclass_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
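    # The NMS result is a post-processing output; mark it as stop_gradient so
    # no gradient is propagated back through it.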
    output.stop_gradient = True

    return output


def locality_aware_nms(bboxes,
                       scores,
                       score_threshold,
                       nms_top_k,
                       keep_top_k,
                       nms_threshold=0.3,
                       normalized=True,
                       nms_eta=1.,
                       background_label=-1,
                       name=None):
    """
    **Locality Aware NMS**

    `Locality Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware
    non maximum suppression (LANMS) on boxes and scores.

    First, this operator merges boxes and their scores according to their IOU
    (intersection over union). In the NMS step, it greedily selects a subset of
    detection bounding boxes whose scores are larger than score_threshold, if that
    threshold is provided, and then keeps the nms_top_k highest-confidence candidates
    if nms_top_k is larger than -1. It then prunes away boxes that have a high
    IOU overlap with already selected boxes, using adaptive threshold NMS controlled
    by nms_threshold and nms_eta.

    After the NMS step, at most keep_top_k bboxes are kept per image if
    keep_top_k is larger than -1.
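
    For intuition, the merge step can be pictured as a score-weighted average of
    overlapping boxes. Below is a rough sketch following the description in the
    EAST paper; it is an illustration only, not the exact kernel of this operator:

    .. code-block:: python

        import numpy as np

        def weighted_merge(box_a, score_a, box_b, score_b):
            # coordinates are averaged with the scores as weights,
            # and the scores of the merged boxes are summed
            merged_box = (score_a * box_a + score_b * box_b) / (score_a + score_b)
            return merged_box, score_a + score_b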

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4, 8, 16, 24 or 32]
                           represents the predicted locations of M bounding
                           boxes, N is the batch size. Each bounding box
                           has four coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
                           predicted confidence predictions. N is the batch
                           size, C is the class number, M is number of bounding
                           boxes. Currently only one class is supported. For each category
                           there are M scores in total, corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension of
                           BBoxes. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: -1
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS: if nms_eta is less than
                         1.0, the NMS threshold shrinks by this factor after each
                         box is selected. Default: 1.0
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
                          Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If no boxes are detected for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1}). The data type is float32 or float64.


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
                                      dtype='float32')
            scores = fluid.data(name='scores', shape=[None, 1, 81],
                                      dtype='float32')
            out = fluid.layers.locality_aware_nms(bboxes=boxes,
                                              scores=scores,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
                             'locality_aware_nms')
    check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
                             'locality_aware_nms')
    check_type(background_label, 'background_label', int, 'locality_aware_nms')
    check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
    check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
    check_type(normalized, 'normalized', bool, 'locality_aware_nms')

    shape = scores.shape
    assert len(shape) == 3, "dim size of scores must be 3"
    assert shape[
        1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"

    helper = LayerHelper('locality_aware_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    out = {'Out': output}

    helper.append_op(
        type="locality_aware_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def matrix_nms(bboxes,
               scores,
               score_threshold,
               post_threshold,
               nms_top_k,
               keep_top_k,
               use_gaussian=False,
               gaussian_sigma=2.,
               background_label=0,
               normalized=True,
               return_index=False,
               name=None):
    """
    **Matrix NMS**

    This operator performs matrix non maximum suppression (NMS).

    It first selects a subset of candidate bounding boxes whose scores are higher
    than score_threshold (if provided), then keeps the top k candidates if
    nms_top_k is larger than -1. The scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes are kept per image if
    keep_top_k is larger than -1.
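
    For intuition, a rough NumPy sketch of the decay step is given below. It
    follows the Matrix NMS formulation in the SOLOv2 paper; the exact kernel and
    the sigma convention used by this operator may differ:

    .. code-block:: python

        import numpy as np

        def decay_scores(ious, scores, use_gaussian=False, gaussian_sigma=2.0):
            # ious: [M, M] pairwise IoU of boxes already sorted by descending score
            ious = np.triu(ious, k=1)        # keep overlaps with higher-scored boxes
            cmax = ious.max(axis=0)          # each box's largest such overlap
            if use_gaussian:
                decay = np.exp(gaussian_sigma * (cmax[:, None] ** 2 - ious ** 2))
            else:
                decay = (1.0 - ious) / (1.0 - cmax[:, None])
            # each box is decayed by its most suppressive higher-scored neighbour
            return scores * decay.min(axis=0)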

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
                           predicted locations of M bounding boxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           number of bounding boxes. For each category there
                           are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score.
        post_threshold (float): Threshold to filter out bounding boxes with
                                low confidence score AFTER decaying.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        use_gaussian (bool): Use Gaussian as the decay function. Default: False
        gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether return selected index. Default: False
        name(str): Name of the matrix nms op. Default: None.

    Returns:
        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, one Variable(Out) is returned.

        Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
             detection results.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1})

        Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
            selected indices, which are absolute values cross batches.

    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.matrix_nms(bboxes=boxes,
                                          scores=scores,
                                          background_label=0,
                                          score_threshold=0.5,
                                          post_threshold=0.1,
                                          nms_top_k=400,
                                          keep_top_k=200,
                                          normalized=False)
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'matrix_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'matrix_nms')
    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
    check_type(normalized, 'normalized', bool, 'matrix_nms')
    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
    check_type(background_label, 'background_label', int, 'matrix_nms')

    helper = LayerHelper('matrix_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int')
    helper.append_op(
        type="matrix_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'post_threshold': post_threshold,
            'nms_top_k': nms_top_k,
            'gaussian_sigma': gaussian_sigma,
            'use_gaussian': use_gaussian,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output,
                 'Index': index})
    output.stop_gradient = True

    if return_index:
        return output, index
    else:
        return output


def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             rois_num=None,
                             name=None):
    """
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and
    the referring level. Besides, to restore the order of proposals, we return
    an array which indicates the original index of the rois in the current
    proposals. The FPN level of each roi is computed with the following formula:
    
    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
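
    For example, the level assignment can be sketched as follows (assuming a
    base-2 logarithm as in the FPN paper and clipping to [min_level, max_level];
    this is an illustration only):

    .. code-block:: python

        import numpy as np

        def fpn_level(roi, refer_level=4, refer_scale=224, min_level=2, max_level=5):
            xmin, ymin, xmax, ymax = roi
            roi_scale = np.sqrt((xmax - xmin) * (ymax - ymin))   # sqrt(BBoxArea)
            level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
            return int(np.clip(level, min_level, max_level))

        fpn_level([0., 0., 112., 112.])   # roi scale 112 maps to level 3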

    Args:

        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is 
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of FPN layer where the proposals come 
            from.
        max_level(int32): The highest level of FPN layer where the proposals
            come from.
        refer_level(int32): The referring level of FPN layer with specified scale.
        refer_scale(int32): The referring scale of FPN layer with specified level.
        rois_num(Tensor): 1-D Tensor contains the number of RoIs in each image. 
            The shape is [B] and data type is int32. B is the number of images.
            If it is not None then return a list of 1-D Tensor. Each element 
            is the output RoIs' number of each image on the corresponding level
            and the shape is [B]. None by default.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 

    Returns:
        Tuple:

        multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
        and data type of float32 or float64. The length is
        max_level-min_level+1. The proposals in each FPN level.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is 
        the number of total rois. The data type is int32. It is
        used to restore the order of fpn_rois.

        rois_num_per_level(List): A list of 1-D Tensor and each Tensor is
        the RoIs' number in each image on the corresponding level. The shape
        is [B] and the data type is int32. B is the number of images.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """
    num_lvl = max_level - min_level + 1

    if in_dygraph_mode():
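        # Dygraph fast path: invoke the C++ op directly and return eagerly; the
        # static-graph path below builds the op through LayerHelper instead.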
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
                 refer_level, 'refer_scale', refer_scale)
        multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
            fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
        return multi_rois, restore_ind, rois_num_per_level

    check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
                             'distribute_fpn_proposals')
    helper = LayerHelper('distribute_fpn_proposals', **locals())
    dtype = helper.input_dtype('fpn_rois')
    multi_rois = [
        helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
    ]

    restore_ind = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {'FpnRois': fpn_rois}
    outputs = {
        'MultiFpnRois': multi_rois,
        'RestoreIndex': restore_ind,
    }

    if rois_num is not None:
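        # Per-image RoI counts were given, so also emit per-level counts for
        # each image, letting callers split the distributed RoIs back per image.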
        inputs['RoisNum'] = rois_num
        rois_num_per_level = [
            helper.create_variable_for_type_inference(dtype='int32')
            for i in range(num_lvl)
        ]
        outputs['MultiLevelRoIsNum'] = rois_num_per_level

    helper.append_op(
        type='distribute_fpn_proposals',
        inputs=inputs,
        outputs=outputs,
        attrs={
            'min_level': min_level,
            'max_level': max_level,
            'refer_level': refer_level,
            'refer_scale': refer_scale
        })
    if rois_num is not None:
        return multi_rois, restore_ind, rois_num_per_level
    return multi_rois, restore_ind


@templatedoc()
def box_decoder_and_assign(prior_box,
                           prior_box_var,
                           target_box,
                           box_score,
                           box_clip,
                           name=None):
    """
    ${comment}
    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 

    Returns:
        Tuple:

        decode_box(${decode_box_type}): ${decode_box_comment}

        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
                name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4*81], dtype='float32')
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32')
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)

    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_decoder_and_assign')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_decoder_and_assign')
    check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
                             'box_decoder_and_assign')
    helper = LayerHelper("box_decoder_and_assign", **locals())

    decoded_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
    output_assign_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    helper.append_op(
        type="box_decoder_and_assign",
        inputs={
            "PriorBox": prior_box,
            "PriorBoxVar": prior_box_var,
            "TargetBox": target_box,
            "BoxScore": box_score
        },
        attrs={"box_clip": box_clip},
        outputs={
            "DecodeBox": decoded_box,
            "OutputAssignBox": output_assign_box
        })
    return decoded_box, output_assign_box


def collect_fpn_proposals(multi_rois,
                          multi_scores,
                          min_level,
                          max_level,
                          post_nms_top_n,
                          rois_num_per_level=None,
                          name=None):
    """
    **This OP only supports LoDTensor as input**. Concat multi-level RoIs 
    (Region of Interest) and select N RoIs with respect to multi_scores. 
    This operation performs the following steps:

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id
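
    For a single image, steps 2-4 can be sketched as follows (illustration only;
    the real operator additionally handles LoD/batch information and step 5):

    .. code-block:: python

        import numpy as np

        def collect_single_image(rois_per_level, scores_per_level, post_nms_top_n):
            rois = np.concatenate(rois_per_level, axis=0)              # step 2
            scores = np.concatenate(scores_per_level, axis=0).reshape(-1)
            keep = np.argsort(-scores)[:post_nms_top_n]                # step 3
            return rois[keep]                                          # step 4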

    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D 
            LoDTensor with shape [N, 4] and data type is float32 or float64, 
            N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Element in list 
            is 2-D LoDTensor with shape [N, 1] and data type is float32 or
            float64, N is the number of RoIs.
        min_level(int): The lowest level of FPN layer to collect
        max_level(int): The highest level of FPN layer to collect
        post_nms_top_n(int): The number of selected RoIs
        rois_num_per_level(list, optional): The List of RoIs' numbers. 
            Each element is 1-D Tensor which contains the RoIs' number of each 
            image on each level and the shape is [B] and data type is 
            int32, B is the number of images. If it is not None then return 
            a 1-D Tensor contains the output RoIs' number of each image and 
            the shape is [B]. Default: None
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default.        

    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is 
        float32 or float64. Selected RoIs. 

        rois_num(Tensor): 1-D Tensor that contains the RoIs' number of each
        image. The shape is [B] and data type is int32. B is the number of
        images.

    Examples:
        .. code-block:: python
           
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))

            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois, 
                multi_scores=multi_scores,
                min_level=2, 
                max_level=5, 
                post_nms_top_n=2000)
    """
    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
    num_lvl = max_level - min_level + 1
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]

    if in_dygraph_mode():
        assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
        attrs = ('post_nms_topN', post_nms_top_n)
        output_rois, rois_num = core.ops.collect_fpn_proposals(
            input_rois, input_scores, rois_num_per_level, *attrs)
        return output_rois, rois_num

    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
                'collect_fpn_proposals')
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True

    inputs = {
        'MultiLevelRois': input_rois,
        'MultiLevelScores': input_scores,
    }
    outputs = {'FpnRois': output_rois}
    if rois_num_per_level is not None:
        inputs['MultiLevelRoIsNum'] = rois_num_per_level
        rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rois_num.stop_gradient = True
        outputs['RoisNum'] = rois_num
    helper.append_op(
        type='collect_fpn_proposals',
        inputs=inputs,
        outputs=outputs,
        attrs={'post_nms_topN': post_nms_top_n})
    if rois_num_per_level is not None:
        return output_rois, rois_num
    return output_rois