#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

from __future__ import print_function

from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
import math
import six
import numpy
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

__all__ = [
    'prior_box',
    'density_prior_box',
    'multi_box_head',
    'bipartite_match',
    'target_assign',
    'detection_output',
    'ssd_loss',
    'rpn_target_assign',
    'retinanet_target_assign',
    'sigmoid_focal_loss',
    'anchor_generator',
    'roi_perspective_transform',
    'generate_proposal_labels',
    'generate_proposals',
    'generate_mask_labels',
    'iou_similarity',
    'box_coder',
    'polygon_box_transform',
    'yolov3_loss',
    'yolo_box',
    'box_clip',
    'multiclass_nms',
    'locality_aware_nms',
    'retinanet_detection_output',
    'distribute_fpn_proposals',
    'box_decoder_and_assign',
    'collect_fpn_proposals',
]


def retinanet_target_assign(bbox_pred,
                            cls_logits,
                            anchor_box,
                            anchor_var,
                            gt_boxes,
                            gt_labels,
                            is_crowd,
                            im_info,
                            num_classes=1,
                            positive_overlap=0.5,
                            negative_overlap=0.4):
    """
    **Target Assign Layer for the detector RetinaNet.**

    This OP finds out positive and negative samples from all anchors
    for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
    and assigns target labels for classification along with target locations for
    regression to each sample, then takes out the part belonging to positive and
    negative samples from category prediction( :attr:`cls_logits`) and location
    prediction( :attr:`bbox_pred`) which belong to all anchors.

    The principles for selecting positive and negative samples are as follows
    (a NumPy sketch of these rules is given after this function):

    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.

    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.

    3. An anchor is assigned to background when its IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.

    4. Anchors which do not meet the above conditions do not participate in
    the training process.

    Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
    regression for each anchor, hence the target label for each positive(or negative)
    sample is a :math:`C`-vector and the target locations for each positive sample
    is a 4-vector. As for a positive sample, if the category of its assigned
    ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0; its box
    regression targets are computed as the offset between itself and its assigned
    ground-truth box. As for a negative sample, all entries in its length :math:`C`
    label vector are set to 0 and box regression targets are omitted because
    negative samples do not participate in the training process of location
    regression.

    After the assignment, the part belonging to positive and negative samples is
    taken out from category prediction( :attr:`cls_logits` ), and the part
    belonging to positive samples is taken out from location
    prediction( :attr:`bbox_pred` ).

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
            the predicted locations of all anchors. :math:`N` is the batch size( the
            number of images in a mini-batch), :math:`M` is the number of all anchors
            of one image, and each anchor has 4 coordinate values. The data type of
            :attr:`bbox_pred` is float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
            the predicted categories of all anchors. :math:`N` is the batch size,
            :math:`M` is the number of all anchors of one image, and :math:`C` is
            the number of categories (**Notice: excluding background**). The data type
            of :attr:`cls_logits` is float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
            the locations of all anchors. :math:`M` is the number of all anchors of
            one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
            :math:`[xmin, ymin]` is the left top coordinate of the anchor box,
            :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
            The data type of :attr:`anchor_box` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator` 
            for the generation of :attr:`anchor_box`.
        anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded 
            factors of anchor locations used in the loss function. :math:`M` is the number of
            all anchors of one image, each anchor possesses a 4-vector expanded factor.
            The data type of :attr:`anchor_var` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator`
            for the generation of :attr:`anchor_var`.
        gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
            locations of all ground-truth boxes. :math:`G` is the total number of
            all ground-truth boxes in a mini-batch, and each ground-truth box has 4
            coordinate values. The data type of :attr:`gt_boxes` is float32 or
            float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
            categories of all ground-truth boxes, and the values are in the range of
            :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
            in a mini-batch, and each ground-truth box has one category. The data type
            of :attr:`gt_labels` is int32.
        is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
            indicates whether a ground-truth box is a crowd. If the value is 1, the
            corresponding box is a crowd and is ignored during training. :math:`G` is
            the total number of all ground-truth boxes in a mini-batch. The data type
            of :attr:`is_crowd` is int32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector consisting of the height and
            width of the network input along with the factor scaling the original
            image to the network input. The data type of :attr:`im_info` is float32.
        num_classes(int32): The number of categories for classification, the default
            value is 1.
        positive_overlap(float32): Minimum overlap required between an anchor
            and ground-truth box for the anchor to be a positive sample, the default
            value is 0.5.
        negative_overlap(float32): Maximum overlap allowed between an anchor
            and ground-truth box for the anchor to be a negative sample, the default
            value is 0.4. :attr:`negative_overlap` should be less than or equal to
            :attr:`positive_overlap`; if not, :attr:`negative_overlap` is used as
            the actual value of :attr:`positive_overlap`.

    Returns:
        A tuple with 6 Variables:
        
        **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
        category prediction belonging to positive and negative samples. :math:`F`
        is the number of positive samples in a mini-batch, :math:`B` is the number
        of negative samples, and :math:`C` is the number of categories
        (**Notice: excluding background**). The data type of :attr:`predict_scores`
        is float32 or float64.

        **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each sample has 4 coordinate values. The data type
        of :attr:`predict_location` is float32 or float64.

        **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
        target labels for classification belonging to positive and negative
        samples. :math:`F` is the number of positive samples, :math:`B` is the
        number of negative samples, and each sample has one target category. The data type
        of :attr:`target_label` is int32.

        **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        target locations for box regression belonging to positive samples.
        :math:`F` is the number of positive samples, and each sample has 4
        coordinate values. The data type of :attr:`target_bbox` is float32 or
        float64.

        **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive; if a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
        of total positive samples in a mini-batch, and each sample has 4
        coordinate values. The data type of :attr:`bbox_inside_weight` is float32
        or float64.

        **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
        of positive samples. :math:`N` is the batch size. **Notice: The number
        of positive samples is used as the denominator of later loss function,
        to avoid the condition that the denominator is zero, this OP has added 1
        to the actual number of positive samples of each image.** The data type of
        :attr:`fg_num` is int32.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
                            dtype='float32')
          cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
                            dtype='float32')
          anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
                            dtype='float32')
          anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
                            dtype='float32')
          gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
                            dtype='float32')
          gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
                            dtype='int32')
          is_crowd = fluid.data(name='is_crowd', shape=[1],
                            dtype='int32')
          im_info = fluid.data(name='im_info', shape=[1, 3],
                            dtype='float32')
          score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
                fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
                anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)

    """

    helper = LayerHelper('retinanet_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    fg_num = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="retinanet_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'GtLabels': gt_labels,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
            'ForegroundNumber': fg_num
        },
        attrs={
            'positive_overlap': positive_overlap,
            'negative_overlap': negative_overlap
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True
    fg_num.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
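

# A minimal NumPy reference sketch of the four anchor-assignment rules
# documented in retinanet_target_assign above. It is added for exposition
# only (an assumption, not the kernel this OP runs): it labels each anchor
# 1 (positive), 0 (background) or -1 (ignored) from an IoU matrix of shape
# [num_anchors, num_gt].
def _assign_anchor_labels_ref(iou, positive_overlap=0.5, negative_overlap=0.4):
    labels = numpy.full(iou.shape[0], -1, dtype='int32')  # rule 4: ignored
    max_iou = iou.max(axis=1)
    labels[max_iou < negative_overlap] = 0  # rule 3: background anchors
    labels[max_iou >= positive_overlap] = 1  # rule 2: IoU above threshold
    labels[iou.argmax(axis=0)] = 1  # rule 1: highest-IoU anchor per gt box
    return labels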


def rpn_target_assign(bbox_pred,
                      cls_logits,
                      anchor_box,
                      anchor_var,
                      gt_boxes,
                      is_crowd,
                      im_info,
                      rpn_batch_size_per_im=256,
                      rpn_straddle_thresh=0.0,
                      rpn_fg_fraction=0.5,
                      rpn_positive_overlap=0.7,
                      rpn_negative_overlap=0.3,
                      use_random=True):
    """
    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**

    Given the Intersection-over-Union (IoU) overlap between anchors and ground
    truth boxes, this layer assigns classification and regression targets to
    each anchor; these targets are used to train the RPN. The classification
    target is a binary class label (of being an object or not). Following the
    Faster-RCNN paper, positive labels are assigned to two kinds of anchors:
    (i) the anchor/anchors with the highest IoU overlap with a ground-truth
    box, or (ii) an anchor that has an IoU overlap higher than
    rpn_positive_overlap(0.7) with any ground-truth box. Note that a single
    ground-truth box may assign positive labels to multiple anchors. An anchor
    is assigned a negative label when its IoU overlap is lower than
    rpn_negative_overlap(0.3) for all ground-truth boxes. Anchors that are
    neither positive nor negative do not contribute to the training objective.
    The regression targets are the encoded ground-truth boxes associated with
    the positive anchors. (A sketch of how the sampled minibatch is bounded is
    given after this function.)

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence. N is the batch size, 1 is the sigmoid output
            over foreground and background, and M is the number of bounding boxes.
            The data type can be float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box. The data type can be float32 or float64.
        anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded 
            variances of anchors. The data type can be float32 or float64.
        gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a
                             ground-truth box is a crowd. The data type must be int32.
        im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size;
            the 3 values are the height, width and scale of each image.
        rpn_batch_size_per_im(int): Total number of RPN examples per image.
                                    The data type must be int32.
        rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
            by straddle_thresh pixels. The data type must be float32.
        rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
            foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
        rpn_positive_overlap(float): Minimum overlap required between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a positive
            example. The data type must be float32.
        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.

    Returns:
        tuple:
        A tuple(predicted_scores, predicted_location, target_label,
        target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN, and the
        target_label and target_bbox are the corresponding ground truth.
        The predicted_location is a 2D Tensor with shape
        [F, 4], and the shape of target_bbox is the same as the shape of
        the predicted_location; F is the number of the foreground
        anchors. The predicted_scores is a 2D Tensor with shape
        [F + B, 1], and the shape of target_label is the same as the shape
        of the predicted_scores; B is the number of the background
        anchors. F and B depend on the input of this operator.
        Bbox_inside_weight represents whether the predicted location is a
        fake foreground (fake_fg) or not, and its shape is [F, 4].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
            cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
            anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
            anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
                bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)

    """

    helper = LayerHelper('rpn_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    helper.append_op(
        type="rpn_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight
        },
        attrs={
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
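

# A minimal reference sketch (an assumption for exposition, not this OP's
# kernel) of how the rpn_batch_size_per_im and rpn_fg_fraction arguments
# documented above bound the sampled RPN minibatch for one image, given
# lists of candidate foreground and background anchor indices.
def _sample_rpn_minibatch_ref(fg_inds, bg_inds, rpn_batch_size_per_im=256,
                              rpn_fg_fraction=0.5):
    num_fg = int(rpn_fg_fraction * rpn_batch_size_per_im)  # e.g. 128
    fg_inds = fg_inds[:num_fg]  # keep at most num_fg foreground anchors
    num_bg = rpn_batch_size_per_im - len(fg_inds)  # fill the rest with bg
    bg_inds = bg_inds[:num_bg]
    return fg_inds, bg_inds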


def sigmoid_focal_loss(x, label, fg_num, gamma=2, alpha=0.25):
    """
    **Sigmoid Focal Loss Operator.**

    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
    measured between the sigmoid value and target label. 

    The focal loss is given as follows:

    .. math::
  
        \\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
        \\begin{array}{rcl}
        - \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
        - \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1) != label_{i,\\,0}}
        \\end{array} \\right.


    We know that
    
    .. math::
        \\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}


    Args:
        x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
            all samples. :math:`N` is the number of all samples responsible for optimization in
            a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
            is the total number of positive and negative samples in a mini-batch; Samples are images
            for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
            is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
            float32 or float64.
        label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
            classification. :math:`N` is the number of all samples responsible for optimization in a
            mini-batch, each sample has one target category. The values for positive samples are in the
            range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
            is int32.
        fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
            mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
        gamma(float): Hyper-parameter to balance the easy and hard examples. Default value is
            set to 2.0.
        alpha(float): Hyper-parameter to balance the positive and negative examples. Default value
            is set to 0.25.

    Returns:
        Variable(the data type is float32 or float64): 
            A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
            tensor :attr:`x`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.data(name='data', shape=[10,80], dtype='float32')
            label = fluid.data(name='label', shape=[10,1], dtype='int32')
            fg_num = fluid.data(name='fg_num', shape=[1], dtype='int32')
            loss = fluid.layers.sigmoid_focal_loss(x=input,
                                                   label=label,
                                                   fg_num=fg_num,
                                                   gamma=2.,
                                                   alpha=0.25)
    """

    helper = LayerHelper("sigmoid_focal_loss", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="sigmoid_focal_loss",
        inputs={"X": x,
                "Label": label,
                "FgNum": fg_num},
        attrs={"gamma": gamma,
               "alpha": alpha},
        outputs={"Out": out})
    return out
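

# A minimal NumPy reference sketch of the focal loss formula documented above.
# It is added for exposition only (an assumption, not the kernel this OP runs);
# `x` is [N, C] and `label` is [N, 1] with values in [0, C].
def _sigmoid_focal_loss_ref(x, label, fg_num, gamma=2.0, alpha=0.25):
    p = 1.0 / (1.0 + numpy.exp(-x))  # sigma(x), element-wise
    # pos[i, j] is True iff (j + 1) == label[i, 0], matching the cases above
    pos = (numpy.arange(1, x.shape[1] + 1)[None, :] == label)
    loss = numpy.where(pos,
                       -alpha * (1 - p)**gamma * numpy.log(p),
                       -(1 - alpha) * p**gamma * numpy.log(1 - p))
    return loss / fg_num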


def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0,
                     return_index=False):
    """
    Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing following steps:

    1. Decode input bounding box predictions according to the prior boxes and
       regression locations.
    2. Get the final detection results by applying multi-class non-maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. Data type should be
            float32 or float64. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence. Data type should be float32
            or float64. N is the batch size, C is the
            number of classes, and M is the number of bounding boxes.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax]. Data type
            should be float32 or float64.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
            of variance. Data type should be float32 or float64.
        background_label(int): The index of background label,
            the background label will be ignored. If set to -1, then all
            categories will be considered. Default: 0.
        nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold and before NMS. Default: 400.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
            Default: 0.01.
        nms_eta(float): The parameter for adaptive NMS. It works only when the
            value is less than 1.0. Default: 1.0.
        return_index(bool): Whether to return the selected index. Default: False

    Returns:

        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, a tuple with one Variable(Out) is returned. 

        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
        Data type is the same as input (loc). Each row has six values:
        [label, confidence, xmin, ymin, xmax, ymax]. `No` is
        the total number of detections in this mini-batch. For each instance,
        the offsets in first dimension are called LoD, the offset number is
        N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
        detected results, if it is 0, the i-th image has no detected results.

        Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
        with shape [No, 1] represents the selected index, whose type is integer.
        The index is the absolute index across batches. No is the same number
        as Out. If the index is used to gather other attributes such as age,
        one needs to first reshape the input (N, M, 1) to (N * M, 1), where
        N is the batch size and M is the number of boxes.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
            loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
            nmsed_outs, index = fluid.layers.detection_output(scores=scores,
                                       loc=loc,
                                       prior_box=pb,
                                       prior_box_var=pbv,
                                       return_index=True)
    """
    helper = LayerHelper("detection_output", **locals())
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size')
    scores = nn.softmax(input=scores)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_variable_for_type_inference(
        dtype=decoded_box.dtype)
    if return_index:
        index = helper.create_variable_for_type_inference(dtype='int')
        helper.append_op(
            type="multiclass_nms2",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs,
                     'Index': index},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
        index.stop_gradient = True
    else:
        helper.append_op(
            type="multiclass_nms",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
    nmsed_outs.stop_gradient = True
    if return_index:
        return nmsed_outs, index
    return nmsed_outs
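

# A minimal usage sketch (an assumption for exposition) of reading the
# LoD-based output of detection_output documented above: it slices the
# per-image detections out of the fetched [No, 6] LoDTensor using the
# level-0 LoD offsets (fetch with return_numpy=False to keep the LoD).
def _split_lod_detections_ref(out_lod_tensor):
    out_np = numpy.array(out_lod_tensor)  # shape [No, 6]
    lod = out_lod_tensor.lod()[0]  # e.g. [0, 3, 7] for a batch of 2 images
    return [out_np[lod[i]:lod[i + 1]] for i in range(len(lod) - 1)]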


@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment}. The data type is float32 or float64.
        y (Variable): ${y_comment}. The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set to True by default.
    Returns:
        Variable: ${out_comment}. The data type is the same as that of x.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)

            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)

            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)

            [out_iou] = exe.run(test_program,
                    fetch_list=iou,
                    feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
                                         [0., 0., 1.0, 1.0]]).astype('float32'),
                          'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
            # out_iou is [[0.2857143],
            #             [0.       ]] with shape: [2, 1]
    """
    helper = LayerHelper("iou_similarity", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="iou_similarity",
        inputs={"X": x,
                "Y": y},
        attrs={"box_normalized": box_normalized},
        outputs={"Out": out})
    return out
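

# A minimal pure-Python cross-check (an assumption for exposition, not this
# OP's kernel) of the IoU value shown in the iou_similarity example above,
# for normalized [xmin, ymin, xmax, ymax] boxes.
def _iou_ref(a, b):
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))  # intersection width
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))  # intersection height
    inter = ix * iy
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)
# _iou_ref([0.5, 0.5, 2.0, 2.0], [1.0, 1.0, 2.5, 2.5]) ~= 0.2857143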


@templatedoc()
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              name=None,
              axis=0):
    """
    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.
    
    The Encoding schema described below:

    .. math::

        ox = (tx - px) / pw / pxv

        oy = (ty - py) / ph / pyv

        ow = \log(|tw / pw|) / pwv

        oh = \log(|th / ph|) / phv

    The Decoding schema described below:
    
    .. math::
  
        ox = (pw * pxv * tx + px) - tw / 2

        oy = (ph * pyv * ty + py) - th / 2

        ow = \exp(pwv * tw) * pw + tw / 2

        oh = \exp(phv * th) * ph + th / 2   

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, 
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote 
    the priorbox's (anchor) center coordinates, width and height. `pxv`, 
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, 
    `ow`, `oh` denote the encoded/decoded coordinates, width and height. 

    During Box Decoding, two modes for broadcast are supported. Say target 
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or 
    [M, 4]. Then prior box will broadcast to target box along the 
    assigned axis. 

    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape 
            [M, 4] holds M boxes and data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the 
            left top coordinate of the anchor box, if the input is image feature
            map, they are close to the origin of the coordinate system. 
            [xmax, ymax] is the right bottom coordinate of the anchor box.       
        prior_box_var(List|Variable|None): prior_box_var supports three types
            of input. One is a Variable with shape [M, 4] which holds M groups
            of variances and whose data type is float32 or float64. The second
            is a list consisting of 4 elements shared by all boxes, with data
            type float32 or float64. The third is None, in which case variances
            are not involved in the calculation.
        target_box(Variable): This input can be a 2-D LoDTensor with shape 
            [N, 4] when code_type is 'encode_center_size'. This input also can 
            be a 3-D Tensor with shape [N, M, 4] when code_type is 
            'decode_center_size'. Each box is represented as 
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64. 
            This tensor can contain LoD information to represent a batch of inputs. 
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size` 
            by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set to True by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually the name does not need to be set,
            and it is None by default.
        axis(int): Which axis in PriorBox to broadcast for box decode, 
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and 
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is 
            `decode_center_size`. Set 0 by default. 

    Returns:
        Variable:

        output_box(Variable): When code_type is 'encode_center_size', the 
        output tensor of box_coder_op with shape [N, M, 4] representing the 
        result of N target boxes encoded with M Prior boxes and variances. 
        When code_type is 'decode_center_size', N represents the batch size 
        and M represents the number of decoded boxes.

    Examples:
 
        .. code-block:: python
 
            import paddle.fluid as fluid
            # For encode
            prior_box_encode = fluid.data(name='prior_box_encode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_encode = fluid.data(name='target_box_encode',
                                   shape=[81, 4],
                                   dtype='float32')
            output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_encode,
                                    code_type="encode_center_size")
            # For decode
            prior_box_decode = fluid.data(name='prior_box_decode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                   shape=[512, 81, 4],
                                   dtype='float32')
            output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_decode,
                                    code_type="decode_center_size",
                                    box_normalized=False,
                                    axis=1)
    """
    helper = LayerHelper("box_coder", **locals())

    output_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis
    }
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input variance of box_coder must be Variable or list")
    helper.append_op(
        type="box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box
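

# A minimal NumPy reference sketch of the encode_center_size schema documented
# in box_coder above. It is added for exposition only (an assumption, not the
# kernel this OP runs); boxes are [xmin, ymin, xmax, ymax] and v holds the
# variances [pxv, pyv, pwv, phv].
def _encode_center_size_ref(t, p, v=(0.1, 0.1, 0.2, 0.2)):
    pw, ph = p[2] - p[0], p[3] - p[1]  # prior box width and height
    px, py = p[0] + pw / 2., p[1] + ph / 2.  # prior box center
    tw, th = t[2] - t[0], t[3] - t[1]  # target box width and height
    tx, ty = t[0] + tw / 2., t[1] + th / 2.  # target box center
    ox = (tx - px) / pw / v[0]
    oy = (ty - py) / ph / v[1]
    ow = numpy.log(abs(tw / pw)) / v[2]
    oh = numpy.log(abs(th / ph)) / v[3]
    return ox, oy, ow, oh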


@templatedoc()
def polygon_box_transform(input, name=None):
    """
    ${comment}

    Args:
        input(Variable): The input with shape [batch_size, geometry_channels, height, width].
                         A Tensor with type float32, float64.
        name(str, optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Variable: The output with the same shape as input. A Tensor with type float32, float64.

    Examples:
        .. code-block:: python
            
            import paddle.fluid as fluid
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
            out = fluid.layers.polygon_box_transform(input)
    """
    helper = LayerHelper("polygon_box_transform", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type="polygon_box_transform",
        inputs={"Input": input},
        attrs={},
        outputs={"Output": output})
    return output


@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
                gt_box,
                gt_label,
                anchors,
                anchor_mask,
                class_num,
                ignore_thresh,
                downsample_ratio,
                gt_score=None,
                use_label_smooth=True,
                name=None):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored.
                          x, y is the center coordinate of a box, w, h are its
                          width and height, and x, y, w, h should be divided by
                          the input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in 
                          an image. The data type is float32 or float64.
        gt_label (Variable): class id of ground truth boxes, should be in shape
                            of [N, B]. The data type is int32.
        anchors (list|tuple): ${anchors_comment}
        anchor_mask (list|tuple): ${anchor_mask_comment}
        class_num (int): ${class_num_comment}
        ignore_thresh (float): ${ignore_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): The default value is None. Normally there is no need
                       for the user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`
        gt_score (Variable): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): ${use_label_smooth_comment}

    Returns:
        Variable: A 1-D tensor with shape [N], the value of yolov3 loss

    Raises:
        TypeError: Input x of yolov3_loss must be Variable
        TypeError: Input gtbox of yolov3_loss must be Variable
        TypeError: Input gtlabel of yolov3_loss must be Variable
        TypeError: Input gtscore of yolov3_loss must be None or Variable
        TypeError: Attr anchors of yolov3_loss must be list or tuple
        TypeError: Attr class_num of yolov3_loss must be an integer
        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
        TypeError: Attr use_label_smooth of yolov3_loss must be a bool value

    Examples:
      .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
          gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
          gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
          gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
          anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
          anchor_mask = [0, 1, 2]
          loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
                                          gt_score=gt_score, anchors=anchors, 
                                          anchor_mask=anchor_mask, class_num=80,
                                          ignore_thresh=0.7, downsample_ratio=32)
    """
    helper = LayerHelper('yolov3_loss', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolov3_loss must be Variable")
    if not isinstance(gt_box, Variable):
        raise TypeError("Input gtbox of yolov3_loss must be Variable")
    if not isinstance(gt_label, Variable):
        raise TypeError("Input gtlabel of yolov3_loss must be Variable")
    if gt_score is not None and not isinstance(gt_score, Variable):
        raise TypeError("Input gtscore of yolov3_loss must be None or Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
    if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
        raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolov3_loss must be an integer")
    if not isinstance(ignore_thresh, float):
        raise TypeError(
            "Attr ignore_thresh of yolov3_loss must be a float number")
    if not isinstance(use_label_smooth, bool):
        raise TypeError(
            "Attr use_label_smooth of yolov3_loss must be a bool value")

    loss = helper.create_variable_for_type_inference(dtype=x.dtype)

    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

976 977
    inputs = {
        "X": x,
        "GTBox": gt_box,
        "GTLabel": gt_label,
    }
    if gt_score is not None:
        inputs["GTScore"] = gt_score

    attrs = {
        "anchors": anchors,
        "anchor_mask": anchor_mask,
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
        "downsample_ratio": downsample_ratio,
        "use_label_smooth": use_label_smooth,
    }

    helper.append_op(
        type='yolov3_loss',
        inputs=inputs,
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask
        },
        attrs=attrs)
    return loss


@templatedoc(op_type="yolo_box")
def yolo_box(x,
             img_size,
             anchors,
             class_num,
             conf_thresh,
             downsample_ratio,
             clip_bbox=True,
             name=None):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64. 
        img_size (Variable): ${img_size_comment} The data type is int32. 
        anchors (list|tuple): ${anchors_comment}
        class_num (int): ${class_num_comment}
        conf_thresh (float): ${conf_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        clip_bbox (bool): ${clip_bbox_comment}
        name (string): The default value is None. Normally there is no need
                       for the user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`

    Returns:
        Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification 
        scores of boxes.

    Raises:
        TypeError: Input x of yolo_box must be Variable
        TypeError: Attr anchors of yolo box must be list or tuple
        TypeError: Attr class_num of yolo box must be an integer
        TypeError: Attr conf_thresh of yolo box must be a float number

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
        img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
        anchors = [10, 13, 16, 30, 33, 23]
        boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors, 
                                        conf_thresh=0.01, downsample_ratio=32)
    """
    helper = LayerHelper('yolo_box', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolo_box must be Variable")
    if not isinstance(img_size, Variable):
        raise TypeError("Input img_size of yolo_box must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolo_box must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolo_box must be an integer")
    if not isinstance(conf_thresh, float):
        raise TypeError("Attr conf_thresh of yolo_box must be a float number")

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
        "clip_bbox": clip_bbox,
    }

    helper.append_op(
        type='yolo_box',
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs)
    return boxes, scores


@templatedoc()
def detection_map(detect_res,
                  label,
                  class_num,
                  background_label=0,
                  overlap_threshold=0.3,
                  evaluate_difficult=True,
                  has_state=None,
                  input_states=None,
                  out_states=None,
                  ap_version='integral'):
    """
    ${comment}

    Args:
        detect_res: ${detect_res_comment}
        label:  ${label_comment}
        class_num: ${class_num_comment}
        background_label: ${background_label_comment}
        overlap_threshold: ${overlap_threshold_comment}
        evaluate_difficult: ${evaluate_difficult_comment}
        has_state: ${has_state_comment}
        input_states: (tuple|None) If not None, it contains 3 elements:
            (1) pos_count ${pos_count_comment}.
            (2) true_pos ${true_pos_comment}.
            (3) false_pos ${false_pos_comment}.
        out_states: (tuple|None) If not None, it contains 3 elements.
            (1) accum_pos_count ${accum_pos_count_comment}.
            (2) accum_true_pos ${accum_true_pos_comment}.
            (3) accum_false_pos ${accum_false_pos_comment}.
        ap_version: ${ap_type_comment}
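
        Note: when `input_states` and `out_states` are provided, the op reads
        the accumulated statistics from `input_states` and writes the updated
        statistics to `out_states`, so mAP can be accumulated across
        mini-batches; otherwise the returned mAP covers only the current batch.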

    Returns:
        ${map_comment}


    Examples:
          .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers import detection
            detect_res = fluid.data(
                name='detect_res',
                shape=[10, 6],
                dtype='float32')
            label = fluid.data(
                name='label',
                shape=[10, 6],
                dtype='float32')

            map_out = detection.detection_map(detect_res, label, 21)
    """
    helper = LayerHelper("detection_map", **locals())

    def __create_var(dtype):
        return helper.create_variable_for_type_inference(dtype=dtype)

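    # Reuse caller-provided accumulator variables when out_states is given;
    # otherwise create fresh per-batch accumulators.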
    map_out = __create_var('float32')
    accum_pos_count_out = out_states[0] if out_states else __create_var('int32')
    accum_true_pos_out = out_states[1] if out_states else __create_var(
        'float32')
    accum_false_pos_out = out_states[2] if out_states else __create_var(
        'float32')

    pos_count = input_states[0] if input_states else None
    true_pos = input_states[1] if input_states else None
    false_pos = input_states[2] if input_states else None

    helper.append_op(
        type="detection_map",
        inputs={
            'Label': label,
            'DetectRes': detect_res,
            'HasState': has_state,
            'PosCount': pos_count,
            'TruePos': true_pos,
            'FalsePos': false_pos
        },
        outputs={
            'MAP': map_out,
            'AccumPosCount': accum_pos_count_out,
            'AccumTruePos': accum_true_pos_out,
            'AccumFalsePos': accum_false_pos_out
        },
        attrs={
            'overlap_threshold': overlap_threshold,
            'evaluate_difficult': evaluate_difficult,
            'ap_type': ap_version,
            'class_num': class_num,
        })
    return map_out


def bipartite_match(dist_matrix,
                    match_type=None,
                    dist_threshold=None,
                    name=None):
    """
    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
    distance matrix. For an input 2-D matrix, the bipartite matching algorithm
    can find the matched column for each row (matched means the largest
    distance), and can also find the matched row for each column. This
    operator only calculates matched indices from column to row. For each
    instance, the number of matched indices is the column number of the input
    distance matrix. **The OP only supports CPU**.

    There are two outputs, matched indices and distance.
    In short, this algorithm matches the best (maximum distance) row entity
    to each column entity, and the matched indices are not duplicated in each
    row of ColToRowMatchIndices. If a column entity is not matched to any row
    entity, -1 is set in ColToRowMatchIndices.
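
    For example (a minimal sketch of the greedy rule): given the 2x3 distance
    matrix [[0.6, 0.2, 0.9], [0.4, 0.8, 0.1]], the largest entry 0.9 matches
    column 2 to row 0, the next largest remaining entry 0.8 matches column 1
    to row 1, and column 0 stays unmatched, so ColToRowMatchIndices is
    [[-1, 1, 0]] and ColToRowMatchDist is [[-1.0, 0.8, 0.9]].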
    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
    If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
    If Tensor, the height of ColToRowMatchIndices is 1.

    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider using :code:`ssd_loss` instead.

    Args:
        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
            [K, M]. The data type is float32 or float64. It is pair-wise 
            distance matrix between the entities represented by each row and 
            each column. For example, assumed one entity is A with shape [K], 
            another entity is B with shape [M]. The dist_matrix[i][j] is the 
            distance between A[i] and B[j]. The bigger the distance is, the 
            better matching the pairs are. NOTE: This tensor can contain LoD 
            information to represent a batch of inputs. One instance of this 
            batch can contain different numbers of entities.
        match_type(str, optional): The type of matching method, should be
           'bipartite' or 'per_prediction'. None ('bipartite') by default.
        dist_threshold(float32, optional): If `match_type` is 'per_prediction',
            this threshold is to determine the extra matching bboxes based
            on the maximum distance, 0.5 by default.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default.
 
    Returns:
        Tuple:
        matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
        type is int32. N is the batch size. If match_indices[i][j] is -1, it
        means B[j] does not match any entity in i-th instance.
        Otherwise, it means B[j] is matched to row
        match_indices[i][j] in i-th instance. The row number of
        i-th instance is saved in match_indices[i][j].

        matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
        type is float32. N is batch size. If match_indices[i][j] is -1,
        match_distance[i][j] is also -1.0. Otherwise, assumed
        match_distance[i][j] = d, and the row offsets of each instance
        are called LoD. Then match_distance[i][j] =
        dist_matrix[d+LoD[i]][j].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)
            matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
    """
    helper = LayerHelper('bipartite_match', **locals())
    match_indices = helper.create_variable_for_type_inference(dtype='int32')
    match_distance = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype)
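    # For each column (e.g. each prediction), the op records the matched row
    # index in ColToRowMatchIndices and the corresponding distance in
    # ColToRowMatchDist; unmatched columns get -1 / -1.0.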
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
        outputs={
            'ColToRowMatchIndices': match_indices,
            'ColToRowMatchDist': match_distance
        })
    return match_indices, match_distance


def target_assign(input,
                  matched_indices,
                  negative_indices=None,
                  mismatch_value=None,
                  name=None):
    """
    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights for each prediction. The weights are used to specify which
    predictions do not contribute to the training loss.
    For each instance, the outputs `out` and `out_weight` are assigned based on
    `match_indices` and `negative_indices`.
    Assumed that the row offset for each instance in `input` is called lod,
    this operator assigns classification/regression targets by performing the
    following steps:
    1. Assigning all outputs based on `match_indices`:
    .. code-block:: text

        If id = match_indices[i][j] > 0,
            out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
            out_weight[i][j] = 1.
        Otherwise,
            out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][j] = 0.
    2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
    Assumed that i-th instance in `neg_indices` is called `neg_indice`,
    for i-th instance:
    .. code-block:: text
        for id in neg_indice:
            out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][id] = 1.0
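
    In effect, out[i][j] copies the input row selected by match_indices[i][j]
    (offset by that instance's lod) and gets weight 1, mismatched positions
    are filled with `mismatch_value` and get weight 0, and positions listed in
    `neg_indices` are re-filled with `mismatch_value` but get weight 1 so they
    still contribute to the loss as negatives.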

    Args:
       input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
           Data type should be int32 or float32.
       matched_indices (Variable): The input matched indices
           is 2D Tenosr<int32> with shape [N, P], If MatchIndices[i][j] is -1,
           the j-th entity of column is not matched to any entity of row in
           i-th instance.
       negative_indices (Variable, optional): The input negative example indices
           are an optional input with shape [Neg, 1] and int32 type, where Neg is
           the total number of negative example indices.
       mismatch_value (float32, optional): Fill this value to the mismatched
           location.
       name (string): The default value is None.  Normally there is no need for
           user to set this property.  For more information, please refer
           to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple(out, out_weight) is returned.

        out (Variable): a 3D Tensor with shape [N, P, K] and the same data
        type as `input`. N and P are the same as they are in
        `matched_indices`, and K is the same as it is in `input`.

        out_weight (Variable): the weight for output with the shape of [N, P, 1].
        Data type is float32.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(
                name='x',
                shape=[4, 20, 4],
                dtype='float32',
                lod_level=1)
            matched_id = fluid.data(
                name='indices',
                shape=[8, 20],
                dtype='int32')
            trg, trg_weight = fluid.layers.target_assign(
                x,
                matched_id,
                mismatch_value=0)
    """
    helper = LayerHelper('target_assign', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_weight = helper.create_variable_for_type_inference(dtype='float32')
    helper.append_op(
        type='target_assign',
        inputs={
            'X': input,
            'MatchIndices': matched_indices,
            'NegIndices': negative_indices
        },
        outputs={'Out': out,
                 'OutWeight': out_weight},
        attrs={'mismatch_value': mismatch_value})
    return out, out_weight


def ssd_loss(location,
             confidence,
             gt_box,
             gt_label,
             prior_box,
             prior_box_var=None,
             background_label=0,
             overlap_threshold=0.5,
             neg_pos_ratio=3.0,
             neg_overlap=0.5,
             loc_loss_weight=1.0,
             conf_loss_weight=1.0,
             match_type='per_prediction',
             mining_type='max_negative',
             normalize=True,
             sample_size=None):
    """
    **Multi-box loss layer for object detection algorithm of SSD**
    This layer is to compute detection loss for SSD given the location offset
    predictions, confidence predictions, prior boxes and ground-truth bounding
    boxes and labels, and the type of hard example mining. The returned loss
    is a weighted sum of the localization loss (or regression loss) and
    confidence loss (or classification loss) by performing the following steps:

    1. Find matched bounding box by bipartite matching algorithm.
      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
      1.2 Compute matched bounding box by bipartite matching algorithm.
    2. Compute confidence for mining hard examples
      2.1. Get the target label based on matched indices.
      2.2. Compute confidence loss.
    3. Apply hard example mining to get the negative example indices and update
       the matched indices.
    4. Assign classification and regression targets
      4.1. Encoded bbox according to the prior boxes.
      4.2. Assign regression targets.
      4.3. Assign classification targets.
    5. Compute the overall objective loss.
      5.1 Compute confidence loss.
      5.2 Compute localization loss.
      5.3 Compute the overall weighted loss.
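
    The final objective is computed as conf_loss_weight * conf_loss +
    loc_loss_weight * loc_loss, and when `normalize` is True it is divided by
    the total number of matched (positive) prior boxes.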

    Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
            the layout is [xmin, ymin, xmax, ymax]. The data type is float32 or
            float64.
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
            `location`, C is the class number. The data type is float32 or
            float64.
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type is float32 or float64.
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
            with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
            mini-batch input, and each row holds one class label. The data type is float32
            or float64.
        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
            Np and 4 are the same as they are in `location`. The data type is
            float32 or float64.
        prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
            with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`.
        background_label (int): The index of background label, 0 by default.
        overlap_threshold (float): If match_type is 'per_prediction', use
            'overlap_threshold' to determine the extra matching bboxes when finding \
            matched boxes. 0.5 by default.
        neg_pos_ratio (float): The ratio of the negative boxes to the positive
            boxes, used only when mining_type is 'max_negative', 3.0 by default.
        neg_overlap (float): The negative overlap upper bound for the unmatched
            predictions. Use only when mining_type is 'max_negative',
            0.5 by default.
        loc_loss_weight (float): Weight for localization loss, 1.0 by default.
        conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
        match_type (str): The type of matching method during training, should
            be 'bipartite' or 'per_prediction', 'per_prediction' by default.
        mining_type (str): The hard example mining type, should be 'hard_example'
            or 'max_negative', now only support `max_negative`.
        normalize (bool): Whether to normalize the SSD loss by the total number
            of output locations, True by default.
        sample_size (int): The max sample size of negative box, used only when
            mining_type is 'hard_example'.

    Returns:
        Variable(Tensor):  The weighted sum of the localization loss and confidence loss, \
        with shape [N * Np, 1], N and Np are the same as they are in
        `location`. The data type is float32 or float64.

    Raises:
        ValueError: If mining_type is 'hard_example', since currently only \
        the mining type of `max_negative` is supported.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                           name='prior_box',
                           shape=[10, 4],
                           dtype='float32')
            pbv = fluid.data(
                           name='prior_box_var',
                           shape=[10, 4],
                           dtype='float32')
            loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
            gt_box = fluid.data(
                 name='gt_box', shape=[4], lod_level=1, dtype='float32')
            gt_label = fluid.data(
                 name='gt_label', shape=[1], lod_level=1, dtype='float32')
            loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
    """

    helper = LayerHelper('ssd_loss', **locals())
    if mining_type != 'max_negative':
        raise ValueError("Only support mining_type == max_negative now.")

    num, num_prior, num_class = confidence.shape
    conf_shape = nn.shape(confidence)

    def __reshape_to_2d(var):
        return nn.flatten(x=var, axis=2)

    # 1. Find matched bounding box by prior box.
    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
    iou = iou_similarity(x=gt_box, y=prior_box)
    #   1.2 Compute matched bounding box by bipartite matching algorithm.
    matched_indices, matched_dist = bipartite_match(iou, match_type,
                                                    overlap_threshold)

    # 2. Compute confidence for mining hard examples
    # 2.1. Get the target label based on matched indices
    gt_label = nn.reshape(
        x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
    gt_label.stop_gradient = True
    target_label, _ = target_assign(
        gt_label, matched_indices, mismatch_value=background_label)
    # 2.2. Compute confidence loss.
    # Reshape confidence to 2D tensor.
    confidence = __reshape_to_2d(confidence)
    target_label = tensor.cast(x=target_label, dtype='int64')
    target_label = __reshape_to_2d(target_label)
    target_label.stop_gradient = True
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    # 3. Mining hard examples
    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
    actual_shape.stop_gradient = True
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    conf_loss = nn.reshape(
        x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
    conf_loss.stop_gradient = True
    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
    dtype = matched_indices.dtype
    updated_matched_indices = helper.create_variable_for_type_inference(
        dtype=dtype)
    helper.append_op(
        type='mine_hard_examples',
        inputs={
            'ClsLoss': conf_loss,
            'LocLoss': None,
            'MatchIndices': matched_indices,
            'MatchDist': matched_dist,
        },
        outputs={
            'NegIndices': neg_indices,
            'UpdatedMatchIndices': updated_matched_indices
        },
        attrs={
            'neg_pos_ratio': neg_pos_ratio,
            'neg_dist_threshold': neg_overlap,
            'mining_type': mining_type,
            'sample_size': sample_size,
        })

    # 4. Assign classification and regression targets
    # 4.1. Encoded bbox according to the prior boxes.
    encoded_bbox = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size')
    # 4.2. Assign regression targets
    target_bbox, target_loc_weight = target_assign(
        encoded_bbox, updated_matched_indices, mismatch_value=background_label)
    # 4.3. Assign classification targets
    target_label, target_conf_weight = target_assign(
        gt_label,
        updated_matched_indices,
        negative_indices=neg_indices,
        mismatch_value=background_label)

    # 5. Compute loss.
    # 5.1 Compute confidence loss.
    target_label = __reshape_to_2d(target_label)
    target_label = tensor.cast(x=target_label, dtype='int64')
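    # Recompute the confidence loss against the final targets from step 4.3;
    # the earlier conf_loss was only used to rank candidates for hard-example
    # mining.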
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    target_conf_weight = __reshape_to_2d(target_conf_weight)
    conf_loss = conf_loss * target_conf_weight

    # the target_label and target_conf_weight do not have gradient.
    target_label.stop_gradient = True
    target_conf_weight.stop_gradient = True

    # 5.2 Compute regression loss.
    location = __reshape_to_2d(location)
    target_bbox = __reshape_to_2d(target_bbox)

    loc_loss = nn.smooth_l1(location, target_bbox)
    target_loc_weight = __reshape_to_2d(target_loc_weight)
    loc_loss = loc_loss * target_loc_weight

    # the target_bbox and target_loc_weight do not have gradient.
    target_bbox.stop_gradient = True
    target_loc_weight.stop_gradient = True

    # 5.3 Compute overall weighted loss.
    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
    # reshape to [N, Np], N is the batch size and Np is the prior box number.
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
    loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
    if normalize:
        normalizer = nn.reduce_sum(target_loc_weight)
        loss = loss / normalizer

    return loss


def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              name=None,
              min_max_aspect_ratios_order=False):
    """
    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined
    by the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box lies in the interval (min_size, max_size), and the boxes are generated
    in sequence according to the aspect_ratios.
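
    For example, in the code below, min_sizes=[100.] with the default
    aspect_ratios=[1.] produce a single prior box per input position
    (num_priors = 1), which is why the example prints boxes of shape
    (6, 9, 1, 4) for a 6x9 feature map.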

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes(list|tuple|float): the min sizes of generated prior boxes.
       max_sizes(list|tuple|None): the max sizes of generated prior boxes.
            Default: None.
       aspect_ratios(list|tuple|float): the aspect ratios of generated
            prior boxes. Default: [1.].
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior boxes step
            across height or width of the input will be automatically
            calculated. Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of the
            convolution layer that follows, but does not affect the final
            detection results. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)
        boxes(Variable): the output prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.
        variances(Variable): the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input
        num_priors is the total box count of each position of input.

    Examples:
        .. code-block:: python
            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.prior_box(
                 input=input,
                 image=image,
                 min_sizes=[100.],
                 clip=True,
                 flip=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(fluid.default_main_program(),
                feed={"input":input_data,"image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (6, 9, 1, 4)
            # print(var_out.shape)
            # (6, 9, 1, 4)

            # imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.prior_box(
                    input=input,
                    image=image,
                    min_sizes=[100.],
                    clip=True,
                    flip=True)
                # print(box.shape)
                # [6L, 9L, 1L, 4L]
                # print(var.shape)
                # [6L, 9L, 1L, 4L]

    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    attrs = {
        'min_sizes': min_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'flip': flip,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'min_max_aspect_ratios_order': min_max_aspect_ratios_order
    }
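    # max_sizes is optional; only forward it to the op when a positive size is
    # actually provided.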
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        attrs['max_sizes'] = max_sizes

    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def density_prior_box(input,
                      image,
                      densities=None,
                      fixed_sizes=None,
                      fixed_ratios=None,
                      variance=[0.1, 0.1, 0.2, 0.2],
                      clip=False,
                      steps=[0.0, 0.0],
                      offset=0.5,
                      flatten_to_2d=False,
                      name=None):
    """

    This op generates density prior boxes for SSD(Single Shot MultiBox Detector) 
    algorithm. Each position of the input produces N prior boxes, where N is
    determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator; the grid points are determined by densities, and the count
    of density prior boxes is determined by fixed_sizes and fixed_ratios.
    Note that the number of fixed_sizes must equal the number of densities.
    For densities_i in densities:
    
    .. math::

        N\_density\_prior\_box = \sum(N\_fixed\_ratios \times densities\_i^2)

    N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
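
    For example, with densities=[4, 2, 1] and fixed_ratios=[1.], each input
    position produces 1 * (4^2 + 2^2 + 1^2) = 21 density prior boxes, which
    matches the num_priors dimension printed in the examples below.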

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
       densities(list|tuple|None): The densities of generated density prior 
            boxes, this attribute should be a list or tuple of integers. 
            Default: None.
       fixed_sizes(list|tuple|None): The fixed sizes of generated density
            prior boxes, this attribute should a list or tuple of same 
            length with :attr:`densities`. Default: None.
       fixed_ratios(list|tuple|None): The fixed ratios of generated density
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fixed_sizes` is set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
       variance(list|tuple): The variances to be encoded in density prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       clip(bool): Whether to clip out of boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes
            step across height or width of the input will be automatically
            calculated. Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       flatten_to_2d(bool): Whether to flatten output prior boxes and variance
           to 2D shape, the second dim is 4. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
    
    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes: the output density prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.

        variances: the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.


    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.density_prior_box(
                 input=input,
                 image=image,
                 densities=[4, 2, 1],
                 fixed_sizes=[32.0, 64.0, 128.0],
                 fixed_ratios=[1.],
                 clip=True,
                 flatten_to_2d=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(
                fluid.default_main_program(),
                feed={"input":input_data,
                      "image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (1134, 4)
            # print(var_out.shape)
            # (1134, 4)
            #imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.density_prior_box(
                    input=input,
                    image=image,
                    densities=[4, 2, 1],
                    fixed_sizes=[32.0, 64.0, 128.0],
                    fixed_ratios=[1.],
                    clip=True)

                # print(box.shape)
                # [6L, 9L, 21L, 4L]
                # print(var.shape)
                # [6L, 9L, 21L, 4L]
    """
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(densities):
        raise TypeError('densities should be a list or a tuple or None.')
    if not _is_list_or_tuple_(fixed_sizes):
        raise TypeError('fixed_sizes should be a list or a tuple or None.')
    if not _is_list_or_tuple_(fixed_ratios):
        raise TypeError('fixed_ratios should be a list or a tuple or None.')
    if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
    }
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def multi_box_head(inputs,
                   image,
                   base_size,
                   num_classes,
                   aspect_ratios,
                   min_ratio=None,
                   max_ratio=None,
                   min_sizes=None,
                   max_sizes=None,
                   steps=None,
                   step_w=None,
                   step_h=None,
                   offset=0.5,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   flip=True,
                   clip=False,
                   kernel_size=1,
                   pad=0,
                   stride=1,
                   name=None,
                   min_max_aspect_ratios_order=False):
    """
    Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior
    boxes, regression locations and classification confidences on multiple
    input feature maps, then output the concatenated results. For details of
    this algorithm, please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
    <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs (list(Variable)|tuple(Variable)): The list of input variables,
           the format of all Variables are 4-D Tensor, layout is NCHW.
           Data type should be float32 or float64.
       image (Variable): The input image, layout is NCHW. Data type should be
           the same as inputs.
       base_size(int): the base_size is input image size. When len(inputs) > 2
           and `min_size` and `max_size` are None, the `min_size` and `max_size`
           are calculated by `base_size`, `min_ratio` and `max_ratio`. The
           formula is as follows:

              ..  code-block:: text

                   min_sizes = []
                   max_sizes = []
                   step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
                   for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
                       min_sizes.append(base_size * ratio / 100.)
                       max_sizes.append(base_size * (ratio + step) / 100.)
                   min_sizes = [base_size * .10] + min_sizes
                   max_sizes = [base_size * .20] + max_sizes
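
           For instance, with base_size=300, min_ratio=20, max_ratio=90 and
           six inputs, this formula gives step = 17, so that
           min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0] and
           max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].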

       num_classes(int): The number of classes.
       aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
           prior boxes. The length of input and aspect_ratios must be equal.
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       min_sizes(list|tuple|None): If `len(inputs) <=2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       max_sizes(list|tuple|None): If `len(inputs) <=2`,
            max_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
       step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
       step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
       offset(float): Prior boxes center offset. Default: 0.5
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default: True.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       kernel_size(int): The kernel size of conv2d. Default: 1.
       pad(int|list|tuple): The padding of conv2d. Default: 0.
       stride(int|list|tuple): The stride of conv2d. Default: 1.
       name(str): The default value is None.  Normally there is no need
           for user to set this property.  For more information, please
           refer to :ref:`api_guide_Name`.
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.

    Returns:
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

        mbox_loc (Variable): The predicted boxes' location of the inputs. The
        layout is [N, num_priors, 4], where N is batch size, ``num_priors``
        is the number of prior boxes. Data type is the same as input.

        mbox_conf (Variable): The predicted boxes' confidence of the inputs.
        The layout is [N, num_priors, C], where ``N`` and ``num_priors`` 
        have the same meaning as above. C is the number of classes.
        Data type is the same as input.

        boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
        The meaning of num_priors is the same as above.
        Data type is the same as input.

        variances (Variable): the expanded variances for prior boxes.
        The layout is [num_priors, 4]. Data type is the same as input.

    Examples 1: set min_ratio and max_ratio:
        .. code-block:: python

          import paddle.fluid as fluid

          images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    Examples 2: set min_sizes and max_sizes:
        .. code-block:: python

          import paddle.fluid as fluid

          images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
            max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    """

    def _reshape_with_axis_(input, axis=1):
        out = nn.flatten(x=input, axis=axis)
        return out

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')
    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    elif min_sizes is None and max_sizes is None:
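        # Derive per-layer min/max sizes from base_size and the ratio range;
        # this mirrors the formula shown in the docstring.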
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios, num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.')
    if step_h:
        _is_list_or_tuple_and_equal(
            step_h, num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.')
    if step_w:
        _is_list_or_tuple_and_equal(
            step_w, num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
    if steps:
        _is_list_or_tuple_and_equal(
            steps, num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
                             variance, flip, clip, step, offset, None,
                             min_max_aspect_ratios_order)

        box_results.append(box)
        var_results.append(var)

        num_boxes = box.shape[2]
        # get loc
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)

        mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
        mbox_locs.append(mbox_loc_flatten)
        # get conf
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)
        conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
        mbox_confs.append(conf_loc_flatten)
    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = nn.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes])
    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var


def anchor_generator(input,
                     anchor_sizes=None,
                     aspect_ratios=None,
                     variance=[0.1, 0.1, 0.2, 0.2],
                     stride=None,
                     offset=0.5,
                     name=None):
    """
    **Anchor generator operator**

    Generate anchors for Faster RCNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are
    ordered by looping over aspect_ratios first and anchor_sizes second.
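
    For example, with anchor_sizes=[64., 128., 256., 512.] and
    aspect_ratios=[0.5, 1.0, 2.0], every input position produces
    N = 4 * 3 = 12 anchors.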

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, an anchor size of 64 means the area of this anchor
          equals 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios 
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box 
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by 
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer 
           to :ref:`api_guide_Name`. Usually name is no need to set and None 
           by default. 

    Returns:
        Tuple:

        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position. 
        Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
 
        Variances(Variable): The expanded variances of anchors
        with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each variance is in (xcenter, ycenter, w, h) format.


    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
    """
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple '
                         'with length 2, (stride_width, stride_height).')

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor,
                 "Variances": var},
        attrs=attrs, )
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var


def roi_perspective_transform(input,
                              rois,
                              transformed_height,
                              transformed_width,
                              spatial_scale=1.0,
                              name=None):
    """
    **The** `rois` **of this op should be a LoDTensor.**

    ROI perspective transform op applies a perspective transform to map each
    ROI into a rectangular region. Perspective transform is a type of
    transformation in linear algebra.
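
    For each ROI, given by its four corner points, a 3 x 3 perspective
    transform matrix is estimated and used to warp the ROI to a fixed
    transformed_height x transformed_width output; the matrix is also
    returned, flattened to 9 elements, as `transform_matrix`.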

    Parameters:
        input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of 
W
whs 已提交
2359 2360
                          input tensor is NCHW. Where N is batch size, C is the
                          number of input channels, H is the height of the feature,
S
        rois (Variable):  2-D LoDTensor, ROIs (Regions of Interest) to be transformed. 
                          It should be a 2-D LoDTensor of shape (num_rois, 8). Given as 
W
whs 已提交
2364 2365 2366
                          [[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the 
                          top left coordinates, and (x2, y2) is the top right 
                          coordinates, and (x3, y3) is the bottom right coordinates, 
S
                          same as `input` 
        transformed_height (int): The height of transformed output.
        transformed_width (int): The width of transformed output.
W
whs 已提交
2371
        spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
S
                             Normally there is no need for user to set this property.  
                             For more information, please refer to :ref:`api_guide_Name`
W
whs 已提交
2375 2376

    Returns:
S
            A tuple with three Variables. (out, mask, transform_matrix)
            out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
S
            (num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`.
            mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
S
            (num_rois, 1, transformed_h, transformed_w). The data type is int32.
            transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
S
            a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`.
    Return Type:
        tuple
W
whs 已提交
2390 2391 2392 2393

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
    """
    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input,
                "ROIs": rois},
        outputs={
            "Out": out,
            "Out2InIdx": out2in_idx,
            "Out2InWeights": out2in_w,
            "Mask": mask,
            "TransformMatrix": transform_matrix
        },
        attrs={
            "transformed_height": transformed_height,
            "transformed_width": transformed_width,
            "spatial_scale": spatial_scale
        })
    return out, mask, transform_matrix


def generate_proposal_labels(rpn_rois,
                             gt_classes,
                             is_crowd,
                             gt_boxes,
                             im_info,
                             batch_size_per_im=256,
                             fg_fraction=0.25,
                             fg_thresh=0.25,
                             bg_thresh_hi=0.5,
                             bg_thresh_lo=0.0,
                             bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                             class_nums=None,
                             use_random=True,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False):
    """
    **Generate Proposal Labels of Faster-RCNN**

    Given the output bounding boxes of GenerateProposalOp and the
    groundtruth, this operator samples foreground and background boxes
    and computes loss targets.

    RpnRois are the output boxes of the RPN and have been processed by
    generate_proposal_op; these boxes are combined with the groundtruth
    boxes and sampled according to batch_size_per_im and fg_fraction.

    If an instance has a groundtruth overlap greater than fg_thresh, it is
    considered a foreground sample.
    If an instance has a groundtruth overlap greater than bg_thresh_lo and
    lower than bg_thresh_hi, it is considered a background sample.

    After all foreground and background boxes are chosen (the so-called
    RoIs), random sampling is applied to make sure the number of foreground
    boxes is no more than batch_size_per_im * fg_fraction.

    For each box in RoIs, we assign the classification (class label) and
    regression targets (box label) to it. Finally BboxInsideWeights and
    BboxOutsideWeights are used to specify whether a box would contribute
    to the training loss.
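
    For example, with the defaults batch_size_per_im=256 and
    fg_fraction=0.25, at most 256 * 0.25 = 64 foreground RoIs are sampled
    per image, and the remainder of the batch is filled with background
    RoIs.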

    Args:
        rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
        is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
        gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
        im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.

        batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
        fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
        bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
        class_nums(int): Class number. The data type must be int32.
        use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): If true, box regression is class-agnostic and only distinguishes foreground and background boxes.
        is_cascade_rcnn(bool): If true, boxes crossing the image boundary are filtered out.

    Returns:
        tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.

        - **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
        - **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
        - **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
        - **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
        - **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
                           rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
                           class_nums=10)

    """

    helper = LayerHelper('generate_proposal_labels', **locals())

    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
    labels_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    bbox_targets = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_inside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_outside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)

    helper.append_op(
        type="generate_proposal_labels",
        inputs={
            'RpnRois': rpn_rois,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtBoxes': gt_boxes,
            'ImInfo': im_info
        },
        outputs={
            'Rois': rois,
            'LabelsInt32': labels_int32,
            'BboxTargets': bbox_targets,
            'BboxInsideWeights': bbox_inside_weights,
            'BboxOutsideWeights': bbox_outside_weights
        },
        attrs={
            'batch_size_per_im': batch_size_per_im,
            'fg_fraction': fg_fraction,
            'fg_thresh': fg_thresh,
            'bg_thresh_hi': bg_thresh_hi,
            'bg_thresh_lo': bg_thresh_lo,
            'bbox_reg_weights': bbox_reg_weights,
            'class_nums': class_nums,
            'use_random': use_random,
            'is_cls_agnostic': is_cls_agnostic,
            'is_cascade_rcnn': is_cascade_rcnn
        })

    rois.stop_gradient = True
    labels_int32.stop_gradient = True
    bbox_targets.stop_gradient = True
    bbox_inside_weights.stop_gradient = True
    bbox_outside_weights.stop_gradient = True

    return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights


def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                         labels_int32, num_classes, resolution):
    """
    **Generate Mask Labels for Mask-RCNN**

    Given the RoIs and the corresponding labels, this operator samples
    foreground RoIs. The mask branch also has a :math:`K \\times M^{2}`
    dimensional output target for each foreground RoI, which encodes K
    binary masks of resolution M x M, one for each of the K classes. These
    mask targets are used to compute the loss of the mask branch.

    Please note the data format of the ground-truth segmentations, which is
    assumed to be as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.

        .. code-block:: python

            #[
            #  [[[229.14, 370.9, 229.14, 370.9, ...]],
            #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
            #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
            #]

            batch_masks = []
            for semgs in batch_semgs:
                gt_masks = []
                for semg in semgs:
                    gt_segm = []
                    for polys in semg:
                        gt_segm.append(np.array(polys).reshape(-1, 2))
                    gt_masks.append(gt_segm)
                batch_masks.append(gt_masks)
            
            
            place = fluid.CPUPlace()
            feeder = fluid.DataFeeder(place=place, feed_list=feeds)
            feeder.feed(batch_masks)
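
    For example, with num_classes=81 and resolution=14 as in the example
    below, the returned mask_int32 has shape [P, 81 * 14 * 14], i.e. one
    14 x 14 binary mask per class for each of the P sampled RoIs.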

    Args:
        im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
            data type. N is the batch size, each element is
            [height, width, scale] of image. Image scale is
            target_size / original_size, target_size is the size after resize,
            original_size is the original image size.
        gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
            should be int. M is the total number of ground-truth, each
            element is a class label.
        is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
            as gt_classes, each element is a flag indicating whether a
            groundtruth is crowd.
        gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type, its LoD level is 3.
            Usually users do not need to understand LoD;
            the users should return the correct data format in the reader.
            The LoD[0] represents the ground-truth objects number of
            each instance. LoD[1] represents the segmentation counts of each
            object. LoD[2] represents the polygons number of each segmentation.
            S is the total number of polygon coordinate points. Each element
            is an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
            type. R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of original image.
        labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
            of int32. R is the same as it in `rois`. Each element represents
            a class label of a RoI.
        num_classes (int): Class number.
        resolution (int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable):  A 2D LoDTensor with shape [P, 4] and same data
        type as `rois`. P is the total number of sampled RoIs. Each element
        is a bounding box with [xmin, ymin, xmax, ymax] format in range of
        original image size.

        mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
        and int data type, each element represents the output mask RoI
        index with regard to input RoIs.

        mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
        data type, K is the classes number and M is the resolution of mask
        predictions. Each element represents the binary mask targets.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          im_info = fluid.data(name="im_info", shape=[None, 3],
              dtype="float32")
          gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
              dtype="int32", lod_level=1)
          is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
              dtype="int32", lod_level=1)
          gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
              dtype="float32", lod_level=3)
          # rois, roi_labels can be the output of
          # fluid.layers.generate_proposal_labels.
          rois = fluid.data(name="rois", shape=[None, 4],
              dtype="float32", lod_level=1)
          roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
              dtype="int32", lod_level=1)
          mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
              im_info=im_info,
              gt_classes=gt_classes,
              is_crowd=is_crowd,
              gt_segms=gt_masks,
              rois=rois,
              labels_int32=roi_labels,
              num_classes=81,
              resolution=14)
    """

    helper = LayerHelper('generate_mask_labels', **locals())

    mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_has_mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)

    helper.append_op(
        type="generate_mask_labels",
        inputs={
            'ImInfo': im_info,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtSegms': gt_segms,
            'Rois': rois,
            'LabelsInt32': labels_int32
        },
        outputs={
            'MaskRois': mask_rois,
            'RoiHasMaskInt32': roi_has_mask_int32,
            'MaskInt32': mask_int32
        },
        attrs={'num_classes': num_classes,
               'resolution': resolution})

    mask_rois.stop_gradient = True
    roi_has_mask_int32.stop_gradient = True
    mask_int32.stop_gradient = True

    return mask_rois, roi_has_mask_int32, mask_int32


def generate_proposals(scores,
                       bbox_deltas,
                       im_info,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       name=None):
    """
    **Generate proposal Faster-RCNN**

    This operation proposes RoIs according to each box's probability of
    being a foreground object, where the boxes are computed from the
    anchors. The bbox_deltas and the scores are the outputs of the RPN.
    The final proposals can be used to train the detection net.

    To generate proposals, this operation performs the following steps:

    1. Transpose and reshape scores and bbox_deltas to shapes
       (H*W*A, 1) and (H*W*A, 4) respectively.
    2. Calculate box locations as proposal candidates by decoding
       bbox_deltas against the anchors (see the sketch below).
    3. Clip boxes to the image.
    4. Remove predicted boxes with small area.
    5. Apply NMS to get final proposals as output.
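
    As a rough illustration of step 2, decoding a single box resembles the
    following NumPy sketch (the names are illustrative and this is not the
    exact kernel implementation):

    .. code-block:: python

        import numpy as np

        def decode(anchor, delta, variance):
            # anchor: [xmin, ymin, xmax, ymax]; delta: [dx, dy, dw, dh]
            aw = anchor[2] - anchor[0] + 1.0
            ah = anchor[3] - anchor[1] + 1.0
            acx = anchor[0] + 0.5 * aw
            acy = anchor[1] + 0.5 * ah
            # shift the anchor center, then rescale its width and height
            cx = variance[0] * delta[0] * aw + acx
            cy = variance[1] * delta[1] * ah + acy
            w = np.exp(variance[2] * delta[2]) * aw
            h = np.exp(variance[3] * delta[3]) * ah
            return [cx - 0.5 * w, cy - 0.5 * h,
                    cx + 0.5 * w - 1.0, cy + 0.5 * h - 1.0]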

    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes 
            and scale is the ratio of network input size and original size. 
            The data type must be float32.
        anchors(Variable):   A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(float): Number of total bboxes to be kept per
            image before NMS. The data type must be float32. `6000` by default.
        post_nms_top_n(float): Number of total bboxes to be kept per
            image after NMS. The data type must be float32. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                         im_info, anchors, variances)

    """
    helper = LayerHelper('generate_proposals', **locals())

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype)
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype)
    helper.append_op(
        type="generate_proposals",
        inputs={
            'Scores': scores,
            'BboxDeltas': bbox_deltas,
            'ImInfo': im_info,
            'Anchors': anchors,
            'Variances': variances
        },
        attrs={
            'pre_nms_topN': pre_nms_top_n,
            'post_nms_topN': post_nms_top_n,
            'nms_thresh': nms_thresh,
            'min_size': min_size,
            'eta': eta
        },
        outputs={'RpnRois': rpn_rois,
                 'RpnRoiProbs': rpn_roi_probs})
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True

    return rpn_rois, rpn_roi_probs


def box_clip(input, im_info, name=None):
    """
    Clip the boxes to the size given by im_info.
    For each input box, the formula is given as follows:
        
    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0) 
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)
    
    where im_w and im_h are computed from im_info:
 
    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(width / scale)
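
    For illustration, the same computation in NumPy (a minimal sketch; the
    variable names are illustrative only):

    .. code-block:: python

        import numpy as np

        height, width, scale = 800., 600., 2.0   # one row of im_info
        im_h, im_w = round(height / scale), round(width / scale)
        box = np.array([-4.0, 10.0, 350.0, 260.0])  # [xmin, ymin, xmax, ymax]
        box[0::2] = np.clip(box[0::2], 0., im_w - 1)  # clip x coordinates
        box[1::2] = np.clip(box[1::2], 0., im_h - 1)  # clip y coordinates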

    Args:
        input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
            the last dimension is 4 and data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout 
            (height, width, scale) representing the information of image. 
            Height and width are the input sizes and scale is the ratio of network input
            size and original size. The data type is float32 or float64.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 
    
    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or float64. 
        The shape is same as input.

        
    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[None, 3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'box_clip')

    helper = LayerHelper("box_clip", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)
    inputs = {"Input": input, "ImInfo": im_info}
    helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})

    return output


def retinanet_detection_output(bboxes,
                               scores,
                               anchors,
                               im_info,
                               score_threshold=0.05,
                               nms_top_k=1000,
                               keep_top_k=100,
                               nms_threshold=0.3,
                               nms_eta=1.):
    """
    **Detection Output Layer for the detector RetinaNet.**

    In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
    `FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions. This OP obtains the detection results by
    performing the following steps:

    1. For each FPN level, decode box predictions according to the anchor
       boxes from at most :attr:`nms_top_k` top-scoring predictions after
       thresholding detector confidence at :attr:`score_threshold`.
    2. Merge top predictions from all levels and apply multi-class non 
       maximum suppression (NMS) on them to get the final detections.
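
    For example, with 5 FPN levels and the defaults nms_top_k=1000 and
    keep_top_k=100, at most 5 * 1000 candidate boxes per image enter the
    final NMS, and at most 100 detections per image are returned.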

    Args:
        bboxes(List): A list of Tensors from multiple FPN levels represents
            the location prediction for all anchor boxes. Each element is
            a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
            batch size, :math:`Mi` is the number of bounding boxes from
            :math:`i`-th FPN level and each bounding box has four coordinate
            values and the layout is [xmin, ymin, xmax, ymax]. The data type
            of each element is float32 or float64.
        scores(List): A list of Tensors from multiple FPN levels represents
            the category prediction for all anchor boxes. Each element is a
            3-D Tensor with shape :math:`[N, Mi, C]`,  :math:`N` is the batch
            size, :math:`C` is the class number (**excluding background**),
            :math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
            level. The data type of each element is float32 or float64.
        anchors(List): A list of Tensors from multiple FPN levels represents
            the locations of all anchor boxes. Each element is a 2-D Tensor
            with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
            boxes from :math:`i`-th FPN level, and each bounding box has four
2907
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
            The data type of each element is float32 or float64.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector which are the height and width
            of the network input along with the factor scaling the origin image to
            the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score before NMS, default value is set to 0.05.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS, default value is set to
            1000.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. Default value is set to 100, -1 means keeping all bounding
            boxes after NMS step.
        nms_threshold(float): The Intersection-over-Union(IoU) threshold used to 
            filter out boxes in NMS.
        nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
            Default value is set to 1., which represents the value of
            :attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
            to be lower than 1. and the value of :attr:`nms_threshold` is set to
            be higher than 0.5, everytime a bounding box is filtered out,
            the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
            = :attr:`nms_threshold` * :attr:`nms_eta`  will not be stopped until
            the actual value of :attr:`nms_threshold` is lower than or equal to
            0.5.

    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.

    Returns:
        Variable(The data type is float32 or float64):
            The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            :math:`No` is the total number of detections in this mini-batch.
            The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
            results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
            has no detected results. If all images have no detected results,
            LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           bboxes_low = fluid.data(
               name='bboxes_low', shape=[1, 44, 4], dtype='float32')
           bboxes_high = fluid.data(
               name='bboxes_high', shape=[1, 11, 4], dtype='float32')
           scores_low = fluid.data(
               name='scores_low', shape=[1, 44, 10], dtype='float32')
           scores_high = fluid.data(
               name='scores_high', shape=[1, 11, 10], dtype='float32')
           anchors_low = fluid.data(
               name='anchors_low', shape=[44, 4], dtype='float32')
           anchors_high = fluid.data(
               name='anchors_high', shape=[11, 4], dtype='float32')
           im_info = fluid.data(
               name="im_info", shape=[1, 3], dtype='float32')
           nmsed_outs = fluid.layers.retinanet_detection_output(
                                          bboxes=[bboxes_low, bboxes_high],
                                          scores=[scores_low, scores_high],
                                          anchors=[anchors_low, anchors_high],
                                          im_info=im_info,
                                          score_threshold=0.05,
                                          nms_top_k=1000,
                                          keep_top_k=100,
                                          nms_threshold=0.45,
                                          nms_eta=1.)
    """

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores'))
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
        },
        outputs={'Out': output})
    output.stop_gradient = True
    return output


def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=0,
                   name=None):
    """
    **Multiclass NMS**

    This operator performs multi-class non-maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection
    bounding boxes whose scores are larger than score_threshold, if this
    threshold is provided, and keeps at most the nms_top_k highest-scoring
    boxes if nms_top_k is larger than -1. It then prunes away boxes that
    have a high IoU (intersection over union) overlap with already selected
    boxes, using adaptive-threshold NMS based on the parameters
    nms_threshold and nms_eta. After the NMS step, at most keep_top_k boxes
    in total are kept per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4)  which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)

            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.scores = (0.3, 0.3, 0.1)

            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0


        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
                        [2, 0.4, 2.0, 3.0, 7.0, 5.0]]

            Out format is (label, confidence, xmin, ymin, xmax, ymax)
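
    A minimal single-class sketch of the greedy, adaptive-threshold NMS
    described above (illustrative NumPy code, not the exact kernel):

    .. code-block:: python

        import numpy as np

        def nms_adaptive(boxes, scores, thresh, eta):
            # boxes: [M, 4] as [xmin, ymin, xmax, ymax]; scores: [M]
            order = scores.argsort()[::-1]
            keep, adaptive = [], thresh
            while order.size > 0:
                i = order[0]
                keep.append(i)
                rest = order[1:]
                xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
                yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
                xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
                yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
                inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
                area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
                area_r = (boxes[rest, 2] - boxes[rest, 0]) * \
                         (boxes[rest, 3] - boxes[rest, 1])
                iou = inter / (area_i + area_r - inter)
                order = rest[iou <= adaptive]
                if eta < 1.0 and adaptive > 0.5:
                    adaptive *= eta  # shrink the threshold adaptively
            return keep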
    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is 
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the 
                           class number. The data type is float32 or float64.   
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is 
                           number of bounding boxes. For each category there 
                           are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes.The data type is float32 or float64. 
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4].The data type is float32 or float64. 
        background_label (int): The index of background label, the background 
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided, 
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The threshold to be used in NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values: 
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the 
             total number of detections. If there is no detected boxes for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes detected, the lod is changed 
             from {0} to {1}) 


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None, 81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    helper = LayerHelper('multiclass_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def locality_aware_nms(bboxes,
                       scores,
                       score_threshold,
                       nms_top_k,
                       keep_top_k,
                       nms_threshold=0.3,
                       normalized=True,
                       nms_eta=1.,
                       background_label=-1,
                       name=None):
    """
    **Locality-Aware NMS**

    `Locality-aware NMS <https://arxiv.org/abs/1704.03155>`_ performs
    locality-aware non-maximum suppression (LANMS) on boxes and scores.

    Firstly, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, this operator greedily
    selects a subset of detection bounding boxes whose scores are larger
    than score_threshold, if this threshold is provided, and keeps at most
    the nms_top_k highest-scoring boxes if nms_top_k is larger than -1. It
    then prunes away boxes that have a high IoU overlap with already
    selected boxes, using adaptive-threshold NMS based on the parameters
    nms_threshold and nms_eta.

    After the NMS step, at most keep_top_k boxes in total are kept per
    image if keep_top_k is larger than -1.
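
    A conceptual NumPy sketch of the locality-aware merge step (the helper
    name is illustrative; the actual kernel merges quadrangles and is not
    reproduced exactly here):

    .. code-block:: python

        import numpy as np

        def weighted_merge(box1, score1, box2, score2):
            # Merge two overlapping boxes into one, weighted by their scores.
            merged = score1 * np.asarray(box1) + score2 * np.asarray(box2)
            merged /= (score1 + score2)
            return merged, score1 + score2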

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
                           represents the predicted locations of M bounding
                           bboxes, N is the batch size. Each bounding box
                           has four coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
                           predicted confidence predictions. N is the batch
                           size, C is the class number, M is number of bounding
                           boxes. Now only support 1 class. For each category
                           there are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension of
                           BBoxes. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: -1
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The threshold to be used in NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
                          Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If there is no detected boxes for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes detected, the lod is changed
             from {0} to {1}). The data type is float32 or float64.


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
                                      dtype='float32')
            scores = fluid.data(name='scores', shape=[None, 1, 81],
                                      dtype='float32')
            out = fluid.layers.locality_aware_nms(bboxes=boxes,
                                              scores=scores,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    shape = scores.shape
    assert len(shape) == 3, "dim size of scores must be 3"
    assert shape[
        1] == 1, "locality_aware_nms only supports one class; the score Tensor shape must be [N, 1, M]"

    helper = LayerHelper('locality_aware_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)

    helper.append_op(
        type="locality_aware_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             name=None):
    """
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, it is necessary to distribute all proposals into different
    FPN levels, according to the scale of the proposals, the referring scale
    and the referring level. Besides, to restore the order of proposals, we
    return an array which indicates the original indices of the rois in the
    current proposals. To compute the FPN level for each roi, the formula is
    given as follows:
    
    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

        level &= floor(\log_{2}(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
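
    For example, assuming the base-2 logarithm above, a 112 x 112 roi with
    refer_level=4 and refer_scale=224 is assigned to level
    floor(log2(112 / 224) + 4) = 3.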

    Args:

        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is 
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of FPN layer where the proposals come 
            from.
        max_level(int32): The highest level of FPN layer where the proposals
            come from.
        refer_level(int32): The referring level of FPN layer with specified scale.
        refer_scale(int32): The referring scale of FPN layer with specified level.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 

    Returns:
        Tuple:

        multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4]
        and data type float32 or float64. The length is
        max_level-min_level+1. They are the proposals of each FPN level.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is 
        the number of total rois. The data type is int32. It is
        used to restore the order of fpn_rois.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """

    helper = LayerHelper('distribute_fpn_proposals', **locals())
    dtype = helper.input_dtype('fpn_rois')
    num_lvl = max_level - min_level + 1
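    # One output Variable per pyramid level; RestoreIndex is an int32 index
    # later used to recover the original ordering of the input RoIs.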
    multi_rois = [
        helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
    ]
    restore_ind = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='distribute_fpn_proposals',
        inputs={'FpnRois': fpn_rois},
        outputs={'MultiFpnRois': multi_rois,
                 'RestoreIndex': restore_ind},
        attrs={
            'min_level': min_level,
            'max_level': max_level,
            'refer_level': refer_level,
            'refer_scale': refer_scale
        })
    return multi_rois, restore_ind
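

# A minimal NumPy sketch (not part of the public API) of the level-assignment
# formula documented above. The log base 2 follows the FPN paper and the
# [x1, y1, x2, y2] RoI layout is an assumption; `_fpn_level_sketch` itself is
# purely illustrative and makes no claim about the exact kernel semantics.
def _fpn_level_sketch(rois, min_level=2, max_level=5, refer_level=4,
                      refer_scale=224):
    import numpy as np
    w = rois[:, 2] - rois[:, 0]
    h = rois[:, 3] - rois[:, 1]
    roi_scale = np.sqrt(w * h)  # sqrt(BBoxArea(fpn_roi))
    level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
    # Levels outside [min_level, max_level] are clamped to the nearest level.
    return np.clip(level, min_level, max_level).astype('int32')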


@templatedoc()
def box_decoder_and_assign(prior_box,
                           prior_box_var,
                           target_box,
                           box_score,
                           box_clip,
                           name=None):
    """
    ${comment}
    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set
            and is None by default.

    Returns:
        Tuple:

        decode_box(${decode_box_type}): ${decode_box_comment}

        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
                name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4*81], dtype='float32')
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32')
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)

    """
    helper = LayerHelper("box_decoder_and_assign", **locals())

    decoded_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
    output_assign_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    helper.append_op(
        type="box_decoder_and_assign",
        inputs={
            "PriorBox": prior_box,
            "PriorBoxVar": prior_box_var,
            "TargetBox": target_box,
            "BoxScore": box_score
        },
        attrs={"box_clip": box_clip},
        outputs={
            "DecodeBox": decoded_box,
            "OutputAssignBox": output_assign_box
        })
    return decoded_box, output_assign_box
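

# A rough NumPy sketch (not part of the public API) of the decode-then-assign
# idea: decode per-class box deltas against prior boxes scaled by the prior
# variances, clip dw/dh at box_clip before the exp, and keep the box of the
# highest-scoring class per RoI. The exact kernel conventions (variance
# handling, pixel offsets) are assumptions here, not the operator contract.
def _box_decoder_and_assign_sketch(prior_box, prior_box_var, target_box,
                                   box_score, box_clip=4.135):
    import numpy as np
    n, num_classes = box_score.shape
    pw = prior_box[:, 2] - prior_box[:, 0] + 1.0
    ph = prior_box[:, 3] - prior_box[:, 1] + 1.0
    pcx = prior_box[:, 0] + 0.5 * pw
    pcy = prior_box[:, 1] + 0.5 * ph
    deltas = target_box.reshape(n, num_classes, 4) * prior_box_var
    dx, dy = deltas[:, :, 0], deltas[:, :, 1]
    dw = np.minimum(deltas[:, :, 2], box_clip)  # clip before exp
    dh = np.minimum(deltas[:, :, 3], box_clip)
    cx = dx * pw[:, None] + pcx[:, None]
    cy = dy * ph[:, None] + pcy[:, None]
    w = np.exp(dw) * pw[:, None]
    h = np.exp(dh) * ph[:, None]
    decoded = np.stack(
        [cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], axis=2)
    best = box_score.argmax(axis=1)  # highest-scoring class per RoI
    return decoded.reshape(n, num_classes * 4), decoded[np.arange(n), best]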


def collect_fpn_proposals(multi_rois,
                          multi_scores,
                          min_level,
                          max_level,
                          post_nms_top_n,
                          name=None):
    """
    **This OP only supports LoDTensor as input**. Concatenate multi-level RoIs
    (Regions of Interest) and select the top post_nms_top_n RoIs according to
    multi_scores. This operation performs the following steps:

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id

    Args:
        multi_rois(list): List of RoIs to collect. Each element in the list is
            a 2-D LoDTensor with shape [N, 4] and data type float32 or float64,
            where N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Each element in
            the list is a 2-D LoDTensor with shape [N, 1] and data type float32
            or float64, where N is the number of RoIs.
        min_level(int): The lowest level of FPN layer to collect.
        max_level(int): The highest level of FPN layer to collect.
        post_nms_top_n(int): The number of selected RoIs.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set
            and is None by default.

    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type
        float32 or float64, containing the selected RoIs.


    Examples:
        .. code-block:: python
           
            import paddle.fluid as fluid
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))

            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois, 
                multi_scores=multi_scores,
                min_level=2, 
                max_level=5, 
                post_nms_top_n=2000)
    """

    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    num_lvl = max_level - min_level + 1
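    # Only the first num_lvl entries take part in the collection, matching the
    # documented [min_level, max_level] range.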
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True
    helper.append_op(
        type='collect_fpn_proposals',
        inputs={
            'MultiLevelRois': input_rois,
            'MultiLevelScores': input_scores
        },
        outputs={'FpnRois': output_rois},
        attrs={'post_nms_topN': post_nms_top_n})
    return output_rois
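

# A minimal NumPy sketch (not part of the public API) of steps 2-4 from the
# docstring above: concatenate the per-level RoIs and scores, keep the
# post_nms_top_n highest-scoring RoIs, and gather them. LoD bookkeeping and
# the final re-sort by batch id (step 5) are omitted, as they depend on the
# LoD layout; the helper name is purely illustrative.
def _collect_fpn_proposals_sketch(multi_rois, multi_scores, post_nms_top_n):
    import numpy as np
    rois = np.concatenate(multi_rois, axis=0)    # step 2: concat levels
    scores = np.concatenate(multi_scores, axis=0).ravel()
    keep = np.argsort(-scores)[:post_nms_top_n]  # step 3: top-N by score
    return rois[keep]                            # step 4: gather RoIs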