#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

from __future__ import print_function

from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype

__all__ = [
    'prior_box',
    'density_prior_box',
    'multi_box_head',
    'bipartite_match',
    'target_assign',
    'detection_output',
    'ssd_loss',
    'rpn_target_assign',
    'retinanet_target_assign',
    'sigmoid_focal_loss',
    'anchor_generator',
    'roi_perspective_transform',
    'generate_proposal_labels',
    'generate_proposals',
    'generate_mask_labels',
    'iou_similarity',
    'box_coder',
    'polygon_box_transform',
    'yolov3_loss',
    'yolo_box',
    'box_clip',
    'multiclass_nms',
    'locality_aware_nms',
    'retinanet_detection_output',
    'distribute_fpn_proposals',
    'box_decoder_and_assign',
    'collect_fpn_proposals',
]


def retinanet_target_assign(bbox_pred,
                            cls_logits,
                            anchor_box,
                            anchor_var,
                            gt_boxes,
                            gt_labels,
                            is_crowd,
                            im_info,
                            num_classes=1,
                            positive_overlap=0.5,
                            negative_overlap=0.4):
    """
    **Target Assign Layer for the detector RetinaNet.**

    This OP finds out positive and negative samples from all anchors
    for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
    and assigns target labels for classification along with target locations for
    regression to each sample, then takes out the part belonging to positive and
    negative samples from category prediction( :attr:`cls_logits`) and location
    prediction( :attr:`bbox_pred`) which belong to all anchors.

    The searching principles for positive and negative samples are as follows:

    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.

    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.

    3. An anchor is assigned to background when its IoU overlap with every
    ground-truth box is lower than :attr:`negative_overlap`.

    4. Anchors which do not meet the above conditions do not participate in
    the training process.

    Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
    regression for each anchor, hence the target label for each positive (or negative)
    sample is a :math:`C`-vector and the target location for each positive sample
    is a 4-vector. As for a positive sample, if the category of its assigned
    ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0; its box
    regression targets are computed as the offsets between itself and its assigned
    ground-truth box. As for a negative sample, all entries in its length :math:`C`
    label vector are set to 0 and box regression targets are omitted because
    negative samples do not participate in the training process of location
    regression.

    After the assignment, the part belonging to positive and negative samples is
    taken out from category prediction( :attr:`cls_logits` ), and the part
    belonging to positive samples is taken out from location
    prediction( :attr:`bbox_pred` ).
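
    As a purely illustrative sketch of the classification targets described
    above (hypothetical values, not produced by this OP): with :math:`C = 5`
    categories, a positive sample assigned to ground-truth class 3 and a
    negative sample get label vectors like

    .. code-block:: python

        import numpy as np

        C = 5
        positive_target = np.zeros(C, dtype='float32')
        positive_target[3 - 1] = 1.  # class ids are 1-based, so class 3 -> entry 2
        # positive_target: [0., 0., 1., 0., 0.]
        negative_target = np.zeros(C, dtype='float32')
        # negative_target: [0., 0., 0., 0., 0.]; its regression target is omitted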

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
            the predicted locations of all anchors. :math:`N` is the batch size (the
            number of images in a mini-batch), :math:`M` is the number of all anchors
            of one image, and each anchor has 4 coordinate values. The data type of
            :attr:`bbox_pred` is float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
            the predicted categories of all anchors. :math:`N` is the batch size,
            :math:`M` is the number of all anchors of one image, and :math:`C` is
            the number of categories (**Notice: excluding background**). The data type
            of :attr:`cls_logits` is float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
            the locations of all anchors. :math:`M` is the number of all anchors of
            one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
            :math:`[xmin, ymin]` is the left top coordinate of the anchor box,
            :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
            The data type of :attr:`anchor_box` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator` 
            for the generation of :attr:`anchor_box`.
        anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded 
            factors of anchor locations used in loss function. :math:`M` is number of
            all anchors of one image, each anchor possesses a 4-vector expanded factor.
            The data type of :attr:`anchor_var` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator`
            for the generation of :attr:`anchor_var`.
        gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
            locations of all ground-truth boxes. :math:`G` is the total number of
            all ground-truth boxes in a mini-batch, and each ground-truth box has 4
            coordinate values. The data type of :attr:`gt_boxes` is float32 or
            float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
            categories of all ground-truth boxes, and the values are in the range of
            :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
            in a mini-batch, and each ground-truth box has one category. The data type
            of :attr:`gt_labels` is int32.
        is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
            indicates whether a ground-truth box is a crowd. If the value is 1, the
            corresponding box is a crowd and is ignored during training. :math:`G` is
            the total number of all ground-truth boxes in a mini-batch. The data type
            of :attr:`is_crowd` is int32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector consisting of the height and width
            of the network input and the factor scaling the original image to
            the network input. The data type of :attr:`im_info` is float32.
        num_classes(int32): The number of categories for classification, the default
            value is 1.
        positive_overlap(float32): Minimum overlap required between an anchor
            and ground-truth box for the anchor to be a positive sample, the default
            value is 0.5.
        negative_overlap(float32): Maximum overlap allowed between an anchor
            and ground-truth box for the anchor to be a negative sample, the default
            value is 0.4. :attr:`negative_overlap` should be less than or equal to
            :attr:`positive_overlap`, if not, the actual value of
            :attr:`positive_overlap` is :attr:`negative_overlap`.

    Returns:
        A tuple with 6 Variables:
        
        **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
        category prediction belonging to positive and negative samples. :math:`F`
        is the number of positive samples in a mini-batch, :math:`B` is the number
        of negative samples, and :math:`C` is the number of categories
        (**Notice: excluding background**). The data type of :attr:`predict_scores`
        is float32 or float64.

        **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each sample has 4 coordinate values. The data type
        of :attr:`predict_location` is float32 or float64.

        **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
        target labels for classification belonging to positive and negative
        samples. :math:`F` is the number of positive samples, :math:`B` is the
        number of negative samples, and each sample has one target category. The data type
        of :attr:`target_label` is int32.

        **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        target locations for box regression belonging to positive samples.
        :math:`F` is the number of positive samples, and each sample has 4
        coordinate values. The data type of :attr:`target_bbox` is float32 or
        float64.

        **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive. If a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise to 1. :math:`F` is the number
        of total positive samples in a mini-batch, and each sample has 4
        coordinate values. The data type of :attr:`bbox_inside_weight` is float32
        or float64.

        **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
        of positive samples. :math:`N` is the batch size. **Notice: The number
        of positive samples is used as the denominator of later loss function,
        to avoid the condition that the denominator is zero, this OP has added 1
        to the actual number of positive samples of each image.** The data type of
        :attr:`fg_num` is int32.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
                            dtype='float32')
          cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
                            dtype='float32')
          anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
                            dtype='float32')
          anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
                            dtype='float32')
          gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
                            dtype='float32')
          gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
                            dtype='int32')
          is_crowd = fluid.data(name='is_crowd', shape=[1],
                            dtype='int32')
          im_info = fluid.data(name='im_info', shape=[1, 3],
                            dtype='float32')
          score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
                fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
                anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)

    """

    check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
                             'retinanet_target_assign')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'retinanet_target_assign')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'retinanet_target_assign')

    helper = LayerHelper('retinanet_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    fg_num = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="retinanet_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'GtLabels': gt_labels,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
            'ForegroundNumber': fg_num
        },
        attrs={
            'positive_overlap': positive_overlap,
            'negative_overlap': negative_overlap
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True
    fg_num.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num


def rpn_target_assign(bbox_pred,
                      cls_logits,
                      anchor_box,
                      anchor_var,
                      gt_boxes,
                      is_crowd,
                      im_info,
                      rpn_batch_size_per_im=256,
                      rpn_straddle_thresh=0.0,
                      rpn_fg_fraction=0.5,
                      rpn_positive_overlap=0.7,
                      rpn_negative_overlap=0.3,
                      use_random=True):
    """
    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**

    Given the Intersection-over-Union (IoU) overlap between anchors and ground
    truth boxes, this layer assigns classification and regression targets to
    each anchor; these targets are used to train the RPN. The classification
    target is a binary class label (of being an object or not). Following the
    Faster-RCNN paper, positive labels are assigned to two kinds of anchors:
    (i) the anchor/anchors with the highest IoU overlap with a ground-truth
    box, or (ii) an anchor that has an IoU overlap higher than
    rpn_positive_overlap (0.7) with any ground-truth box. Note that a single
    ground-truth box may assign positive labels to multiple anchors. An anchor
    is labeled negative when its IoU ratio is lower than rpn_negative_overlap
    (0.3) for all ground-truth boxes. Anchors that are neither positive nor
    negative do not contribute to the training objective. The regression
    targets are the encoded ground-truth boxes associated with the positive
    anchors.

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence predictions. N is the batch size, 1 is the
            foreground/background sigmoid score, and M is the number of bounding boxes.
            The data type can be float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box. The data type can be float32 or float64.
        anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded 
            variances of anchors. The data type can be float32 or float64.
        gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is crowd.
                             The data type must be int32.
        im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
        3 is the height, width and scale.
        rpn_batch_size_per_im(int): Total number of RPN examples per image.
                                    The data type must be int32.
        rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
            by straddle_thresh pixels. The data type must be float32.
        rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
            foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
        rpn_positive_overlap(float): Minimum overlap required between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a positive
            example. The data type must be float32.
        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.

    Returns:
        tuple:
        A tuple(predicted_scores, predicted_location, target_label,
        target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN.
        The target_label and target_bbox are the ground truth,
        respectively. The predicted_location is a 2D Tensor with shape
        [F, 4], and the shape of target_bbox is the same as the shape of
        the predicted_location; F is the number of the foreground
        anchors. The predicted_scores is a 2D Tensor with shape
        [F + B, 1], and the shape of target_label is the same as the shape
        of the predicted_scores; B is the number of the background
        anchors. F and B depend on the input of this operator.
        bbox_inside_weight represents whether the predicted location is a fake
        foreground or not, and its shape is [F, 4].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
            cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
            anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
            anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
                bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)

    """

    helper = LayerHelper('rpn_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    helper.append_op(
        type="rpn_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight
        },
        attrs={
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight


def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
    """
    **Sigmoid Focal Loss Operator.**

    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
    measured between the sigmoid value and target label. 

    The focal loss is given as follows:

    .. math::
  
        \\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
        \\begin{array}{rcl}
        - \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j +1) = label_{i,\\,0}} \\\\
        - \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{ \\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j +1)!= label_{i,\\,0}}
        \\end{array} \\right.


    We know that
    
    .. math::
        \\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
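
    The formula above can be sketched in NumPy as follows. This is only an
    illustrative reference under toy inputs (a hypothetical helper, not the
    implementation of this OP):

    .. code-block:: python

        import numpy as np

        def focal_loss_ref(x, label, fg_num, gamma=2.0, alpha=0.25):
            # x: [N, C] logits, label: [N, 1] with values in [0, C], fg_num: scalar
            sigma = 1. / (1. + np.exp(-x))
            # match[i, j] is True when (j + 1) == label[i, 0]
            match = (np.arange(1, x.shape[1] + 1)[None, :] == label)
            pos = -alpha * (1. - sigma)**gamma * np.log(sigma)
            neg = -(1. - alpha) * sigma**gamma * np.log(1. - sigma)
            return np.where(match, pos, neg) / fg_num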


    Args:
        x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
            all samples. :math:`N` is the number of all samples responsible for optimization in
            a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
            is the total number of positive and negative samples in a mini-batch; Samples are images
            for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
            is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
            float32 or float64.
        label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
            classification. :math:`N` is the number of all samples responsible for optimization in a
            mini-batch, each sample has one target category. The values for positive samples are in the
            range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
            is int32.
        fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
            mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
        gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
            set to 2.0.
        alpha(int|float): Hyper-parameter to balance the positive and negative examples. Default value
            is set to 0.25.

    Returns:
        Variable(the data type is float32 or float64): 
            A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
            tensor :attr:`x`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.data(name='data', shape=[10,80], dtype='float32')
            label = fluid.data(name='label', shape=[10,1], dtype='int32')
            fg_num = fluid.data(name='fg_num', shape=[1], dtype='int32')
            loss = fluid.layers.sigmoid_focal_loss(x=input,
                                                   label=label,
                                                   fg_num=fg_num,
                                                   gamma=2.0,
                                                   alpha=0.25)
    """

    check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                             'sigmoid_focal_loss')
    check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
    check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')

    helper = LayerHelper("sigmoid_focal_loss", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="sigmoid_focal_loss",
        inputs={"X": x,
                "Label": label,
                "FgNum": fg_num},
        attrs={"gamma": gamma,
               'alpha': alpha},
        outputs={"Out": out})
    return out


def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
550 551
                     nms_eta=1.0,
                     return_index=False):
552
    """
Q
qingqing01 已提交
553 554
    Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing following steps:
555

Q
qingqing01 已提交
556 557
    1. Decode input bounding box predictions according to the prior boxes and
       regression locations.
558 559 560 561 562
    2. Get the final detection results by applying multi-class non maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.
563 564 565

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. Data type should be
            float32 or float64. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence predictions. Data type should be float32
            or float64. N is the batch size, C is the
            class number, M is number of bounding boxes.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax]. Data type
            should be float32 or float64.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
            of variance. Data type should be float32 or float64.
        background_label(int): The index of background label,
            the background label will be ignored. If set to -1, then all
            categories will be considered. Default: 0.
        nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold and before NMS. Default: 400.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
            Default: 0.01.
        nms_eta(float): The parameter for adaptive NMS. It works only when the
            value is less than 1.0. Default: 1.0.
        return_index(bool): Whether to return the selected index. Default: False.

    Returns:

        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, a tuple with one Variable(Out) is returned. 

        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
        Data type is the same as input (loc). Each row has six values:
        [label, confidence, xmin, ymin, xmax, ymax]. `No` is
        the total number of detections in this mini-batch. For each instance,
        the offsets in first dimension are called LoD, the offset number is
        N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
        detected results, if it is 0, the i-th image has no detected results.

        Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
        with shape [No, 1] represents the selected index, whose data type is integer.
        The index is the absolute index across batches. No is the same number
        as Out. If the index is used to gather other attributes such as age,
        one needs to first reshape the input (N, M, 1) to (N * M, 1), where
        N is the batch size and M is the number of boxes.
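
        A minimal sketch of that gathering step (assuming a hypothetical
        per-box attribute tensor ``age`` of shape (N, M, 1)):

        .. code-block:: python

            # reshape the per-box attribute and the index so rows align with Out
            age_flat = fluid.layers.reshape(age, shape=[-1, 1])        # (N * M, 1)
            index_flat = fluid.layers.reshape(index, shape=[-1])       # (No,)
            selected_age = fluid.layers.gather(age_flat, index_flat)   # rows match Out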


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
            loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
            nmsed_outs, index = fluid.layers.detection_output(scores=scores,
                                       loc=loc,
                                       prior_box=pb,
                                       prior_box_var=pbv,
                                       return_index=True)
    """
    helper = LayerHelper("detection_output", **locals())
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size')
    scores = nn.softmax(input=scores)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_variable_for_type_inference(
        dtype=decoded_box.dtype)
    if return_index:
        index = helper.create_variable_for_type_inference(dtype='int')
        helper.append_op(
            type="multiclass_nms2",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs,
                     'Index': index},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
        index.stop_gradient = True
    else:
        helper.append_op(
            type="multiclass_nms",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
    nmsed_outs.stop_gradient = True
    if return_index:
        return nmsed_outs, index
    return nmsed_outs


@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment}. The data type is float32 or float64.
        y (Variable): ${y_comment}. The data type is float32 or float64.
        box_normalized(bool): Whether treat the priorbox as a normalized box.
            Set true by default.
    Returns:
        Variable: ${out_comment}. The data type is the same as x.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)

            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)

            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)

            [out_iou] = exe.run(test_program,
                    fetch_list=iou,
                    feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
                                         [0., 0., 1.0, 1.0]]).astype('float32'),
                          'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
            # out_iou is [[0.2857143],
            #             [0.       ]] with shape: [2, 1]
    """
    helper = LayerHelper("iou_similarity", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="iou_similarity",
        inputs={"X": x,
                "Y": y},
        attrs={"box_normalized": box_normalized},
        outputs={"Out": out})
    return out


@templatedoc()
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              name=None,
              axis=0):
    """
    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.
    
    The Encoding schema described below:

    .. math::

        ox = (tx - px) / pw / pxv

        oy = (ty - py) / ph / pyv

        ow = \log(\abs(tw / pw)) / pwv 

        oh = \log(\abs(th / ph)) / phv 

    The Decoding schema described below:
    
    .. math::
  
        ox = (pw * pxv * tx + px) - tw / 2

        oy = (ph * pyv * ty + py) - th / 2

        ow = \exp(pwv * tw) * pw + tw / 2

        oh = \exp(phv * th) * ph + th / 2   

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, 
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote 
    the priorbox's (anchor) center coordinates, width and height. `pxv`, 
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, 
    `ow`, `oh` denote the encoded/decoded coordinates, width and height. 

    During Box Decoding, two modes for broadcast are supported. Say target 
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or 
    [M, 4]. Then prior box will broadcast to target box along the 
    assigned axis. 
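
    As a purely illustrative NumPy sketch of the encoding schema for one box
    pair (hypothetical center-size values, not the OP implementation):

    .. code-block:: python

        import numpy as np

        # hypothetical prior box (px, py, pw, ph), its variances and a target box
        px, py, pw, ph = 0.5, 0.5, 1.0, 1.0
        pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2
        tx, ty, tw, th = 0.7, 0.4, 2.0, 0.5

        ox = (tx - px) / pw / pxv            # encoded center x
        oy = (ty - py) / ph / pyv            # encoded center y
        ow = np.log(np.abs(tw / pw)) / pwv   # encoded width
        oh = np.log(np.abs(th / ph)) / phv   # encoded height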

    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape 
            [M, 4] holds M boxes and data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the 
            left top coordinate of the anchor box, if the input is image feature
            map, they are close to the origin of the coordinate system. 
            [xmax, ymax] is the right bottom coordinate of the anchor box.       
        prior_box_var(List|Variable|None): prior_box_var supports three types
            of input. The first is a Variable with shape [M, 4] which holds M groups
            of variances, with data type float32 or float64. The second is a list of
            4 elements shared by all boxes, with data type float32 or float64. The
            third is None, in which case variances are not involved in the calculation.
        target_box(Variable): This input can be a 2-D LoDTensor with shape 
            [N, 4] when code_type is 'encode_center_size'. This input also can 
            be a 3-D Tensor with shape [N, M, 4] when code_type is 
            'decode_center_size'. Each box is represented as 
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64. 
            This tensor can contain LoD information to represent a batch of inputs. 
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size` 
            by default.
        box_normalized(bool): Whether treat the priorbox as a normalized box.
            Set true by default.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 
        axis(int): Which axis in PriorBox to broadcast for box decode, 
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and 
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is 
            `decode_center_size`. Set 0 by default. 

    Returns:
        Variable:

        output_box(Variable): When code_type is 'encode_center_size', the 
        output tensor of box_coder_op with shape [N, M, 4] representing the 
        result of N target boxes encoded with M Prior boxes and variances. 
        When code_type is 'decode_center_size', N represents the batch size 
        and M represents the number of decoded boxes.

    Examples:
 
        .. code-block:: python
 
            import paddle.fluid as fluid
            # For encode
            prior_box_encode = fluid.data(name='prior_box_encode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_encode = fluid.data(name='target_box_encode',
                                   shape=[81, 4],
                                   dtype='float32')
            output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_encode,
                                    code_type="encode_center_size")
            # For decode
            prior_box_decode = fluid.data(name='prior_box_decode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                   shape=[512, 81, 4],
                                   dtype='float32')
            output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_decode,
                                    code_type="decode_center_size",
                                    box_normalized=False,
                                    axis=1)
    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_coder')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_coder')
    helper = LayerHelper("box_coder", **locals())

    output_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis
    }
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input variance of box_coder must be Variable or list")
    helper.append_op(
        type="box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box


@templatedoc()
def polygon_box_transform(input, name=None):
    """
    ${comment}

    Args:
        input(Variable): The input with shape [batch_size, geometry_channels, height, width].
                         A Tensor with type float32, float64.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Variable: The output with the same shape as input. A Tensor with type float32, float64.

    Examples:
        .. code-block:: python
            
            import paddle.fluid as fluid
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
            out = fluid.layers.polygon_box_transform(input)
    """
    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'polygon_box_transform')
    helper = LayerHelper("polygon_box_transform", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type="polygon_box_transform",
        inputs={"Input": input},
        attrs={},
        outputs={"Output": output})
    return output


@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
                gt_box,
                gt_label,
                anchors,
                anchor_mask,
                class_num,
                ignore_thresh,
                downsample_ratio,
                gt_score=None,
                use_label_smooth=True,
                name=None,
                scale_x_y=1.):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored. 
                          x,y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by 
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in 
                          an image. The data type is float32 or float64.
        gt_label (Variable): class id of ground truth boxes, should be in shape
                            of [N, B]. The data type is int32.
        anchors (list|tuple): ${anchors_comment}
        anchor_mask (list|tuple): ${anchor_mask_comment}
        class_num (int): ${class_num_comment}
        ignore_thresh (float): ${ignore_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): The default value is None.  Normally there is no need 
                       for user to set this property.  For more information, 
                       please refer to :ref:`api_guide_Name`
        gt_score (Variable): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): ${use_label_smooth_comment}
        scale_x_y (float): ${scale_x_y_comment}

    Returns:
        Variable: A 1-D tensor with shape [N], the value of yolov3 loss

    Raises:
        TypeError: Input x of yolov3_loss must be Variable
        TypeError: Input gtbox of yolov3_loss must be Variable
        TypeError: Input gtlabel of yolov3_loss must be Variable
        TypeError: Input gtscore of yolov3_loss must be None or Variable
        TypeError: Attr anchors of yolov3_loss must be list or tuple
        TypeError: Attr class_num of yolov3_loss must be an integer
        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
        TypeError: Attr use_label_smooth of yolov3_loss must be a bool value

    Examples:
      .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
          gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
          gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
          gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
          anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
          anchor_mask = [0, 1, 2]
          loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
                                          gt_score=gt_score, anchors=anchors, 
                                          anchor_mask=anchor_mask, class_num=80,
                                          ignore_thresh=0.7, downsample_ratio=32)
    """
    helper = LayerHelper('yolov3_loss', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolov3_loss must be Variable")
    if not isinstance(gt_box, Variable):
        raise TypeError("Input gtbox of yolov3_loss must be Variable")
    if not isinstance(gt_label, Variable):
        raise TypeError("Input gtlabel of yolov3_loss must be Variable")
    if gt_score is not None and not isinstance(gt_score, Variable):
        raise TypeError("Input gtscore of yolov3_loss must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
    if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
        raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolov3_loss must be an integer")
    if not isinstance(ignore_thresh, float):
        raise TypeError(
            "Attr ignore_thresh of yolov3_loss must be a float number")
    if not isinstance(use_label_smooth, bool):
        raise TypeError(
            "Attr use_label_smooth of yolov3_loss must be a bool value")

    loss = helper.create_variable_for_type_inference(dtype=x.dtype)

    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {
        "X": x,
        "GTBox": gt_box,
        "GTLabel": gt_label,
    }
    if gt_score is not None:
        inputs["GTScore"] = gt_score

    attrs = {
        "anchors": anchors,
        "anchor_mask": anchor_mask,
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
        "downsample_ratio": downsample_ratio,
        "use_label_smooth": use_label_smooth,
        "scale_x_y": scale_x_y,
    }

    helper.append_op(
        type='yolov3_loss',
        inputs=inputs,
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask
        },
        attrs=attrs)
    return loss


@templatedoc(op_type="yolo_box")
def yolo_box(x,
             img_size,
             anchors,
             class_num,
             conf_thresh,
             downsample_ratio,
             clip_bbox=True,
             name=None,
             scale_x_y=1.):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64. 
        img_size (Variable): ${img_size_comment} The data type is int32. 
        anchors (list|tuple): ${anchors_comment}
        class_num (int): ${class_num_comment}
        conf_thresh (float): ${conf_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        clip_bbox (bool): ${clip_bbox_comment}
        scale_x_y (float): ${scale_x_y_comment}
        name (string): The default value is None.  Normally there is no need 
                       for user to set this property.  For more information, 
                       please refer to :ref:`api_guide_Name`

    Returns:
        Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification 
        scores of boxes.

    Raises:
        TypeError: Input x of yolo_box must be Variable
        TypeError: Attr anchors of yolo box must be list or tuple
        TypeError: Attr class_num of yolo box must be an integer
        TypeError: Attr conf_thresh of yolo box must be a float number

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
        img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
        anchors = [10, 13, 16, 30, 33, 23]
        boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors, 
                                        conf_thresh=0.01, downsample_ratio=32)
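        # Note: the 255 input channels assume 3 anchors per grid point, i.e.
        # len(anchors) // 2 * (5 + class_num) = 3 * (5 + 80) = 255.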
    """
    helper = LayerHelper('yolo_box', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolo_box must be Variable")
    if not isinstance(img_size, Variable):
        raise TypeError("Input img_size of yolo_box must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolo_box must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolo_box must be an integer")
    if not isinstance(conf_thresh, float):
        raise TypeError("Attr conf_thresh of yolo_box must be a float number")

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
        "clip_bbox": clip_bbox,
        "scale_x_y": scale_x_y,
    }

    helper.append_op(
        type='yolo_box',
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs)
    return boxes, scores


@templatedoc()
def detection_map(detect_res,
                  label,
                  class_num,
                  background_label=0,
                  overlap_threshold=0.3,
                  evaluate_difficult=True,
                  has_state=None,
                  input_states=None,
                  out_states=None,
                  ap_version='integral'):
    """
    ${comment}

    Args:
        detect_res: ${detect_res_comment}
        label:  ${label_comment}
        class_num: ${class_num_comment}
        background_label: ${background_label_comment}
        overlap_threshold: ${overlap_threshold_comment}
        evaluate_difficult: ${evaluate_difficult_comment}
        has_state: ${has_state_comment}
        input_states: (tuple|None) If not None, It contains 3 elements:
            (1) pos_count ${pos_count_comment}.
            (2) true_pos ${true_pos_comment}.
            (3) false_pos ${false_pos_comment}.
        out_states: (tuple|None) If not None, it contains 3 elements.
            (1) accum_pos_count ${accum_pos_count_comment}.
            (2) accum_true_pos ${accum_true_pos_comment}.
            (3) accum_false_pos ${accum_false_pos_comment}.
        ap_version: ${ap_type_comment}

    Returns:
        ${map_comment}


    Examples:
          .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers import detection
            detect_res = fluid.data(
                name='detect_res',
                shape=[10, 6],
                dtype='float32')
            label = fluid.data(
                name='label',
                shape=[10, 6],
                dtype='float32')

            map_out = detection.detection_map(detect_res, label, 21)
    """
    helper = LayerHelper("detection_map", **locals())

    def __create_var(type):
        return helper.create_variable_for_type_inference(dtype=type)

    map_out = __create_var('float32')
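    # When out_states is given, the accumulated statistics are written into the
    # provided variables (used together with input_states so mAP can be
    # accumulated across mini-batches); otherwise temporary variables are created.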
    accum_pos_count_out = out_states[
        0] if out_states is not None else __create_var('int32')
    accum_true_pos_out = out_states[
        1] if out_states is not None else __create_var('float32')
    accum_false_pos_out = out_states[
        2] if out_states is not None else __create_var('float32')

    pos_count = input_states[0] if input_states is not None else None
    true_pos = input_states[1] if input_states is not None else None
    false_pos = input_states[2] if input_states is not None else None

    helper.append_op(
        type="detection_map",
        inputs={
            'Label': label,
            'DetectRes': detect_res,
            'HasState': has_state,
            'PosCount': pos_count,
            'TruePos': true_pos,
            'FalsePos': false_pos
        },
        outputs={
            'MAP': map_out,
            'AccumPosCount': accum_pos_count_out,
            'AccumTruePos': accum_true_pos_out,
            'AccumFalsePos': accum_false_pos_out
        },
        attrs={
            'overlap_threshold': overlap_threshold,
            'evaluate_difficult': evaluate_difficult,
            'ap_type': ap_version,
            'class_num': class_num,
        })
    return map_out


def bipartite_match(dist_matrix,
                    match_type=None,
                    dist_threshold=None,
                    name=None):
    """
    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
    distance matrix. For input 2D matrix, the bipartite matching algorithm can
    find the matched column for each row (matched means the largest distance),
    and can also find the matched row for each column. This operator only
    calculates matched indices from column to row. For each instance,
    the number of matched indices is the column number of the input distance
    matrix. **The OP only supports CPU**.

    There are two outputs, matched indices and distance.
    In short, this algorithm matches the best (maximum distance) row entity
    to each column entity, and the matched indices are not duplicated in each
    row of ColToRowMatchIndices. If a column entity is not matched to any row
    entity, -1 is set in ColToRowMatchIndices.
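
    A small illustrative example (the numbers are made up): given the 2-D
    distance matrix below, the greedy algorithm first takes the globally largest
    distance 0.9 and matches column 1 to row 0, then takes 0.8 from the remaining
    rows and columns and matches column 0 to row 1; column 2 stays unmatched:

    .. code-block:: text

        dist_matrix          = [[0.3, 0.9, 0.6],
                                [0.8, 0.2, 0.7]]

        ColToRowMatchIndices = [[1, 0, -1]]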

    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
    If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
    If Tensor, the height of ColToRowMatchIndices is 1.

    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider to use :code:`ssd_loss` instead.

    Args:
        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
            [K, M]. The data type is float32 or float64. It is pair-wise 
            distance matrix between the entities represented by each row and 
            each column. For example, assumed one entity is A with shape [K], 
            another entity is B with shape [M]. The dist_matrix[i][j] is the 
            distance between A[i] and B[j]. The bigger the distance is, the 
            better matching the pairs are. NOTE: This tensor can contain LoD 
            information to represent a batch of inputs. One instance of this 
            batch can contain different numbers of entities.
        match_type(str, optional): The type of matching method, should be
           'bipartite' or 'per_prediction'. None ('bipartite') by default.
        dist_threshold(float32, optional): If `match_type` is 'per_prediction',
            this threshold is to determine the extra matching bboxes based
            on the maximum distance, 0.5 by default.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default.
 
    Returns:
        Tuple:

        matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
        type is int32. N is the batch size. If match_indices[i][j] is -1, it
        means B[j] does not match any entity in i-th instance.
        Otherwise, it means B[j] is matched to row
        match_indices[i][j] in i-th instance. The row number of
        i-th instance is saved in match_indices[i][j].

        matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
        type is float32. N is batch size. If match_indices[i][j] is -1,
        match_distance[i][j] is also -1.0. Otherwise, assumed
        match_distance[i][j] = d, and the row offsets of each instance
        are called LoD. Then match_distance[i][j] =
        dist_matrix[d+LoD[i]][j].

    Examples:

        >>> import paddle.fluid as fluid
        >>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
        >>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
    """
    helper = LayerHelper('bipartite_match', **locals())
    match_indices = helper.create_variable_for_type_inference(dtype='int32')
    match_distance = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype)
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
        outputs={
            'ColToRowMatchIndices': match_indices,
            'ColToRowMatchDist': match_distance
        })
    return match_indices, match_distance


def target_assign(input,
                  matched_indices,
                  negative_indices=None,
                  mismatch_value=None,
                  name=None):
    """
    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights to each prediction. The weights are used to specify which
    predictions do not contribute to the training loss.

    For each instance, the output `out` and `out_weight` are assigned based on
    `match_indices` and `negative_indices`.
    Assumed that the row offset for each instance in `input` is called lod,
    this operator assigns classification/regression targets by performing the
    following steps:

    1. Assigning all outputs based on `match_indices`:

    .. code-block:: text

        If id = match_indices[i][j] >= 0,

            out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
            out_weight[i][j] = 1.

        Otherwise,

            out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][j] = 0.

    2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:

    Assumed that the i-th instance in `neg_indices` is called `neg_indice`,
    for the i-th instance:

    .. code-block:: text

        for id in neg_indice:
            out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][id] = 1.0

    Args:
       input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
           Data type should be int32 or float32.
       matched_indices (Variable): The input matched indices
           is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
           the j-th entity of column is not matched to any entity of row in
           i-th instance.
       negative_indices (Variable, optional): The input negative example indices
           are an optional input with shape [Neg, 1] and int32 type, where Neg is
           the total number of negative example indices.
       mismatch_value (float32, optional): Fill this value to the mismatched
           location.
       name (string): The default value is None.  Normally there is no need for
           user to set this property.  For more information, please refer
           to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple(out, out_weight) is returned.

        out (Variable): a 3D Tensor with shape [N, P, K] and same data type
        with `input`, N and P is the same as they are in `matched_indices`,
        K is the same as it in input of X.

        out_weight (Variable): the weight for output with the shape of [N, P, 1].
        Data type is float32.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(
                name='x',
                shape=[4, 20, 4],
                dtype='float',
                lod_level=1)
            matched_id = fluid.data(
                name='indices',
                shape=[8, 20],
                dtype='int32')
            trg, trg_weight = fluid.layers.target_assign(
                x,
                matched_id,
                mismatch_value=0)
    """
    helper = LayerHelper('target_assign', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_weight = helper.create_variable_for_type_inference(dtype='float32')
    helper.append_op(
        type='target_assign',
        inputs={
            'X': input,
            'MatchIndices': matched_indices,
            'NegIndices': negative_indices
        },
        outputs={'Out': out,
                 'OutWeight': out_weight},
        attrs={'mismatch_value': mismatch_value})
    return out, out_weight


def ssd_loss(location,
             confidence,
             gt_box,
             gt_label,
             prior_box,
             prior_box_var=None,
             background_label=0,
             overlap_threshold=0.5,
             neg_pos_ratio=3.0,
             neg_overlap=0.5,
             loc_loss_weight=1.0,
             conf_loss_weight=1.0,
             match_type='per_prediction',
             mining_type='max_negative',
             normalize=True,
             sample_size=None):
    """
    **Multi-box loss layer for object detection algorithm of SSD**

    This layer is to compute detection loss for SSD given the location offset
    predictions, confidence predictions, prior boxes and ground-truth bounding
    boxes and labels, and the type of hard example mining. The returned loss
    is a weighted sum of the localization loss (or regression loss) and
    confidence loss (or classification loss) by performing the following steps:

    1. Find matched bounding box by bipartite matching algorithm.

      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.

      1.2 Compute matched bounding box by bipartite matching algorithm.

    2. Compute confidence for mining hard examples

      2.1. Get the target label based on matched indices.

      2.2. Compute confidence loss.

    3. Apply hard example mining to get the negative example indices and update
       the matched indices.

    4. Assign classification and regression targets

      4.1. Encoded bbox according to the prior boxes.

      4.2. Assign regression targets.

      4.3. Assign classification targets.

    5. Compute the overall objective loss.

      5.1 Compute confidence loss.

      5.2 Compute localization loss.

      5.3 Compute the overall weighted loss.

    Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
            the layout is [xmin, ymin, xmax, ymax]. The data type is float32 or
            float64.
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
            `location`, C is the class number. The data type is float32 or
            float64.
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type is float32 or float64.
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
            with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
            mini-batch input, 1 is the number of class. The data type is float32
            or float64.
        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
            Np and 4 are the same as they are in `location`. The data type is
            float32 or float64.
        prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
            with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
        background_label (int): The index of background label, 0 by default.
        overlap_threshold (float): If match_type is 'per_prediction', use
            'overlap_threshold' to determine the extra matching bboxes when finding \
            matched boxes. 0.5 by default.
        neg_pos_ratio (float): The ratio of the negative boxes to the positive
            boxes, used only when mining_type is 'max_negative', 3.0 by default.
        neg_overlap (float): The negative overlap upper bound for the unmatched
            predictions. Use only when mining_type is 'max_negative',
            0.5 by default.
        loc_loss_weight (float): Weight for localization loss, 1.0 by default.
        conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
        match_type (str): The type of matching method during training, should
            be 'bipartite' or 'per_prediction', 'per_prediction' by default.
        mining_type (str): The hard example mining type, should be 'hard_example'
            or 'max_negative', now only support `max_negative`.
        normalize (bool): Whether to normalize the SSD loss by the total number
            of output locations, True by default.
        sample_size (int): The max sample size of negative box, used only when
            mining_type is 'hard_example'.

    Returns:
        Variable(Tensor):  The weighted sum of the localization loss and confidence loss, \
        with shape [N * Np, 1], N and Np are the same as they are in
        `location`. The data type is float32 or float64.

    Raises:
        ValueError: If mining_type is 'hard_example', now only support mining \
        type of `max_negative`.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                           name='prior_box',
                           shape=[10, 4],
                           dtype='float32')
            pbv = fluid.data(
                           name='prior_box_var',
                           shape=[10, 4],
                           dtype='float32')
            loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
            gt_box = fluid.data(
                 name='gt_box', shape=[4], lod_level=1, dtype='float32')
            gt_label = fluid.data(
                 name='gt_label', shape=[1], lod_level=1, dtype='float32')
            loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
    """

    helper = LayerHelper('ssd_loss', **locals())
    if mining_type != 'max_negative':
        raise ValueError("Only support mining_type == max_negative now.")

    num, num_prior, num_class = confidence.shape
    conf_shape = nn.shape(confidence)

    def __reshape_to_2d(var):
        return nn.flatten(x=var, axis=2)

    # 1. Find matched bounding box by prior box.
    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
    iou = iou_similarity(x=gt_box, y=prior_box)
    #   1.2 Compute matched bounding box by bipartite matching algorithm.
    matched_indices, matched_dist = bipartite_match(iou, match_type,
                                                    overlap_threshold)

    # 2. Compute confidence for mining hard examples
    # 2.1. Get the target label based on matched indices
    gt_label = nn.reshape(
        x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
    gt_label.stop_gradient = True
    target_label, _ = target_assign(
        gt_label, matched_indices, mismatch_value=background_label)
    # 2.2. Compute confidence loss.
    # Reshape confidence to 2D tensor.
    confidence = __reshape_to_2d(confidence)
    target_label = tensor.cast(x=target_label, dtype='int64')
    target_label = __reshape_to_2d(target_label)
    target_label.stop_gradient = True
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    # 3. Mining hard examples
    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
    actual_shape.stop_gradient = True
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    conf_loss = nn.reshape(
        x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
    conf_loss.stop_gradient = True
    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
    dtype = matched_indices.dtype
    updated_matched_indices = helper.create_variable_for_type_inference(
        dtype=dtype)
    helper.append_op(
        type='mine_hard_examples',
        inputs={
            'ClsLoss': conf_loss,
            'LocLoss': None,
            'MatchIndices': matched_indices,
            'MatchDist': matched_dist,
        },
        outputs={
            'NegIndices': neg_indices,
            'UpdatedMatchIndices': updated_matched_indices
        },
        attrs={
            'neg_pos_ratio': neg_pos_ratio,
            'neg_dist_threshold': neg_overlap,
            'mining_type': mining_type,
            'sample_size': sample_size,
        })

    # 4. Assign classification and regression targets
    # 4.1. Encoded bbox according to the prior boxes.
    encoded_bbox = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size')
    # 4.2. Assign regression targets
    target_bbox, target_loc_weight = target_assign(
        encoded_bbox, updated_matched_indices, mismatch_value=background_label)
    # 4.3. Assign classification targets
    target_label, target_conf_weight = target_assign(
        gt_label,
        updated_matched_indices,
        negative_indices=neg_indices,
        mismatch_value=background_label)

    # 5. Compute loss.
    # 5.1 Compute confidence loss.
    target_label = __reshape_to_2d(target_label)
    target_label = tensor.cast(x=target_label, dtype='int64')

    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    target_conf_weight = __reshape_to_2d(target_conf_weight)
    conf_loss = conf_loss * target_conf_weight

    # the target_label and target_conf_weight do not have gradient.
    target_label.stop_gradient = True
    target_conf_weight.stop_gradient = True

    # 5.2 Compute regression loss.
    location = __reshape_to_2d(location)
    target_bbox = __reshape_to_2d(target_bbox)

    loc_loss = nn.smooth_l1(location, target_bbox)
    target_loc_weight = __reshape_to_2d(target_loc_weight)
    loc_loss = loc_loss * target_loc_weight

    # the target_bbox and target_loc_weight do not have gradient.
    target_bbox.stop_gradient = True
    target_loc_weight.stop_gradient = True

    # 5.3 Compute overall weighted loss.
    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
    # reshape to [N, Np], N is the batch size and Np is the prior box number.
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
    loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
    if normalize:
        normalizer = nn.reduce_sum(target_loc_weight)
        loss = loss / normalizer

    return loss


def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              name=None,
              min_max_aspect_ratios_order=False):
    """
    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, N is determined by
    the count of min_sizes, max_sizes and aspect_ratios, The size of the
    box is in range(min_size, max_size) interval, which is generated in
    sequence according to the aspect_ratios.

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes(list|tuple|float): the min sizes of generated prior boxes.
       max_sizes(list|tuple|None): the max sizes of generated prior boxes.
            Default: None.
       aspect_ratios(list|tuple|float): the aspect ratios of generated
            prior boxes. Default: [1.].
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       step(list|tuple): Prior boxes step across width and height, If
            step[0] equals to 0.0 or step[1] equals to 0.0, the prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes(Variable): the output prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.

        variances(Variable): the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.

    Examples:
        .. code-block:: python

	    #declarative mode
	    import paddle.fluid as fluid
	    import numpy as np
	    input = fluid.data(name="input", shape=[None,3,6,9])
	    image = fluid.data(name="image", shape=[None,3,9,12])
	    box, var = fluid.layers.prior_box(
                 input=input,
                 image=image,
		 min_sizes=[100.],
                 clip=True,
                 flip=True)

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
 
	    # prepare a batch of data
	    input_data = np.random.rand(1,3,6,9).astype("float32")
	    image_data = np.random.rand(1,3,9,12).astype("float32")
 
	    box_out, var_out = exe.run(fluid.default_main_program(),
                feed={"input":input_data,"image":image_data},
                fetch_list=[box,var],
                return_numpy=True)
 
	    # print(box_out.shape)
	    # (6, 9, 1, 4)
	    # print(var_out.shape)
	    # (6, 9, 1, 4)
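	    # num_priors is 1 here: a single min_size, the default aspect
	    # ratio [1.] (flipping 1.0 adds no new ratio) and no max_sizes.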

	    # imperative mode
	    import paddle.fluid.dygraph as dg

	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		image = dg.to_variable(image_data)
    		box, var = fluid.layers.prior_box(
		    input=input,
		    image=image,
		    min_sizes=[100.],
		    clip=True,
		    flip=True)
		# print(box.shape)
		# [6L, 9L, 1L, 4L]
                # print(var.shape)
		# [6L, 9L, 1L, 4L]

    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    attrs = {
        'min_sizes': min_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'flip': flip,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'min_max_aspect_ratios_order': min_max_aspect_ratios_order
    }
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        attrs['max_sizes'] = max_sizes

    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def density_prior_box(input,
                      image,
                      densities=None,
                      fixed_sizes=None,
                      fixed_ratios=None,
                      variance=[0.1, 0.1, 0.2, 0.2],
                      clip=False,
                      steps=[0.0, 0.0],
                      offset=0.5,
                      flatten_to_2d=False,
                      name=None):
    """

    This op generates density prior boxes for SSD(Single Shot MultiBox Detector) 
    algorithm. Each position of the input produces N prior boxes, N is
    determined by the count of densities, fixed_sizes and fixed_ratios. 
    Boxes center at grid points around each input position is generated by 
    this operator, and the grid points is determined by densities and 
    the count of density prior box is determined by fixed_sizes and fixed_ratios. 
    Obviously, the number of fixed_sizes is equal to the number of densities.
    
    For densities_i in densities:
    
    .. math::

        N\_density_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)

    N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
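
    For example, with densities = [4, 2, 1] and a single fixed ratio, each input
    position produces 1 * (4^2 + 2^2 + 1^2) = 21 density prior boxes, which is
    the num_priors value shown in the examples below.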

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
            the layout is NCHW.
       densities(list|tuple|None): The densities of generated density prior 
            boxes, this attribute should be a list or tuple of integers. 
            Default: None.
       fixed_sizes(list|tuple|None): The fixed sizes of generated density
            prior boxes, this attribute should a list or tuple of same 
            length with :attr:`densities`. Default: None.
       fixed_ratios(list|tuple|None): The fixed ratios of generated density
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
       variance(list|tuple): The variances to be encoded in density prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       clip(bool): Whether to clip out of boundary boxes. Default: False.
       step(list|tuple): Prior boxes step across width and height, If
            step[0] equals 0.0 or step[1] equals 0.0, the density prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       flatten_to_2d(bool): Whether to flatten output prior boxes and variance
           to 2D shape, the second dim is 4. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
    
    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes: the output density prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.

        variances: the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.


    Examples:

        .. code-block:: python

            #declarative mode

            import paddle.fluid as fluid
            import numpy as np

            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.density_prior_box(
                 input=input,
                 image=image,
                 densities=[4, 2, 1],
                 fixed_sizes=[32.0, 64.0, 128.0],
                 fixed_ratios=[1.],
                 clip=True,
                 flatten_to_2d=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
 
            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(
                fluid.default_main_program(),
                feed={"input":input_data,
                      "image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (1134, 4)
            # print(var_out.shape)
            # (1134, 4)


            #imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.density_prior_box(
                    input=input,
                    image=image,
                    densities=[4, 2, 1],
                    fixed_sizes=[32.0, 64.0, 128.0],
                    fixed_ratios=[1.],
                    clip=True)

                # print(box.shape)
                # [6L, 9L, 21L, 4L]
                # print(var.shape)
                # [6L, 9L, 21L, 4L]

    """
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(densities):
        raise TypeError('densities should be a list or a tuple or None.')
    if not _is_list_or_tuple_(fixed_sizes):
        raise TypeError('fixed_sizes should be a list or a tuple or None.')
    if not _is_list_or_tuple_(fixed_ratios):
        raise TypeError('fixed_ratios should be a list or a tuple or None.')
    if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
    }
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def multi_box_head(inputs,
                   image,
                   base_size,
                   num_classes,
                   aspect_ratios,
                   min_ratio=None,
                   max_ratio=None,
                   min_sizes=None,
                   max_sizes=None,
                   steps=None,
                   step_w=None,
                   step_h=None,
                   offset=0.5,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   flip=True,
                   clip=False,
                   kernel_size=1,
                   pad=0,
                   stride=1,
                   name=None,
                   min_max_aspect_ratios_order=False):
    """
    Base on SSD ((Single Shot MultiBox Detector) algorithm, generate prior boxes,
    regression location and classification confidence on multiple input feature
    maps, then output the concatenate results. The details of this algorithm,
    please refer the section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector
    <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs (list(Variable)|tuple(Variable)): The list of input variables,
           the format of all Variables are 4-D Tensor, layout is NCHW.
           Data type should be float32 or float64.
       image (Variable): The input image, layout is NCHW. Data type should be
           the same as inputs.
       base_size(int): the base_size is input image size. When len(inputs) > 2
           and `min_size` and `max_size` are None, the `min_size` and `max_size`
           are calculated by `base_size`, 'min_ratio' and `max_ratio`. The
           formula is as follows:

              ..  code-block:: text

                  min_sizes = []
                  max_sizes = []
                  step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
                  for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
                      min_sizes.append(base_size * ratio / 100.)
                      max_sizes.append(base_size * (ratio + step) / 100.)
                      min_sizes = [base_size * .10] + min_sizes
                      max_sizes = [base_size * .20] + max_sizes

       num_classes(int): The number of classes.
       aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
           prior boxes. The length of input and aspect_ratios must be equal.
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       min_sizes(list|tuple|None): If `len(inputs) <=2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       max_sizes(list|tuple|None): If `len(inputs) <=2`,
            max_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
       step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
       step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
       offset(float): Prior boxes center offset. Default: 0.5
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default: True.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       kernel_size(int): The kernel size of conv2d. Default: 1.
       pad(int|list|tuple): The padding of conv2d. Default:0.
       stride(int|list|tuple): The stride of conv2d. Default:1,
       name(str): The default value is None.  Normally there is no need
           for user to set this property.  For more information, please
           refer to :ref:`api_guide_Name`.
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.

    Returns:
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

        mbox_loc (Variable): The predicted boxes' location of the inputs. The
        layout is [N, num_priors, 4], where N is batch size, ``num_priors``
        is the number of prior boxes. Data type is the same as input.

        mbox_conf (Variable): The predicted boxes' confidence of the inputs.
        The layout is [N, num_priors, C], where ``N`` and ``num_priors`` 
        has the same meaning as above. C is the number of Classes.
        Data type is the same as input.

        boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
        The meaning of num_priors is the same as above.
        Data type is the same as input.

        variances (Variable): the expanded variances for prior boxes.
        The layout is [num_priors, 4]. Data type is the same as input.

    Examples 1: set min_ratio and max_ratio:
        .. code-block:: python

          import paddle.fluid as fluid

          images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)
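          # With base_size=300, min_ratio=20 and max_ratio=90 over these 6 inputs,
          # the helper derives the box sizes from the formula above, roughly:
          #   min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
          #   max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0]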

    Examples 2: set min_sizes and max_sizes:
        .. code-block:: python

          import paddle.fluid as fluid

          images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
            max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    """

    def _reshape_with_axis_(input, axis=1):
        out = nn.flatten(x=input, axis=axis)
        return out

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    elif min_sizes is None and max_sizes is None:
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios, num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.')
    if step_h is not None:
        _is_list_or_tuple_and_equal(
            step_h, num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.')
    if step_w is not None:
        _is_list_or_tuple_and_equal(
            step_w, num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
    if steps is not None:
        _is_list_or_tuple_and_equal(
            steps, num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]

        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
                             variance, flip, clip, step, offset, None,
                             min_max_aspect_ratios_order)

        box_results.append(box)
        var_results.append(var)

        num_boxes = box.shape[2]

        # get loc
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)

        mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
        mbox_locs.append(mbox_loc_flatten)

        # get conf
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)
        conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
        mbox_confs.append(conf_loc_flatten)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = nn.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes])

    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var


def anchor_generator(input,
                     anchor_sizes=None,
                     aspect_ratios=None,
                     variance=[0.1, 0.1, 0.2, 0.2],
                     stride=None,
                     offset=0.5,
                     name=None):
    """
    **Anchor generator operator**

    Generate anchors for Faster RCNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are
    ordered by looping over aspect_ratios first and then anchor_sizes.

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, the anchor size of 64 means the area of this anchor 
          equals to 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios 
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box 
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by 
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer 
           to :ref:`api_guide_Name`. Usually name is no need to set and None 
           by default. 

    Returns:
        Tuple:

        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position. 
        Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
 
        Variances(Variable): The expanded variances of anchors
        with a layout of [H, W, num_priors, 4].
        H is the height of input, W is the width of input
        num_anchors is the box count of each position.
        Each variance is in (xcenter, ycenter, w, h) format.


    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
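
            # A hedged expectation derived from the description above (not a
            # verified output): each position yields
            # len(anchor_sizes) * len(aspect_ratios) = 4 * 3 = 12 anchors, so
            # `anchor` should have shape [16, 16, 12, 4] for this 16x16 input.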
    """
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple ',
                         'with length 2, (stride_width, stride_height).')

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor,
                 "Variances": var},
        attrs=attrs, )
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var


def roi_perspective_transform(input,
                              rois,
                              transformed_height,
                              transformed_width,
                              spatial_scale=1.0,
                              name=None):
    """
    **The** `rois` **of this op should be a LoDTensor.**

    ROI perspective transform op applies a perspective transform to map each RoI into a
    rectangular region. Perspective transform is a type of transformation in linear algebra.

    Parameters:
        input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of 
                          input tensor is NCHW. Where N is batch size, C is the
                          number of input channels, H is the height of the feature,
                          and W is the width of the feature. The data type is float32.
        rois (Variable):  2-D LoDTensor, ROIs (Regions of Interest) to be transformed. 
                          It should be a 2-D LoDTensor of shape (num_rois, 8). Given as 
                          [[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the 
                          top left coordinates, and (x2, y2) is the top right 
                          coordinates, and (x3, y3) is the bottom right coordinates, 
                          and (x4, y4) is the bottom left coordinates. The data type is the
                          same as `input` 
        transformed_height (int): The height of transformed output.
        transformed_width (int): The width of transformed output.
        spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
        name(str, optional): The default value is None.  
                             Normally there is no need for user to set this property.  
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
            A tuple with three Variables. (out, mask, transform_matrix)

            out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`

            mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, 1, transformed_h, transformed_w). The data type is int32

            transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
            a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`

    Return Type:
        tuple

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
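
            # A hedged expectation based on the Returns section above (not a
            # verified output): for this call, `out` should have shape
            # (num_rois, 256, 7, 7), `mask` (num_rois, 1, 7, 7) and
            # `transform_matrix` (num_rois, 9).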
    """
    check_variable_and_dtype(input, 'input', ['float32'],
                             'roi_perspective_transform')
    check_variable_and_dtype(rois, 'rois', ['float32'],
                             'roi_perspective_transform')
    check_type(transformed_height, 'transformed_height', int,
               'roi_perspective_transform')
    check_type(transformed_width, 'transformed_width', int,
               'roi_perspective_transform')
    check_type(spatial_scale, 'spatial_scale', float,
               'roi_perspective_transform')

    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input,
                "ROIs": rois},
        outputs={
            "Out": out,
            "Out2InIdx": out2in_idx,
            "Out2InWeights": out2in_w,
            "Mask": mask,
            "TransformMatrix": transform_matrix
        },
        attrs={
            "transformed_height": transformed_height,
            "transformed_width": transformed_width,
            "spatial_scale": spatial_scale
        })
    return out, mask, transform_matrix


def generate_proposal_labels(rpn_rois,
                             gt_classes,
                             is_crowd,
                             gt_boxes,
                             im_info,
                             batch_size_per_im=256,
                             fg_fraction=0.25,
                             fg_thresh=0.25,
                             bg_thresh_hi=0.5,
                             bg_thresh_lo=0.0,
                             bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                             class_nums=None,
                             use_random=True,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False):
    """
    **Generate Proposal Labels of Faster-RCNN**

    Given the bounding boxes produced by GenerateProposalOp and the groundtruth boxes, this operator
    samples foreground and background boxes and computes the classification and regression loss targets.

    RpnRois are the output boxes of RPN that have been processed by generate_proposal_op. These boxes
    are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
    If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
    If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
    it is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.

    For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
    Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.

    Args:
        rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
        is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
        gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
        im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.

        batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
        fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
        bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
        class_nums(int): Class number. The data type must be int32.
        use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): bbox regression use class agnostic simply which only represent fg and bg boxes.
        is_cascade_rcnn(bool): it will filter some bbox crossing the image's boundary when setting True.

    Returns:
        tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.

        - **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
        - **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
        - **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
        - **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
        - **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
                           rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
                           class_nums=10)
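
            # A hedged note, not part of the original example: with the default
            # batch_size_per_im=256 and fg_fraction=0.25, at most 256 RoIs are
            # sampled per image, of which at most 256 * 0.25 = 64 are foreground.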

    """

    helper = LayerHelper('generate_proposal_labels', **locals())

    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
    labels_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    bbox_targets = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_inside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_outside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)

    helper.append_op(
        type="generate_proposal_labels",
        inputs={
            'RpnRois': rpn_rois,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtBoxes': gt_boxes,
            'ImInfo': im_info
        },
        outputs={
            'Rois': rois,
            'LabelsInt32': labels_int32,
            'BboxTargets': bbox_targets,
            'BboxInsideWeights': bbox_inside_weights,
            'BboxOutsideWeights': bbox_outside_weights
        },
        attrs={
            'batch_size_per_im': batch_size_per_im,
            'fg_fraction': fg_fraction,
            'fg_thresh': fg_thresh,
            'bg_thresh_hi': bg_thresh_hi,
            'bg_thresh_lo': bg_thresh_lo,
            'bbox_reg_weights': bbox_reg_weights,
            'class_nums': class_nums,
            'use_random': use_random,
            'is_cls_agnostic': is_cls_agnostic,
            'is_cascade_rcnn': is_cascade_rcnn
        })

    rois.stop_gradient = True
    labels_int32.stop_gradient = True
    bbox_targets.stop_gradient = True
    bbox_inside_weights.stop_gradient = True
    bbox_outside_weights.stop_gradient = True

    return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights


def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                         labels_int32, num_classes, resolution):
    """
    **Generate Mask Labels for Mask-RCNN**

    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.

    Please note the data format of the ground-truth segmentation; assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.

        .. code-block:: python

            #[
            #  [[[229.14, 370.9, 229.14, 370.9, ...]],
            #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
            #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
            #]

            batch_masks = []
            for semgs in batch_semgs:
                gt_masks = []
                for semg in semgs:
                    gt_segm = []
                    for polys in semg:
                        gt_segm.append(np.array(polys).reshape(-1, 2))
                    gt_masks.append(gt_segm)
                batch_masks.append(gt_masks)
            
            
            place = fluid.CPUPlace()
            feeder = fluid.DataFeeder(place=place, feed_list=feeds)
            feeder.feed(batch_masks)

    Args:
        im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
            data type. N is the batch size, each element is
            [height, width, scale] of image. Image scale is
            target_size / original_size, target_size is the size after resize,
            original_size is the original image size.
        gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
            should be int. M is the total number of ground-truth, each
            element is a class label.
        is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
            as gt_classes, each element is a flag indicating whether a
            groundtruth is crowd.
        gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type, it's LoD level is 3.
            Usually users do not need to understand LoD;
            they only need to return the correct data format in the reader.
            The LoD[0] represents the ground-truth objects number of
            each instance. LoD[1] represents the segmentation counts of each
            objects. LoD[2] represents the polygons number of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            (x, y) coordinate points.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data type.
            R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of the original image.
        labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
            of int32. R is the same as it in `rois`. Each element represents
            a class label of a RoI.
        num_classes (int): Class number.
        resolution (int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable):  A 2D LoDTensor with shape [P, 4] and same data
        type as `rois`. P is the total number of sampled RoIs. Each element
        is a bounding box with [xmin, ymin, xmax, ymax] format in range of
        original image size.

        mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
        and int data type, each element represents the output mask RoI
        index with regard to input RoIs.

        mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
        data type, K is the classes number and M is the resolution of mask
        predictions. Each element represents the binary mask targets.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          im_info = fluid.data(name="im_info", shape=[None, 3],
              dtype="float32")
          gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
              dtype="float32", lod_level=1)
          is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
              dtype="float32", lod_level=1)
          gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
              dtype="float32", lod_level=3)
          # rois, roi_labels can be the output of
          # fluid.layers.generate_proposal_labels.
          rois = fluid.data(name="rois", shape=[None, 4],
              dtype="float32", lod_level=1)
          roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
              dtype="int32", lod_level=1)
          mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
              im_info=im_info,
              gt_classes=gt_classes,
              is_crowd=is_crowd,
              gt_segms=gt_masks,
              rois=rois,
              labels_int32=roi_labels,
              num_classes=81,
              resolution=14)
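
          # A hedged note based on the Returns section above: with num_classes=81
          # and resolution=14, mask_int32 should have shape [P, 81 * 14 * 14],
          # where P is the number of sampled foreground RoIs.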
    """

    helper = LayerHelper('generate_mask_labels', **locals())

    mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_has_mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)

    helper.append_op(
        type="generate_mask_labels",
        inputs={
            'ImInfo': im_info,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtSegms': gt_segms,
            'Rois': rois,
            'LabelsInt32': labels_int32
        },
        outputs={
            'MaskRois': mask_rois,
            'RoiHasMaskInt32': roi_has_mask_int32,
            'MaskInt32': mask_int32
        },
        attrs={'num_classes': num_classes,
               'resolution': resolution})

    mask_rois.stop_gradient = True
    roi_has_mask_int32.stop_gradient = True
    mask_int32.stop_gradient = True

    return mask_rois, roi_has_mask_int32, mask_int32


def generate_proposals(scores,
                       bbox_deltas,
                       im_info,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       name=None,
                       return_rois_num=False):
    """
    **Generate proposal Faster-RCNN**

    This operation proposes RoIs according to each box's probability
    of being a foreground object, where
    the boxes are computed from anchors. The bbox_deltas and objectness scores
    are the outputs of RPN. The final proposals
    can be used to train the detection network.

    For generating proposals, this operation performs following steps:

    1. Transposes and reshapes scores and bbox_deltas to sizes of
       (H*W*A, 1) and (H*W*A, 4) respectively.
    2. Calculate box locations as proposals candidates. 
    3. Clip boxes to image
    4. Remove predicted boxes with small area. 
    5. Apply NMS to get final proposals as output.

    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes 
            and scale is the ratio of network input size and original size. 
            The data type must be float32.
        anchors(Variable):   A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(float): Number of total bboxes to be kept per
            image before NMS. The data type must be float32. `6000` by default.
        post_nms_top_n(float): Number of total bboxes to be kept per
            image after NMS. The data type must be float32. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When setting True, it will return a 1D Tensor with shape [N, ] that includes Rois's 
            num of each image in one batch. The N is the image's num. For example, the tensor has values [4,5] that represents
            the first image has 4 Rois, the second image has 5 Rois. It only used in rcnn model. 
            'False' by default. 
    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                         im_info, anchors, variances)
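
            # A hedged reading of the shapes above (not a verified output):
            # scores [N, A, H, W] = [None, 4, 5, 5] implies A = 4 anchors per
            # position, matching bbox_deltas with 4 * A = 16 channels. Passing
            # return_rois_num=True would additionally return the per-image RoI counts.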

    """
    helper = LayerHelper('generate_proposals', **locals())

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype)
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype)
    rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')

    helper.append_op(
        type="generate_proposals",
        inputs={
            'Scores': scores,
            'BboxDeltas': bbox_deltas,
            'ImInfo': im_info,
            'Anchors': anchors,
            'Variances': variances
        },
        attrs={
            'pre_nms_topN': pre_nms_top_n,
            'post_nms_topN': post_nms_top_n,
            'nms_thresh': nms_thresh,
            'min_size': min_size,
            'eta': eta
        },
        outputs={
            'RpnRois': rpn_rois,
            'RpnRoiProbs': rpn_roi_probs,
            'RpnRoisLod': rpn_rois_lod
        })
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True
    rpn_rois_lod.stop_gradient = True

    if return_rois_num:
        return rpn_rois, rpn_roi_probs, rpn_rois_lod
    else:
        return rpn_rois, rpn_roi_probs


def box_clip(input, im_info, name=None):
    """
    Clip the box into the size given by im_info
    For each input box, the formula is given as follows:
        
    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0) 
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)
    
    where im_w and im_h are computed from im_info:
 
    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(width / scale)
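
    For example (an illustrative case, not from the original docs), with
    im_info = [512., 512., 1.] and an input box [-10., 20., 600., 300.]:

    .. code-block:: text

        im_h = im_w = 512
        clipped box = [0., 20., 511., 300.]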

    Args:
        input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
            the last dimension is 4 and data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout 
            (height, width, scale) representing the information of image. 
            Height and width are the input sizes and scale is the ratio of network input
            size and original size. The data type is float32 or float64.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 
    
    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or float64. 
        The shape is the same as the input.

        
    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[-1, 3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'box_clip')

    helper = LayerHelper("box_clip", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)
    inputs = {"Input": input, "ImInfo": im_info}
    helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})

    return output


def retinanet_detection_output(bboxes,
                               scores,
                               anchors,
                               im_info,
                               score_threshold=0.05,
                               nms_top_k=1000,
                               keep_top_k=100,
                               nms_threshold=0.3,
                               nms_eta=1.0):
    """
    **Detection Output Layer for the detector RetinaNet.**

    In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many 
    `FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions, this OP is to get the detection results by
    performing following steps:

    1. For each FPN level, decode box predictions according to the anchor
       boxes from at most :attr:`nms_top_k` top-scoring predictions after
       thresholding detector confidence at :attr:`score_threshold`.
    2. Merge top predictions from all levels and apply multi-class non 
       maximum suppression (NMS) on them to get the final detections.

    Args:
        bboxes(List): A list of Tensors from multiple FPN levels represents
            the location prediction for all anchor boxes. Each element is
            a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
            batch size, :math:`Mi` is the number of bounding boxes from
            :math:`i`-th FPN level and each bounding box has four coordinate
            values and the layout is [xmin, ymin, xmax, ymax]. The data type
            of each element is float32 or float64.
        scores(List): A list of Tensors from multiple FPN levels represents
            the category prediction for all anchor boxes. Each element is a
            3-D Tensor with shape :math:`[N, Mi, C]`,  :math:`N` is the batch
            size, :math:`C` is the class number (**excluding background**),
            :math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
            level. The data type of each element is float32 or float64.
        anchors(List): A list of Tensors from multiple FPN levels represents
            the locations of all anchor boxes. Each element is a 2-D Tensor
            with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
            boxes from :math:`i`-th FPN level, and each bounding box has four
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
            The data type of each element is float32 or float64.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector which are the height and width
            of the network input along with the factor scaling the origin image to
            the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score before NMS, default value is set to 0.05.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS, default value is set to
            1000.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. Default value is set to 100, -1 means keeping all bounding
            boxes after NMS step.
        nms_threshold(float): The Intersection-over-Union(IoU) threshold used to 
            filter out boxes in NMS.
        nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
            Default value is set to 1., which represents the value of
            :attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
            to be lower than 1. and the value of :attr:`nms_threshold` is set to
            be higher than 0.5, everytime a bounding box is filtered out,
            the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
            = :attr:`nms_threshold` * :attr:`nms_eta`  will not be stopped until
            the actual value of :attr:`nms_threshold` is lower than or equal to
            0.5.

    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.

    Returns:
        Variable(The data type is float32 or float64):
            The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            :math:`No` is the total number of detections in this mini-batch.
            The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
            results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
            has no detected results. If all images have no detected results,
            LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           bboxes_low = fluid.data(
               name='bboxes_low', shape=[1, 44, 4], dtype='float32')
           bboxes_high = fluid.data(
               name='bboxes_high', shape=[1, 11, 4], dtype='float32')
           scores_low = fluid.data(
               name='scores_low', shape=[1, 44, 10], dtype='float32')
           scores_high = fluid.data(
               name='scores_high', shape=[1, 11, 10], dtype='float32')
           anchors_low = fluid.data(
               name='anchors_low', shape=[44, 4], dtype='float32')
           anchors_high = fluid.data(
               name='anchors_high', shape=[11, 4], dtype='float32')
           im_info = fluid.data(
               name="im_info", shape=[1, 3], dtype='float32')
           nmsed_outs = fluid.layers.retinanet_detection_output(
               bboxes=[bboxes_low, bboxes_high],
               scores=[scores_low, scores_high],
               anchors=[anchors_low, anchors_high],
               im_info=im_info,
               score_threshold=0.05,
               nms_top_k=1000,
               keep_top_k=100,
               nms_threshold=0.45,
               nms_eta=1.0)
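
           # A hedged note based on the Returns section above: nmsed_outs is a
           # 1-level LoDTensor of shape [No, 6]; each row is
           # [label, confidence, xmin, ymin, xmax, ymax].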
    """

    check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
    for i, bbox in enumerate(bboxes):
        check_variable_and_dtype(bbox, 'bbox{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_type(scores, 'scores', (list), 'retinanet_detection_output')
    for i, score in enumerate(scores):
        check_variable_and_dtype(score, 'score{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
    for i, anchor in enumerate(anchors):
        check_variable_and_dtype(anchor, 'anchor{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'retinanet_detection_output')

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores'))
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
        },
        outputs={'Out': output})
    output.stop_gradient = True
    return output


def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=0,
                   name=None):
    """
    **Multiclass NMS**
    
    This operator is to do multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes that have high scores larger than score_threshold, if providing this
    threshold, then selects the largest nms_top_k confidences scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have high IOU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on parameters of nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k number of total bboxes are to be kept
    per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4)  which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)

            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.score = (0.3, 0.3, 0.1)

            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0


        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],    
                         [2, 0.4, 2.0, 3.0, 7.0, 5.0]]
                         
            Out format is (label, confidence, xmin, ymin, xmax, ymax)
    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is 
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the 
                           class number. The data type is float32 or float64.   
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is 
                           number of bounding boxes. For each category there 
                           are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes.The data type is float32 or float64. 
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4].The data type is float32 or float64. 
        background_label (int): The index of background label, the background 
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided, 
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The threshold to be used in NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values: 
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the 
             total number of detections. If there is no detected boxes for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes detected, the lod is changed 
             from {0} to {1}) 


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'multiclass_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
    check_type(normalized, 'normalized', bool, 'multiclass_nms')
    check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
    check_type(background_label, 'background_label', int, 'multiclass_nms')

    helper = LayerHelper('multiclass_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def locality_aware_nms(bboxes,
                       scores,
                       score_threshold,
                       nms_top_k,
                       keep_top_k,
                       nms_threshold=0.3,
                       normalized=True,
                       nms_eta=1.,
                       background_label=-1,
                       name=None):
    """
    **Local Aware NMS**
    
    `Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
    suppression (LANMS) on boxes and scores.

    Firstly, this operator merges boxes and scores according to their IOU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes that have high scores larger than score_threshold,
    if providing this threshold, then selects the largest nms_top_k confidences scores
    if nms_top_k is larger than -1. Then this operator prunes away boxes that have high
    IOU overlap with already selected boxes by adaptive threshold NMS based on parameters
    of nms_threshold and nms_eta.

    After the NMS step, at most keep_top_k number of total bboxes are to be kept
    per image if keep_top_k is larger than -1.

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
                           represents the predicted locations of M bounding
                           bboxes, N is the batch size. Each bounding box
                           has four coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
                           predicted confidence predictions. N is the batch
                           size, C is the class number, M is number of bounding
                           boxes. Now only support 1 class. For each category
                           there are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension of
                           BBoxes. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: -1
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
3282 3283 3284
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The threshold to be used in NMS. Default: 1.0
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the locality-aware NMS op, please refer to :ref:`api_guide_Name`.
                          Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] representing the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax],
             or a 2-D LoDTensor with shape [No, 10] representing the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If there are no detected boxes for any
             image, the lod will be set to {1} and Out will only contain one
             value, which is -1.
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1}). The data type is float32 or float64.


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
                                      dtype='float32')
            scores = fluid.data(name='scores', shape=[None, 1, 81],
                                      dtype='float32')
            out = fluid.layers.locality_aware_nms(bboxes=boxes,
                                              scores=scores,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
                             'locality_aware_nms')
    check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
                             'locality_aware_nms')
    check_type(background_label, 'background_label', int, 'locality_aware_nms')
    check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
    check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
    check_type(normalized, 'normalized', bool, 'locality_aware_nms')

    shape = scores.shape
    assert len(shape) == 3, "dim size of scores must be 3"
    assert shape[
        1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"

    helper = LayerHelper('locality_aware_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    out = {'Out': output}

    helper.append_op(
        type="locality_aware_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             name=None):
    """
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks (FPN)
    models, all proposals need to be distributed to different FPN levels
    according to the scale of each proposal, the referring scale and the
    referring level. In addition, to restore the order of proposals, this op
    returns an array which indicates the original index of each RoI in the
    current proposals.
    To compute FPN level for each roi, the formula is given as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each RoI.
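
    A rough NumPy illustration of this assignment rule, assuming 4-coordinate
    [xmin, ymin, xmax, ymax] RoIs and the base-2 logarithm used in the FPN
    paper (the helper name ``fpn_level_for_rois`` is made up for this sketch
    and is not part of the API):

    .. code-block:: python

        import numpy as np

        def fpn_level_for_rois(rois, min_level=2, max_level=5,
                               refer_level=4, refer_scale=224):
            # rois: float array of shape [N, 4] in [xmin, ymin, xmax, ymax] order.
            w = rois[:, 2] - rois[:, 0]
            h = rois[:, 3] - rois[:, 1]
            roi_scale = np.sqrt(w * h)
            level = np.floor(np.log2(roi_scale / refer_scale + 1e-6) + refer_level)
            # Clamp to the available pyramid levels.
            return np.clip(level, min_level, max_level).astype(np.int64)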

    Args:
        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is 
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of FPN layer where the proposals come 
            from.
        max_level(int32): The highest level of FPN layer where the proposals
            come from.
        refer_level(int32): The referring level of FPN layer with specified scale.
        refer_scale(int32): The referring scale of FPN layer with specified level.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            and it is None by default.
    Returns:
        Tuple:

        multi_rois(List): A list of 2-D LoDTensors with shape [M, 4]
        and data type float32 or float64. The length of the list is
        max_level-min_level+1, and each element holds the proposals of
        the corresponding FPN level.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], where N is
        the total number of RoIs. The data type is int32. It is
        used to restore the order of fpn_rois.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """
    check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
                             'distribute_fpn_proposals')
    helper = LayerHelper('distribute_fpn_proposals', **locals())
    dtype = helper.input_dtype('fpn_rois')
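    # Create one output variable per pyramid level, plus an int32 index tensor
    # used to restore the original order of the input RoIs.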
    num_lvl = max_level - min_level + 1
    multi_rois = [
        helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
    ]
    restore_ind = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='distribute_fpn_proposals',
        inputs={'FpnRois': fpn_rois},
        outputs={'MultiFpnRois': multi_rois,
                 'RestoreIndex': restore_ind},
        attrs={
            'min_level': min_level,
            'max_level': max_level,
            'refer_level': refer_level,
            'refer_scale': refer_scale
        })
    return multi_rois, restore_ind


@templatedoc()
def box_decoder_and_assign(prior_box,
                           prior_box_var,
                           target_box,
                           box_score,
                           box_clip,
                           name=None):
    """
    ${comment}
    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            and it is None by default.

    Returns:
        Tuple:

        decode_box(${decode_box_type}): ${decode_box_comment}

        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
                name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4*81], dtype='float32')
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32')
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)

    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_decoder_and_assign')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_decoder_and_assign')
    check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
                             'box_decoder_and_assign')
    helper = LayerHelper("box_decoder_and_assign", **locals())

    decoded_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
    output_assign_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    helper.append_op(
        type="box_decoder_and_assign",
        inputs={
            "PriorBox": prior_box,
            "PriorBoxVar": prior_box_var,
            "TargetBox": target_box,
            "BoxScore": box_score
        },
        attrs={"box_clip": box_clip},
        outputs={
            "DecodeBox": decoded_box,
            "OutputAssignBox": output_assign_box
        })
    return decoded_box, output_assign_box


def collect_fpn_proposals(multi_rois,
                          multi_scores,
                          min_level,
                          max_level,
                          post_nms_top_n,
                          name=None):
    """
    **This OP only supports LoDTensor as input**. It concatenates multi-level
    RoIs (Regions of Interest) and selects N RoIs with respect to multi_scores.
    This operation performs the following steps (a sketch follows the list):

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id
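
    A conceptual NumPy sketch of these steps (batch ids are assumed to be
    available as explicit arrays here, whereas the real op derives them from
    the LoD information; the helper name ``collect_topk_rois`` is made up for
    this illustration):

    .. code-block:: python

        import numpy as np

        def collect_topk_rois(multi_rois, multi_scores, batch_ids,
                              min_level, max_level, post_nms_top_n):
            num_level = max_level - min_level + 1                     # step 1
            rois = np.concatenate(multi_rois[:num_level], axis=0)     # step 2
            scores = np.concatenate(multi_scores[:num_level]).ravel()
            bids = np.concatenate(batch_ids[:num_level])
            keep = np.argsort(-scores)[:post_nms_top_n]               # step 3
            rois, bids = rois[keep], bids[keep]                       # step 4
            order = np.argsort(bids, kind='stable')                   # step 5
            return rois[order]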

    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D 
            LoDTensor with shape [N, 4] and data type is float32 or float64, 
            N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Element in list 
            is 2-D LoDTensor with shape [N, 1] and data type is float32 or
            float64, N is the number of RoIs.
        min_level(int): The lowest level of the FPN layers to collect.
        max_level(int): The highest level of the FPN layers to collect.
        post_nms_top_n(int): The number of selected RoIs.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            and it is None by default.

    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type
        float32 or float64, containing the selected RoIs.


    Examples:
        .. code-block:: python
           
            import paddle.fluid as fluid
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))

            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois, 
                multi_scores=multi_scores,
                min_level=2, 
                max_level=5, 
                post_nms_top_n=2000)
    """
    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
                'collect_fpn_proposals')
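    # Only RoIs/scores from the first num_lvl levels are collected; the op then
    # keeps the post_nms_top_n highest-scoring RoIs, re-sorted by batch id.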
    num_lvl = max_level - min_level + 1
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True
    helper.append_op(
        type='collect_fpn_proposals',
        inputs={
            'MultiLevelRois': input_rois,
            'MultiLevelScores': input_scores
        },
        outputs={'FpnRois': output_rois},
        attrs={'post_nms_topN': post_nms_top_n})
    return output_rois