#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

from __future__ import print_function

from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable, in_dygraph_mode, static_only
from .. import core
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated

__all__ = [
    'prior_box',
    'density_prior_box',
    'multi_box_head',
    'bipartite_match',
    'target_assign',
    'detection_output',
    'ssd_loss',
    'rpn_target_assign',
    'retinanet_target_assign',
    'sigmoid_focal_loss',
    'anchor_generator',
    'roi_perspective_transform',
    'generate_proposal_labels',
    'generate_proposals',
    'generate_mask_labels',
    'iou_similarity',
    'box_coder',
    'polygon_box_transform',
    'yolov3_loss',
    'yolo_box',
    'box_clip',
    'multiclass_nms',
    'locality_aware_nms',
    'matrix_nms',
    'retinanet_detection_output',
    'distribute_fpn_proposals',
    'box_decoder_and_assign',
    'collect_fpn_proposals',
]


def retinanet_target_assign(bbox_pred,
                            cls_logits,
                            anchor_box,
                            anchor_var,
                            gt_boxes,
                            gt_labels,
                            is_crowd,
                            im_info,
                            num_classes=1,
                            positive_overlap=0.5,
                            negative_overlap=0.4):
    r"""
    **Target Assign Layer for the detector RetinaNet.**

    This OP finds out positive and negative samples from all anchors
    for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
    and assigns target labels for classification along with target locations for
    regression to each sample, then takes out the part belonging to positive and
    negative samples from category prediction( :attr:`cls_logits`) and location
    prediction( :attr:`bbox_pred`) which belong to all anchors.

    The searching principles for positive and negative samples are as follows:

    1. Anchors are assigned to ground-truth boxes when they have the highest IoU
    overlap with a ground-truth box.

    2. Anchors are assigned to ground-truth boxes when they have an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.

    3. Anchors are assigned to background when their IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.

    4. Anchors which do not meet the above conditions do not participate in
    the training process.

    Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
    regression for each anchor, hence the target label for each positive(or negative)
    sample is a :math:`C`-vector and the target location for each positive sample
    is a 4-vector. As for a positive sample, if the category of its assigned
    ground-truth box is class :math:`i`, the corresponding entry in its length
    :math:`C` label vector is set to 1 and all other entries are set to 0; its box
    regression targets are computed as the offset between itself and its assigned
    ground-truth box. As for a negative sample, all entries in its length :math:`C`
    label vector are set to 0 and box regression targets are omitted because
    negative samples do not participate in the training process of location
    regression.

    After the assignment, the part belonging to positive and negative samples is
    taken out from category prediction( :attr:`cls_logits` ), and the part
    belonging to positive samples is taken out from location
    prediction( :attr:`bbox_pred` ).
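
    The following toy NumPy sketch (illustrative only, it is not the OP's
    implementation) shows how rules 1-3 above turn an IoU matrix into
    per-anchor labels, assuming 1-based category ids and -1 for ignored anchors:

    .. code-block:: python

        import numpy as np

        # iou[i, j]: IoU between anchor i and ground-truth box j
        iou = np.array([[0.6, 0.1],
                        [0.3, 0.2],
                        [0.1, 0.45]])
        gt_class = np.array([3, 7])            # 1-based category ids
        positive_overlap, negative_overlap = 0.5, 0.4

        labels = np.full(iou.shape[0], -1)     # -1: ignored (rule 4)
        labels[iou.max(axis=1) < negative_overlap] = 0   # rule 3: background
        pos = iou.max(axis=1) >= positive_overlap        # rule 2
        pos[iou.argmax(axis=0)] = True                   # rule 1
        labels[pos] = gt_class[iou.argmax(axis=1)][pos]  # category of best-matching gt
        # labels is now [3, 0, 7]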

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
            the predicted locations of all anchors. :math:`N` is the batch size( the
            number of images in a mini-batch), :math:`M` is the number of all anchors
            of one image, and each anchor has 4 coordinate values. The data type of
            :attr:`bbox_pred` is float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
            the predicted categories of all anchors. :math:`N` is the batch size,
            :math:`M` is the number of all anchors of one image, and :math:`C` is
            the number of categories (**Notice: excluding background**). The data type
            of :attr:`cls_logits` is float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
            the locations of all anchors. :math:`M` is the number of all anchors of
            one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
            :math:`[xmin, ymin]` is the left top coordinate of the anchor box,
            :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
            The data type of :attr:`anchor_box` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator` 
            for the generation of :attr:`anchor_box`.
        anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded 
            factors of anchor locations used in loss function. :math:`M` is number of
            all anchors of one image, each anchor possesses a 4-vector expanded factor.
            The data type of :attr:`anchor_var` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator`
            for the generation of :attr:`anchor_var`.
        gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
            locations of all ground-truth boxes. :math:`G` is the total number of
            all ground-truth boxes in a mini-batch, and each ground-truth box has 4
            coordinate values. The data type of :attr:`gt_boxes` is float32 or
            float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
            categories of all ground-truth boxes, and the values are in the range of
            :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
            in a mini-batch, and each ground-truth box has one category. The data type
            of :attr:`gt_labels` is int32.
        is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
            indicates whether a ground-truth box is a crowd. If the value is 1, the
            corresponding box is a crowd, it is ignored during training. :math:`G` is
            the total number of all ground-truth boxes in a mini-batch. The data type
            of :attr:`is_crowd` is int32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector consisting of the height and width
            of the network input, along with the factor scaling the original image to
            the network input. The data type of :attr:`im_info` is float32.
        num_classes(int32): The number of categories for classification, the default
            value is 1.
        positive_overlap(float32): Minimum overlap required between an anchor
            and ground-truth box for the anchor to be a positive sample, the default
            value is 0.5.
        negative_overlap(float32): Maximum overlap allowed between an anchor
            and ground-truth box for the anchor to be a negative sample, the default
            value is 0.4. :attr:`negative_overlap` should be less than or equal to
            :attr:`positive_overlap`, if not, the actual value of
            :attr:`positive_overlap` is :attr:`negative_overlap`.

    Returns:
        A tuple with 6 Variables:
        
        **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
        category prediction belonging to positive and negative samples. :math:`F`
        is the number of positive samples in a mini-batch, :math:`B` is the number
        of negative samples, and :math:`C` is the number of categories
        (**Notice: excluding background**). The data type of :attr:`predict_scores`
        is float32 or float64.

        **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each
        sample has 4 coordinate values. The data type of :attr:`predict_location`
        is float32 or float64.

        **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
        target labels for classification belonging to positive and negative
        samples. :math:`F` is the number of positive samples, :math:`B` is the
        number of negative samples, and each sample has one target category. The data type
        of :attr:`target_label` is int32.

        **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        target locations for box regression belonging to positive samples.
        :math:`F` is the number of positive samples, and each sample has 4
        coordinate values. The data type of :attr:`target_bbox` is float32 or
        float64.

        **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive. If a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
        of total positive samples in a mini-batch, and each sample has 4
        coordinate values. The data type of :attr:`bbox_inside_weight` is float32
        or float64.

        **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
        of positive samples. :math:`N` is the batch size. **Notice: The number
        of positive samples is used as the denominator of later loss function,
        to avoid the condition that the denominator is zero, this OP has added 1
        to the actual number of positive samples of each image.** The data type of
        :attr:`fg_num` is int32.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
                            dtype='float32')
          cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
                            dtype='float32')
          anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
                            dtype='float32')
          anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
                            dtype='float32')
          gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
                            dtype='float32')
          gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
                            dtype='int32')
          is_crowd = fluid.data(name='is_crowd', shape=[1],
                            dtype='int32')
          im_info = fluid.data(name='im_info', shape=[1, 3],
                            dtype='float32')
          score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
                fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
                anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)

    """

    check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
                             'retinanet_target_assign')
    check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
                             'retinanet_target_assign')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'retinanet_target_assign')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'retinanet_target_assign')

    helper = LayerHelper('retinanet_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    fg_num = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="retinanet_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'GtLabels': gt_labels,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
            'ForegroundNumber': fg_num
        },
        attrs={
            'positive_overlap': positive_overlap,
            'negative_overlap': negative_overlap
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True
    fg_num.stop_gradient = True

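    # Flatten the per-image predictions and gather only the entries sampled by
    # the target-assign op, so the returned scores/locations line up one-to-one
    # with target_label and target_bbox.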
    cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num


def rpn_target_assign(bbox_pred,
                      cls_logits,
                      anchor_box,
                      anchor_var,
                      gt_boxes,
                      is_crowd,
                      im_info,
                      rpn_batch_size_per_im=256,
                      rpn_straddle_thresh=0.0,
                      rpn_fg_fraction=0.5,
                      rpn_positive_overlap=0.7,
                      rpn_negative_overlap=0.3,
                      use_random=True):
    """
    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**

    Given the Intersection-over-Union (IoU) overlap between anchors and ground
    truth boxes, this layer assigns classification and regression targets to
    each anchor; these targets are used to train the RPN. The classification
    target is a binary class label (of being an object or not). Following the
    Faster-RCNN paper, two kinds of anchors are labeled positive: (i) the
    anchor/anchors with the highest IoU overlap with a ground-truth box, or
    (ii) an anchor that has an IoU overlap higher than rpn_positive_overlap(0.7)
    with any ground-truth box. Note that a single ground-truth box may assign
    positive labels to multiple anchors. An anchor is labeled negative when its
    IoU ratio is lower than rpn_negative_overlap (0.3) for all ground-truth
    boxes. Anchors that are neither positive nor negative do not contribute to
    the training objective. The regression targets are the encoded ground-truth
    boxes associated with the positive anchors.

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence predictions. N is the batch size, 1 is the
            channel of the foreground/background sigmoid score, M is the number
            of bounding boxes. The data type can be float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box. The data type can be float32 or float64.
        anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded 
            variances of anchors. The data type can be float32 or float64.
        gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is crowd.
                             The data type must be int32.
        im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
            3 is the height, width and scale.
        rpn_batch_size_per_im(int): Total number of RPN examples per image.
                                    The data type must be int32.
        rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
            by straddle_thresh pixels. The data type must be float32.
        rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
            foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
        rpn_positive_overlap(float): Minimum overlap required between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a positive
            example. The data type must be float32.
        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.

    Returns:
        tuple:
        A tuple(predicted_scores, predicted_location, target_label,
        target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN.
        The target_label and target_bbox are the ground truth,
        respectively. The predicted_location is a 2D Tensor with shape
        [F, 4], and the shape of target_bbox is the same as the shape of
        the predicted_location, where F is the number of the foreground
        anchors. The predicted_scores is a 2D Tensor with shape
        [F + B, 1], and the shape of target_label is the same as the shape
        of the predicted_scores, where B is the number of the background
        anchors. F and B depend on the input of this operator.
        bbox_inside_weight represents whether the predicted location is a
        fake foreground (fake_fg) or not, and its shape is [F, 4].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
            cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
            anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
            anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
                bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)

    """

    helper = LayerHelper('rpn_target_assign', **locals())

    check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
                             'rpn_target_assign')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'rpn_target_assign')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'rpn_target_assign')

    # Assign target label to anchors
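    # The op returns flat indices into the flattened anchor list: score_index
    # selects the sampled positive + negative anchors and loc_index selects
    # only the positive ones; both are used by the gathers at the end.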
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    helper.append_op(
        type="rpn_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight
        },
        attrs={
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True

    cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight


def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
    r"""
	:alias_main: paddle.nn.functional.sigmoid_focal_loss
	:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
	:old_api: paddle.fluid.layers.sigmoid_focal_loss

    **Sigmoid Focal Loss Operator.**

    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
    measured between the sigmoid value and target label. 

    The focal loss is given as follows:

    .. math::

        \mathop{loss_{i,\,j}}\limits_{i\in\mathbb{[0,\,N-1]},\,j\in\mathbb{[0,\,C-1]}}=\left\{
        \begin{array}{rcl}
        - \frac{1}{fg\_num} * \alpha * {(1 - \sigma(x_{i,\,j}))}^{\gamma} * \log(\sigma(x_{i,\,j})) & & {(j + 1) = label_{i,\,0}} \\
        - \frac{1}{fg\_num} * (1 - \alpha) * {\sigma(x_{i,\,j})}^{\gamma} * \log(1 - \sigma(x_{i,\,j})) & & {(j + 1) \neq label_{i,\,0}}
        \end{array} \right.

    We know that

    .. math::

        \sigma(x_j) = \frac{1}{1 + \exp(-x_j)}
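
    For a toy input the formula above can be reproduced directly in NumPy
    (illustrative only; the OP fuses this computation in its kernel):

    .. code-block:: python

        import numpy as np

        x = np.array([[2.0, -1.0], [0.5, 0.3]])    # logits, shape [N=2, C=2]
        label = np.array([[1], [0]])               # 1-based foreground label, 0 = background
        fg_num, gamma, alpha = 1, 2.0, 0.25

        p = 1.0 / (1.0 + np.exp(-x))               # sigmoid(x)
        # one-hot target: entry j is 1 iff (j + 1) == label
        t = (np.arange(1, x.shape[1] + 1) == label).astype(x.dtype)
        loss = np.where(
            t == 1,
            -alpha * (1 - p) ** gamma * np.log(p),
            -(1 - alpha) * p ** gamma * np.log(1 - p)) / fg_num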


    Args:
        x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
            all samples. :math:`N` is the number of all samples responsible for optimization in
            a mini-batch, for example, samples are anchor boxes for object detection and :math:`N`
            is the total number of positive and negative samples in a mini-batch; Samples are images
            for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
            is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
            float32 or float64.
        label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
            classification. :math:`N` is the number of all samples responsible for optimization in a
            mini-batch, each sample has one target category. The values for positive samples are in the
            range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
            is int32.
        fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
            mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
        gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
            set to 2.0.
        alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
            is set to 0.25.

    Returns:
        Variable(the data type is float32 or float64): 
            A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
            tensor :attr:`x`.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid
            
            num_classes = 10  # exclude background
            image_width = 16
            image_height = 16
            batch_size = 32
            max_iter = 20
            
            
            def gen_train_data():
                x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
                                                    image_width)).astype('float64')
                label_data = np.random.randint(0, num_classes,
                                               (batch_size, 1)).astype('int32')
                return {"x": x_data, "label": label_data}
            
            
            def get_focal_loss(pred, label, fg_num, num_classes):
                pred = fluid.layers.reshape(pred, [-1, num_classes])
                label = fluid.layers.reshape(label, [-1, 1])
                label.stop_gradient = True
                loss = fluid.layers.sigmoid_focal_loss(
                    pred, label, fg_num, gamma=2.0, alpha=0.25)
                loss = fluid.layers.reduce_sum(loss)
                return loss
            
            
            def build_model(mode='train'):
                x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
                output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
                output = fluid.layers.fc(
                    input=output,
                    size=num_classes,
                    # Notice: size is set to be the number of target classes (excluding background)
                    # because sigmoid activation will be done in the sigmoid_focal_loss op.
                    act=None)
                if mode == 'train':
                    label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
                    # Obtain the fg_num needed by the sigmoid_focal_loss op:
                    # 0 in label represents background, >=1 in label represents foreground,
                    # find the elements in label which are greater than or equal to 1, then
                    # compute the number of these elements.
                    data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
                    fg_label = fluid.layers.greater_equal(label, data)
                    fg_label = fluid.layers.cast(fg_label, dtype='int32')
                    fg_num = fluid.layers.reduce_sum(fg_label)
                    fg_num.stop_gradient = True
                    avg_loss = get_focal_loss(output, label, fg_num, num_classes)
                    return avg_loss
                else:
                    # During evaluating or testing phase,
                    # output of the final fc layer should be connected to a sigmoid layer.
                    pred = fluid.layers.sigmoid(output)
                    return pred
            
            
            loss = build_model('train')
            moment_optimizer = fluid.optimizer.MomentumOptimizer(
                learning_rate=0.001, momentum=0.9)
            moment_optimizer.minimize(loss)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for i in range(max_iter):
                outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
                print(outs)
    """

    check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                             'sigmoid_focal_loss')
    check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
    check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')

    helper = LayerHelper("sigmoid_focal_loss", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="sigmoid_focal_loss",
        inputs={"X": x,
                "Label": label,
                "FgNum": fg_num},
        attrs={"gamma": gamma,
               'alpha': alpha},
        outputs={"Out": out})
    return out


def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0,
                     return_index=False):
    """

    Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing following steps:

    1. Decode input bounding box predictions according to the prior boxes and
       regression locations.
    2. Get the final detection results by applying multi-class non maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes. Data type should be
            float32 or float64. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence predictions. Data type should be float32
            or float64. N is the batch size, C is the
            class number, M is number of bounding boxes.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax]. Data type
            should be float32 or float64.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
            of variance. Data type should be float32 or float64.
        background_label(int): The index of background label,
            the background label will be ignored. If set to -1, then all
            categories will be considered. Default: 0.
        nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold and before NMS. Default: 400.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
            Default: 0.01.
        nms_eta(float): The parameter for adaptive NMS. It works only when the
            value is less than 1.0. Default: 1.0.
        return_index(bool): Whether return selected index. Default: False

    Returns:

        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, a tuple with one Variable(Out) is returned. 

        Out (Variable): The detection output is a LoDTensor with shape [No, 6].
        Data type is the same as input (loc). Each row has six values:
        [label, confidence, xmin, ymin, xmax, ymax]. `No` is
        the total number of detections in this mini-batch. For each instance,
        the offsets in first dimension are called LoD, the offset number is
        N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
        detected results, if it is 0, the i-th image has no detected results.

        Index (Variable): Only return when return_index is True. A 2-D LoDTensor
        with shape [No, 1] represents the selected index which type is Integer.
        The index is the absolute value cross batches. No is the same number
        as Out. If the index is used to gather other attribute such as age,
        one needs to first reshape the input (N, M, 1) to (N * M, 1), where
        N is the batch size and M is the number of boxes.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()

            pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
            loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
            nmsed_outs, index = fluid.layers.detection_output(scores=scores,
                                       loc=loc,
                                       prior_box=pb,
                                       prior_box_var=pbv,
                                       return_index=True)
    """
    helper = LayerHelper("detection_output", **locals())
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size')
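    # Convert the class scores to probabilities and transpose them to
    # [N, C, M], the layout expected by the multiclass NMS op below.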
    scores = nn.softmax(input=scores)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_variable_for_type_inference(
        dtype=decoded_box.dtype)
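    # With return_index=True the multiclass_nms2 kernel is used, which also
    # reports the absolute index of every kept box; otherwise the plain
    # multiclass_nms kernel is enough.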
    if return_index:
        index = helper.create_variable_for_type_inference(dtype='int')
        helper.append_op(
            type="multiclass_nms2",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs,
                     'Index': index},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
        index.stop_gradient = True
    else:
        helper.append_op(
            type="multiclass_nms",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
    nmsed_outs.stop_gradient = True
    if return_index:
        return nmsed_outs, index
    return nmsed_outs


@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
    """
	:alias_main: paddle.nn.functional.iou_similarity
	:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
	:old_api: paddle.fluid.layers.iou_similarity

    ${comment}

    Args:
        x (Variable): ${x_comment}.The data type is float32 or float64.
        y (Variable): ${y_comment}.The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set true by default.
    Returns:
        Variable: ${out_comment}.The data type is same with x.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)

            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)

            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)

            [out_iou] = exe.run(test_program,
                    fetch_list=iou,
                    feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
                                         [0., 0., 1.0, 1.0]]).astype('float32'),
                          'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
            # out_iou is [[0.2857143],
            #             [0.       ]] with shape: [2, 1]
    """
    helper = LayerHelper("iou_similarity", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="iou_similarity",
        inputs={"X": x,
                "Y": y},
        attrs={"box_normalized": box_normalized},
        outputs={"Out": out})
    return out


@templatedoc()
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              name=None,
              axis=0):
    r"""

    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.
    
    The Encoding schema is described below:

    .. math::

        ox = (tx - px) / pw / pxv

        oy = (ty - py) / ph / pyv

        ow = \log(|tw / pw|) / pwv

        oh = \log(|th / ph|) / phv

    The Decoding schema is described below:

    .. math::

        ox = (pw * pxv * tx + px) - tw / 2

        oy = (ph * pyv * ty + py) - th / 2

        ow = \exp(pwv * tw) * pw + tw / 2

        oh = \exp(phv * th) * ph + th / 2

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, 
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote 
    the priorbox's (anchor) center coordinates, width and height. `pxv`, 
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, 
    `ow`, `oh` denote the encoded/decoded coordinates, width and height. 

    During Box Decoding, two modes for broadcast are supported. Say target 
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or 
    [M, 4]. Then prior box will broadcast to target box along the 
    assigned axis. 
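
    As a quick illustration of the encoding formulas above (a NumPy sketch, not
    the OP itself), encode one target box against one prior box with variance
    [0.1, 0.1, 0.2, 0.2]:

    .. code-block:: python

        import numpy as np

        def center_size(box):
            # box given as [xmin, ymin, xmax, ymax]
            x = (box[0] + box[2]) / 2.0
            y = (box[1] + box[3]) / 2.0
            return x, y, box[2] - box[0], box[3] - box[1]

        px, py, pw, ph = center_size(np.array([0., 0., 10., 10.]))
        tx, ty, tw, th = center_size(np.array([1., 1., 9., 11.]))
        pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2

        ox = (tx - px) / pw / pxv                  # 0.0
        oy = (ty - py) / ph / pyv                  # 1.0
        ow = np.log(np.abs(tw / pw)) / pwv         # log(0.8) / 0.2
        oh = np.log(np.abs(th / ph)) / phv         # 0.0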

    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape 
            [M, 4] holds M boxes and data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the 
            left top coordinate of the anchor box, if the input is image feature
            map, they are close to the origin of the coordinate system. 
            [xmax, ymax] is the right bottom coordinate of the anchor box.       
        prior_box_var(List|Variable|None): prior_box_var supports three types 
            of input. One is variable with shape [M, 4] which holds M group and 
            data type is float32 or float64. The second is list consist of 
            4 elements shared by all boxes and data type is float32 or float64. 
            Other is None and not involved in calculation. 
        target_box(Variable): This input can be a 2-D LoDTensor with shape 
            [N, 4] when code_type is 'encode_center_size'. This input also can 
            be a 3-D Tensor with shape [N, M, 4] when code_type is 
            'decode_center_size'. Each box is represented as 
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64. 
            This tensor can contain LoD information to represent a batch of inputs. 
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size` 
            by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set true by default.
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default. 
        axis(int): Which axis in PriorBox to broadcast for box decode, 
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and 
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is 
            `decode_center_size`. Set 0 by default. 

    Returns:
        Variable:

        output_box(Variable): When code_type is 'encode_center_size', the 
        output tensor of box_coder_op with shape [N, M, 4] representing the 
        result of N target boxes encoded with M Prior boxes and variances. 
        When code_type is 'decode_center_size', N represents the batch size 
        and M represents the number of decoded boxes.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # For encode
            prior_box_encode = fluid.data(name='prior_box_encode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_encode = fluid.data(name='target_box_encode',
                                   shape=[81, 4],
                                   dtype='float32')
            output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_encode,
                                    code_type="encode_center_size")
            # For decode
            prior_box_decode = fluid.data(name='prior_box_decode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                   shape=[512, 81, 4],
                                   dtype='float32')
            output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_decode,
                                    code_type="decode_center_size",
                                    box_normalized=False,
                                    axis=1)
    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_coder')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_coder')
    helper = LayerHelper("box_coder", **locals())

    output_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis
    }
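    # prior_box_var is accepted either as a tensor of per-box variances or as
    # a plain python list of 4 floats shared by every box; route it to the
    # matching op input or attribute.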
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input variance of box_coder must be Variable or list")
    helper.append_op(
        type="box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box


@templatedoc()
def polygon_box_transform(input, name=None):
    """
    ${comment}

    Args:
        input(Variable): The input with shape [batch_size, geometry_channels, height, width].
                         A Tensor with type float32, float64.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Variable: The output with the same shape as input. A Tensor with type float32, float64.

    Examples:
        .. code-block:: python
            
            import paddle.fluid as fluid
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
            out = fluid.layers.polygon_box_transform(input)
    """
    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'polygon_box_transform')
    helper = LayerHelper("polygon_box_transform", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type="polygon_box_transform",
        inputs={"Input": input},
        attrs={},
        outputs={"Output": output})
    return output


@deprecated(since="2.0.0", update_to="paddle.vision.ops.yolo_loss")
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
                gt_box,
                gt_label,
                anchors,
                anchor_mask,
                class_num,
                ignore_thresh,
                downsample_ratio,
                gt_score=None,
                use_label_smooth=True,
                name=None,
                scale_x_y=1.):
    """

    ${comment}

    Args:
        x (Variable): ${x_comment}The data type is float32 or float64. 
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored. 
                          x, y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by 
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in 
                          an image. The data type is float32 or float64. 
        gt_label (Variable): class id of ground truth boxes, should be in shape
                            of [N, B]. The data type is int32. 
        anchors (list|tuple): ${anchors_comment}
        anchor_mask (list|tuple): ${anchor_mask_comment}
        class_num (int): ${class_num_comment}
        ignore_thresh (float): ${ignore_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): The default value is None.  Normally there is no need 
                       for user to set this property.  For more information, 
                       please refer to :ref:`api_guide_Name`
        gt_score (Variable): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): ${use_label_smooth_comment}
        scale_x_y (float): ${scale_x_y_comment}

    Returns:
        Variable: A 1-D tensor with shape [N], the value of yolov3 loss

    Raises:
        TypeError: Input x of yolov3_loss must be Variable
        TypeError: Input gtbox of yolov3_loss must be Variable
        TypeError: Input gtlabel of yolov3_loss must be Variable
        TypeError: Input gtscore of yolov3_loss must be None or Variable
        TypeError: Attr anchors of yolov3_loss must be list or tuple
        TypeError: Attr class_num of yolov3_loss must be an integer
        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
        TypeError: Attr use_label_smooth of yolov3_loss must be a bool value

    Examples:
      .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          paddle.enable_static()
          x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
          gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
          gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
          gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
          anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
          anchor_mask = [0, 1, 2]
          loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
                                          gt_score=gt_score, anchors=anchors, 
                                          anchor_mask=anchor_mask, class_num=80,
                                          ignore_thresh=0.7, downsample_ratio=32)
    """
    helper = LayerHelper('yolov3_loss', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolov3_loss must be Variable")
1078
    if not isinstance(gt_box, Variable):
D
dengkaipeng 已提交
1079
        raise TypeError("Input gtbox of yolov3_loss must be Variable")
1080
    if not isinstance(gt_label, Variable):
D
dengkaipeng 已提交
1081
        raise TypeError("Input gtlabel of yolov3_loss must be Variable")
1082
    if gt_score is not None and not isinstance(gt_score, Variable):
1083
        raise TypeError("Input gtscore of yolov3_loss must be Variable")
D
dengkaipeng 已提交
1084 1085
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
1086 1087
    if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
        raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
D
dengkaipeng 已提交
1088 1089 1090 1091 1092
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolov3_loss must be an integer")
    if not isinstance(ignore_thresh, float):
        raise TypeError(
            "Attr ignore_thresh of yolov3_loss must be a float number")
1093 1094 1095
    if not isinstance(use_label_smooth, bool):
        raise TypeError(
            "Attr use_label_smooth of yolov3_loss must be a bool value")
D
dengkaipeng 已提交
1096

1097
    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
D
dengkaipeng 已提交
1098

1099 1100 1101
    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

1102 1103
    inputs = {
        "X": x,
1104 1105
        "GTBox": gt_box,
        "GTLabel": gt_label,
1106
    }
1107
    if gt_score is not None:
1108
        inputs["GTScore"] = gt_score
1109

D
dengkaipeng 已提交
1110 1111
    attrs = {
        "anchors": anchors,
1112
        "anchor_mask": anchor_mask,
D
dengkaipeng 已提交
1113 1114
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
1115
        "downsample_ratio": downsample_ratio,
1116
        "use_label_smooth": use_label_smooth,
1117
        "scale_x_y": scale_x_y,
D
dengkaipeng 已提交
1118 1119 1120 1121
    }

    helper.append_op(
        type='yolov3_loss',
1122
        inputs=inputs,
1123 1124 1125 1126 1127
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask
        },
D
dengkaipeng 已提交
1128 1129 1130 1131
        attrs=attrs)
    return loss


1132
@deprecated(since="2.0.0", update_to="paddle.vision.ops.yolo_box")
D
dengkaipeng 已提交
1133
@templatedoc(op_type="yolo_box")
1134 1135 1136 1137 1138 1139
def yolo_box(x,
             img_size,
             anchors,
             class_num,
             conf_thresh,
             downsample_ratio,
1140
             clip_bbox=True,
1141
             name=None,
1142 1143 1144
             scale_x_y=1.,
             iou_aware=False,
             iou_aware_factor=0.5):
D
dengkaipeng 已提交
1145
    """
S
swtkiwi 已提交
1146

D
dengkaipeng 已提交
1147 1148 1149
    ${comment}

    Args:
X
xiaoting 已提交
1150 1151
        x (Variable): ${x_comment} The data type is float32 or float64. 
        img_size (Variable): ${img_size_comment} The data type is int32. 
D
dengkaipeng 已提交
1152 1153 1154 1155
        anchors (list|tuple): ${anchors_comment}
        class_num (int): ${class_num_comment}
        conf_thresh (float): ${conf_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
1156
        clip_bbox (bool): ${clip_bbox_comment}
1157
        scale_x_y (float): ${scale_x_y_comment}
X
xiaoting 已提交
1158 1159 1160
        name (string): The default value is None.  Normally there is no need 
                       for user to set this property.  For more information, 
                       please refer to :ref:`api_guide_Name`
1161 1162
        iou_aware (bool): ${iou_aware_comment}
        iou_aware_factor (float): ${iou_aware_factor_comment}
D
dengkaipeng 已提交
1163 1164

    Returns:
D
dengkaipeng 已提交
1165
        Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
D
dengkaipeng 已提交
1166 1167
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification 
        scores of boxes.
D
dengkaipeng 已提交
1168 1169 1170 1171 1172 1173 1174 1175

    Raises:
        TypeError: Input x of yolov_box must be Variable
        TypeError: Attr anchors of yolo box must be list or tuple
        TypeError: Attr class_num of yolo box must be an integer
        TypeError: Attr conf_thresh of yolo box must be a float number

    Examples:
D
dengkaipeng 已提交
1176

D
dengkaipeng 已提交
1177 1178
    .. code-block:: python

X
xiaoting 已提交
1179
        import paddle.fluid as fluid
1180 1181
        import paddle
        paddle.enable_static()
X
xiaoting 已提交
1182 1183
        x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
        img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
D
dengkaipeng 已提交
1184
        anchors = [10, 13, 16, 30, 33, 23]
X
xiaoting 已提交
1185
        boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors, 
D
dengkaipeng 已提交
1186 1187 1188 1189 1190
                                        conf_thresh=0.01, downsample_ratio=32)
    """
    helper = LayerHelper('yolo_box', **locals())

    if not isinstance(x, Variable):
1191 1192 1193
        raise TypeError("Input x of yolo_box must be Variable")
    if not isinstance(img_size, Variable):
        raise TypeError("Input img_size of yolo_box must be Variable")
D
dengkaipeng 已提交
1194
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
1195
        raise TypeError("Attr anchors of yolo_box must be list or tuple")
D
dengkaipeng 已提交
1196
    if not isinstance(class_num, int):
1197
        raise TypeError("Attr class_num of yolo_box must be an integer")
D
dengkaipeng 已提交
1198
    if not isinstance(conf_thresh, float):
1199
        raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
D
dengkaipeng 已提交
1200 1201 1202 1203 1204 1205 1206

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
D
dengkaipeng 已提交
1207
        "conf_thresh": conf_thresh,
D
dengkaipeng 已提交
1208
        "downsample_ratio": downsample_ratio,
1209
        "clip_bbox": clip_bbox,
1210
        "scale_x_y": scale_x_y,
1211 1212
        "iou_aware": iou_aware,
        "iou_aware_factor": iou_aware_factor
D
dengkaipeng 已提交
1213 1214 1215 1216
    }

    helper.append_op(
        type='yolo_box',
1217 1218 1219 1220
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
D
dengkaipeng 已提交
1221 1222 1223 1224 1225 1226 1227 1228
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs)
    return boxes, scores


X
Xin Pan 已提交
1229
@templatedoc()
1230 1231
def detection_map(detect_res,
                  label,
1232 1233
                  class_num,
                  background_label=0,
1234 1235
                  overlap_threshold=0.3,
                  evaluate_difficult=True,
1236 1237 1238 1239
                  has_state=None,
                  input_states=None,
                  out_states=None,
                  ap_version='integral'):
X
Xin Pan 已提交
1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
    """
    ${comment}

    Args:
        detect_res: ${detect_res_comment}
        label:  ${label_comment}
        class_num: ${class_num_comment}
        background_label: ${background_label_comment}
        overlap_threshold: ${overlap_threshold_comment}
        evaluate_difficult: ${evaluate_difficult_comment}
        has_state: ${has_state_comment}
1251 1252 1253 1254 1255 1256 1257 1258
        input_states: (tuple|None) If not None, It contains 3 elements:
            (1) pos_count ${pos_count_comment}.
            (2) true_pos ${true_pos_comment}.
            (3) false_pos ${false_pos_comment}.
        out_states: (tuple|None) If not None, it contains 3 elements.
            (1) accum_pos_count ${accum_pos_count_comment}.
            (2) accum_true_pos ${accum_true_pos_comment}.
            (3) accum_false_pos ${accum_false_pos_comment}.
X
Xin Pan 已提交
1259 1260 1261 1262 1263 1264 1265 1266 1267
        ap_version: ${ap_type_comment}

    Returns:
        ${map_comment}


    Examples:
          .. code-block:: python

1268
            import paddle.fluid as fluid
1269
            from fluid.layers import detection
1270
            detect_res = fluid.data(
X
Xin Pan 已提交
1271 1272 1273
                name='detect_res',
                shape=[10, 6],
                dtype='float32')
1274
            label = fluid.data(
X
Xin Pan 已提交
1275 1276 1277 1278
                name='label',
                shape=[10, 6],
                dtype='float32')

1279
            map_out = detection.detection_map(detect_res, label, 21)
X
Xin Pan 已提交
1280
    """
1281 1282
    helper = LayerHelper("detection_map", **locals())

1283
    def __create_var(type):
X
Xin Pan 已提交
1284
        return helper.create_variable_for_type_inference(dtype=type)
1285 1286

    map_out = __create_var('float32')
Z
zhongpu 已提交
1287 1288 1289 1290 1291 1292
    accum_pos_count_out = out_states[
        0] if out_states is not None else __create_var('int32')
    accum_true_pos_out = out_states[
        1] if out_states is not None else __create_var('float32')
    accum_false_pos_out = out_states[
        2] if out_states is not None else __create_var('float32')
1293

Z
zhongpu 已提交
1294 1295 1296
    pos_count = input_states[0] if input_states is not None else None
    true_pos = input_states[1] if input_states is not None else None
    false_pos = input_states[2] if input_states is not None else None
1297

1298 1299 1300 1301 1302
    helper.append_op(
        type="detection_map",
        inputs={
            'Label': label,
            'DetectRes': detect_res,
1303
            'HasState': has_state,
1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
            'PosCount': pos_count,
            'TruePos': true_pos,
            'FalsePos': false_pos
        },
        outputs={
            'MAP': map_out,
            'AccumPosCount': accum_pos_count_out,
            'AccumTruePos': accum_true_pos_out,
            'AccumFalsePos': accum_false_pos_out
        },
        attrs={
            'overlap_threshold': overlap_threshold,
            'evaluate_difficult': evaluate_difficult,
1317 1318
            'ap_type': ap_version,
            'class_num': class_num,
1319
        })
1320
    return map_out
1321 1322


1323 1324 1325 1326
def bipartite_match(dist_matrix,
                    match_type=None,
                    dist_threshold=None,
                    name=None):
1327
    """
S
swtkiwi 已提交
1328

Y
yuyang18 已提交
1329 1330
    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
1331
    distance matrix. For input 2D matrix, the bipartite matching algorithm can
Y
yuyang18 已提交
1332 1333 1334 1335
    find the matched column for each row (matched means the largest distance),
    also can find the matched row for each column. And this operator only
    calculate matched indices from column to row. For each instance,
    the number of matched indices is the column number of the input distance
W
wangguanzhong 已提交
1336
    matrix. **The OP only supports CPU**.
Y
yuyang18 已提交
1337 1338 1339

    There are two outputs, matched indices and distance.
    A simple description, this algorithm matched the best (maximum distance)
1340 1341 1342
    row entity to the column entity and the matched indices are not duplicated
    in each row of ColToRowMatchIndices. If the column entity is not matched
    any row entity, set -1 in ColToRowMatchIndices.
C
chengduoZH 已提交
1343

Y
yuyang18 已提交
1344
    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
1345 1346 1347
    If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
    If Tensor, the height of ColToRowMatchIndices is 1.

Y
yuyang18 已提交
1348 1349 1350
    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider to use :code:`ssd_loss` instead.

1351 1352
    Args:
        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
W
wangguanzhong 已提交
1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363
            [K, M]. The data type is float32 or float64. It is pair-wise 
            distance matrix between the entities represented by each row and 
            each column. For example, assumed one entity is A with shape [K], 
            another entity is B with shape [M]. The dist_matrix[i][j] is the 
            distance between A[i] and B[j]. The bigger the distance is, the 
            better matching the pairs are. NOTE: This tensor can contain LoD 
            information to represent a batch of inputs. One instance of this 
            batch can contain different numbers of entities.
        match_type(str, optional): The type of matching method, should be
           'bipartite' or 'per_prediction'. None ('bipartite') by default.
        dist_threshold(float32, optional): If `match_type` is 'per_prediction',
1364
            this threshold is to determine the extra matching bboxes based
Y
yuyang18 已提交
1365
            on the maximum distance, 0.5 by default.
W
wangguanzhong 已提交
1366 1367 1368 1369
        name(str, optional): For detailed information, please refer 
            to :ref:`api_guide_Name`. Usually name is no need to set and 
            None by default.
 
1370
    Returns:
W
wangguanzhong 已提交
1371
        Tuple:
Y
yuyang18 已提交
1372

W
wangguanzhong 已提交
1373 1374
        matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
        type is int32. N is the batch size. If match_indices[i][j] is -1, it
Y
yuyang18 已提交
1375 1376 1377 1378 1379
        means B[j] does not match any entity in i-th instance.
        Otherwise, it means B[j] is matched to row
        match_indices[i][j] in i-th instance. The row number of
        i-th instance is saved in match_indices[i][j].

W
wangguanzhong 已提交
1380 1381
        matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
        type is float32. N is batch size. If match_indices[i][j] is -1,
Y
yuyang18 已提交
1382 1383 1384 1385 1386 1387 1388
        match_distance[i][j] is also -1.0. Otherwise, assumed
        match_distance[i][j] = d, and the row offsets of each instance
        are called LoD. Then match_distance[i][j] =
        dist_matrix[d+LoD[i]][j].

    Examples:

1389
        >>> import paddle.fluid as fluid
1390 1391
        >>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
        >>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
Y
yuyang18 已提交
1392 1393
        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
1394 1395
    """
    helper = LayerHelper('bipartite_match', **locals())
X
Xin Pan 已提交
1396 1397 1398
    match_indices = helper.create_variable_for_type_inference(dtype='int32')
    match_distance = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype)
1399 1400 1401
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
1402 1403 1404 1405
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418
        outputs={
            'ColToRowMatchIndices': match_indices,
            'ColToRowMatchDist': match_distance
        })
    return match_indices, match_distance


def target_assign(input,
                  matched_indices,
                  negative_indices=None,
                  mismatch_value=None,
                  name=None):
    """
S
swtkiwi 已提交
1419

1420 1421 1422 1423
    This operator can be, for given the target bounding boxes or labels,
    to assign classification and regression targets to each prediction as well as
    weights to prediction. The weights is used to specify which prediction would
    not contribute to training loss.
C
chengduoZH 已提交
1424

1425 1426 1427 1428 1429
    For each instance, the output `out` and`out_weight` are assigned based on
    `match_indices` and `negative_indices`.
    Assumed that the row offset for each instance in `input` is called lod,
    this operator assigns classification/regression targets by performing the
    following steps:
C
chengduoZH 已提交
1430

1431
    1. Assigning all outputs based on `match_indices`:
C
chengduoZH 已提交
1432

1433 1434 1435
    .. code-block:: text

        If id = match_indices[i][j] > 0,
C
chengduoZH 已提交
1436

1437 1438
            out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
            out_weight[i][j] = 1.
C
chengduoZH 已提交
1439

1440
        Otherwise,
C
chengduoZH 已提交
1441

1442 1443
            out[j][j][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][j] = 0.
C
chengduoZH 已提交
1444

Q
qingqing01 已提交
1445
    2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
C
chengduoZH 已提交
1446

Q
qingqing01 已提交
1447 1448
    Assumed that i-th instance in `neg_indices` is called `neg_indice`,
    for i-th instance:
M
minqiyang 已提交
1449

1450
    .. code-block:: text
C
chengduoZH 已提交
1451

Q
qingqing01 已提交
1452 1453 1454
        for id in neg_indice:
            out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][id] = 1.0
1455 1456

    Args:
Q
qingqing01 已提交
1457 1458 1459
       input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
           Data type should be int32 or float32.
       matched_indices (Variable): The input matched indices
1460 1461 1462
           is 2D Tenosr<int32> with shape [N, P], If MatchIndices[i][j] is -1,
           the j-th entity of column is not matched to any entity of row in
           i-th instance.
Q
qingqing01 已提交
1463 1464
       negative_indices (Variable, optional): The input negative example indices
           are an optional input with shape [Neg, 1] and int32 type, where Neg is
1465
           the total number of negative example indices.
Q
qingqing01 已提交
1466 1467 1468 1469 1470
       mismatch_value (float32, optional): Fill this value to the mismatched
           location.
       name (string): The default value is None.  Normally there is no need for
           user to set this property.  For more information, please refer
           to :ref:`api_guide_Name`.
1471 1472

    Returns:
Q
qingqing01 已提交
1473 1474 1475 1476 1477 1478 1479 1480
        tuple: A tuple(out, out_weight) is returned.

        out (Variable): a 3D Tensor with shape [N, P, K] and same data type
        with `input`, N and P is the same as they are in `matched_indices`,
        K is the same as it in input of X.

        out_weight (Variable): the weight for output with the shape of [N, P, 1].
        Data type is float32.
1481 1482 1483 1484 1485

    Examples:

        .. code-block:: python

1486
            import paddle.fluid as fluid
1487 1488
            import paddle
            paddle.enable_static()
Q
qingqing01 已提交
1489
            x = fluid.data(
1490 1491 1492
                name='x',
                shape=[4, 20, 4],
                dtype='float',
Q
qingqing01 已提交
1493 1494
                lod_level=1)
            matched_id = fluid.data(
1495 1496
                name='indices',
                shape=[8, 20],
Q
qingqing01 已提交
1497
                dtype='int32')
1498 1499 1500 1501
            trg, trg_weight = fluid.layers.target_assign(
                x,
                matched_id,
                mismatch_value=0)
1502 1503
    """
    helper = LayerHelper('target_assign', **locals())
X
Xin Pan 已提交
1504 1505
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_weight = helper.create_variable_for_type_inference(dtype='float32')
1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532
    helper.append_op(
        type='target_assign',
        inputs={
            'X': input,
            'MatchIndices': matched_indices,
            'NegIndices': negative_indices
        },
        outputs={'Out': out,
                 'OutWeight': out_weight},
        attrs={'mismatch_value': mismatch_value})
    return out, out_weight


def ssd_loss(location,
             confidence,
             gt_box,
             gt_label,
             prior_box,
             prior_box_var=None,
             background_label=0,
             overlap_threshold=0.5,
             neg_pos_ratio=3.0,
             neg_overlap=0.5,
             loc_loss_weight=1.0,
             conf_loss_weight=1.0,
             match_type='per_prediction',
             mining_type='max_negative',
1533
             normalize=True,
1534
             sample_size=None):
1535
    r"""
1536 1537 1538
	:alias_main: paddle.nn.functional.ssd_loss
	:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
	:old_api: paddle.fluid.layers.ssd_loss
S
swtkiwi 已提交
1539

Y
yuyang18 已提交
1540
    **Multi-box loss layer for object detection algorithm of SSD**
1541

翟飞跃 已提交
1542 1543
    This layer is to compute detection loss for SSD given the location offset
    predictions, confidence predictions, prior boxes and ground-truth bounding
1544 1545 1546 1547
    boxes and labels, and the type of hard example mining. The returned loss
    is a weighted sum of the localization loss (or regression loss) and
    confidence loss (or classification loss) by performing the following steps:

Y
yuyang18 已提交
1548
    1. Find matched bounding box by bipartite matching algorithm.
Y
yuyang18 已提交
1549

1550
      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
Y
yuyang18 已提交
1551

T
tianshuo78520a 已提交
1552
      1.2 Compute matched bounding box by bipartite matching algorithm.
Y
yuyang18 已提交
1553

1554
    2. Compute confidence for mining hard examples
Y
yuyang18 已提交
1555

1556
      2.1. Get the target label based on matched indices.
Y
yuyang18 已提交
1557

1558
      2.2. Compute confidence loss.
Y
yuyang18 已提交
1559

1560 1561
    3. Apply hard example mining to get the negative example indices and update
       the matched indices.
Y
yuyang18 已提交
1562

1563
    4. Assign classification and regression targets
Y
yuyang18 已提交
1564

1565
      4.1. Encoded bbox according to the prior boxes.
Y
yuyang18 已提交
1566

1567
      4.2. Assign regression targets.
Y
yuyang18 已提交
1568

1569
      4.3. Assign classification targets.
Y
yuyang18 已提交
1570

1571
    5. Compute the overall objective loss.
Y
yuyang18 已提交
1572

1573
      5.1 Compute confidence loss.
Y
yuyang18 已提交
1574

1575
      5.2 Compute localization loss.
Y
yuyang18 已提交
1576

1577 1578 1579 1580 1581 1582
      5.3 Compute the overall weighted loss.

    Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
1583 1584
            the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
            float64.
1585 1586
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
1587 1588
            `location`, C is the class number.The data type is float32 or
            float64.
翟飞跃 已提交
1589
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
1590
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
1591
            bboxes of mini-batch input.The data type is float32 or float64.
1592
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
1593 1594 1595
            with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
            mini-batch input, 1 is the number of class. The data type is float32
            or float64.
1596
        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
1597 1598
            Np and 4 are the same as they are in `location`. The data type is
            float32 or float64.
1599
        prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
1600
            with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
1601 1602
        background_label (int): The index of background label, 0 by default.
        overlap_threshold (float): If match_type is 'per_prediction', use
1603 1604
            'overlap_threshold' to determine the extra matching bboxes when finding \
            matched boxes. 0.5 by default.
1605
        neg_pos_ratio (float): The ratio of the negative boxes to the positive
翟飞跃 已提交
1606
            boxes, used only when mining_type is 'max_negative', 3.0 by default.
1607
        neg_overlap (float): The negative overlap upper bound for the unmatched
1608
            predictions. Use only when mining_type is 'max_negative',
1609 1610 1611 1612
            0.5 by default.
        loc_loss_weight (float): Weight for localization loss, 1.0 by default.
        conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
        match_type (str): The type of matching method during training, should
翟飞跃 已提交
1613
            be 'bipartite' or 'per_prediction', 'per_prediction' by default.
1614 1615
        mining_type (str): The hard example mining type, should be 'hard_example'
            or 'max_negative', now only support `max_negative`.
1616
        normalize (bool): Whether to normalize the SSD loss by the total number
Y
yuyang18 已提交
1617
            of output locations, True by default.
1618 1619
        sample_size (int): The max sample size of negative box, used only when
            mining_type is 'hard_example'.
1620 1621

    Returns:
1622 1623 1624
        Variable(Tensor):  The weighted sum of the localization loss and confidence loss, \
        with shape [N * Np, 1], N and Np are the same as they are in
        `location`.The data type is float32 or float64.
1625 1626

    Raises:
Y
yuyang18 已提交
1627 1628
        ValueError: If mining_type is 'hard_example', now only support mining \
        type of `max_negative`.
Y
yuyang18 已提交
1629 1630

    Examples:
1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649

        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                           name='prior_box',
                           shape=[10, 4],
                           dtype='float32')
            pbv = fluid.data(
                           name='prior_box_var',
                           shape=[10, 4],
                           dtype='float32')
            loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
            gt_box = fluid.data(
                 name='gt_box', shape=[4], lod_level=1, dtype='float32')
            gt_label = fluid.data(
                 name='gt_label', shape=[1], lod_level=1, dtype='float32')
            loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
1650 1651 1652 1653 1654 1655 1656
    """

    helper = LayerHelper('ssd_loss', **locals())
    if mining_type != 'max_negative':
        raise ValueError("Only support mining_type == max_negative now.")

    num, num_prior, num_class = confidence.shape
G
merge  
gongweibao 已提交
1657
    conf_shape = nn.shape(confidence)
1658 1659

    def __reshape_to_2d(var):
1660
        return nn.flatten(x=var, axis=2)
1661

T
tianshuo78520a 已提交
1662
    # 1. Find matched bounding box by prior box.
1663 1664
    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
    iou = iou_similarity(x=gt_box, y=prior_box)
T
tianshuo78520a 已提交
1665
    #   1.2 Compute matched bounding box by bipartite matching algorithm.
1666 1667
    matched_indices, matched_dist = bipartite_match(iou, match_type,
                                                    overlap_threshold)
1668 1669 1670

    # 2. Compute confidence for mining hard examples
    # 2.1. Get the target label based on matched indices
1671 1672
    gt_label = nn.reshape(
        x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
1673
    gt_label.stop_gradient = True
1674 1675 1676 1677 1678 1679 1680
    target_label, _ = target_assign(
        gt_label, matched_indices, mismatch_value=background_label)
    # 2.2. Compute confidence loss.
    # Reshape confidence to 2D tensor.
    confidence = __reshape_to_2d(confidence)
    target_label = tensor.cast(x=target_label, dtype='int64')
    target_label = __reshape_to_2d(target_label)
1681
    target_label.stop_gradient = True
1682
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
1683
    # 3. Mining hard examples
G
merge  
gongweibao 已提交
1684
    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
1685
    actual_shape.stop_gradient = True
1686 1687
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
1688
    conf_loss = nn.reshape(
1689
        x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
1690
    conf_loss.stop_gradient = True
X
Xin Pan 已提交
1691
    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
1692
    dtype = matched_indices.dtype
X
Xin Pan 已提交
1693 1694
    updated_matched_indices = helper.create_variable_for_type_inference(
        dtype=dtype)
1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708
    helper.append_op(
        type='mine_hard_examples',
        inputs={
            'ClsLoss': conf_loss,
            'LocLoss': None,
            'MatchIndices': matched_indices,
            'MatchDist': matched_dist,
        },
        outputs={
            'NegIndices': neg_indices,
            'UpdatedMatchIndices': updated_matched_indices
        },
        attrs={
            'neg_pos_ratio': neg_pos_ratio,
B
Bai Yifan 已提交
1709
            'neg_dist_threshold': neg_overlap,
1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734
            'mining_type': mining_type,
            'sample_size': sample_size,
        })

    # 4. Assign classification and regression targets
    # 4.1. Encoded bbox according to the prior boxes.
    encoded_bbox = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size')
    # 4.2. Assign regression targets
    target_bbox, target_loc_weight = target_assign(
        encoded_bbox, updated_matched_indices, mismatch_value=background_label)
    # 4.3. Assign classification targets
    target_label, target_conf_weight = target_assign(
        gt_label,
        updated_matched_indices,
        negative_indices=neg_indices,
        mismatch_value=background_label)

    # 5. Compute loss.
    # 5.1 Compute confidence loss.
    target_label = __reshape_to_2d(target_label)
    target_label = tensor.cast(x=target_label, dtype='int64')
1735

1736
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
1737 1738 1739
    target_conf_weight = __reshape_to_2d(target_conf_weight)
    conf_loss = conf_loss * target_conf_weight

1740 1741 1742 1743
    # the target_label and target_conf_weight do not have gradient.
    target_label.stop_gradient = True
    target_conf_weight.stop_gradient = True

1744 1745 1746 1747 1748 1749 1750 1751
    # 5.2 Compute regression loss.
    location = __reshape_to_2d(location)
    target_bbox = __reshape_to_2d(target_bbox)

    loc_loss = nn.smooth_l1(location, target_bbox)
    target_loc_weight = __reshape_to_2d(target_loc_weight)
    loc_loss = loc_loss * target_loc_weight

1752 1753 1754 1755
    # the target_bbox and target_loc_weight do not have gradient.
    target_bbox.stop_gradient = True
    target_loc_weight.stop_gradient = True

1756 1757
    # 5.3 Compute overall weighted loss.
    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
1758
    # reshape to [N, Np], N is the batch size and Np is the prior box number.
1759 1760 1761
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
1762 1763 1764 1765 1766
    loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
    if normalize:
        normalizer = nn.reduce_sum(target_loc_weight)
        loss = loss / normalizer

1767
    return loss
C
chengduoZH 已提交
1768 1769


1770 1771 1772 1773
def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
1774
              aspect_ratios=[1.],
1775 1776 1777 1778 1779
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
1780 1781
              name=None,
              min_max_aspect_ratios_order=False):
1782
    """
S
swtkiwi 已提交
1783

R
ruri 已提交
1784
    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
1785 1786 1787 1788 1789
    Each position of the input produce N prior boxes, N is determined by
    the count of min_sizes, max_sizes and aspect_ratios, The size of the
    box is in range(min_size, max_size) interval, which is generated in
    sequence according to the aspect_ratios.

R
ruri 已提交
1790
    Parameters:
T
tianshuo78520a 已提交
1791
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
R
ruri 已提交
1792 1793 1794 1795
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes(list|tuple|float): the min sizes of generated prior boxes.
       max_sizes(list|tuple|None): the max sizes of generated prior boxes.
1796
            Default: None.
R
ruri 已提交
1797
       aspect_ratios(list|tuple|float): the aspect ratios of generated
1798
            prior boxes. Default: [1.].
1799 1800 1801 1802
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
翟飞跃 已提交
1803
       step(list|tuple): Prior boxes step across width and height, If
R
ruri 已提交
1804 1805
            step[0] equals to 0.0 or step[1] equals to 0.0, the prior boxes step across
            height or weight of the input will be automatically calculated.
1806
            Default: [0., 0.]
1807
       offset(float): Prior boxes center offset. Default: 0.5
1808
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
M
minqiyang 已提交
1809
            in order of [min, max, aspect_ratios], which is consistent with
1810 1811 1812
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.
R
ruri 已提交
1813
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
1814 1815

    Returns:
R
ruri 已提交
1816
        Tuple: A tuple with two Variable (boxes, variances)
Q
update  
qiaolongfei 已提交
1817

R
ruri 已提交
1818 1819
        boxes(Variable): the output prior boxes of PriorBox.
	4-D tensor, the layout is [H, W, num_priors, 4].
Q
update  
qiaolongfei 已提交
1820
        H is the height of input, W is the width of input,
R
ruri 已提交
1821
        num_priors is the total box count of each position of input.
Q
update  
qiaolongfei 已提交
1822

R
ruri 已提交
1823 1824
        variances(Variable): the expanded variances of PriorBox.
    	4-D tensor, the layput is [H, W, num_priors, 4].
Q
update  
qiaolongfei 已提交
1825
        H is the height of input, W is the width of input
R
ruri 已提交
1826
        num_priors is the total box count of each position of input
1827 1828 1829

    Examples:
        .. code-block:: python
Q
update  
qiaolongfei 已提交
1830

R
ruri 已提交
1831 1832 1833
	    #declarative mode
	    import paddle.fluid as fluid
	    import numpy as np
1834 1835
        import paddle
        paddle.enable_static()
R
ruri 已提交
1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879
	    input = fluid.data(name="input", shape=[None,3,6,9])
	    image = fluid.data(name="image", shape=[None,3,9,12])
	    box, var = fluid.layers.prior_box(
                 input=input,
                 image=image,
		 min_sizes=[100.],
                 clip=True,
                 flip=True)

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
 
	    # prepare a batch of data
	    input_data = np.random.rand(1,3,6,9).astype("float32")
	    image_data = np.random.rand(1,3,9,12).astype("float32")
 
	    box_out, var_out = exe.run(fluid.default_main_program(),
                feed={"input":input_data,"image":image_data},
                fetch_list=[box,var],
                return_numpy=True)
 
	    # print(box_out.shape)
	    # (6, 9, 1, 4)
	    # print(var_out.shape)
	    # (6, 9, 1, 4)

	    # imperative mode
	    import paddle.fluid.dygraph as dg

	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		image = dg.to_variable(image_data)
    		box, var = fluid.layers.prior_box(
		    input=input,
		    image=image,
		    min_sizes=[100.],
		    clip=True,
		    flip=True)
		# print(box.shape)
		# [6L, 9L, 1L, 4L]
                # print(var.shape)
		# [6L, 9L, 1L, 4L]

1880 1881 1882
    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
1883 1884
    check_variable_and_dtype(
        input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
1885

1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900
    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

1901 1902 1903 1904 1905 1906 1907 1908
    attrs = {
        'min_sizes': min_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'flip': flip,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
1909 1910
        'offset': offset,
        'min_max_aspect_ratios_order': min_max_aspect_ratios_order
1911 1912
    }
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
1913 1914
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
1915 1916
        attrs['max_sizes'] = max_sizes

X
Xin Pan 已提交
1917 1918
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930
    helper.append_op(
        type="prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


R
ruri 已提交
1931 1932 1933 1934 1935 1936 1937 1938 1939
def density_prior_box(input,
                      image,
                      densities=None,
                      fixed_sizes=None,
                      fixed_ratios=None,
                      variance=[0.1, 0.1, 0.2, 0.2],
                      clip=False,
                      steps=[0.0, 0.0],
                      offset=0.5,
1940
                      flatten_to_2d=False,
R
ruri 已提交
1941
                      name=None):
1942
    r"""
R
ruri 已提交
1943

R
ruri 已提交
1944
    This op generates density prior boxes for SSD(Single Shot MultiBox Detector) 
R
ruri 已提交
1945 1946 1947 1948 1949 1950
    algorithm. Each position of the input produce N prior boxes, N is 
    determined by the count of densities, fixed_sizes and fixed_ratios. 
    Boxes center at grid points around each input position is generated by 
    this operator, and the grid points is determined by densities and 
    the count of density prior box is determined by fixed_sizes and fixed_ratios. 
    Obviously, the number of fixed_sizes is equal to the number of densities.
R
ruri 已提交
1951
    
R
ruri 已提交
1952
    For densities_i in densities:
R
ruri 已提交
1953 1954
    
    .. math::
R
ruri 已提交
1955

R
ruri 已提交
1956 1957 1958 1959 1960 1961 1962
        N\_density_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)

    N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 of float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
R
ruri 已提交
1963
            the layout is NCHW.
R
ruri 已提交
1964
       densities(list|tuple|None): The densities of generated density prior 
R
ruri 已提交
1965 1966
            boxes, this attribute should be a list or tuple of integers. 
            Default: None.
R
ruri 已提交
1967
       fixed_sizes(list|tuple|None): The fixed sizes of generated density
R
ruri 已提交
1968 1969
            prior boxes, this attribute should a list or tuple of same 
            length with :attr:`densities`. Default: None.
R
ruri 已提交
1970
       fixed_ratios(list|tuple|None): The fixed ratios of generated density
R
ruri 已提交
1971 1972 1973
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
R
ruri 已提交
1974
       variance(list|tuple): The variances to be encoded in density prior boxes.
R
ruri 已提交
1975
            Default:[0.1, 0.1, 0.2, 0.2].
R
ruri 已提交
1976
       clip(bool): Whether to clip out of boundary boxes. Default: False.
翟飞跃 已提交
1977
       step(list|tuple): Prior boxes step across width and height, If
R
ruri 已提交
1978 1979
            step[0] equals 0.0 or step[1] equals 0.0, the density prior boxes step across
            height or weight of the input will be automatically calculated.
R
ruri 已提交
1980 1981
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
1982 1983
       flatten_to_2d(bool): Whether to flatten output prior boxes and variance
           to 2D shape, the second dim is 4. Default: False.
R
ruri 已提交
1984 1985
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
    
R
ruri 已提交
1986
    Returns:
R
ruri 已提交
1987
        Tuple: A tuple with two Variable (boxes, variances)
R
ruri 已提交
1988 1989

        boxes: the output density prior boxes of PriorBox.
R
ruri 已提交
1990 1991 1992
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
R
ruri 已提交
1993 1994

        variances: the expanded variances of PriorBox.
R
ruri 已提交
1995 1996 1997
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
R
ruri 已提交
1998 1999 2000


    Examples:
R
ruri 已提交
2001

R
ruri 已提交
2002 2003
        .. code-block:: python

R
ruri 已提交
2004
            #declarative mode
R
ruri 已提交
2005

R
ruri 已提交
2006 2007
            import paddle.fluid as fluid
            import numpy as np
2008 2009
            import paddle
            paddle.enable_static()
R
ruri 已提交
2010

R
ruri 已提交
2011 2012 2013
            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.density_prior_box(
R
ruri 已提交
2014 2015 2016 2017 2018 2019 2020 2021
                 input=input,
                 image=image,
                 densities=[4, 2, 1],
                 fixed_sizes=[32.0, 64.0, 128.0],
                 fixed_ratios=[1.],
                 clip=True,
                 flatten_to_2d=True)

R
ruri 已提交
2022 2023 2024
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
R
ruri 已提交
2025
 
R
ruri 已提交
2026 2027 2028 2029 2030 2031
            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(
                fluid.default_main_program(),
R
ruri 已提交
2032
                feed={"input":input_data,
R
ruri 已提交
2033
                      "image":image_data},
R
ruri 已提交
2034 2035 2036
                fetch_list=[box,var],
                return_numpy=True)

R
ruri 已提交
2037 2038 2039 2040
            # print(box_out.shape)
            # (1134, 4)
            # print(var_out.shape)
            # (1134, 4)
R
ruri 已提交
2041 2042


R
ruri 已提交
2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060
            #imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.density_prior_box(
                    input=input,
                    image=image,
                    densities=[4, 2, 1],
                    fixed_sizes=[32.0, 64.0, 128.0],
                    fixed_ratios=[1.],
                    clip=True)

                # print(box.shape)
                # [6L, 9L, 21L, 4L]
                # print(var.shape)
                # [6L, 9L, 21L, 4L]
R
ruri 已提交
2061

R
ruri 已提交
2062 2063 2064
    """
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()
2065 2066
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'density_prior_box')
R
ruri 已提交
2067 2068 2069 2070

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

2071 2072 2073
    check_type(densities, 'densities', (list, tuple), 'density_prior_box')
    check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
    check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
R
ruri 已提交
2074 2075
    if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be euqal.')
2076

R
ruri 已提交
2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
2092 2093 2094 2095
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
R
ruri 已提交
2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110
    }
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


2111
@static_only
C
chengduoZH 已提交
2112
def multi_box_head(inputs,
C
chengduoZH 已提交
2113 2114
                   image,
                   base_size,
C
chengduoZH 已提交
2115
                   num_classes,
C
chengduoZH 已提交
2116
                   aspect_ratios,
2117 2118
                   min_ratio=None,
                   max_ratio=None,
C
chengduoZH 已提交
2119 2120
                   min_sizes=None,
                   max_sizes=None,
C
chengduoZH 已提交
2121 2122 2123 2124
                   steps=None,
                   step_w=None,
                   step_h=None,
                   offset=0.5,
2125 2126
                   variance=[0.1, 0.1, 0.2, 0.2],
                   flip=True,
C
chengduoZH 已提交
2127
                   clip=False,
C
chengduoZH 已提交
2128
                   kernel_size=1,
C
chengduoZH 已提交
2129
                   pad=0,
C
chengduoZH 已提交
2130
                   stride=1,
2131 2132
                   name=None,
                   min_max_aspect_ratios_order=False):
C
chengduoZH 已提交
2133
    """
2134
	:api_attr: Static Graph
S
swtkiwi 已提交
2135

Q
qingqing01 已提交
2136 2137 2138 2139
    Base on SSD ((Single Shot MultiBox Detector) algorithm, generate prior boxes,
    regression location and classification confidence on multiple input feature
    maps, then output the concatenate results. The details of this algorithm,
    please refer the section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector
C
chengduoZH 已提交
2140
    <https://arxiv.org/abs/1512.02325>`_ .
C
chengduoZH 已提交
2141 2142

    Args:
Q
qingqing01 已提交
2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163
       inputs (list(Variable)|tuple(Variable)): The list of input variables,
           the format of all Variables are 4-D Tensor, layout is NCHW.
           Data type should be float32 or float64.
       image (Variable): The input image, layout is NCHW. Data type should be
           the same as inputs.
       base_size(int): the base_size is input image size. When len(inputs) > 2
           and `min_size` and `max_size` are None, the `min_size` and `max_size`
           are calculated by `baze_size`, 'min_ratio' and `max_ratio`. The
           formula is as follows:

              ..  code-block:: text

                  min_sizes = []
                  max_sizes = []
                  step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
                  for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
                      min_sizes.append(base_size * ratio / 100.)
                      max_sizes.append(base_size * (ratio + step) / 100.)
                      min_sizes = [base_size * .10] + min_sizes
                      max_sizes = [base_size * .20] + max_sizes

C
chengduoZH 已提交
2164
       num_classes(int): The number of classes.
Q
qingqing01 已提交
2165 2166
       aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
           prior boxes. The length of input and aspect_ratios must be equal.
C
chengduoZH 已提交
2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       min_sizes(list|tuple|None): If `len(inputs) <=2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       max_sizes(list|tuple|None): If `len(inputs) <=2`,
            max_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
       step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
       step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
       offset(float): Prior boxes center offset. Default: 0.5
       variance(list|tuple): the variances to be encoded in prior boxes.
2186
            Default:[0.1, 0.1, 0.2, 0.2].
C
chengduoZH 已提交
2187 2188 2189 2190 2191
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       kernel_size(int): The kernel size of conv2d. Default: 1.
       pad(int|list|tuple): The padding of conv2d. Default:0.
       stride(int|list|tuple): The stride of conv2d. Default:1,
Q
qingqing01 已提交
2192 2193 2194
       name(str): The default value is None.  Normally there is no need
           for user to set this property.  For more information, please
           refer to :ref:`api_guide_Name`.
2195
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
M
minqiyang 已提交
2196
            in order of [min, max, aspect_ratios], which is consistent with
2197
            Caffe. Please note, this order affects the weights order of
T
tianshuo78520a 已提交
2198
            convolution layer followed by and does not affect the final
2199
            detection results. Default: False.
C
chengduoZH 已提交
2200 2201

    Returns:
Q
update  
qiaolongfei 已提交
2202 2203
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

Q
qingqing01 已提交
2204 2205 2206
        mbox_loc (Variable): The predicted boxes' location of the inputs. The
        layout is [N, num_priors, 4], where N is batch size, ``num_priors``
        is the number of prior boxes. Data type is the same as input.
Q
update  
qiaolongfei 已提交
2207

Q
qingqing01 已提交
2208 2209 2210 2211
        mbox_conf (Variable): The predicted boxes' confidence of the inputs.
        The layout is [N, num_priors, C], where ``N`` and ``num_priors`` 
        has the same meaning as above. C is the number of Classes.
        Data type is the same as input.
Q
update  
qiaolongfei 已提交
2212

Q
qingqing01 已提交
2213 2214 2215
        boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
        The meaning of num_priors is the same as above.
        Data type is the same as input.
C
chengduoZH 已提交
2216

Q
qingqing01 已提交
2217 2218
        variances (Variable): the expanded variances for prior boxes.
        The layout is [num_priors, 4]. Data type is the same as input.
C
chengduoZH 已提交
2219

Q
qingqing01 已提交
2220
    Examples 1: set min_ratio and max_ratio:
        .. code-block:: python

          import paddle
          paddle.enable_static()

          images = paddle.static.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = paddle.static.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = paddle.static.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = paddle.static.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = paddle.static.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = paddle.static.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = paddle.static.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = paddle.static.nn.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    Examples 2: set min_sizes and max_sizes:
        .. code-block:: python

          import paddle
          paddle.enable_static()

          images = paddle.static.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = paddle.static.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = paddle.static.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = paddle.static.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = paddle.static.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = paddle.static.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = paddle.static.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = paddle.static.nn.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
            max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    """

    def _reshape_with_axis_(input, axis=1):
        out = nn.flatten(x=input, axis=axis)
        return out

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    elif min_sizes is None and max_sizes is None:
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes
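        # Illustrative note (assumed values, matching the docstring example):
        # with base_size=300, min_ratio=20, max_ratio=90 and six input layers,
        # step = floor((90 - 20) / 4) = 17, so the loop yields
        # min_sizes = [60., 111., 162., 213., 264.] and
        # max_sizes = [111., 162., 213., 264., 315.]; the prepended entries for
        # the first layer then give min_sizes[0] = 30. and max_sizes[0] = 60..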

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios, num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.')
    if step_h is not None:
        _is_list_or_tuple_and_equal(
            step_h, num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.')
    if step_w is not None:
        _is_list_or_tuple_and_equal(
            step_w, num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
    if steps is not None:
        _is_list_or_tuple_and_equal(
            steps, num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'steps should be the same.')
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]

        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
                             variance, flip, clip, step, offset, None,
                             min_max_aspect_ratios_order)

        box_results.append(box)
        var_results.append(var)

        num_boxes = box.shape[2]
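        # prior_box returns boxes laid out as [H, W, num_priors, 4], so
        # shape[2] is the number of priors generated at each spatial location.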

        # get loc
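        # Predict 4 location offsets for every prior at each spatial position,
        # then move channels last and flatten so each sample becomes a
        # [H * W * num_boxes * 4] row, ready to be concatenated across layers.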
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)

        mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
        mbox_locs.append(mbox_loc_flatten)

        # get conf
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)
        conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
        mbox_confs.append(conf_loc_flatten)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = nn.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes])

    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var


def anchor_generator(input,
                     anchor_sizes=None,
                     aspect_ratios=None,
                     variance=[0.1, 0.1, 0.2, 0.2],
                     stride=None,
                     offset=0.5,
                     name=None):
    """

    **Anchor generator operator**

    Generate anchors for Faster RCNN algorithm.
    Each position of the input produces N anchors, N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are
    ordered by looping over aspect_ratios first and then anchor_sizes.

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, the anchor size of 64 means the area of this anchor 
          equals to 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios 
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box 
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by 
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer 
           to :ref:`api_guide_Name`. Usually name is no need to set and None 
           by default. 

    Returns:
        Tuple:

        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.

        Variances(Variable): The expanded variances of anchors
        with a layout of [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each variance is in (xcenter, ycenter, w, h) format.


    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
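            # Illustrative note: with 4 anchor_sizes and 3 aspect_ratios,
            # num_anchors = 12, so both ``anchor`` and ``var`` have shape
            # [16, 16, 12, 4] for the 16 x 16 feature map above.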
    """
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple ',
                         'with length 2, (stride_width, stride_height).')

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor,
                 "Variances": var},
        attrs=attrs, )
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var


def roi_perspective_transform(input,
                              rois,
                              transformed_height,
                              transformed_width,
                              spatial_scale=1.0,
                              name=None):
    """
    **The** `rois` **of this op should be a LoDTensor.**

    ROI perspective transform op applies perspective transform to map each roi into a
    rectangular region. Perspective transform is a type of transformation in linear algebra.

    Parameters:
        input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
                          input tensor is NCHW. Where N is batch size, C is the
                          number of input channels, H is the height of the feature,
                          and W is the width of the feature. The data type is float32.
        rois (Variable):  2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
                          It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
                          [[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
                          top left coordinates, and (x2, y2) is the top right
                          coordinates, and (x3, y3) is the bottom right coordinates,
                          and (x4, y4) is the bottom left coordinates. The data type is the
                          same as `input`.
        transformed_height (int): The height of transformed output.
        transformed_width (int): The width of transformed output.
        spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
            A tuple with three Variables. (out, mask, transform_matrix)

            out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`

            mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, 1, transformed_h, transformed_w). The data type is int32

            transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
            a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`

    Return Type:
        tuple

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
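            # Illustrative note: with the inputs above, `out` has shape
            # (num_rois, 256, 7, 7) and `mask` has shape (num_rois, 1, 7, 7).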
    """
    check_variable_and_dtype(input, 'input', ['float32'],
                             'roi_perspective_transform')
    check_variable_and_dtype(rois, 'rois', ['float32'],
                             'roi_perspective_transform')
    check_type(transformed_height, 'transformed_height', int,
               'roi_perspective_transform')
    check_type(transformed_width, 'transformed_width', int,
               'roi_perspective_transform')
    check_type(spatial_scale, 'spatial_scale', float,
               'roi_perspective_transform')

    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input,
                "ROIs": rois},
        outputs={
            "Out": out,
            "Out2InIdx": out2in_idx,
            "Out2InWeights": out2in_w,
            "Mask": mask,
            "TransformMatrix": transform_matrix
        },
        attrs={
            "transformed_height": transformed_height,
            "transformed_width": transformed_width,
            "spatial_scale": spatial_scale
        })
    return out, mask, transform_matrix


def generate_proposal_labels(rpn_rois,
                             gt_classes,
                             is_crowd,
                             gt_boxes,
                             im_info,
                             batch_size_per_im=256,
                             fg_fraction=0.25,
                             fg_thresh=0.25,
                             bg_thresh_hi=0.5,
                             bg_thresh_lo=0.0,
                             bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                             class_nums=None,
                             use_random=True,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False,
                             max_overlap=None,
                             return_max_overlap=False):
    """

    **Generate Proposal Labels of Faster-RCNN**

    Given the bounding boxes produced by the GenerateProposalOp and the ground-truth,
    this operator samples foreground and background boxes and computes the loss targets.

    RpnRois are the output boxes of RPN processed by generate_proposal_op. These boxes
    are combined with the ground-truth boxes and sampled according to batch_size_per_im and fg_fraction.
    If an instance has a ground-truth overlap greater than fg_thresh, it is considered a foreground sample.
    If an instance has a ground-truth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
    it is considered a background sample.
    After all foreground and background boxes are chosen (the so-called Rois),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.

    For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
    Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.

    Args:
        rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
        is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicating whether a groundtruth is crowd. The data type must be int32.
        gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
        im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.

        batch_size_per_im(int): Batch size of rois per image. The data type must be int32.
        fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
        bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
        class_nums(int): Class number. The data type must be int32.
        use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): bbox regression uses class-agnostic mode, which only distinguishes fg and bg boxes.
        is_cascade_rcnn(bool): it will filter some bbox crossing the image's boundary when setting True.
        max_overlap(Variable): Maximum overlap between each proposal box and ground-truth.
        return_max_overlap(bool): Whether to return the maximum overlap between each sampled RoI and ground-truth.

    Returns:
        tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, max_overlap)``.

        - **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
        - **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
        - **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
        - **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
        - **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
        - **max_overlap**: 1-D LoDTensor with shape ``[P]``. P is the number of output ``rois``. The maximum overlap between each sampled RoI and ground-truth.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
                           rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
                           class_nums=10)
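            # Illustrative note: pass return_max_overlap=True to additionally
            # receive the max_overlap tensor described above as a sixth output.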

    """

    helper = LayerHelper('generate_proposal_labels', **locals())

    check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
                             'generate_proposal_labels')
    check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
                             'generate_proposal_labels')
    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
                             'generate_proposal_labels')
    if is_cascade_rcnn:
        assert max_overlap is not None, "Input max_overlap of generate_proposal_labels should not be None if is_cascade_rcnn is True"

    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
    labels_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    bbox_targets = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_inside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_outside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    max_overlap_with_gt = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)

    inputs = {
        'RpnRois': rpn_rois,
        'GtClasses': gt_classes,
        'IsCrowd': is_crowd,
        'GtBoxes': gt_boxes,
        'ImInfo': im_info,
    }
    if max_overlap is not None:
        inputs['MaxOverlap'] = max_overlap
    helper.append_op(
        type="generate_proposal_labels",
        inputs=inputs,
        outputs={
            'Rois': rois,
            'LabelsInt32': labels_int32,
            'BboxTargets': bbox_targets,
            'BboxInsideWeights': bbox_inside_weights,
            'BboxOutsideWeights': bbox_outside_weights,
            'MaxOverlapWithGT': max_overlap_with_gt
        },
        attrs={
            'batch_size_per_im': batch_size_per_im,
            'fg_fraction': fg_fraction,
            'fg_thresh': fg_thresh,
            'bg_thresh_hi': bg_thresh_hi,
            'bg_thresh_lo': bg_thresh_lo,
            'bbox_reg_weights': bbox_reg_weights,
            'class_nums': class_nums,
            'use_random': use_random,
            'is_cls_agnostic': is_cls_agnostic,
            'is_cascade_rcnn': is_cascade_rcnn
        })

    rois.stop_gradient = True
    labels_int32.stop_gradient = True
    bbox_targets.stop_gradient = True
    bbox_inside_weights.stop_gradient = True
    bbox_outside_weights.stop_gradient = True
    max_overlap_with_gt.stop_gradient = True

    if return_max_overlap:
        return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, max_overlap_with_gt
    return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights


def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                         labels_int32, num_classes, resolution):
    r"""

    **Generate Mask Labels for Mask-RCNN**

    Given the RoIs and the corresponding labels, this operator samples
    foreground RoIs. For each foreground RoI, the mask branch produces
    a :math:`K \\times M^{2}` dimensional output target, which encodes K binary
    masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.

    Please note the data format of the ground-truth segmentation. Assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.

        .. code-block:: python

            #[
            #  [[[229.14, 370.9, 229.14, 370.9, ...]],
            #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
            #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
            #]

            batch_masks = []
            for semgs in batch_semgs:
                gt_masks = []
                for semg in semgs:
                    gt_segm = []
                    for polys in semg:
                        gt_segm.append(np.array(polys).reshape(-1, 2))
                    gt_masks.append(gt_segm)
                batch_masks.append(gt_masks)
            
            
            place = fluid.CPUPlace()
            feeder = fluid.DataFeeder(place=place, feed_list=feeds)
            feeder.feed(batch_masks)

    Args:
        im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
            data type. N is the batch size, each element is
            [height, width, scale] of image. Image scale is
            target_size / original_size, target_size is the size after resize,
            original_size is the original image size.
        gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
            should be int. M is the total number of ground-truth, each
            element is a class label.
        is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
            as gt_classes, each element is a flag indicating whether a
            groundtruth is crowd.
        gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type, and its LoD level is 3.
            Usually users do not need to understand LoD;
            the users should return the correct data format in the reader.
            The LoD[0] represents the ground-truth objects number of
            each instance. LoD[1] represents the segmentation counts of each
            object. LoD[2] represents the polygon number of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            (x, y) coordinate points.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
            type. R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of original image.
        labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
            of int32. R is the same as it in `rois`. Each element represents
            a class label of a RoI.
        num_classes (int): Class number.
        resolution (int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable):  A 2D LoDTensor with shape [P, 4] and same data
        type as `rois`. P is the total number of sampled RoIs. Each element
        is a bounding box with [xmin, ymin, xmax, ymax] format in range of
        original image size.

        mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
        and int data type, each element represents the output mask RoI
        index with regard to input RoIs.

        mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
        data type, K is the classes number and M is the resolution of mask
        predictions. Each element represents the binary mask targets.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          im_info = fluid.data(name="im_info", shape=[None, 3],
              dtype="float32")
          gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
              dtype="float32", lod_level=1)
          is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
              dtype="float32", lod_level=1)
          gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
              dtype="float32", lod_level=3)
          # rois, roi_labels can be the output of
          # fluid.layers.generate_proposal_labels.
          rois = fluid.data(name="rois", shape=[None, 4],
              dtype="float32", lod_level=1)
          roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
              dtype="int32", lod_level=1)
          mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
              im_info=im_info,
              gt_classes=gt_classes,
              is_crowd=is_crowd,
              gt_segms=gt_masks,
              rois=rois,
              labels_int32=roi_labels,
              num_classes=81,
              resolution=14)
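          # Illustrative note: with num_classes=81 and resolution=14,
          # mask_int32 has shape [P, 81 * 14 * 14], one binary mask per class.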
    """

    helper = LayerHelper('generate_mask_labels', **locals())

    mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_has_mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)

    helper.append_op(
        type="generate_mask_labels",
        inputs={
            'ImInfo': im_info,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtSegms': gt_segms,
            'Rois': rois,
            'LabelsInt32': labels_int32
        },
        outputs={
            'MaskRois': mask_rois,
            'RoiHasMaskInt32': roi_has_mask_int32,
            'MaskInt32': mask_int32
        },
        attrs={'num_classes': num_classes,
               'resolution': resolution})

    mask_rois.stop_gradient = True
    roi_has_mask_int32.stop_gradient = True
    mask_int32.stop_gradient = True

    return mask_rois, roi_has_mask_int32, mask_int32


def generate_proposals(scores,
                       bbox_deltas,
                       im_info,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       return_rois_num=False,
                       name=None):
    """

    **Generate proposal Faster-RCNN**

    This operation proposes RoIs according to each box with their
    probability to be a foreground object, and
    the boxes can be calculated by anchors. bbox_deltas and scores
    to be an object are the output of RPN. Final proposals
    could be used to train detection net.

    For generating proposals, this operation performs following steps:

    1. Transposes and resizes scores and bbox_deltas in size of
       (H*W*A, 1) and (H*W*A, 4)
    2. Calculate box locations as proposals candidates. 
    3. Clip boxes to image
    4. Remove predicted boxes with small area. 
    5. Apply NMS to get final proposals as output.

    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes
            and scale is the ratio of network input size and original size.
            The data type can be float32 or float64.
        anchors(Variable):   A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and is unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(float): Number of total bboxes to be kept per
            image before NMS. The data type must be float32. `6000` by default.
        post_nms_top_n(float): Number of total bboxes to be kept per
            image after NMS. The data type must be float32. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When setting True, it will return a 1D Tensor with shape [N, ] that includes the RoI
            num of each image in one batch. The N is the image's num. For example, the tensor with values [4, 5]
            means the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the rcnn model.
            'False' by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                         im_info, anchors, variances)

    """
    if in_dygraph_mode():
        assert return_rois_num, "return_rois_num should be True in dygraph mode."
        attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
                 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta)
        rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals(
            scores, bbox_deltas, im_info, anchors, variances, *attrs)
        return rpn_rois, rpn_roi_probs, rpn_rois_num

    helper = LayerHelper('generate_proposals', **locals())

    check_variable_and_dtype(scores, 'scores', ['float32'],
                             'generate_proposals')
    check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
                             'generate_proposals')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'generate_proposals')
    check_variable_and_dtype(anchors, 'anchors', ['float32'],
                             'generate_proposals')
    check_variable_and_dtype(variances, 'variances', ['float32'],
                             'generate_proposals')

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype)
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype)
    outputs = {
        'RpnRois': rpn_rois,
        'RpnRoiProbs': rpn_roi_probs,
    }
    if return_rois_num:
        rpn_rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rpn_rois_num.stop_gradient = True
        outputs['RpnRoisNum'] = rpn_rois_num

    helper.append_op(
        type="generate_proposals",
        inputs={
            'Scores': scores,
            'BboxDeltas': bbox_deltas,
            'ImInfo': im_info,
            'Anchors': anchors,
            'Variances': variances
        },
        attrs={
            'pre_nms_topN': pre_nms_top_n,
            'post_nms_topN': post_nms_top_n,
            'nms_thresh': nms_thresh,
            'min_size': min_size,
            'eta': eta
        },
        outputs=outputs)
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True

    if return_rois_num:
        return rpn_rois, rpn_roi_probs, rpn_rois_num
    else:
        return rpn_rois, rpn_roi_probs


def box_clip(input, im_info, name=None):
    """

    Clip the box into the size given by im_info.
    For each input box, the formula is given as follows:

    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0) 
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)
    
    where im_w and im_h are computed from im_info:
 
    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(width / scale)
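
    For example (illustrative values), if im_info is [400., 600., 2.], then
    im_h = 200 and im_w = 300, so an input box [-10., 50., 320., 180.] is
    clipped to [0., 50., 299., 180.].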

    Args:
        input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
            the last dimension is 4 and data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
            (height, width, scale) representing the information of image.
            Height and width are the input sizes and scale is the ratio of network input
            size and original size. The data type is float32 or float64.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or float64.
        The shape is same as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[-1, 3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'box_clip')

    helper = LayerHelper("box_clip", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)
    inputs = {"Input": input, "ImInfo": im_info}
    helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})

    return output


def retinanet_detection_output(bboxes,
                               scores,
                               anchors,
                               im_info,
                               score_threshold=0.05,
                               nms_top_k=1000,
                               keep_top_k=100,
                               nms_threshold=0.3,
                               nms_eta=1.0):
    """
    **Detection Output Layer for the detector RetinaNet.**

    In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
    `FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions; this OP gets the detection results by
    performing the following steps:

    1. For each FPN level, decode box predictions according to the anchor
       boxes from at most :attr:`nms_top_k` top-scoring predictions after
       thresholding detector confidence at :attr:`score_threshold`.
    2. Merge top predictions from all levels and apply multi-class non 
       maximum suppression (NMS) on them to get the final detections.

    Args:
        bboxes(List): A list of Tensors from multiple FPN levels represents
            the location prediction for all anchor boxes. Each element is
            a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
            batch size, :math:`Mi` is the number of bounding boxes from
            :math:`i`-th FPN level and each bounding box has four coordinate
            values and the layout is [xmin, ymin, xmax, ymax]. The data type
            of each element is float32 or float64.
        scores(List): A list of Tensors from multiple FPN levels represents
            the category prediction for all anchor boxes. Each element is a
            3-D Tensor with shape :math:`[N, Mi, C]`,  :math:`N` is the batch
            size, :math:`C` is the class number (**excluding background**),
            :math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
            level. The data type of each element is float32 or float64.
        anchors(List): A list of Tensors from multiple FPN levels represents
            the locations of all anchor boxes. Each element is a 2-D Tensor
            with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
            boxes from :math:`i`-th FPN level, and each bounding box has four
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
            The data type of each element is float32 or float64.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector which are the height and width
            of the network input along with the factor scaling the origin image to
            the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score before NMS, default value is set to 0.05.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS, default value is set to
            1000.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. Default value is set to 100, -1 means keeping all bounding
            boxes after NMS step.
        nms_threshold(float): The Intersection-over-Union(IoU) threshold used to 
            filter out boxes in NMS.
        nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
            Default value is set to 1., which represents the value of
            :attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
            to be lower than 1. and the value of :attr:`nms_threshold` is set to
            be higher than 0.5, every time a bounding box is filtered out,
            the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
            = :attr:`nms_threshold` * :attr:`nms_eta`  will not be stopped until
            the actual value of :attr:`nms_threshold` is lower than or equal to
            0.5.

    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.

    Returns:
        Variable(The data type is float32 or float64):
            The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            :math:`No` is the total number of detections in this mini-batch.
            The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
            results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
            has no detected results. If all images have no detected results,
            LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           bboxes_low = fluid.data(
               name='bboxes_low', shape=[1, 44, 4], dtype='float32')
           bboxes_high = fluid.data(
               name='bboxes_high', shape=[1, 11, 4], dtype='float32')
           scores_low = fluid.data(
               name='scores_low', shape=[1, 44, 10], dtype='float32')
           scores_high = fluid.data(
               name='scores_high', shape=[1, 11, 10], dtype='float32')
           anchors_low = fluid.data(
               name='anchors_low', shape=[44, 4], dtype='float32')
           anchors_high = fluid.data(
               name='anchors_high', shape=[11, 4], dtype='float32')
           im_info = fluid.data(
               name="im_info", shape=[1, 3], dtype='float32')
           nmsed_outs = fluid.layers.retinanet_detection_output(
               bboxes=[bboxes_low, bboxes_high],
               scores=[scores_low, scores_high],
               anchors=[anchors_low, anchors_high],
               im_info=im_info,
               score_threshold=0.05,
               nms_top_k=1000,
               keep_top_k=100,
               nms_threshold=0.45,
               nms_eta=1.0)
    """

    check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
    for i, bbox in enumerate(bboxes):
        check_variable_and_dtype(bbox, 'bbox{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_type(scores, 'scores', (list), 'retinanet_detection_output')
    for i, score in enumerate(scores):
        check_variable_and_dtype(score, 'score{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
    for i, anchor in enumerate(anchors):
        check_variable_and_dtype(anchor, 'anchor{}'.format(i),
                                 ['float32', 'float64'],
                                 'retinanet_detection_output')
    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
                             'retinanet_detection_output')

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores'))
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
        },
        outputs={'Out': output})
    output.stop_gradient = True
    return output


def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=0,
                   name=None):
    """

    **Multiclass NMS**
    
    This operator is to do multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes that have high scores larger than score_threshold, if providing this
    threshold, then selects the largest nms_top_k confidences scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have high IOU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on parameters of nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k number of total bboxes are to be kept
    per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4)  which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)

            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.score = (0.3, 0.3, 0.1)

            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0


        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],    
                         [2, 0.4, 2.0, 3.0, 7.0, 5.0]]
                         
            Out format is (label, confidence, xmin, ymin, xmax, ymax)
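
    The IoU of 4/11 quoted above can be checked with a few lines of plain
    Python. This is only an illustrative sketch (the ``iou`` helper below is
    hypothetical, not part of this API):

    .. code-block:: python

        def iou(a, b):
            # a, b: (xmin, ymin, xmax, ymax)
            iw = max(0., min(a[2], b[2]) - max(a[0], b[0]))
            ih = max(0., min(a[3], b[3]) - max(a[1], b[1]))
            inter = iw * ih
            union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
            return inter / union

        print(iou((2.0, 3.0, 7.0, 5.0), (3.0, 4.0, 8.0, 5.0)))  # 4/11 ~= 0.364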
    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8, 16, 24, 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4].
                           M is the number of bounding boxes, C is the
                           class number. The data type is float32 or float64.
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           the number of bounding boxes. For each category there
                           are in total M scores which correspond to the M
                           bounding boxes. Please note, M is equal to the 2nd
                           dimension of BBoxes. The data type is float32 or float64.
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bboxes, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4]. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
        nms_threshold (float): The IOU threshold used in the NMS step. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS threshold decay. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after the NMS
                          step. -1 means keeping all bboxes after the NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or a 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If there are no detected boxes for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1})

    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None, 81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None, 81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'multiclass_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'multiclass_nms')
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
    check_type(normalized, 'normalized', bool, 'multiclass_nms')
    check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
    check_type(background_label, 'background_label', int, 'multiclass_nms')

    helper = LayerHelper('multiclass_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def locality_aware_nms(bboxes,
                       scores,
                       score_threshold,
                       nms_top_k,
                       keep_top_k,
                       nms_threshold=0.3,
                       normalized=True,
                       nms_eta=1.,
                       background_label=-1,
                       name=None):
    """
    **Locality-Aware NMS**

    `Locality-Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware
    non maximum suppression (LANMS) on boxes and scores.

    First, this operator merges boxes and scores according to their IOU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes whose scores are larger than score_threshold
    (if this threshold is provided), and keeps at most the nms_top_k highest-scoring
    boxes if nms_top_k is larger than -1. Then this operator prunes away boxes that
    have a high IOU overlap with already selected boxes by adaptive threshold NMS
    based on the parameters nms_threshold and nms_eta.

    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.
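
    For intuition, the box merging can be sketched in a few lines of NumPy. This
    follows the score-weighted merge described in the EAST paper for 8-point
    (quadrangle) boxes; the ``weighted_merge`` helper is illustrative only and is
    not part of this API:

    .. code-block:: python

        import numpy as np

        def weighted_merge(g, p):
            # g, p: arrays of [x1, y1, x2, y2, x3, y3, x4, y4, score]
            merged = np.zeros(9)
            merged[:8] = (g[8] * g[:8] + p[8] * p[:8]) / (g[8] + p[8])  # score-weighted coords
            merged[8] = g[8] + p[8]                                      # accumulated score
            return merged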

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8, 16, 24, 32]
                           represents the predicted locations of M bounding
                           bboxes, N is the batch size. Each bounding box
                           has four coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
                           predicted confidence predictions. N is the batch
                           size, C is the class number, M is the number of bounding
                           boxes. Currently only 1 class is supported. For each
                           category there are in total M scores which correspond to
                           the M bounding boxes. Please note, M is equal to the 2nd
                           dimension of BBoxes. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: -1
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after the NMS
                          step. -1 means keeping all bboxes after the NMS step.
        nms_threshold (float): The IOU threshold used in the NMS step. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS threshold decay. Default: 1.0
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
                          Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If there are no detected boxes for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1}). The data type is float32 or float64.


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
                                      dtype='float32')
            scores = fluid.data(name='scores', shape=[None, 1, 81],
                                      dtype='float32')
            out = fluid.layers.locality_aware_nms(bboxes=boxes,
                                              scores=scores,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
                             'locality_aware_nms')
    check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
                             'locality_aware_nms')
    check_type(background_label, 'background_label', int, 'locality_aware_nms')
    check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
    check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
    check_type(normalized, 'normalized', bool, 'locality_aware_nms')

    shape = scores.shape
    assert len(shape) == 3, "dim size of scores must be 3"
    assert shape[
        1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"

    helper = LayerHelper('locality_aware_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    out = {'Out': output}

    helper.append_op(
        type="locality_aware_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def matrix_nms(bboxes,
               scores,
               score_threshold,
               post_threshold,
               nms_top_k,
               keep_top_k,
               use_gaussian=False,
               gaussian_sigma=2.,
               background_label=0,
               normalized=True,
               return_index=False,
               name=None):
    """
    **Matrix NMS**

    This operator does matrix non maximum suppression (NMS).

    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top nms_top_k candidates are
    kept if nms_top_k is larger than -1. Scores of the remaining candidates are
    then decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.
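
    For intuition, the decay step can be sketched in NumPy following the Matrix
    NMS formulation from the SOLOv2 paper. This is a simplified, single-class
    sketch (the ``matrix_nms_decay`` helper is illustrative only and is not part
    of this API); boxes are assumed to be pre-sorted by descending score:

    .. code-block:: python

        import numpy as np

        def matrix_nms_decay(iou, use_gaussian=False, gaussian_sigma=2.0):
            # iou: [M, M] pairwise IoU matrix of boxes sorted by descending score
            iou = np.triu(iou, k=1)              # keep iou(i, j) only where box i scores higher than box j
            iou_cmax = iou.max(axis=0)[:, None]  # for each suppressor i: max IoU with a higher-scoring box
            if use_gaussian:
                decay = np.exp(-(iou ** 2 - iou_cmax ** 2) / gaussian_sigma)
            else:
                decay = (1.0 - iou) / (1.0 - iou_cmax)
            return decay.min(axis=0)             # multiplicative decay applied to each box's score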

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           number of bounding boxes. For each category there
                           are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score.
        post_threshold (float): Threshold to filter out bounding boxes with
                                low confidence score AFTER decaying.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        use_gaussian (bool): Use Gaussian as the decay function. Default: False
        gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether return selected index. Default: False
        name(str): Name of the matrix nms op. Default: None.

    Returns:
        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, one Variable(Out) is returned.

        Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
             detection results.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1})

        Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
            selected indices, which are absolute values cross batches.

    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.matrix_nms(bboxes=boxes,
                                          scores=scores,
                                          background_label=0,
                                          score_threshold=0.5,
                                          post_threshold=0.1,
                                          nms_top_k=400,
                                          keep_top_k=200,
                                          normalized=False)
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'matrix_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'matrix_nms')
    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
    check_type(normalized, 'normalized', bool, 'matrix_nms')
    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
    check_type(background_label, 'background_label', int, 'matrix_nms')

    helper = LayerHelper('matrix_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int')
    helper.append_op(
        type="matrix_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'post_threshold': post_threshold,
            'nms_top_k': nms_top_k,
            'gaussian_sigma': gaussian_sigma,
            'use_gaussian': use_gaussian,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output,
                 'Index': index})
    output.stop_gradient = True

    if return_index:
        return output, index
    else:
        return output


def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             rois_num=None,
                             name=None):
    r"""
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of the proposals, the referring scale and the
    referring level. Besides, to restore the order of proposals, we return an
    array which indicates the original index of the rois in the current proposals.
    To compute the FPN level for each roi, the formula is given as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

        level &= floor(\log_{2}(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
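
    For intuition, a small sketch in plain Python (hypothetical values, assuming a
    base-2 logarithm as in the FPN paper) of this level assignment:

    .. code-block:: python

        import math

        min_level, max_level = 2, 5
        refer_level, refer_scale = 4, 224

        roi = [0.0, 0.0, 112.0, 112.0]  # xmin, ymin, xmax, ymax
        roi_scale = math.sqrt((roi[2] - roi[0]) * (roi[3] - roi[1]))   # 112.0
        level = math.floor(math.log2(roi_scale / refer_scale) + refer_level)
        level = min(max_level, max(min_level, level))  # clip to the valid range
        print(level)  # 3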

    Args:

        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of the FPN layer where the proposals come
            from.
        max_level(int32): The highest level of the FPN layer where the proposals
            come from.
        refer_level(int32): The referring level of the FPN layer with specified scale.
        refer_scale(int32): The referring scale of the FPN layer with specified level.
        rois_num(Tensor): 1-D Tensor contains the number of RoIs in each image.
            The shape is [B] and data type is int32. B is the number of images.
            If it is not None then return a list of 1-D Tensor. Each element
            is the output RoIs' number of each image on the corresponding level
            and the shape is [B]. None by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set a name, and it
            is None by default.

    Returns:
        Tuple:

        multi_rois(List): A list of 2-D LoDTensor with shape [M, 4]
        and data type of float32 or float64. The length is
        max_level-min_level+1. The proposals in each FPN level.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
        the number of total rois. The data type is int32. It is
        used to restore the order of fpn_rois.

        rois_num_per_level(List): A list of 1-D Tensor and each Tensor is
        the RoIs' number in each image on the corresponding level. The shape
        is [B] and data type of int32. B is the number of images.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """
    num_lvl = max_level - min_level + 1

    if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
        attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
                 refer_level, 'refer_scale', refer_scale)
        multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
            fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
        return multi_rois, restore_ind, rois_num_per_level

    check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
                             'distribute_fpn_proposals')
    helper = LayerHelper('distribute_fpn_proposals', **locals())
    dtype = helper.input_dtype('fpn_rois')
    multi_rois = [
        helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
    ]

    restore_ind = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {'FpnRois': fpn_rois}
    outputs = {
        'MultiFpnRois': multi_rois,
        'RestoreIndex': restore_ind,
    }

    if rois_num is not None:
        inputs['RoisNum'] = rois_num
        rois_num_per_level = [
            helper.create_variable_for_type_inference(dtype='int32')
            for i in range(num_lvl)
        ]
        outputs['MultiLevelRoIsNum'] = rois_num_per_level

    helper.append_op(
        type='distribute_fpn_proposals',
        inputs=inputs,
        outputs=outputs,
        attrs={
            'min_level': min_level,
            'max_level': max_level,
            'refer_level': refer_level,
            'refer_scale': refer_scale
        })
    if rois_num is not None:
        return multi_rois, restore_ind, rois_num_per_level
    return multi_rois, restore_ind


@templatedoc()
def box_decoder_and_assign(prior_box,
                           prior_box_var,
                           target_box,
                           box_score,
                           box_clip,
                           name=None):
    """
    ${comment}

    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set a name, and it
            is None by default.

    Returns:
        Tuple:

        decode_box(${decode_box_type}): ${decode_box_comment}

        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
                name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4*81], dtype='float32')
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32')
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)

    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_decoder_and_assign')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_decoder_and_assign')
    check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
                             'box_decoder_and_assign')
    helper = LayerHelper("box_decoder_and_assign", **locals())

    decoded_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
    output_assign_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    helper.append_op(
        type="box_decoder_and_assign",
        inputs={
            "PriorBox": prior_box,
            "PriorBoxVar": prior_box_var,
            "TargetBox": target_box,
            "BoxScore": box_score
        },
        attrs={"box_clip": box_clip},
        outputs={
            "DecodeBox": decoded_box,
            "OutputAssignBox": output_assign_box
        })
    return decoded_box, output_assign_box


def collect_fpn_proposals(multi_rois,
                          multi_scores,
                          min_level,
                          max_level,
                          post_nms_top_n,
                          rois_num_per_level=None,
                          name=None):
    """
    **This OP only supports LoDTensor as input**. Concat multi-level RoIs 
    (Region of Interest) and select N RoIs with respect to multi_scores. 
    This operation performs the following steps:

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id
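
    For intuition, a simplified NumPy sketch of steps 1-4 for a single image
    (hypothetical shapes, illustrative only; the real op additionally re-sorts
    the selected RoIs by batch_id across images):

    .. code-block:: python

        import numpy as np

        # hypothetical per-level RoIs and scores for one image (4 FPN levels)
        multi_rois = [np.random.rand(n, 4) for n in (30, 20, 10, 5)]
        multi_scores = [np.random.rand(n, 1) for n in (30, 20, 10, 5)]
        post_nms_top_n = 16

        rois = np.concatenate(multi_rois, axis=0)        # step 2: concat all levels
        scores = np.concatenate(multi_scores, axis=0).ravel()
        keep = np.argsort(-scores)[:post_nms_top_n]      # step 3: take the top scores
        fpn_rois = rois[keep]                            # step 4: gather the kept RoIs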

    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D
            LoDTensor with shape [N, 4] and data type is float32 or float64,
            N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Element in list
            is 2-D LoDTensor with shape [N, 1] and data type is float32 or
            float64, N is the number of RoIs.
        min_level(int): The lowest level of FPN layer to collect.
        max_level(int): The highest level of FPN layer to collect.
        post_nms_top_n(int): The number of selected RoIs.
        rois_num_per_level(list, optional): The List of RoIs' numbers.
            Each element is a 1-D Tensor which contains the RoIs' number of each
            image on each level and the shape is [B] and data type is
            int32, B is the number of images. If it is not None then return
            a 1-D Tensor contains the output RoIs' number of each image and
            the shape is [B]. Default: None
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set a name, and it
            is None by default.

    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
        float32 or float64. Selected RoIs.

        rois_num(Tensor): 1-D Tensor which contains the RoIs' number of each
        image. The shape is [B] and data type is int32. B is the number of
        images.

    Examples:
        .. code-block:: python
           
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))

            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois, 
                multi_scores=multi_scores,
                min_level=2, 
                max_level=5, 
                post_nms_top_n=2000)
    """
    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
    num_lvl = max_level - min_level + 1
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]

    if in_dygraph_mode():
        assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
        attrs = ('post_nms_topN', post_nms_top_n)
        output_rois, rois_num = core.ops.collect_fpn_proposals(
            input_rois, input_scores, rois_num_per_level, *attrs)
        return output_rois, rois_num

    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
                'collect_fpn_proposals')
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True

    inputs = {
        'MultiLevelRois': input_rois,
        'MultiLevelScores': input_scores,
    }
    outputs = {'FpnRois': output_rois}
    if rois_num_per_level is not None:
        inputs['MultiLevelRoIsNum'] = rois_num_per_level
        rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rois_num.stop_gradient = True
        outputs['RoisNum'] = rois_num
    helper.append_op(
        type='collect_fpn_proposals',
        inputs=inputs,
        outputs=outputs,
        attrs={'post_nms_topN': post_nms_top_n})
    if rois_num_per_level is not None:
        return output_rois, rois_num
    return output_rois