#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

import paddle

from .layer_function_generator import templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable, _non_static_mode, static_only, in_dygraph_mode
from .. import core
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
import math
import numpy as np
from functools import reduce
from ..data_feeder import (
    convert_dtype,
    check_variable_and_dtype,
    check_type,
    check_dtype,
)
from paddle.utils import deprecated
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'prior_box',
    'density_prior_box',
    'multi_box_head',
    'bipartite_match',
    'target_assign',
    'detection_output',
    'ssd_loss',
    'rpn_target_assign',
    'retinanet_target_assign',
    'sigmoid_focal_loss',
    'anchor_generator',
    'roi_perspective_transform',
    'generate_proposal_labels',
    'generate_proposals',
    'generate_mask_labels',
    'iou_similarity',
    'box_coder',
    'polygon_box_transform',
    'yolov3_loss',
    'yolo_box',
    'box_clip',
    'multiclass_nms',
    'locality_aware_nms',
    'matrix_nms',
    'retinanet_detection_output',
    'distribute_fpn_proposals',
    'box_decoder_and_assign',
    'collect_fpn_proposals',
]


def retinanet_target_assign(
    bbox_pred,
    cls_logits,
    anchor_box,
    anchor_var,
    gt_boxes,
    gt_labels,
    is_crowd,
    im_info,
    num_classes=1,
    positive_overlap=0.5,
    negative_overlap=0.4,
):
    r"""
    **Target Assign Layer for the detector RetinaNet.**

    This OP finds out positive and negative samples from all anchors
    for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
    assigns a target label for classification and a target location for
    regression to each sample, and then takes out the parts belonging to
    positive and negative samples from the category prediction ( :attr:`cls_logits` )
    and the location prediction ( :attr:`bbox_pred` ) of all anchors.

    The rules for selecting positive and negative samples are as follows:

    1. An anchor is assigned to a ground-truth box when it has the highest IoU
    overlap with that ground-truth box.

    2. An anchor is assigned to a ground-truth box when it has an IoU overlap
    higher than :attr:`positive_overlap` with any ground-truth box.

    3. An anchor is assigned to the background when its IoU overlap is lower than
    :attr:`negative_overlap` for all ground-truth boxes.

    4. Anchors that do not meet the above conditions do not participate in
    the training process.

    RetinaNet predicts a :math:`C`-vector for classification and a 4-vector for box
    regression for each anchor, hence the target label for each positive (or negative)
    sample is a :math:`C`-vector and the target location for each positive sample
    is a 4-vector. For a positive sample, if the category of its assigned
    ground-truth box is class :math:`i`, the corresponding entry in its length-:math:`C`
    label vector is set to 1 and all other entries are set to 0; its box
    regression targets are computed as the offsets between itself and its assigned
    ground-truth box. For a negative sample, all entries in its length-:math:`C`
    label vector are set to 0, and box regression targets are omitted because
    negative samples do not participate in the training process of location
    regression.

    After the assignment, the part belonging to positive and negative samples is
    taken out from the category prediction ( :attr:`cls_logits` ), and the part
    belonging to positive samples is taken out from the location
    prediction ( :attr:`bbox_pred` ).
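
    As an illustration of the target construction described above, a minimal
    NumPy sketch with hypothetical values (not this OP's internal
    implementation):

    .. code-block:: python

        import numpy as np

        C = 3         # number of categories, excluding background
        gt_class = 2  # class of the assigned ground-truth box, in [1, C]

        positive_label = np.zeros(C)
        positive_label[gt_class - 1] = 1.0   # one-hot at the assigned class
        negative_label = np.zeros(C)         # all zeros for a negative sample
        # positive_label -> [0., 1., 0.], negative_label -> [0., 0., 0.]
        # Location targets exist only for positive samples: the encoded
        # offsets between the anchor and its assigned ground-truth box.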

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
            the predicted locations of all anchors. :math:`N` is the batch size (the
            number of images in a mini-batch), :math:`M` is the number of all anchors
            of one image, and each anchor has 4 coordinate values. The data type of
            :attr:`bbox_pred` is float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
            the predicted categories of all anchors. :math:`N` is the batch size,
            :math:`M` is the number of all anchors of one image, and :math:`C` is
            the number of categories (**Notice: excluding background**). The data type
            of :attr:`cls_logits` is float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
            the locations of all anchors. :math:`M` is the number of all anchors of
            one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
            :math:`[xmin, ymin]` is the left top coordinate of the anchor box,
            :math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
            The data type of :attr:`anchor_box` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator`
            for the generation of :attr:`anchor_box`.
        anchor_var(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents the expanded
            factors of anchor locations used in the loss function. :math:`M` is the number of
            all anchors of one image, and each anchor possesses a 4-vector expanded factor.
            The data type of :attr:`anchor_var` is float32 or float64. Please refer
            to the OP :ref:`api_fluid_layers_anchor_generator`
            for the generation of :attr:`anchor_var`.
        gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
            locations of all ground-truth boxes. :math:`G` is the total number of
            all ground-truth boxes in a mini-batch, and each ground-truth box has 4
            coordinate values. The data type of :attr:`gt_boxes` is float32 or
            float64.
        gt_labels(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
            categories of all ground-truth boxes, and the values are in the range of
            :math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
            in a mini-batch, and each ground-truth box has one category. The data type
            of :attr:`gt_labels` is int32.
        is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
            indicates whether a ground-truth box is a crowd. If the value is 1, the
            corresponding box is a crowd and is ignored during training. :math:`G` is
            the total number of all ground-truth boxes in a mini-batch. The data type
            of :attr:`is_crowd` is int32.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size; the size
            information of each image is a 3-vector consisting of the height and width
            of the network input along with the factor scaling the original image to
            the network input. The data type of :attr:`im_info` is float32.
        num_classes(int32): The number of categories for classification, the default
            value is 1.
        positive_overlap(float32): Minimum overlap required between an anchor
            and a ground-truth box for the anchor to be a positive sample, the default
            value is 0.5.
        negative_overlap(float32): Maximum overlap allowed between an anchor
            and a ground-truth box for the anchor to be a negative sample, the default
            value is 0.4. :attr:`negative_overlap` should be less than or equal to
            :attr:`positive_overlap`; if not, the actual value of
            :attr:`positive_overlap` is :attr:`negative_overlap`.

    Returns:
        A tuple with 6 Variables:

        **predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
        category prediction belonging to positive and negative samples. :math:`F`
        is the number of positive samples in a mini-batch, :math:`B` is the number
        of negative samples, and :math:`C` is the number of categories
        (**Notice: excluding background**). The data type of :attr:`predict_scores`
        is float32 or float64.

        **predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        location prediction belonging to positive samples. :math:`F` is the number
        of positive samples, and each sample has 4 coordinate values. The data type
        of :attr:`predict_location` is float32 or float64.

        **target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
        target labels for classification belonging to positive and negative
        samples. :math:`F` is the number of positive samples, :math:`B` is the
        number of negative samples, and each sample has one target category. The data
        type of :attr:`target_label` is int32.

        **target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
        target locations for box regression belonging to positive samples.
        :math:`F` is the number of positive samples, and each sample has 4
        coordinate values. The data type of :attr:`target_bbox` is float32 or
        float64.

        **bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
        represents whether a positive sample is a fake positive; if a positive
        sample is a fake positive, the corresponding entries in
        :attr:`bbox_inside_weight` are set to 0, otherwise 1. :math:`F` is the number
        of total positive samples in a mini-batch, and each sample has 4
        coordinate values. The data type of :attr:`bbox_inside_weight` is float32
        or float64.

        **fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
        of positive samples. :math:`N` is the batch size. **Notice: The number
        of positive samples is used as the denominator of the later loss function;
        to avoid the denominator being zero, this OP adds 1
        to the actual number of positive samples of each image.** The data type of
        :attr:`fg_num` is int32.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
                            dtype='float32')
          cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
                            dtype='float32')
          anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
                            dtype='float32')
          anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
                            dtype='float32')
          gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
                            dtype='float32')
          gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
                            dtype='int32')
          is_crowd = fluid.data(name='is_crowd', shape=[1],
                            dtype='int32')
          im_info = fluid.data(name='im_info', shape=[1, 3],
                            dtype='float32')
          score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \
                fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
                anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)

    """

    check_variable_and_dtype(
        bbox_pred,
        'bbox_pred',
        ['float32', 'float64'],
        'retinanet_target_assign',
    )
    check_variable_and_dtype(
        cls_logits,
        'cls_logits',
        ['float32', 'float64'],
        'retinanet_target_assign',
    )
    check_variable_and_dtype(
        anchor_box,
        'anchor_box',
        ['float32', 'float64'],
        'retinanet_target_assign',
    )
    check_variable_and_dtype(
        anchor_var,
        'anchor_var',
        ['float32', 'float64'],
        'retinanet_target_assign',
    )
    check_variable_and_dtype(
        gt_boxes, 'gt_boxes', ['float32', 'float64'], 'retinanet_target_assign'
    )
    check_variable_and_dtype(
        gt_labels, 'gt_labels', ['int32'], 'retinanet_target_assign'
    )
    check_variable_and_dtype(
        is_crowd, 'is_crowd', ['int32'], 'retinanet_target_assign'
    )
    check_variable_and_dtype(
        im_info, 'im_info', ['float32', 'float64'], 'retinanet_target_assign'
    )

    helper = LayerHelper('retinanet_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype
    )
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype
    )
    fg_num = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="retinanet_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'GtLabels': gt_labels,
            'IsCrowd': is_crowd,
            'ImInfo': im_info,
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
            'ForegroundNumber': fg_num,
        },
        attrs={
            'positive_overlap': positive_overlap,
            'negative_overlap': negative_overlap,
        },
    )

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True
    fg_num.stop_gradient = True

    cls_logits = paddle.reshape(x=cls_logits, shape=(-1, num_classes))
    bbox_pred = paddle.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = paddle.gather(cls_logits, score_index)
    predicted_bbox_pred = paddle.gather(bbox_pred, loc_index)

    return (
        predicted_cls_logits,
        predicted_bbox_pred,
        target_label,
        target_bbox,
        bbox_inside_weight,
        fg_num,
    )


def rpn_target_assign(
    bbox_pred,
    cls_logits,
    anchor_box,
    anchor_var,
    gt_boxes,
    is_crowd,
    im_info,
    rpn_batch_size_per_im=256,
    rpn_straddle_thresh=0.0,
    rpn_fg_fraction=0.5,
    rpn_positive_overlap=0.7,
    rpn_negative_overlap=0.3,
    use_random=True,
):
    """
    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**

    Given the Intersection-over-Union (IoU) overlap between anchors and
    ground-truth boxes, this layer assigns classification and regression
    targets to each anchor; these target labels are used to train the RPN.
    The classification target is a binary class label (of being an object or
    not). Following the Faster-RCNN paper, positive labels are assigned to
    two kinds of anchors: (i) the anchor/anchors with the highest IoU overlap
    with a ground-truth box, or (ii) an anchor that has an IoU overlap higher
    than rpn_positive_overlap (0.7) with any ground-truth box. Note that a
    single ground-truth box may assign positive labels to multiple anchors.
    An anchor is labeled negative when its IoU ratio is lower than
    rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
    neither positive nor negative do not contribute to the training
    objective. The regression targets are the encoded ground-truth boxes
    associated with the positive anchors.
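
    As an illustration of the labeling rules above, a minimal NumPy sketch
    (hypothetical IoU values; not this layer's internal implementation):

    .. code-block:: python

        import numpy as np

        # iou[i, j]: IoU between anchor i and ground-truth box j (hypothetical)
        iou = np.array([[0.10, 0.80],    # rule (ii): 0.80 >= 0.7 -> positive
                        [0.60, 0.20],    # rule (i): highest IoU for gt box 0 -> positive
                        [0.20, 0.10],    # all IoU < 0.3 -> negative
                        [0.50, 0.10]])   # in between -> ignored
        labels = np.full(len(iou), -1)        # -1: does not contribute
        labels[iou.max(axis=1) < 0.3] = 0     # negative (background)
        labels[iou.max(axis=1) >= 0.7] = 1    # rule (ii) positive
        labels[iou.argmax(axis=0)] = 1        # rule (i) positive
        # labels -> [1, 1, 0, -1]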

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence predictions. N is the batch size, 1 is the
            foreground/background sigmoid score, M is the number of bounding boxes.
            The data type can be float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box;
            if the input is an image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box. The data type can be float32 or float64.
        anchor_var(Variable): A 2-D Tensor with shape [M, 4] holds expanded
            variances of anchors. The data type can be float32 or float64.
        gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2-D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of the mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth
            box is a crowd. The data type must be int32.
        im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
            3 is the height, width and scale.
        rpn_batch_size_per_im(int): Total number of RPN examples per image.
                                    The data type must be int32.
        rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
            by straddle_thresh pixels. The data type must be float32.
        rpn_fg_fraction(float): Target fraction of the RoI minibatch that is labeled
            foreground (i.e. class > 0); the 0-th class is background. The data type must be float32.
        rpn_positive_overlap(float): Minimum overlap required between an anchor
            and a ground-truth box for the (anchor, gt box) pair to be a positive
            example. The data type must be float32.
        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
            and a ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.

    Returns:
        tuple:
        A tuple(predicted_scores, predicted_location, target_label,
        target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN.
        The target_label and target_bbox are the ground truth,
        respectively. The predicted_location is a 2-D Tensor with shape
        [F, 4], and the shape of target_bbox is the same as the shape of
        the predicted_location; F is the number of foreground
        anchors. The predicted_scores is a 2-D Tensor with shape
        [F + B, 1], and the shape of target_label is the same as the shape
        of the predicted_scores; B is the number of background
        anchors. F and B depend on the input of this operator.
        bbox_inside_weight represents whether the predicted location is a fake
        foreground or not, and its shape is [F, 4].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
            cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
            anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
            anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
                bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)

    """

    helper = LayerHelper('rpn_target_assign', **locals())

    check_variable_and_dtype(
        bbox_pred, 'bbox_pred', ['float32', 'float64'], 'rpn_target_assign'
    )
    check_variable_and_dtype(
        cls_logits, 'cls_logits', ['float32', 'float64'], 'rpn_target_assign'
    )
    check_variable_and_dtype(
        anchor_box, 'anchor_box', ['float32', 'float64'], 'rpn_target_assign'
    )
    check_variable_and_dtype(
        anchor_var, 'anchor_var', ['float32', 'float64'], 'rpn_target_assign'
    )
    check_variable_and_dtype(
        gt_boxes, 'gt_boxes', ['float32', 'float64'], 'rpn_target_assign'
    )
    check_variable_and_dtype(
        is_crowd, 'is_crowd', ['int32'], 'rpn_target_assign'
    )
    check_variable_and_dtype(
        im_info, 'im_info', ['float32', 'float64'], 'rpn_target_assign'
    )

    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype
    )
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype
    )
    helper.append_op(
        type="rpn_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'IsCrowd': is_crowd,
            'ImInfo': im_info,
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
        },
        attrs={
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random,
        },
    )

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True

    cls_logits = paddle.reshape(x=cls_logits, shape=(-1, 1))
    bbox_pred = paddle.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = paddle.gather(cls_logits, score_index)
    predicted_bbox_pred = paddle.gather(bbox_pred, loc_index)

    return (
        predicted_cls_logits,
        predicted_bbox_pred,
        target_label,
        target_bbox,
        bbox_inside_weight,
    )


def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
    r"""
    :alias_main: paddle.nn.functional.sigmoid_focal_loss
    :alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
    :old_api: paddle.fluid.layers.sigmoid_focal_loss

    **Sigmoid Focal Loss Operator.**

    `Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
    class imbalance that exists in the training phase of many computer vision tasks. This OP computes
    the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
    measured between the sigmoid value and target label.

    The focal loss is given as follows:

    .. math::

        \mathop{loss_{i,\,j}}\limits_{i\in\mathbb{[0,\,N-1]},\,j\in\mathbb{[0,\,C-1]}}=\left\{
        \begin{array}{rcl}
        - \frac{1}{fg\_num} * \alpha * {(1 - \sigma(x_{i,\,j}))}^{\gamma} * \log(\sigma(x_{i,\,j})) & & {(j+1) = label_{i,\,0}} \\
        - \frac{1}{fg\_num} * (1 - \alpha) * {\sigma(x_{i,\,j})}^{\gamma} * \log(1 - \sigma(x_{i,\,j})) & & {(j+1) \neq label_{i,\,0}}
        \end{array} \right.

    We know that

    .. math::

        \sigma(x_j) = \frac{1}{1 + \exp(-x_j)}
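
    As a check on the formula above, a minimal NumPy sketch (hypothetical
    values, not this OP's implementation):

    .. code-block:: python

        import numpy as np

        gamma, alpha, fg_num = 2.0, 0.25, 2.0
        x = np.array([[1.0, -2.0]])   # logits of N=1 sample over C=2 classes
        label = np.array([[1]])       # class 1 -> column j=0 is the positive entry
        p = 1.0 / (1.0 + np.exp(-x))  # sigmoid
        positive = (np.arange(1, x.shape[1] + 1) == label)   # (j+1) == label
        loss = np.where(
            positive,
            -alpha * (1 - p) ** gamma * np.log(p),
            -(1 - alpha) * p ** gamma * np.log(1 - p),
        ) / fg_num
        # loss has shape [N, C], matching the return value of this OP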


    Args:
        x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
            all samples. :math:`N` is the number of all samples responsible for optimization in
            a mini-batch; for example, samples are anchor boxes for object detection and :math:`N`
            is the total number of positive and negative samples in a mini-batch, while samples are
            images for image classification and :math:`N` is the number of images in a mini-batch.
            :math:`C` is the number of classes (**Notice: excluding background**). The data type of
            :attr:`x` is float32 or float64.
        label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
            classification. :math:`N` is the number of all samples responsible for optimization in a
            mini-batch, each sample has one target category. The values for positive samples are in the
            range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
            is int32.
        fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
            mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
        gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
            set to 2.0.
        alpha(int|float): Hyper-parameter to balance the positive and negative examples. Default value
            is set to 0.25.

    Returns:
        Variable(the data type is float32 or float64):
            A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
            tensor :attr:`x`.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np
            import paddle.fluid as fluid

            num_classes = 10  # exclude background
            image_width = 16
            image_height = 16
            batch_size = 32
            max_iter = 20

            paddle.enable_static()
            def gen_train_data():
                x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
                                                    image_width)).astype('float64')
                label_data = np.random.randint(0, num_classes,
                                               (batch_size, 1)).astype('int32')
                return {"x": x_data, "label": label_data}


            def get_focal_loss(pred, label, fg_num, num_classes):
                pred = paddle.reshape(pred, [-1, num_classes])
                label = paddle.reshape(label, [-1, 1])
                label.stop_gradient = True
                loss = fluid.layers.sigmoid_focal_loss(
                    pred, label, fg_num, gamma=2.0, alpha=0.25)
                loss = paddle.sum(loss)
                return loss


            def build_model(mode='train'):
                x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
                output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
                output = fluid.layers.fc(
                    input=output,
                    size=num_classes,
                    # Notice: size is set to the number of target classes (excluding background)
                    # because the sigmoid activation will be done in the sigmoid_focal_loss op.
                    act=None)
                if mode == 'train':
                    label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
                    # Obtain the fg_num needed by the sigmoid_focal_loss op:
                    # 0 in label represents background, >=1 in label represents foreground.
                    # Find the elements in label which are greater than or equal to 1, then
                    # count the number of these elements.
                    data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
                    fg_label = fluid.layers.greater_equal(label, data)
                    fg_label = fluid.layers.cast(fg_label, dtype='int32')
                    fg_num = paddle.sum(fg_label, dtype='int32')
                    fg_num.stop_gradient = True
                    avg_loss = get_focal_loss(output, label, fg_num, num_classes)
                    return avg_loss
                else:
                    # During the evaluating or testing phase,
                    # the output of the final fc layer should be connected to a sigmoid layer.
                    pred = fluid.layers.sigmoid(output)
                    return pred


            loss = build_model('train')
            moment_optimizer = fluid.optimizer.MomentumOptimizer(
                learning_rate=0.001, momentum=0.9)
            moment_optimizer.minimize(loss)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            for i in range(max_iter):
                outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
                print(outs)
    """

    check_variable_and_dtype(
        x, 'x', ['float32', 'float64'], 'sigmoid_focal_loss'
    )
    check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
    check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')

    helper = LayerHelper("sigmoid_focal_loss", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="sigmoid_focal_loss",
        inputs={"X": x, "Label": label, "FgNum": fg_num},
        attrs={"gamma": gamma, 'alpha': alpha},
        outputs={"Out": out},
    )
    return out


def detection_output(
    loc,
    scores,
    prior_box,
    prior_box_var,
    background_label=0,
    nms_threshold=0.3,
    nms_top_k=400,
    keep_top_k=200,
    score_threshold=0.01,
    nms_eta=1.0,
    return_index=False,
):
    """

    Given the regression locations, classification confidences and prior boxes,
    calculate the detection outputs by performing the following steps:

    1. Decode the input bounding box predictions according to the prior boxes and
       regression locations.
    2. Get the final detection results by applying multi-class non-maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.
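
    As an illustration of step 2, a minimal single-class NMS sketch in NumPy
    (hypothetical inputs; the OP's multi-class version additionally handles the
    background label, score thresholding and top-k limits):

    .. code-block:: python

        import numpy as np

        def nms(boxes, scores, thresh):
            # boxes: [M, 4] in corner form, scores: [M]
            order = scores.argsort()[::-1]        # visit boxes by descending score
            keep = []
            while order.size > 0:
                i = order[0]
                keep.append(i)
                # IoU of the kept box with the remaining candidates
                xy1 = np.maximum(boxes[i, :2], boxes[order[1:], :2])
                xy2 = np.minimum(boxes[i, 2:], boxes[order[1:], 2:])
                wh = np.clip(xy2 - xy1, 0, None)
                inter = wh[:, 0] * wh[:, 1]
                areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
                iou = inter / (areas[i] + areas[order[1:]] - inter)
                order = order[1:][iou <= thresh]  # drop overlapping boxes
            return keep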

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding boxes. Data type should be
            float32 or float64. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence predictions. Data type should be float32
            or float64. N is the batch size, C is the
            class number, M is the number of bounding boxes.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax]. Data type
            should be float32 or float64.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M groups
            of variance. Data type should be float32 or float64.
        background_label(int): The index of the background label;
            the background label will be ignored. If set to -1, then all
            categories will be considered. Default: 0.
        nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold and before NMS. Default: 400.
        keep_top_k(int): Number of total bboxes to be kept per image after
            the NMS step. -1 means keeping all bboxes after the NMS step. Default: 200.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
            Default: 0.01.
        nms_eta(float): The parameter for adaptive NMS. It works only when the
            value is less than 1.0. Default: 1.0.
        return_index(bool): Whether to return the selected index. Default: False

    Returns:

        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, a tuple with one Variable (Out) is returned.

        Out (Variable): The detection outputs are a LoDTensor with shape [No, 6].
        Data type is the same as the input (loc). Each row has six values:
        [label, confidence, xmin, ymin, xmax, ymax]. `No` is
        the total number of detections in this mini-batch. For each instance,
        the offsets in the first dimension are called LoD, the offset number is
        N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
        detected results; if it is 0, the i-th image has no detected results.

        Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
        with shape [No, 1] represents the selected index, which is of Integer type.
        The index is the absolute value across batches. No is the same number
        as Out. If the index is used to gather other attributes such as age,
        one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
        N is the batch size and M is the number of boxes.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()

            pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
            loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
            nmsed_outs, index = fluid.layers.detection_output(scores=scores,
                                       loc=loc,
                                       prior_box=pb,
                                       prior_box_var=pbv,
                                       return_index=True)
    """
    helper = LayerHelper("detection_output", **locals())
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size',
    )
    scores = nn.softmax(input=scores)
    scores = paddle.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_variable_for_type_inference(
        dtype=decoded_box.dtype
    )
    if return_index:
        index = helper.create_variable_for_type_inference(dtype='int')
        helper.append_op(
            type="multiclass_nms2",
            inputs={'Scores': scores, 'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs, 'Index': index},
            attrs={
                'background_label': background_label,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': nms_eta,
            },
        )
        index.stop_gradient = True
    else:
        helper.append_op(
            type="multiclass_nms",
            inputs={'Scores': scores, 'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs},
            attrs={
                'background_label': background_label,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': nms_eta,
            },
        )
    nmsed_outs.stop_gradient = True
    if return_index:
        return nmsed_outs, index
    return nmsed_outs


@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
    """
    :alias_main: paddle.nn.functional.iou_similarity
    :alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
    :old_api: paddle.fluid.layers.iou_similarity

    ${comment}

    Args:
        x (Variable): ${x_comment}. The data type is float32 or float64.
        y (Variable): ${y_comment}. The data type is float32 or float64.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set true by default.
    Returns:
        Variable: ${out_comment}. The data type is the same as x.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)

            x = fluid.data(name='x', shape=[None, 4], dtype='float32')
            y = fluid.data(name='y', shape=[None, 4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)

            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)

            [out_iou] = exe.run(test_program,
                    fetch_list=iou,
                    feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
                                         [0., 0., 1.0, 1.0]]).astype('float32'),
                          'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
            # out_iou is [[0.2857143],
            #             [0.       ]] with shape: [2, 1]
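
            # The 0.2857143 above can be reproduced by hand from the IoU
            # definition (a NumPy sketch, assuming corner-form boxes):
            a = np.array([0.5, 0.5, 2.0, 2.0])   # [xmin, ymin, xmax, ymax]
            b = np.array([1.0, 1.0, 2.5, 2.5])
            iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
            ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
            inter = iw * ih
            union = (a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter
            print(inter / union)  # 0.2857143 (= 1.0 / 3.5)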
    """
    helper = LayerHelper("iou_similarity", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="iou_similarity",
        inputs={"X": x, "Y": y},
        attrs={"box_normalized": box_normalized},
        outputs={"Out": out},
    )
    return out


@templatedoc()
def box_coder(
    prior_box,
    prior_box_var,
    target_box,
    code_type="encode_center_size",
    box_normalized=True,
    name=None,
    axis=0,
):
    r"""

    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.

    The Encoding schema is described below:

    .. math::

        ox = (tx - px) / pw / pxv

        oy = (ty - py) / ph / pyv

        ow = \log(|tw / pw|) / pwv

        oh = \log(|th / ph|) / phv

    The Decoding schema is described below:

    .. math::

        ox = (pw * pxv * tx + px) - tw / 2

        oy = (ph * pyv * ty + py) - th / 2

        ow = \exp(pwv * tw) * pw + tw / 2

        oh = \exp(phv * th) * ph + th / 2

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
    the priorbox's (anchor) center coordinates, width and height. `pxv`,
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
    `ow`, `oh` denote the encoded/decoded coordinates, width and height.

    During Box Decoding, two modes for broadcast are supported. Say the target
    box has shape [N, M, 4], and the shape of the prior box can be [N, 4] or
    [M, 4]. Then the prior box will broadcast to the target box along the
    assigned axis.
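
    For intuition, a minimal NumPy sketch of `encode_center_size` on a single
    box pair, following the formulas above (hypothetical values):

    .. code-block:: python

        import numpy as np

        prior = np.array([0.0, 0.0, 10.0, 10.0])    # [xmin, ymin, xmax, ymax]
        target = np.array([2.0, 2.0, 8.0, 12.0])
        pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2     # prior_box_var

        def center_form(box):
            w, h = box[2] - box[0], box[3] - box[1]
            return box[0] + w / 2, box[1] + h / 2, w, h

        px, py, pw, ph = center_form(prior)
        tx, ty, tw, th = center_form(target)

        ox = (tx - px) / pw / pxv                   # 0.0
        oy = (ty - py) / ph / pyv                   # 2.0
        ow = np.log(abs(tw / pw)) / pwv             # ~ -2.554
        oh = np.log(abs(th / ph)) / phv             # 0.0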

    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
            [M, 4] which holds M boxes, and the data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
            left top coordinate of the anchor box; if the input is an image feature
            map, they are close to the origin of the coordinate system.
            [xmax, ymax] is the right bottom coordinate of the anchor box.
        prior_box_var(List|Variable|None): prior_box_var supports three types
            of input. One is a variable with shape [M, 4] which holds M groups, and the
            data type is float32 or float64. The second is a list consisting of
            4 elements shared by all boxes, with data type float32 or float64.
            The other is None, which is not involved in the calculation.
        target_box(Variable): This input can be a 2-D LoDTensor with shape
            [N, 4] when code_type is 'encode_center_size'. This input also can
            be a 3-D Tensor with shape [N, M, 4] when code_type is
            'decode_center_size'. Each box is represented as
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64.
            This tensor can contain LoD information to represent a batch of inputs.
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size`
            by default.
        box_normalized(bool): Whether to treat the priorbox as a normalized box.
            Set true by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set and is
            None by default.
        axis(int): Which axis in PriorBox to broadcast for box decode,
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is
            `decode_center_size`. Set 0 by default.

    Returns:
        Variable:

        output_box(Variable): When code_type is 'encode_center_size', the
        output tensor of box_coder_op has shape [N, M, 4] representing the
        result of N target boxes encoded with M Prior boxes and variances.
        When code_type is 'decode_center_size', N represents the batch size
        and M represents the number of decoded boxes.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # For encode
            prior_box_encode = fluid.data(name='prior_box_encode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_encode = fluid.data(name='target_box_encode',
                                   shape=[81, 4],
                                   dtype='float32')
            output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_encode,
                                    code_type="encode_center_size")
            # For decode
            prior_box_decode = fluid.data(name='prior_box_decode',
                                  shape=[512, 4],
                                  dtype='float32')
            target_box_decode = fluid.data(name='target_box_decode',
                                   shape=[512, 81, 4],
                                   dtype='float32')
            output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_decode,
                                    code_type="decode_center_size",
                                    box_normalized=False,
                                    axis=1)
    """
    return paddle.vision.ops.box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=target_box,
        code_type=code_type,
        box_normalized=box_normalized,
        axis=axis,
        name=name,
    )


@templatedoc()
def polygon_box_transform(input, name=None):
    """
    ${comment}

    Args:
1014 1015 1016 1017
        input(Variable): The input with shape [batch_size, geometry_channels, height, width].
                         A Tensor with type float32, float64.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
X
Xin Pan 已提交
1018 1019

    Returns:
1020
        Variable: The output with the same shape as input. A Tensor with type float32, float64.
B
Bai Yifan 已提交
1021 1022 1023

    Examples:
        .. code-block:: python
1024

B
Bai Yifan 已提交
1025
            import paddle.fluid as fluid
B
Bai Yifan 已提交
1026
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
B
Bai Yifan 已提交
1027
            out = fluid.layers.polygon_box_transform(input)
X
Xin Pan 已提交
1028
    """
    check_variable_and_dtype(
        input, "input", ['float32', 'float64'], 'polygon_box_transform'
    )
    helper = LayerHelper("polygon_box_transform", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type="polygon_box_transform",
        inputs={"Input": input},
        attrs={},
        outputs={"Output": output},
    )
    return output


@deprecated(since="2.0.0", update_to="paddle.vision.ops.yolo_loss")
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(
    x,
    gt_box,
    gt_label,
    anchors,
    anchor_mask,
    class_num,
    ignore_thresh,
    downsample_ratio,
    gt_score=None,
    use_label_smooth=True,
    name=None,
    scale_x_y=1.0,
):
    """

    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        gt_box (Variable): ground-truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored.
                          x, y is the center coordinate of boxes, w, h are the
                          width and height, and x, y, w, h should be divided by
                          the input image size to scale to [0, 1] (see the
                          sketch after this argument list).
                          N is the batch number and B is the max box number in
                          an image. The data type is float32 or float64.
        gt_label (Variable): class id of ground-truth boxes, should be in shape
                            of [N, B]. The data type is int32.
        anchors (list|tuple): ${anchors_comment}
        anchor_mask (list|tuple): ${anchor_mask_comment}
        class_num (int): ${class_num_comment}
        ignore_thresh (float): ${ignore_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): The default value is None. Normally there is no need
                       for the user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`
        gt_score (Variable): mixup score of ground-truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): ${use_label_smooth_comment}
        scale_x_y (float): ${scale_x_y_comment}
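
    As a note on the expected `gt_box` normalization, a minimal sketch
    (hypothetical pixel values; this assumes the usual YOLOv3 convention of
    dividing x, w by the image width and y, h by the image height):

    .. code-block:: python

        # pixel-space ground-truth box: center (100, 150), width 40, height 60
        img_w, img_h = 416.0, 416.0
        x, y, w, h = 100.0, 150.0, 40.0, 60.0
        gt_box_row = [x / img_w, y / img_h, w / img_w, h / img_h]
        # -> [0.2404, 0.3606, 0.0962, 0.1442], all in [0, 1]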

    Returns:
        Variable: A 1-D tensor with shape [N], the value of yolov3 loss

    Raises:
        TypeError: Input x of yolov3_loss must be Variable
        TypeError: Input gtbox of yolov3_loss must be Variable
        TypeError: Input gtlabel of yolov3_loss must be Variable
        TypeError: Input gtscore of yolov3_loss must be None or Variable
        TypeError: Attr anchors of yolov3_loss must be list or tuple
        TypeError: Attr class_num of yolov3_loss must be an integer
        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
        TypeError: Attr use_label_smooth of yolov3_loss must be a bool value

    Examples:
      .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          paddle.enable_static()
          x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
          gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
          gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
          gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
          anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
          anchor_mask = [0, 1, 2]
          loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
                                          gt_score=gt_score, anchors=anchors,
                                          anchor_mask=anchor_mask, class_num=80,
                                          ignore_thresh=0.7, downsample_ratio=32)
    """

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolov3_loss must be Variable")
    if not isinstance(gt_box, Variable):
        raise TypeError("Input gtbox of yolov3_loss must be Variable")
    if not isinstance(gt_label, Variable):
        raise TypeError("Input gtlabel of yolov3_loss must be Variable")
    if gt_score is not None and not isinstance(gt_score, Variable):
        raise TypeError("Input gtscore of yolov3_loss must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
    if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
        raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolov3_loss must be an integer")
    if not isinstance(ignore_thresh, float):
        raise TypeError(
            "Attr ignore_thresh of yolov3_loss must be a float number"
        )
    if not isinstance(use_label_smooth, bool):
        raise TypeError(
            "Attr use_label_smooth of yolov3_loss must be a bool value"
        )

    if _non_static_mode():
        attrs = (
            "anchors",
            anchors,
            "anchor_mask",
            anchor_mask,
            "class_num",
            class_num,
            "ignore_thresh",
            ignore_thresh,
            "downsample_ratio",
            downsample_ratio,
            "use_label_smooth",
            use_label_smooth,
            "scale_x_y",
            scale_x_y,
        )
        loss, _, _ = _legacy_C_ops.yolov3_loss(
            x, gt_box, gt_label, gt_score, *attrs
        )
1162
        return loss
D
dengkaipeng 已提交
1163

1164 1165
    helper = LayerHelper('yolov3_loss', **locals())
    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
1166 1167 1168
    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

1169 1170
    inputs = {
        "X": x,
1171 1172
        "GTBox": gt_box,
        "GTLabel": gt_label,
1173
    }
1174
    if gt_score is not None:
1175
        inputs["GTScore"] = gt_score
1176

D
dengkaipeng 已提交
1177 1178
    attrs = {
        "anchors": anchors,
1179
        "anchor_mask": anchor_mask,
D
dengkaipeng 已提交
1180 1181
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
1182
        "downsample_ratio": downsample_ratio,
1183
        "use_label_smooth": use_label_smooth,
1184
        "scale_x_y": scale_x_y,
D
dengkaipeng 已提交
1185 1186
    }

1187 1188 1189 1190 1191 1192 1193 1194 1195 1196
    helper.append_op(
        type='yolov3_loss',
        inputs=inputs,
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask,
        },
        attrs=attrs,
    )
D
dengkaipeng 已提交
1197 1198 1199
    return loss


@deprecated(since="2.0.0", update_to="paddle.vision.ops.yolo_box")
@templatedoc(op_type="yolo_box")
def yolo_box(
    x,
    img_size,
    anchors,
    class_num,
    conf_thresh,
    downsample_ratio,
    clip_bbox=True,
    name=None,
    scale_x_y=1.0,
    iou_aware=False,
    iou_aware_factor=0.5,
):
    """

    ${comment}

    Args:
        x (Variable): ${x_comment} The data type is float32 or float64.
        img_size (Variable): ${img_size_comment} The data type is int32.
        anchors (list|tuple): ${anchors_comment}
        class_num (int): ${class_num_comment}
        conf_thresh (float): ${conf_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        clip_bbox (bool): ${clip_bbox_comment}
        scale_x_y (float): ${scale_x_y_comment}
        name (string): The default value is None.  Normally there is no need
                       for user to set this property.  For more information,
                       please refer to :ref:`api_guide_Name`
        iou_aware (bool): ${iou_aware_comment}
        iou_aware_factor (float): ${iou_aware_factor_comment}

    Returns:
        Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
        scores of boxes.

    Raises:
        TypeError: Input x of yolo_box must be Variable
        TypeError: Attr anchors of yolo_box must be list or tuple
        TypeError: Attr class_num of yolo_box must be an integer
        TypeError: Attr conf_thresh of yolo_box must be a float number

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import paddle
        paddle.enable_static()
        x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
        img_size = fluid.data(name='img_size', shape=[None, 2], dtype='int64')
        anchors = [10, 13, 16, 30, 33, 23]
        boxes, scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
                                        conf_thresh=0.01, downsample_ratio=32)
    """
    helper = LayerHelper('yolo_box', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolo_box must be Variable")
    if not isinstance(img_size, Variable):
        raise TypeError("Input img_size of yolo_box must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolo_box must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolo_box must be an integer")
    if not isinstance(conf_thresh, float):
        raise TypeError("Attr conf_thresh of yolo_box must be a float number")
    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
        "clip_bbox": clip_bbox,
        "scale_x_y": scale_x_y,
        "iou_aware": iou_aware,
        "iou_aware_factor": iou_aware_factor,
    }

    helper.append_op(
        type='yolo_box',
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs,
    )
    return boxes, scores


@templatedoc()
def detection_map(
    detect_res,
    label,
    class_num,
    background_label=0,
    overlap_threshold=0.3,
    evaluate_difficult=True,
    has_state=None,
    input_states=None,
    out_states=None,
    ap_version='integral',
):
    """
    ${comment}

    Args:
        detect_res: ${detect_res_comment}
        label:  ${label_comment}
        class_num: ${class_num_comment}
        background_label: ${background_label_comment}
        overlap_threshold: ${overlap_threshold_comment}
        evaluate_difficult: ${evaluate_difficult_comment}
        has_state: ${has_state_comment}
        input_states: (tuple|None) If not None, it contains 3 elements:
            (1) pos_count ${pos_count_comment}.
            (2) true_pos ${true_pos_comment}.
            (3) false_pos ${false_pos_comment}.
        out_states: (tuple|None) If not None, it contains 3 elements.
            (1) accum_pos_count ${accum_pos_count_comment}.
            (2) accum_true_pos ${accum_true_pos_comment}.
            (3) accum_false_pos ${accum_false_pos_comment}.
        ap_version: ${ap_type_comment}

    Returns:
        ${map_comment}


    Examples:
          .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers import detection
            detect_res = fluid.data(
                name='detect_res',
                shape=[10, 6],
                dtype='float32')
            label = fluid.data(
                name='label',
                shape=[10, 6],
                dtype='float32')

            map_out = detection.detection_map(detect_res, label, 21)
    """
    helper = LayerHelper("detection_map", **locals())

    def __create_var(type):
        return helper.create_variable_for_type_inference(dtype=type)

    map_out = __create_var('float32')
    accum_pos_count_out = (
        out_states[0] if out_states is not None else __create_var('int32')
    )
    accum_true_pos_out = (
        out_states[1] if out_states is not None else __create_var('float32')
    )
    accum_false_pos_out = (
        out_states[2] if out_states is not None else __create_var('float32')
    )
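    # The optional state tensors allow mAP to be accumulated across batches:
    # counts from a previous batch (input_states, unpacked below) are folded
    # into the accumulated outputs created above.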

    pos_count = input_states[0] if input_states is not None else None
    true_pos = input_states[1] if input_states is not None else None
    false_pos = input_states[2] if input_states is not None else None

    helper.append_op(
        type="detection_map",
        inputs={
            'Label': label,
            'DetectRes': detect_res,
            'HasState': has_state,
            'PosCount': pos_count,
            'TruePos': true_pos,
            'FalsePos': false_pos,
        },
        outputs={
            'MAP': map_out,
            'AccumPosCount': accum_pos_count_out,
            'AccumTruePos': accum_true_pos_out,
            'AccumFalsePos': accum_false_pos_out,
        },
        attrs={
            'overlap_threshold': overlap_threshold,
            'evaluate_difficult': evaluate_difficult,
            'ap_type': ap_version,
            'class_num': class_num,
        },
    )
    return map_out


def bipartite_match(
    dist_matrix, match_type=None, dist_threshold=None, name=None
):
    """

    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
    distance matrix. For input 2D matrix, the bipartite matching algorithm can
    find the matched column for each row (matched means the largest distance),
    and can also find the matched row for each column. This operator only
    calculates matched indices from column to row. For each instance,
    the number of matched indices is the column number of the input distance
    matrix. **The OP only supports CPU**.

    There are two outputs, matched indices and distance.
    A simple description: this algorithm matches the best (maximum distance)
    row entity to the column entity, and the matched indices are not duplicated
    in each row of ColToRowMatchIndices. If the column entity is not matched to
    any row entity, -1 is set in ColToRowMatchIndices.
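
    For example, assuming a single instance whose distance matrix has 2 row
    entities and 3 column entities, the greedy matching repeatedly takes the
    largest remaining distance:

    .. code-block:: text

        dist_matrix = [[0.6, 0.2, 0.9],
                       [0.4, 0.8, 0.1]]

        # 0.9 matches column 2 to row 0, then 0.8 matches column 1 to row 1;
        # column 0 is left unmatched and is set to -1.
        ColToRowMatchIndices = [[-1, 1, 0]]
        ColToRowMatchDist    = [[-1.0, 0.8, 0.9]]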

    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
    If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
    If Tensor, the height of ColToRowMatchIndices is 1.

    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider to use :code:`ssd_loss` instead.

    Args:
        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
            [K, M]. The data type is float32 or float64. It is pair-wise
            distance matrix between the entities represented by each row and
            each column. For example, assumed one entity is A with shape [K],
            another entity is B with shape [M]. The dist_matrix[i][j] is the
            distance between A[i] and B[j]. The bigger the distance is, the
            better matching the pairs are. NOTE: This tensor can contain LoD
            information to represent a batch of inputs. One instance of this
            batch can contain different numbers of entities.
        match_type(str, optional): The type of matching method, should be
           'bipartite' or 'per_prediction'. None ('bipartite') by default.
        dist_threshold(float32, optional): If `match_type` is 'per_prediction',
            this threshold is to determine the extra matching bboxes based
            on the maximum distance, 0.5 by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set and
            is None by default.

    Returns:
        Tuple:

        matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
        type is int32. N is the batch size. If match_indices[i][j] is -1, it
        means B[j] does not match any entity in i-th instance.
        Otherwise, it means B[j] is matched to row
        match_indices[i][j] in i-th instance. The row number of
        i-th instance is saved in match_indices[i][j].

        matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
        type is float32. N is batch size. If match_indices[i][j] is -1,
        match_distance[i][j] is also -1.0. Otherwise, assumed
        match_distance[i][j] = d, and the row offsets of each instance
        are called LoD. Then match_distance[i][j] =
        dist_matrix[d+LoD[i]][j].

    Examples:

        >>> import paddle.fluid as fluid
        >>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
        >>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
    """
    helper = LayerHelper('bipartite_match', **locals())
    match_indices = helper.create_variable_for_type_inference(dtype='int32')
    match_distance = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype
    )
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
        outputs={
            'ColToRowMatchIndices': match_indices,
            'ColToRowMatchDist': match_distance,
        },
    )
    return match_indices, match_distance


def target_assign(
    input,
    matched_indices,
    negative_indices=None,
    mismatch_value=None,
    name=None,
):
    """

    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights to predictions. The weights are used to specify which predictions
    do not contribute to the training loss.

    For each instance, the outputs `out` and `out_weight` are assigned based on
    `match_indices` and `negative_indices`.
    Assumed that the row offset for each instance in `input` is called lod,
    this operator assigns classification/regression targets by performing the
    following steps:

    1. Assigning all outputs based on `match_indices`:

    .. code-block:: text

        If id = match_indices[i][j] >= 0,

            out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
            out_weight[i][j] = 1.

        Otherwise,

            out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][j] = 0.

    2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:

    Assumed that i-th instance in `neg_indices` is called `neg_indice`,
    for i-th instance:

    .. code-block:: text

        for id in neg_indice:
            out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][id] = 1.0
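
    For example (a small illustration of step 1, assuming one instance with
    lod = [0, 2], P = 3, K = 1 and mismatch_value = 0):

    .. code-block:: text

        X = [[[1], [2], [3]],
             [[4], [5], [6]]]
        match_indices = [[0, -1, 1]]

        out        = [[[1], [0], [6]]]    # out[0][2] = X[lod[0] + 1][2 % 3]
        out_weight = [[[1.], [0.], [1.]]]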

    Args:
       input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
           Data type should be int32 or float32.
       matched_indices (Variable): The input matched indices
           is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
           the j-th entity of column is not matched to any entity of row in
           i-th instance.
       negative_indices (Variable, optional): The input negative example indices
           are an optional input with shape [Neg, 1] and int32 type, where Neg is
           the total number of negative example indices.
       mismatch_value (float32, optional): Fill this value to the mismatched
           location.
       name (string): The default value is None.  Normally there is no need for
           user to set this property.  For more information, please refer
           to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple(out, out_weight) is returned.

        out (Variable): a 3D Tensor with shape [N, P, K] and same data type
        with `input`, N and P is the same as they are in `matched_indices`,
        K is the same as it in input of X.

        out_weight (Variable): the weight for output with the shape of [N, P, 1].
        Data type is float32.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            x = fluid.data(
                name='x',
                shape=[4, 20, 4],
                dtype='float',
                lod_level=1)
            matched_id = fluid.data(
                name='indices',
                shape=[8, 20],
                dtype='int32')
            trg, trg_weight = fluid.layers.target_assign(
                x,
                matched_id,
                mismatch_value=0)
    """
    helper = LayerHelper('target_assign', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_weight = helper.create_variable_for_type_inference(dtype='float32')
    helper.append_op(
        type='target_assign',
        inputs={
            'X': input,
            'MatchIndices': matched_indices,
            'NegIndices': negative_indices,
        },
        outputs={'Out': out, 'OutWeight': out_weight},
        attrs={'mismatch_value': mismatch_value},
    )
    return out, out_weight


def ssd_loss(
    location,
    confidence,
    gt_box,
    gt_label,
    prior_box,
    prior_box_var=None,
    background_label=0,
    overlap_threshold=0.5,
    neg_pos_ratio=3.0,
    neg_overlap=0.5,
    loc_loss_weight=1.0,
    conf_loss_weight=1.0,
    match_type='per_prediction',
    mining_type='max_negative',
    normalize=True,
    sample_size=None,
):
    r"""
	:alias_main: paddle.nn.functional.ssd_loss
	:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
	:old_api: paddle.fluid.layers.ssd_loss

    **Multi-box loss layer for object detection algorithm of SSD**

    This layer computes the detection loss for SSD given the location offset
    predictions, confidence predictions, prior boxes, ground-truth bounding
    boxes and labels, and the type of hard example mining. The returned loss
    is a weighted sum of the localization loss (or regression loss) and
    confidence loss (or classification loss), obtained by performing the
    following steps:

    1. Find matched bounding box by bipartite matching algorithm.

      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.

      1.2 Compute matched bounding box by bipartite matching algorithm.

    2. Compute confidence for mining hard examples

      2.1. Get the target label based on matched indices.

      2.2. Compute confidence loss.

    3. Apply hard example mining to get the negative example indices and update
       the matched indices.

    4. Assign classification and regression targets

      4.1. Encoded bbox according to the prior boxes.

      4.2. Assign regression targets.

      4.3. Assign classification targets.

    5. Compute the overall objective loss.

      5.1 Compute confidence loss.

      5.2 Compute localization loss.

      5.3 Compute the overall weighted loss.
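
    The objective in step 5.3 takes the form of a weighted sum (a sketch of
    what the layer computes, before the optional normalization):

    .. code-block:: text

        loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss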

    Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
            the layout is [xmin, ymin, xmax, ymax]. The data type is float32 or
            float64.
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
            `location`, C is the class number. The data type is float32 or
            float64.
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type is float32 or float64.
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
            with shape [Ng, 1]. Ng is the total number of ground-truth bboxes of
            mini-batch input, 1 is the number of class. The data type is float32
            or float64.
        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
            Np and 4 are the same as they are in `location`. The data type is
            float32 or float64.
        prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
            with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`.
        background_label (int): The index of background label, 0 by default.
        overlap_threshold (float): If match_type is 'per_prediction', use
            'overlap_threshold' to determine the extra matching bboxes when finding \
            matched boxes. 0.5 by default.
        neg_pos_ratio (float): The ratio of the negative boxes to the positive
            boxes, used only when mining_type is 'max_negative', 3.0 by default.
        neg_overlap (float): The negative overlap upper bound for the unmatched
            predictions. Used only when mining_type is 'max_negative',
            0.5 by default.
        loc_loss_weight (float): Weight for localization loss, 1.0 by default.
        conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
        match_type (str): The type of matching method during training, should
            be 'bipartite' or 'per_prediction', 'per_prediction' by default.
        mining_type (str): The hard example mining type, should be 'hard_example'
            or 'max_negative', now only support `max_negative`.
        normalize (bool): Whether to normalize the SSD loss by the total number
            of output locations, True by default.
        sample_size (int): The max sample size of negative box, used only when
            mining_type is 'hard_example'.

    Returns:
        Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
        with shape [N * Np, 1], N and Np are the same as they are in
        `location`. The data type is float32 or float64.

    Raises:
        ValueError: If mining_type is 'hard_example', now only support mining \
        type of `max_negative`.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.data(
                           name='prior_box',
                           shape=[10, 4],
                           dtype='float32')
            pbv = fluid.data(
                           name='prior_box_var',
                           shape=[10, 4],
                           dtype='float32')
            loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
            scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
            gt_box = fluid.data(
                 name='gt_box', shape=[4], lod_level=1, dtype='float32')
            gt_label = fluid.data(
                 name='gt_label', shape=[1], lod_level=1, dtype='float32')
            loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
    """

    helper = LayerHelper('ssd_loss', **locals())
    if mining_type != 'max_negative':
        raise ValueError("Only mining_type == 'max_negative' is supported now.")

    num, num_prior, num_class = confidence.shape
    conf_shape = nn.shape(confidence)

    def __reshape_to_2d(var):
        return nn.flatten(x=var, axis=2)

    # 1. Find matched bounding box by prior box.
    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
    iou = iou_similarity(x=gt_box, y=prior_box)
    #   1.2 Compute matched bounding box by bipartite matching algorithm.
    matched_indices, matched_dist = bipartite_match(
        iou, match_type, overlap_threshold
    )

    # 2. Compute confidence for mining hard examples
    # 2.1. Get the target label based on matched indices
    gt_label = paddle.reshape(
        x=gt_label, shape=(len(gt_label.shape) - 1) * (0,) + (-1, 1)
    )
    gt_label.stop_gradient = True
    target_label, _ = target_assign(
        gt_label, matched_indices, mismatch_value=background_label
    )
    # 2.2. Compute confidence loss.
    # Reshape confidence to 2D tensor.
    confidence = __reshape_to_2d(confidence)
    target_label = tensor.cast(x=target_label, dtype='int64')
    target_label = __reshape_to_2d(target_label)
    target_label.stop_gradient = True
    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    # 3. Mining hard examples
    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
    actual_shape.stop_gradient = True
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    conf_loss = paddle.reshape(x=conf_loss, shape=actual_shape)
    conf_loss.stop_gradient = True
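    # This first confidence loss is only used to rank negative examples for
    # hard example mining below; stop_gradient=True keeps it out of
    # backpropagation.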
    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
    dtype = matched_indices.dtype
    updated_matched_indices = helper.create_variable_for_type_inference(
        dtype=dtype
    )
    helper.append_op(
        type='mine_hard_examples',
        inputs={
            'ClsLoss': conf_loss,
            'LocLoss': None,
            'MatchIndices': matched_indices,
            'MatchDist': matched_dist,
        },
        outputs={
            'NegIndices': neg_indices,
            'UpdatedMatchIndices': updated_matched_indices,
        },
        attrs={
            'neg_pos_ratio': neg_pos_ratio,
            'neg_dist_threshold': neg_overlap,
            'mining_type': mining_type,
            'sample_size': sample_size,
        },
    )

    # 4. Assign classification and regression targets
    # 4.1. Encoded bbox according to the prior boxes.
    encoded_bbox = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size',
    )
    # 4.2. Assign regression targets
    target_bbox, target_loc_weight = target_assign(
        encoded_bbox, updated_matched_indices, mismatch_value=background_label
    )
    # 4.3. Assign classification targets
    target_label, target_conf_weight = target_assign(
        gt_label,
        updated_matched_indices,
        negative_indices=neg_indices,
        mismatch_value=background_label,
    )

    # 5. Compute loss.
    # 5.1 Compute confidence loss.
    target_label = __reshape_to_2d(target_label)
    target_label = tensor.cast(x=target_label, dtype='int64')

    conf_loss = softmax_with_cross_entropy(confidence, target_label)
    target_conf_weight = __reshape_to_2d(target_conf_weight)
    conf_loss = conf_loss * target_conf_weight

    # the target_label and target_conf_weight do not have gradient.
    target_label.stop_gradient = True
    target_conf_weight.stop_gradient = True

    # 5.2 Compute regression loss.
    location = __reshape_to_2d(location)
    target_bbox = __reshape_to_2d(target_bbox)

    loc_loss = nn.smooth_l1(location, target_bbox)
    target_loc_weight = __reshape_to_2d(target_loc_weight)
    loc_loss = loc_loss * target_loc_weight

    # the target_bbox and target_loc_weight do not have gradient.
    target_bbox.stop_gradient = True
    target_loc_weight.stop_gradient = True

    # 5.3 Compute overall weighted loss.
    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
    # reshape to [N, Np], N is the batch size and Np is the prior box number.
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    loss = paddle.reshape(x=loss, shape=actual_shape)
    loss = paddle.sum(loss, axis=1, keepdim=True)
    if normalize:
        normalizer = paddle.sum(target_loc_weight)
        loss = loss / normalizer

    return loss


def prior_box(
    input,
    image,
    min_sizes,
    max_sizes=None,
    aspect_ratios=[1.0],
    variance=[0.1, 0.1, 0.2, 0.2],
    flip=False,
    clip=False,
    steps=[0.0, 0.0],
    offset=0.5,
    name=None,
    min_max_aspect_ratios_order=False,
):
    """

    This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box lies in the (min_size, max_size) interval, and the boxes are generated
    in sequence according to the aspect_ratios.
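
    For example, assuming min_sizes=[100.], max_sizes=None and the default
    aspect_ratios=[1.] (as in the example below), each position produces:

    .. code-block:: text

        N = len(aspect_ratios) * len(min_sizes) + len(max_sizes or [])
          = 1 * 1 + 0
          = 1    # matches num_priors == 1 in the example output shapes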

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes(list|tuple|float): the min sizes of generated prior boxes.
       max_sizes(list|tuple|None): the max sizes of generated prior boxes.
            Default: None.
       aspect_ratios(list|tuple|float): the aspect ratios of generated
            prior boxes. Default: [1.].
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default: [0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default: False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals to 0.0 or steps[1] equals to 0.0, the prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of the
            convolution layer followed by and does not affect the final
            detection results. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes(Variable): the output prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.

        variances(Variable): the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total box count of each position of input.

    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()
            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.prior_box(
                 input=input,
                 image=image,
                 min_sizes=[100.],
                 clip=True,
                 flip=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(fluid.default_main_program(),
                feed={"input":input_data,"image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (6, 9, 1, 4)
            # print(var_out.shape)
            # (6, 9, 1, 4)

            # imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.prior_box(
                    input=input,
                    image=image,
                    min_sizes=[100.],
                    clip=True,
                    flip=True)
                # print(box.shape)
                # [6L, 9L, 1L, 4L]
                # print(var.shape)
                # [6L, 9L, 1L, 4L]

    """
    return paddle.vision.ops.prior_box(
        input=input,
        image=image,
        min_sizes=min_sizes,
        max_sizes=max_sizes,
        aspect_ratios=aspect_ratios,
        variance=variance,
        flip=flip,
        clip=clip,
        steps=steps,
        offset=offset,
        min_max_aspect_ratios_order=min_max_aspect_ratios_order,
        name=name,
    )


def density_prior_box(
    input,
    image,
    densities=None,
    fixed_sizes=None,
    fixed_ratios=None,
    variance=[0.1, 0.1, 0.2, 0.2],
    clip=False,
    steps=[0.0, 0.0],
    offset=0.5,
    flatten_to_2d=False,
    name=None,
):
    r"""

    This op generates density prior boxes for SSD(Single Shot MultiBox Detector)
    algorithm. Each position of the input produces N prior boxes, where N is
    determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator; the grid points are determined by densities, and the count
    of density prior boxes is determined by fixed_sizes and fixed_ratios.
    Note that the number of fixed_sizes must be equal to the number of densities.

    For densities_i in densities:

    .. math::

        N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)

    N_density_prior_box is the number of density prior boxes and N_fixed_ratios is the number of fixed_ratios.
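
    For example, with densities=[4, 2, 1] and fixed_ratios=[1.] (as in the
    examples below), each input position produces:

    .. code-block:: text

        N = 1 * 4^2 + 1 * 2^2 + 1 * 1^2 = 21   # matches num_priors == 21 below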

    Parameters:
       input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
       image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       densities(list|tuple|None): The densities of generated density prior
            boxes, this attribute should be a list or tuple of integers.
            Default: None.
       fixed_sizes(list|tuple|None): The fixed sizes of generated density
            prior boxes, this attribute should be a list or tuple of the same
            length as :attr:`densities`. Default: None.
       fixed_ratios(list|tuple|None): The fixed ratios of generated density
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
       variance(list|tuple): The variances to be encoded in density prior boxes.
            Default: [0.1, 0.1, 0.2, 0.2].
       clip(bool): Whether to clip out of boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       flatten_to_2d(bool): Whether to flatten output prior boxes and variance
           to 2D shape, the second dim is 4. Default: False.
       name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes: the output density prior boxes of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.

        variances: the expanded variances of PriorBox.
        4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
        2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
        H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.


    Examples:

        .. code-block:: python

            #declarative mode

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.density_prior_box(
                 input=input,
                 image=image,
                 densities=[4, 2, 1],
                 fixed_sizes=[32.0, 64.0, 128.0],
                 fixed_ratios=[1.],
                 clip=True,
                 flatten_to_2d=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(
                fluid.default_main_program(),
                feed={"input":input_data,
                      "image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (1134, 4)
            # print(var_out.shape)
            # (1134, 4)


            #imperative mode
            import paddle.fluid.dygraph as dg

            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.density_prior_box(
                    input=input,
                    image=image,
                    densities=[4, 2, 1],
                    fixed_sizes=[32.0, 64.0, 128.0],
                    fixed_ratios=[1.],
                    clip=True)

                # print(box.shape)
                # [6L, 9L, 21L, 4L]
                # print(var.shape)
                # [6L, 9L, 21L, 4L]

    """
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()
2124 2125 2126
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64'], 'density_prior_box'
    )
R
ruri 已提交
2127 2128

    def _is_list_or_tuple_(data):
2129
        return isinstance(data, list) or isinstance(data, tuple)
R
ruri 已提交
2130

2131 2132 2133
    check_type(densities, 'densities', (list, tuple), 'density_prior_box')
    check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
    check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
R
ruri 已提交
2134 2135
    if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be euqal.')
2136

R
ruri 已提交
2137
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
2138 2139 2140 2141
        raise ValueError(
            'steps should be a list or tuple ',
            'with length 2, (step_width, step_height).',
        )
R
ruri 已提交
2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153

    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
2154 2155 2156 2157
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
R
ruri 已提交
2158 2159 2160 2161 2162
    }
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
2163 2164
        inputs={"Input": input, "Image": image},
        outputs={"Boxes": box, "Variances": var},
2165 2166
        attrs=attrs,
    )
R
ruri 已提交
2167 2168 2169 2170 2171
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


@static_only
def multi_box_head(
    inputs,
    image,
    base_size,
    num_classes,
    aspect_ratios,
    min_ratio=None,
    max_ratio=None,
    min_sizes=None,
    max_sizes=None,
    steps=None,
    step_w=None,
    step_h=None,
    offset=0.5,
    variance=[0.1, 0.1, 0.2, 0.2],
    flip=True,
    clip=False,
    kernel_size=1,
    pad=0,
    stride=1,
    name=None,
    min_max_aspect_ratios_order=False,
):
    """
        :api_attr: Static Graph

    Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
    regression locations and classification confidences on multiple input feature
    maps, then concatenate and output the results. For the details of this
    algorithm, please refer to section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
    <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs (list(Variable)|tuple(Variable)): The list of input variables,
           the format of all Variables are 4-D Tensor, layout is NCHW.
           Data type should be float32 or float64.
       image (Variable): The input image, layout is NCHW. Data type should be
           the same as inputs.
       base_size(int): the base_size is input image size. When len(inputs) > 2
           and `min_size` and `max_size` are None, the `min_size` and `max_size`
           are calculated by `base_size`, 'min_ratio' and `max_ratio`. The
           formula is as follows:

              ..  code-block:: text

                  min_sizes = []
                  max_sizes = []
                  step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
                  for ratio in range(min_ratio, max_ratio + 1, step):
                      min_sizes.append(base_size * ratio / 100.)
                      max_sizes.append(base_size * (ratio + step) / 100.)
                  min_sizes = [base_size * .10] + min_sizes
                  max_sizes = [base_size * .20] + max_sizes

       num_classes(int): The number of classes.
       aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
           prior boxes. The length of input and aspect_ratios must be equal.
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       min_sizes(list|tuple|None): If `len(inputs) <= 2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       max_sizes(list|tuple|None): If `len(inputs) <= 2`,
            max_sizes must be set up, and the length of max_sizes
            should equal to the length of inputs. Default: None.
       steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
       step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
       step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
       offset(float): Prior boxes center offset. Default: 0.5
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default: [0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default: True.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       kernel_size(int): The kernel size of conv2d. Default: 1.
       pad(int|list|tuple): The padding of conv2d. Default: 0.
       stride(int|list|tuple): The stride of conv2d. Default: 1.
       name(str): The default value is None.  Normally there is no need
           for user to set this property.  For more information, please
           refer to :ref:`api_guide_Name`.
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of the
            convolution layer followed by and does not affect the final
            detection results. Default: False.

    Returns:
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

        mbox_loc (Variable): The predicted boxes' location of the inputs. The
        layout is [N, num_priors, 4], where N is batch size, ``num_priors``
        is the number of prior boxes. Data type is the same as input.

        mbox_conf (Variable): The predicted boxes' confidence of the inputs.
        The layout is [N, num_priors, C], where ``N`` and ``num_priors``
        has the same meaning as above. C is the number of Classes.
        Data type is the same as input.

        boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
        The meaning of num_priors is the same as above.
        Data type is the same as input.

        variances (Variable): the expanded variances for prior boxes.
        The layout is [num_priors, 4]. Data type is the same as input.

    Examples 1: set min_ratio and max_ratio:
        .. code-block:: python

          import paddle
          paddle.enable_static()

          images = paddle.static.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = paddle.static.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = paddle.static.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = paddle.static.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = paddle.static.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = paddle.static.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = paddle.static.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = paddle.static.nn.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    Examples 2: set min_sizes and max_sizes:
        .. code-block:: python

          import paddle
          paddle.enable_static()

          images = paddle.static.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
          conv1 = paddle.static.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
          conv2 = paddle.static.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
          conv3 = paddle.static.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
          conv4 = paddle.static.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
          conv5 = paddle.static.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
          conv6 = paddle.static.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

          mbox_locs, mbox_confs, box, var = paddle.static.nn.multi_box_head(
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
            image=images,
            num_classes=21,
            min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
            max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)

    """

    def _reshape_with_axis_(input, axis=1):
        out = nn.flatten(x=input, axis=axis)
        return out

    def _is_list_or_tuple_(data):
        return isinstance(data, list) or isinstance(data, tuple)

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
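    # With more than two inputs and no explicit sizes, derive per-layer
    # min/max sizes from base_size and the min/max ratios (this implements
    # the formula shown in the docstring above).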
2356
    elif min_sizes is None and max_sizes is None:
C
chengduoZH 已提交
2357 2358 2359
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
2360
        for ratio in range(min_ratio, max_ratio + 1, step):
2361 2362 2363 2364
            min_sizes.append(base_size * ratio / 100.0)
            max_sizes.append(base_size * (ratio + step) / 100.0)
        min_sizes = [base_size * 0.10] + min_sizes
        max_sizes = [base_size * 0.20] + max_sizes

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios,
            num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.',
        )
    if step_h is not None:
        _is_list_or_tuple_and_equal(
            step_h,
            num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.',
        )
    if step_w is not None:
        _is_list_or_tuple_and_equal(
            step_w,
            num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.',
        )
    if steps is not None:
        _is_list_or_tuple_and_equal(
            steps,
            num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'steps should be the same.',
        )
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]

        box, var = prior_box(
            input,
            image,
            min_size,
            max_size,
            aspect_ratio,
            variance,
            flip,
            clip,
            step,
            offset,
            None,
            min_max_aspect_ratios_order,
        )

        box_results.append(box)
        var_results.append(var)

        num_boxes = box.shape[2]
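        # prior_box returns boxes with layout [H, W, num_priors, 4], so
        # shape[2] is the number of prior boxes per spatial location.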

        # get loc
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride,
        )

        mbox_loc = paddle.transpose(mbox_loc, perm=[0, 2, 3, 1])
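        # [N, C, H, W] -> [N, H, W, C]; flattening from axis=1 then gives
        # [N, H*W*num_boxes*4], so per-layer predictions can be concatenated
        # along axis 1 below.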
        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
        mbox_locs.append(mbox_loc_flatten)

        # get conf
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride,
        )
        conf_loc = paddle.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
        mbox_confs.append(conf_loc_flatten)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = paddle.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = paddle.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes]
        )

    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var


def anchor_generator(
    input,
    anchor_sizes=None,
    aspect_ratios=None,
    variance=[0.1, 0.1, 0.2, 0.2],
    stride=None,
    offset=0.5,
    name=None,
):
    """

    **Anchor generator operator**

    Generate anchors for the Faster R-CNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
    by looping over aspect_ratios first, then anchor_sizes.

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, the anchor size of 64 means the area of this anchor
          equals 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually there is no need to set it;
           None by default.

    Returns:
        Tuple:

        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.

        Variances(Variable): The expanded variances of anchors
        with a layout of [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position.
        Each variance is in (xcenter, ycenter, w, h) format.


    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
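            # With 4 anchor sizes and 3 aspect ratios, N = 12 anchors are
            # generated per position, so 'anchor' has shape [16, 16, 12, 4].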
    """
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return isinstance(data, list) or isinstance(data, tuple)

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError(
            'stride should be a list or tuple with length 2, '
            '(stride_width, stride_height).'
        )

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset,
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor, "Variances": var},
        attrs=attrs,
    )
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var


def roi_perspective_transform(
    input,
    rois,
    transformed_height,
    transformed_width,
    spatial_scale=1.0,
    name=None,
):
    """
    **The** `rois` **of this op should be a LoDTensor.**

    ROI perspective transform op applies a perspective transform to map each RoI
    into a rectangular region. Perspective transform is a type of transformation
    in linear algebra.

    Parameters:
        input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
                          input tensor is NCHW. Where N is batch size, C is the
                          number of input channels, H is the height of the feature,
                          and W is the width of the feature. The data type is float32.
        rois (Variable):  2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
                          It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
                          [[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
                          top left coordinates, and (x2, y2) is the top right
                          coordinates, and (x3, y3) is the bottom right coordinates,
                          and (x4, y4) is the bottom left coordinates. The data type is the
                          same as `input`
        transformed_height (int): The height of transformed output.
        transformed_width (int): The width of transformed output.
        spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
        name(str, optional): The default value is None.
                             Normally there is no need for the user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
            A tuple with three Variables. (out, mask, transform_matrix)

            out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`

            mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, 1, transformed_h, transformed_w). The data type is int32

            transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
            a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`

    Return Type:
        tuple

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
    """
    check_variable_and_dtype(
        input, 'input', ['float32'], 'roi_perspective_transform'
    )
    check_variable_and_dtype(
        rois, 'rois', ['float32'], 'roi_perspective_transform'
    )
    check_type(
        transformed_height,
        'transformed_height',
        int,
        'roi_perspective_transform',
    )
    check_type(
        transformed_width, 'transformed_width', int, 'roi_perspective_transform'
    )
    check_type(
        spatial_scale, 'spatial_scale', float, 'roi_perspective_transform'
    )

    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input, "ROIs": rois},
        outputs={
            "Out": out,
            "Out2InIdx": out2in_idx,
            "Out2InWeights": out2in_w,
            "Mask": mask,
            "TransformMatrix": transform_matrix,
        },
        attrs={
            "transformed_height": transformed_height,
            "transformed_width": transformed_width,
            "spatial_scale": spatial_scale,
        },
    )
    return out, mask, transform_matrix


def generate_proposal_labels(
    rpn_rois,
    gt_classes,
    is_crowd,
    gt_boxes,
    im_info,
    batch_size_per_im=256,
    fg_fraction=0.25,
    fg_thresh=0.25,
    bg_thresh_hi=0.5,
    bg_thresh_lo=0.0,
    bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
    class_nums=None,
    use_random=True,
    is_cls_agnostic=False,
    is_cascade_rcnn=False,
    max_overlap=None,
    return_max_overlap=False,
):
    """

    **Generate Proposal Labels of Faster-RCNN**

    Given the bounding boxes produced by GenerateProposalOp and the ground-truth
    boxes, this operator samples foreground and background boxes and computes the
    loss targets.

    RpnRois are the output boxes of the RPN, produced by the generate_proposals
    op; these boxes are combined with the ground-truth boxes and sampled
    according to batch_size_per_im and fg_fraction.
    An instance whose overlap with the ground-truth is greater than fg_thresh is
    considered a foreground sample, and an instance whose overlap with the
    ground-truth is greater than bg_thresh_lo and lower than bg_thresh_hi is
    considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.

    For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
    Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.

    Args:
        rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
        is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
        gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
        im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.

        batch_size_per_im(int): Batch size of rois per image. The data type must be int32.
        fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
        bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
        class_nums(int): Class number. The data type must be int32.
        use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): Whether bbox regression is class-agnostic, i.e. it only distinguishes fg and bg boxes.
        is_cascade_rcnn(bool): When set to True, bboxes crossing the image's boundary are filtered out.
        max_overlap(Variable): Maximum overlap between each proposal box and ground-truth.
        return_max_overlap(bool): Whether return the maximum overlap between each sampled RoI and ground-truth.

    Returns:
        tuple:
        A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, max_overlap)``.

        - **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
        - **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
        - **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
        - **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
        - **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
        - **max_overlap**: 1-D LoDTensor with shape ``[P]``. P is the number of output ``rois``. The maximum overlap between each sampled RoI and ground-truth.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
                           rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
                           class_nums=10)

    """

    helper = LayerHelper('generate_proposal_labels', **locals())

    check_variable_and_dtype(
        rpn_rois, 'rpn_rois', ['float32', 'float64'], 'generate_proposal_labels'
    )
    check_variable_and_dtype(
        gt_classes, 'gt_classes', ['int32'], 'generate_proposal_labels'
    )
    check_variable_and_dtype(
        is_crowd, 'is_crowd', ['int32'], 'generate_proposal_labels'
    )
    if is_cascade_rcnn:
        assert (
            max_overlap is not None
        ), "Input max_overlap of generate_proposal_labels should not be None if is_cascade_rcnn is True"

    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
    labels_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype
    )
    bbox_targets = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype
    )
    bbox_inside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype
    )
    bbox_outside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype
    )
    max_overlap_with_gt = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype
    )

    inputs = {
        'RpnRois': rpn_rois,
        'GtClasses': gt_classes,
        'IsCrowd': is_crowd,
        'GtBoxes': gt_boxes,
        'ImInfo': im_info,
    }
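    # MaxOverlap is an optional input; it must be provided when
    # is_cascade_rcnn=True (see the assertion above).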
    if max_overlap is not None:
        inputs['MaxOverlap'] = max_overlap
    helper.append_op(
        type="generate_proposal_labels",
        inputs=inputs,
        outputs={
            'Rois': rois,
            'LabelsInt32': labels_int32,
            'BboxTargets': bbox_targets,
            'BboxInsideWeights': bbox_inside_weights,
            'BboxOutsideWeights': bbox_outside_weights,
            'MaxOverlapWithGT': max_overlap_with_gt,
        },
        attrs={
            'batch_size_per_im': batch_size_per_im,
            'fg_fraction': fg_fraction,
            'fg_thresh': fg_thresh,
            'bg_thresh_hi': bg_thresh_hi,
            'bg_thresh_lo': bg_thresh_lo,
            'bbox_reg_weights': bbox_reg_weights,
            'class_nums': class_nums,
            'use_random': use_random,
            'is_cls_agnostic': is_cls_agnostic,
            'is_cascade_rcnn': is_cascade_rcnn,
        },
    )

    rois.stop_gradient = True
    labels_int32.stop_gradient = True
    bbox_targets.stop_gradient = True
    bbox_inside_weights.stop_gradient = True
    bbox_outside_weights.stop_gradient = True
    max_overlap_with_gt.stop_gradient = True

    if return_max_overlap:
        return (
            rois,
            labels_int32,
            bbox_targets,
            bbox_inside_weights,
            bbox_outside_weights,
            max_overlap_with_gt,
        )
    return (
        rois,
        labels_int32,
        bbox_targets,
        bbox_inside_weights,
        bbox_outside_weights,
    )


def generate_mask_labels(
    im_info,
    gt_classes,
    is_crowd,
    gt_segms,
    rois,
    labels_int32,
    num_classes,
    resolution,
):
    r"""

    **Generate Mask Labels for Mask-RCNN**

    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. The mask branch also has
    a :math:`K \\times M^{2}` dimensional output target for each foreground
    RoI, which encodes K binary masks of resolution M x M, one for each of the
    K classes. These mask targets are used to compute the loss of the mask branch.

    Please note the data format of the ground-truth segmentation; assume the
    segmentations are as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.

        .. code-block:: python

            #[
            #  [[[229.14, 370.9, 229.14, 370.9, ...]],
            #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
            #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
            #]

            batch_masks = []
            for semgs in batch_semgs:
                gt_masks = []
                for semg in semgs:
                    gt_segm = []
                    for polys in semg:
                        gt_segm.append(np.array(polys).reshape(-1, 2))
                    gt_masks.append(gt_segm)
                batch_masks.append(gt_masks)

            place = fluid.CPUPlace()
            feeder = fluid.DataFeeder(place=place, feed_list=feeds)
            feeder.feed(batch_masks)

    Args:
        im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
            data type. N is the batch size, each element is
            [height, width, scale] of image. Image scale is
            target_size / original_size, target_size is the size after resize,
            original_size is the original image size.
        gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
            should be int. M is the total number of ground-truth, each
            element is a class label.
        is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
            as gt_classes, each element is a flag indicating whether a
            groundtruth is crowd.
        gt_segms (Variable): This input is a 2D LoDTensor with shape [S, 2] and
            float32 data type; its LoD level is 3.
            Usually users do not need to understand LoD;
            the reader should return the correct data format.
            The LoD[0] represents the ground-truth objects number of
            each instance. LoD[1] represents the segmentation counts of each
            object. LoD[2] represents the polygons number of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
            type. R is the total number of RoIs, each element is a bounding
            box with (xmin, ymin, xmax, ymax) format in the range of the original image.
        labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
            of int32. R is the same as it in `rois`. Each element represents
            a class label of a RoI.
        num_classes (int): Class number.
        resolution (int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable):  A 2D LoDTensor with shape [P, 4] and same data
        type as `rois`. P is the total number of sampled RoIs. Each element
        is a bounding box with [xmin, ymin, xmax, ymax] format in range of
        original image size.

        mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
        and int data type, each element represents the output mask RoI
        index with regard to input RoIs.

        mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
        data type, K is the classes number and M is the resolution of mask
        predictions. Each element represents the binary mask targets.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          im_info = fluid.data(name="im_info", shape=[None, 3],
              dtype="float32")
          gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
              dtype="float32", lod_level=1)
          is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
              dtype="float32", lod_level=1)
          gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
              dtype="float32", lod_level=3)
          # rois, roi_labels can be the output of
          # fluid.layers.generate_proposal_labels.
          rois = fluid.data(name="rois", shape=[None, 4],
              dtype="float32", lod_level=1)
          roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
              dtype="int32", lod_level=1)
          mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
              im_info=im_info,
              gt_classes=gt_classes,
              is_crowd=is_crowd,
              gt_segms=gt_masks,
              rois=rois,
              labels_int32=roi_labels,
              num_classes=81,
              resolution=14)
    """

    helper = LayerHelper('generate_mask_labels', **locals())

    mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_has_mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype
    )
    mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype
    )

    helper.append_op(
        type="generate_mask_labels",
        inputs={
            'ImInfo': im_info,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtSegms': gt_segms,
            'Rois': rois,
            'LabelsInt32': labels_int32,
        },
        outputs={
            'MaskRois': mask_rois,
            'RoiHasMaskInt32': roi_has_mask_int32,
            'MaskInt32': mask_int32,
        },
        attrs={'num_classes': num_classes, 'resolution': resolution},
    )

    mask_rois.stop_gradient = True
    roi_has_mask_int32.stop_gradient = True
    mask_int32.stop_gradient = True

    return mask_rois, roi_has_mask_int32, mask_int32


def generate_proposals(
    scores,
    bbox_deltas,
    im_info,
    anchors,
    variances,
    pre_nms_top_n=6000,
    post_nms_top_n=1000,
    nms_thresh=0.5,
    min_size=0.1,
    eta=1.0,
    return_rois_num=False,
    name=None,
):
    """

    **Generate proposal Faster-RCNN**

    This operation proposes RoIs according to each box's probability of being
    a foreground object, where the boxes are computed from the anchors.
    bbox_deltas and the objectness scores are the outputs of the RPN. The final
    proposals can be used to train the detection net.

    For generating proposals, this operation performs the following steps:

    1. Transposes and resizes scores and bbox_deltas to shapes
       (H*W*A, 1) and (H*W*A, 4).
    2. Calculates box locations as proposal candidates.
    3. Clips boxes to the image.
    4. Removes predicted boxes with small area.
    5. Applies NMS to get final proposals as output.

    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes
            and scale is the ratio of network input size and original size.
            The data type can be float32 or float64.
        anchors(Variable):   A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(int): Number of total bboxes to be kept per
            image before NMS. `6000` by default.
        post_nms_top_n(int): Number of total bboxes to be kept per
            image after NMS. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When set to True, it will return a 1D Tensor with shape [N, ] that contains the RoI
            number of each image in the batch; N is the number of images. For example, values of [4, 5] mean that
            the first image has 4 RoIs and the second image has 5 RoIs. It is only used in the RCNN model.
            'False' by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set it;
            None by default.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                         im_info, anchors, variances)

    """
    return paddle.vision.ops.generate_proposals(
        scores=scores,
        bbox_deltas=bbox_deltas,
        img_size=im_info[:2],
        anchors=anchors,
        variances=variances,
        pre_nms_top_n=pre_nms_top_n,
        post_nms_top_n=post_nms_top_n,
        nms_thresh=nms_thresh,
        min_size=min_size,
        eta=eta,
        return_rois_num=return_rois_num,
        name=name,
    )


def box_clip(input, im_info, name=None):
    """

    Clip the boxes to the image size given by im_info.
    For each input box, the formula is given as follows:

    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0)
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)

    where im_w and im_h are computed from im_info:

    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(width / scale)
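
    For example, with im_info = [600., 800., 1.], im_h = 600 and im_w = 800,
    so a box [-5., 10., 850., 620.] is clipped to [0., 10., 799., 599.].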

    Args:
        input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
            the last dimension is 4 and data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
            (height, width, scale) representing the information of image.
            Height and width are the input sizes and scale is the ratio of network input
            size and original size. The data type is float32 or float64.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set it;
            None by default.

    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or float64.
        The shape is same as input.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[-1 ,3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
    check_variable_and_dtype(
        im_info, 'im_info', ['float32', 'float64'], 'box_clip'
    )

    helper = LayerHelper("box_clip", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)
    inputs = {"Input": input, "ImInfo": im_info}
    helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})

    return output


def retinanet_detection_output(
    bboxes,
    scores,
    anchors,
    im_info,
    score_threshold=0.05,
    nms_top_k=1000,
    keep_top_k=100,
    nms_threshold=0.3,
    nms_eta=1.0,
):
    """
    **Detection Output Layer for the detector RetinaNet.**

    In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
    `FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions; this OP gets the detection results by
    performing the following steps:

    1. For each FPN level, decode box predictions according to the anchor
       boxes from at most :attr:`nms_top_k` top-scoring predictions after
       thresholding detector confidence at :attr:`score_threshold`.
    2. Merge top predictions from all levels and apply multi-class non
       maximum suppression (NMS) on them to get the final detections.

    Args:
        bboxes(List): A list of Tensors from multiple FPN levels represents
            the location prediction for all anchor boxes. Each element is
            a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
            batch size, :math:`Mi` is the number of bounding boxes from
            :math:`i`-th FPN level and each bounding box has four coordinate
            values and the layout is [xmin, ymin, xmax, ymax]. The data type
            of each element is float32 or float64.
        scores(List): A list of Tensors from multiple FPN levels represents
            the category prediction for all anchor boxes. Each element is a
            3-D Tensor with shape :math:`[N, Mi, C]`,  :math:`N` is the batch
            size, :math:`C` is the class number (**excluding background**),
            :math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
            level. The data type of each element is float32 or float64.
        anchors(List): A list of Tensors from multiple FPN levels represents
            the locations of all anchor boxes. Each element is a 2-D Tensor
            with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
            boxes from :math:`i`-th FPN level, and each bounding box has four
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
            The data type of each element is float32 or float64.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector which are the height and width
            of the network input along with the factor scaling the origin image to
            the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score before NMS, default value is set to 0.05.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS, default value is set to
            1000.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. Default value is set to 100, -1 means keeping all bounding
            boxes after NMS step.
        nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
            filter out boxes in NMS.
        nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
            Default value is set to 1., which means the value of
            :attr:`nms_threshold` stays the same in NMS. If :attr:`nms_eta` is set
            to be lower than 1. and the value of :attr:`nms_threshold` is set to
            be higher than 0.5, then every time a bounding box is filtered out,
            the adjustment :attr:`nms_threshold` = :attr:`nms_threshold` *
            :attr:`nms_eta` is applied, until the actual value of
            :attr:`nms_threshold` is lower than or equal to 0.5.

    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` is used at all
    levels. Hence, this OP does not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.

    Returns:
        Variable(The data type is float32 or float64):
            The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            :math:`No` is the total number of detections in this mini-batch.
            The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
            results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
            has no detected results. If all images have no detected results,
            LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid

           bboxes_low = fluid.data(
               name='bboxes_low', shape=[1, 44, 4], dtype='float32')
           bboxes_high = fluid.data(
               name='bboxes_high', shape=[1, 11, 4], dtype='float32')
           scores_low = fluid.data(
               name='scores_low', shape=[1, 44, 10], dtype='float32')
           scores_high = fluid.data(
               name='scores_high', shape=[1, 11, 10], dtype='float32')
           anchors_low = fluid.data(
               name='anchors_low', shape=[44, 4], dtype='float32')
           anchors_high = fluid.data(
               name='anchors_high', shape=[11, 4], dtype='float32')
           im_info = fluid.data(
               name="im_info", shape=[1, 3], dtype='float32')
           nmsed_outs = fluid.layers.retinanet_detection_output(
               bboxes=[bboxes_low, bboxes_high],
               scores=[scores_low, scores_high],
               anchors=[anchors_low, anchors_high],
               im_info=im_info,
               score_threshold=0.05,
               nms_top_k=1000,
               keep_top_k=100,
               nms_threshold=0.45,
               nms_eta=1.0)
    """

    check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
    for i, bbox in enumerate(bboxes):
        check_variable_and_dtype(
            bbox,
            'bbox{}'.format(i),
            ['float32', 'float64'],
            'retinanet_detection_output',
        )
    check_type(scores, 'scores', (list), 'retinanet_detection_output')
    for i, score in enumerate(scores):
        check_variable_and_dtype(
            score,
            'score{}'.format(i),
            ['float32', 'float64'],
            'retinanet_detection_output',
        )
    check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
    for i, anchor in enumerate(anchors):
        check_variable_and_dtype(
            anchor,
            'anchor{}'.format(i),
            ['float32', 'float64'],
            'retinanet_detection_output',
        )
    check_variable_and_dtype(
        im_info, 'im_info', ['float32', 'float64'], 'retinanet_detection_output'
    )

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores')
    )
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info,
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
        },
        outputs={'Out': output},
    )
    output.stop_gradient = True
    return output


def multiclass_nms(
    bboxes,
    scores,
    score_threshold,
    nms_top_k,
    keep_top_k,
    nms_threshold=0.3,
    normalized=True,
    nms_eta=1.0,
    background_label=0,
    name=None,
):
    """

    **Multiclass NMS**

    This operator performs multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes that have high scores larger than score_threshold, if providing this
    threshold, then selects the largest nms_top_k confidence scores if nms_top_k
    is larger than -1. Then this operator prunes away boxes that have high IOU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k of the total bboxes are kept
    per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4)  which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)

            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.score = (0.3, 0.3, 0.1)

            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0


        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
                         [2, 0.4, 2.0, 3.0, 7.0, 5.0]]

            Out format is (label, confidence, xmin, ymin, xmax, ymax)
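
    The greedy, adaptive-threshold selection described above can be sketched in
    pure Python as follows. This is an illustrative, per-class sketch only; the
    operator implements the selection in C++, and the exact point at which the
    threshold decays is an implementation detail:

    .. code-block:: python

        def iou(a, b):
            # Intersection over union of two [xmin, ymin, xmax, ymax] boxes.
            iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
            ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
            inter = iw * ih
            area_a = (a[2] - a[0]) * (a[3] - a[1])
            area_b = (b[2] - b[0]) * (b[3] - b[1])
            union = area_a + area_b - inter
            return inter / union if union > 0 else 0.0

        def nms_sketch(boxes, scores, score_threshold, nms_threshold, nms_eta):
            # Visit candidates in descending score order and greedily keep a
            # box if it overlaps every kept box by at most the threshold.
            order = sorted(range(len(scores)), key=lambda i: -scores[i])
            keep, adaptive = [], nms_threshold
            for i in order:
                if scores[i] < score_threshold:
                    continue
                if all(iou(boxes[i], boxes[j]) <= adaptive for j in keep):
                    keep.append(i)
                    # Adaptive rule: shrink the threshold while above 0.5.
                    if nms_eta < 1.0 and adaptive > 0.5:
                        adaptive *= nms_eta
            return keep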
    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the
                           class number. The data type is float32 or float64.
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           number of bounding boxes. For each category there
                           are in total M scores corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4]. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adjusting nms_threshold in adaptive NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If there is no detected boxes for all
             images, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes detected, the lod is changed
             from {0} to {1})


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(
        bboxes, 'BBoxes', ['float32', 'float64'], 'multiclass_nms'
    )
    check_variable_and_dtype(
        scores, 'Scores', ['float32', 'float64'], 'multiclass_nms'
    )
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
    check_type(normalized, 'normalized', bool, 'multiclass_nms')
    check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
    check_type(background_label, 'background_label', int, 'multiclass_nms')

    helper = LayerHelper('multiclass_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes, 'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized,
        },
        outputs={'Out': output},
    )
    output.stop_gradient = True

    return output


def locality_aware_nms(
    bboxes,
    scores,
    score_threshold,
    nms_top_k,
    keep_top_k,
    nms_threshold=0.3,
    normalized=True,
    nms_eta=1.0,
    background_label=-1,
    name=None,
):
    """
    **Locality Aware NMS**

    `Locality Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs
    locality-aware non-maximum suppression (LANMS) on boxes and scores.

    First, this operator merges boxes and scores according to their IoU
    (intersection over union). In the NMS step, it greedily selects a subset
    of detection bounding boxes whose scores are larger than score_threshold,
    if this threshold is provided, then keeps the nms_top_k highest-confidence
    boxes if nms_top_k is larger than -1. It then prunes away boxes that have
    a high IoU overlap with already selected boxes, using adaptive-threshold
    NMS controlled by nms_threshold and nms_eta.

    After the NMS step, at most keep_top_k bboxes in total are kept per image
    if keep_top_k is larger than -1. A minimal sketch of the score-weighted
    merge step is shown below.
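
    A rough NumPy sketch of that merge (illustrative only; weighted_merge is
    a hypothetical helper, not part of this API):

    .. code-block:: python

        import numpy as np

        def weighted_merge(box1, score1, box2, score2):
            # Fuse two highly overlapping boxes into one by averaging their
            # coordinates weighted by score, and accumulate the scores.
            box1 = np.asarray(box1, dtype='float32')
            box2 = np.asarray(box2, dtype='float32')
            merged = (score1 * box1 + score2 * box2) / (score1 + score2)
            return merged, score1 + score2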

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8, 16, 24, 32]
                           representing the predicted locations of M bounding
                           bboxes, N is the batch size. Each bounding box
                           has four coordinate values and the layout is
                           [xmin, ymin, xmax, ymax] when the box size equals 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M] representing the
                           predicted confidence scores. N is the batch
                           size, C is the class number, M is the number of
                           bounding boxes. Currently only 1 class is supported.
                           For each category there are M scores corresponding
                           to the M bounding boxes. Please note, M is equal to
                           the 2nd dimension of BBoxes. The data type is
                           float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: -1
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidence scores after filtering detections
                         based on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS; when nms_eta is less
                         than 1.0, nms_threshold decays by this rate after
                         each selection. Default: 1.0
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
                          Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] representing the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax],
             or a 2-D LoDTensor with shape [No, 10] representing the detections.
             Each row has 10 values:
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
             total number of detections. If there are no detected boxes for any
             image, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1}). The data type is float32 or float64.


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
                                      dtype='float32')
            scores = fluid.data(name='scores', shape=[None, 1, 81],
                                      dtype='float32')
            out = fluid.layers.locality_aware_nms(bboxes=boxes,
                                              scores=scores,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    check_variable_and_dtype(
        bboxes, 'bboxes', ['float32', 'float64'], 'locality_aware_nms'
    )
    check_variable_and_dtype(
        scores, 'scores', ['float32', 'float64'], 'locality_aware_nms'
    )
    check_type(background_label, 'background_label', int, 'locality_aware_nms')
    check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
    check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
    check_type(normalized, 'normalized', bool, 'locality_aware_nms')

    shape = scores.shape
    assert len(shape) == 3, "dim size of scores must be 3"
    assert (
        shape[1] == 1
    ), "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"

    helper = LayerHelper('locality_aware_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    out = {'Out': output}

    helper.append_op(
        type="locality_aware_nms",
        inputs={'BBoxes': bboxes, 'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized,
        },
        outputs={'Out': output},
    )
    output.stop_gradient = True

    return output


def matrix_nms(
    bboxes,
    scores,
    score_threshold,
    post_threshold,
    nms_top_k,
    keep_top_k,
    use_gaussian=False,
    gaussian_sigma=2.0,
    background_label=0,
    normalized=True,
    return_index=False,
    name=None,
):
    """
    **Matrix NMS**

    This operator does matrix non maximum suppression (NMS).

    First, a subset of candidate bounding boxes with scores higher than
    score_threshold (if provided) is selected, then the top k candidates are
    kept if nms_top_k is larger than -1. Scores of the remaining candidates
    are then decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes in total are kept per image
    if keep_top_k is larger than -1. A minimal sketch of the score decay is
    shown below.
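
    A rough NumPy sketch of the pairwise decay factor (illustrative only;
    iou is the IoU between a candidate and a higher-scoring box, max_iou is
    that box's largest IoU with any box scored above it):

    .. code-block:: python

        import numpy as np

        def decay_factor(iou, max_iou, use_gaussian=False, gaussian_sigma=2.0):
            # Gaussian decay: exp(-(iou^2 - max_iou^2) / sigma).
            if use_gaussian:
                return np.exp(-(iou ** 2 - max_iou ** 2) / gaussian_sigma)
            # Linear decay: (1 - iou) / (1 - max_iou).
            return (1.0 - iou) / (1.0 - max_iou)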

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence scores.
                           N is the batch size, C is the class number, M is
                           the number of bounding boxes. For each category
                           there are M scores corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score.
        post_threshold (float): Threshold to filter out bounding boxes with
                                low confidence score AFTER decaying.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        use_gaussian (bool): Use Gaussian as the decay function. Default: False
        gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether return selected index. Default: False
        name(str): Name of the matrix nms op. Default: None.

    Returns:
        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, one Variable(Out) is returned.

        Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
             detection results.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             (After version 1.3, when no boxes are detected, the lod is changed
             from {0} to {1})

        Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
            selected indices, which are absolute values cross batches.

    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None, 81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.matrix_nms(bboxes=boxes,
                                          scores=scores,
                                          background_label=0,
                                          score_threshold=0.5,
                                          post_threshold=0.1,
                                          nms_top_k=400,
                                          keep_top_k=200,
                                          normalized=False)
    """
    if in_dygraph_mode():
        attrs = (
            score_threshold,
            nms_top_k,
            keep_top_k,
            post_threshold,
            use_gaussian,
            gaussian_sigma,
            background_label,
            normalized,
        )

        out, index = _C_ops.matrix_nms(bboxes, scores, *attrs)
        if return_index:
            return out, index
        else:
            return out

    check_variable_and_dtype(
        bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
    )
    check_variable_and_dtype(
        scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
    )
    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
    check_type(normalized, 'normalized', bool, 'matrix_nms')
    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
    check_type(background_label, 'background_label', int, 'matrix_nms')

    helper = LayerHelper('matrix_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int')
    helper.append_op(
        type="matrix_nms",
        inputs={'BBoxes': bboxes, 'Scores': scores},
        attrs={
            'score_threshold': score_threshold,
            'post_threshold': post_threshold,
            'nms_top_k': nms_top_k,
            'keep_top_k': keep_top_k,
            'use_gaussian': use_gaussian,
            'gaussian_sigma': gaussian_sigma,
            'background_label': background_label,
            'normalized': normalized,
        },
        outputs={'Out': output, 'Index': index},
    )
    output.stop_gradient = True

    if return_index:
        return output, index
    else:
        return output


def distribute_fpn_proposals(
    fpn_rois,
    min_level,
    max_level,
    refer_level,
    refer_scale,
    rois_num=None,
    name=None,
):
    r"""

    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of each proposal, the referring scale and
    the referring level. Besides, to restore the order of proposals, we return
    an array which indicates the original index of the rois in the current
    proposals.

    To compute the FPN level for each roi, the formula is given as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\
        level &= floor(\log(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
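
    A rough scalar illustration of this assignment (a sketch only; it assumes
    the log above is base 2, as in the FPN paper):

    .. code-block:: python

        import math

        def fpn_level(roi_area, refer_scale=224, refer_level=4,
                      min_level=2, max_level=5):
            # Map an RoI to an FPN level by comparing its scale (the square
            # root of its area) with the reference scale, then clip the
            # result into [min_level, max_level].
            roi_scale = math.sqrt(roi_area)
            level = math.floor(math.log2(roi_scale / refer_scale) + refer_level)
            return min(max_level, max(min_level, level))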

    Args:
        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of FPN layer where the proposals come
            from.
        max_level(int32): The highest level of FPN layer where the proposals
            come from.
        refer_level(int32): The referring level of FPN layer with specified scale.
        refer_scale(int32): The referring scale of FPN layer with specified level.
        rois_num(Tensor): 1-D Tensor contains the number of RoIs in each image.
            The shape is [B] and data type is int32. B is the number of images.
            If it is not None then return a list of 1-D Tensor. Each element
            is the output RoIs' number of each image on the corresponding level
            and the shape is [B]. None by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set name,
            and it is None by default.

    Returns:
        Tuple:

        multi_rois(List): A list of 2-D LoDTensor with shape [M, 4]
        and data type float32 or float64. The length is
        max_level-min_level+1. The proposals in each FPN level.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
        the number of total rois. The data type is int32. It is
        used to restore the order of fpn_rois.

        rois_num_per_level(List): A list of 1-D Tensor and each Tensor is
        the RoIs' number in each image on the corresponding level. The shape
        is [B] and the data type is int32. B is the number of images.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """
    return paddle.vision.ops.distribute_fpn_proposals(
        fpn_rois=fpn_rois,
        min_level=min_level,
        max_level=max_level,
        refer_level=refer_level,
        refer_scale=refer_scale,
        rois_num=rois_num,
        name=name,
    )


@templatedoc()
def box_decoder_and_assign(
    prior_box, prior_box_var, target_box, box_score, box_clip, name=None
):
    """

    ${comment}
    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set name,
            and it is None by default.
    Returns:
        Tuple:

        decode_box(${decode_box_type}): ${decode_box_comment}

        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
                name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4*81], dtype='float32')
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32')
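            # 4.135 below is the box_clip value; it is approximately
            # log(1000 / 16), a clip commonly applied to decoded box deltas
            # (an assumed rationale, not documented by this op).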
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)

    """
    check_variable_and_dtype(
        prior_box, 'prior_box', ['float32', 'float64'], 'box_decoder_and_assign'
    )
    check_variable_and_dtype(
        target_box,
        'target_box',
        ['float32', 'float64'],
        'box_decoder_and_assign',
    )
    check_variable_and_dtype(
        box_score, 'box_score', ['float32', 'float64'], 'box_decoder_and_assign'
    )
    helper = LayerHelper("box_decoder_and_assign", **locals())

    decoded_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype
    )
    output_assign_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype
    )

    helper.append_op(
        type="box_decoder_and_assign",
        inputs={
            "PriorBox": prior_box,
            "PriorBoxVar": prior_box_var,
            "TargetBox": target_box,
            "BoxScore": box_score,
        },
        attrs={"box_clip": box_clip},
        outputs={
            "DecodeBox": decoded_box,
            "OutputAssignBox": output_assign_box,
        },
    )
    return decoded_box, output_assign_box


def collect_fpn_proposals(
    multi_rois,
    multi_scores,
    min_level,
    max_level,
    post_nms_top_n,
    rois_num_per_level=None,
    name=None,
):
    """

    **This OP only supports LoDTensor as input**. Concat multi-level RoIs
    (Region of Interest) and select N RoIs with respect to multi_scores.
    This operation performs the following steps (a rough sketch of steps 2-5
    follows the list):

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id
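
    A rough NumPy sketch of steps 2-5 (illustrative only; the inputs are
    hypothetical per-level arrays, with batch_ids_list carrying each RoI's
    image index):

    .. code-block:: python

        import numpy as np

        def collect_top_rois(rois_list, scores_list, batch_ids_list,
                             post_nms_top_n):
            # Step 2: concat multi-level RoIs, scores and their batch ids.
            rois = np.concatenate(rois_list, axis=0)
            scores = np.concatenate(scores_list, axis=0).reshape(-1)
            batch_ids = np.concatenate(batch_ids_list, axis=0)
            # Step 3: sort scores and keep the post_nms_top_n highest.
            keep = np.argsort(-scores)[:post_nms_top_n]
            # Step 4: gather RoIs by the selected indices.
            rois, batch_ids = rois[keep], batch_ids[keep]
            # Step 5: re-sort the kept RoIs by their batch (image) id.
            return rois[np.argsort(batch_ids, kind='stable')]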

    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D
            LoDTensor with shape [N, 4] and data type is float32 or float64,
            N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Element in list
            is 2-D LoDTensor with shape [N, 1] and data type is float32 or
            float64, N is the number of RoIs.
        min_level(int): The lowest level of FPN layer to collect
        max_level(int): The highest level of FPN layer to collect
        post_nms_top_n(int): The number of selected RoIs
        rois_num_per_level(list, optional): The List of RoIs' numbers.
            Each element is 1-D Tensor which contains the RoIs' number of each
            image on each level and the shape is [B] and data type is
            int32, B is the number of images. If it is not None then return
            a 1-D Tensor contains the output RoIs' number of each image and
            the shape is [B]. Default: None
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set name,
            and it is None by default.
    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
        float32 or float64. Selected RoIs.

        rois_num(Tensor): 1-D Tensor contains the RoIs' number of each
        image. The shape is [B] and data type is int32. B is the number of
        images.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))

            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois,
                multi_scores=multi_scores,
                min_level=2,
                max_level=5,
                post_nms_top_n=2000)
    """
    num_lvl = max_level - min_level + 1
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]

    if _non_static_mode():
        assert (
            rois_num_per_level is not None
        ), "rois_num_per_level should not be None in dygraph mode."
        attrs = ('post_nms_topN', post_nms_top_n)
        output_rois, rois_num = _legacy_C_ops.collect_fpn_proposals(
            input_rois, input_scores, rois_num_per_level, *attrs
        )
        return output_rois, rois_num

    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    check_dtype(
        dtype, 'multi_rois', ['float32', 'float64'], 'collect_fpn_proposals'
    )
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True

    inputs = {
        'MultiLevelRois': input_rois,
        'MultiLevelScores': input_scores,
    }
    outputs = {'FpnRois': output_rois}
    if rois_num_per_level is not None:
        inputs['MultiLevelRoIsNum'] = rois_num_per_level
        rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rois_num.stop_gradient = True
        outputs['RoisNum'] = rois_num
    helper.append_op(
        type='collect_fpn_proposals',
        inputs=inputs,
        outputs=outputs,
        attrs={'post_nms_topN': post_nms_top_n},
    )
    if rois_num_per_level is not None:
        return output_rois, rois_num
    return output_rois