#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the detection neural network.
"""

from __future__ import print_function

from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
import math
import six
import numpy
from functools import reduce

__all__ = [
    'prior_box',
    'density_prior_box',
    'multi_box_head',
    'bipartite_match',
    'target_assign',
    'detection_output',
    'ssd_loss',
    'rpn_target_assign',
    'retinanet_target_assign',
    'sigmoid_focal_loss',
    'anchor_generator',
    'roi_perspective_transform',
    'generate_proposal_labels',
    'generate_proposals',
    'generate_mask_labels',
    'iou_similarity',
    'box_coder',
    'polygon_box_transform',
    'yolov3_loss',
    'yolo_box',
    'box_clip',
    'multiclass_nms',
    'multiclass_nms2',
    'retinanet_detection_output',
    'distribute_fpn_proposals',
    'box_decoder_and_assign',
    'collect_fpn_proposals',
]


def retinanet_target_assign(bbox_pred,
                            cls_logits,
                            anchor_box,
                            anchor_var,
                            gt_boxes,
                            gt_labels,
                            is_crowd,
                            im_info,
                            num_classes=1,
                            positive_overlap=0.5,
                            negative_overlap=0.4):
    """
    **Target Assign Layer for Retinanet .**

    Given the Intersection-over-Union (IoU) overlap between anchors and
    ground-truth boxes, this layer assigns classification and regression
    targets to each anchor; these targets are used to train RetinaNet.
    Every anchor is assigned a one-hot vector of classification targets of
    length :attr:`num_classes`, and a 4-vector of box regression targets.
    The assignment rules are as follows:

    1. An anchor is assigned to a ground-truth box when: (i) it has the highest
    IoU overlap with that ground-truth box, or (ii) it has an IoU overlap higher
    than positive_overlap (0.5) with any ground-truth box.

    2. An anchor is assigned to background when its IoU is lower than
    negative_overlap (0.4) for all ground-truth boxes.

    When an anchor is assigned to a ground-truth box of the i-th category,
    the i-th entry in its C-vector of targets is set to 1 and all other entries
    are set to 0. When an anchor is assigned to background, all entries are set
    to 0. Anchors that are not assigned do not contribute to the training
    objective. The regression targets are the encoded ground-truth boxes
    associated with the assigned anchors.
 
    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        cls_logits(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence predictions. N is the batch size, C is the
            number of classes (excluding background), M is number of bounding boxes.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box.
        anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded 
            variances of anchors.
        gt_boxes(Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input.
        gt_labels(Variable): The ground-truth labels are a 2D LoDTensor with
            shape [Ng, 1], Ng is the total number of ground-truth labels of
            mini-batch input.
        is_crowd(Variable): A 1-D LoDTensor which indicates whether a
            ground-truth box is a crowd.
        im_info(Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
            3 is the height, width and scale.
        num_classes(int32): The number of classes.
        positive_overlap(float): Minimum overlap required between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a positive
            example.
        negative_overlap(float): Maximum overlap allowed between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a negative
            example.

    Returns:
        tuple:
               A tuple(predicted_scores, predicted_location, target_label,
               target_bbox, bbox_inside_weight, fg_num) is returned. The
               predicted_scores and predicted_location are the predicted results
               of the RetinaNet. The target_label and target_bbox are the ground
               truth, respectively. The predicted_location is a 2D Tensor with
               shape [F, 4], and the shape of target_bbox is the same as that of
               the predicted_location, where F is the number of foreground
               anchors. The predicted_scores is a 2D Tensor with shape
               [F + B, C], and the shape of target_label is [F + B, 1], where B
               is the number of background anchors; F and B depend on the
               input of this operator. Bbox_inside_weight represents whether the
               predicted location is fake foreground or not and the shape is [F, 4].
               Fg_num is the foreground number (including fake foreground) which
               is needed by focal loss.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          bbox_pred = fluid.layers.data(name='bbox_pred', shape=[1, 100, 4],
                            append_batch_size=False, dtype='float32')
          cls_logits = fluid.layers.data(name='cls_logits', shape=[1, 100, 10],
                            append_batch_size=False, dtype='float32')
          anchor_box = fluid.layers.data(name='anchor_box', shape=[100, 4],
                            append_batch_size=False, dtype='float32')
          anchor_var = fluid.layers.data(name='anchor_var', shape=[100, 4],
                            append_batch_size=False, dtype='float32')
          gt_boxes = fluid.layers.data(name='gt_boxes', shape=[10, 4],
                            append_batch_size=False, dtype='float32')
          gt_labels = fluid.layers.data(name='gt_labels', shape=[10, 1],
                            append_batch_size=False, dtype='float32')
          is_crowd = fluid.layers.data(name='is_crowd', shape=[1],
                            append_batch_size=False, dtype='float32')
          im_info = fluid.layers.data(name='im_infoss', shape=[1, 3],
                            append_batch_size=False, dtype='float32')
          loc_pred, score_pred, loc_target, score_target, bbox_inside_weight, fg_num = \
                fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
                anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)

    """

    helper = LayerHelper('retinanet_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    fg_num = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="retinanet_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'GtLabels': gt_labels,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight,
            'ForegroundNumber': fg_num
        },
        attrs={
            'positive_overlap': positive_overlap,
            'negative_overlap': negative_overlap
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True
    fg_num.stop_gradient = True

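    # Flatten class logits to [N * M, C] and box predictions to [N * M, 4], then
    # gather only the entries selected by the target-assign op, so that the loss
    # is computed on the sampled anchors.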
    cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num


def rpn_target_assign(bbox_pred,
                      cls_logits,
                      anchor_box,
                      anchor_var,
                      gt_boxes,
                      is_crowd,
                      im_info,
                      rpn_batch_size_per_im=256,
                      rpn_straddle_thresh=0.0,
                      rpn_fg_fraction=0.5,
                      rpn_positive_overlap=0.7,
                      rpn_negative_overlap=0.3,
                      use_random=True):
    """
    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**

    Given the Intersection-over-Union (IoU) overlap between anchors and
    ground-truth boxes, this layer assigns classification and regression
    targets to each anchor; these target labels are used to train the RPN.
    The classification target is a binary class label (of being an object or
    not). Following the Faster-RCNN paper, positive labels are assigned to
    two kinds of anchors: (i) the anchor/anchors with the highest IoU
    overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
    higher than rpn_positive_overlap (0.7) with any ground-truth box. Note
    that a single ground-truth box may assign positive labels to multiple
    anchors. An anchor is labeled negative when its IoU is lower than
    rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
    neither positive nor negative do not contribute to the training objective.
    The regression targets are the encoded ground-truth boxes associated with
    the positive anchors.

    Args:
        bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
        cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
            predicted confidence predictions. N is the batch size, 1 is the
            foreground and background sigmoid score, M is number of bounding boxes.
            The data type can be float32 or float64.
        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box. The data type can be float32 or float64.
        anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
            variances of anchors. The data type can be float32 or float64.
        gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input. The data type can be float32 or float64.
        is_crowd (Variable): A 1-D LoDTensor which indicates whether a
            ground-truth box is a crowd. The data type must be int32.
        im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
            3 is the height, width and scale.
        rpn_batch_size_per_im(int): Total number of RPN examples per image.
                                    The data type must be int32.
        rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
            by straddle_thresh pixels. The data type must be float32.
        rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
            foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
        rpn_positive_overlap(float): Minimum overlap required between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a positive
            example. The data type must be float32.
        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
            and ground-truth box for the (anchor, gt box) pair to be a negative
            example. The data type must be float32.

    Returns:
        tuple:
        A tuple(predicted_scores, predicted_location, target_label,
        target_bbox, bbox_inside_weight) is returned. The predicted_scores
        and predicted_location are the predicted results of the RPN.
        The target_label and target_bbox are the ground truth,
        respectively. The predicted_location is a 2D Tensor with
        shape [F, 4], and the shape of target_bbox is the same as that of
        the predicted_location, where F is the number of foreground
        anchors. The predicted_scores is a 2D Tensor with shape
        [F + B, 1], and the shape of target_label is the same as that
        of the predicted_scores, where B is the number of background
        anchors; F and B depend on the input of this operator.
        Bbox_inside_weight represents whether the predicted loc is fake_fg
        or not and the shape is [F, 4].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
            cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
            anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
            anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='float32')
            im_info = fluid.data(name='im_infoss', shape=[None, 3], dtype='float32')
            loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
                bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)

    """

    helper = LayerHelper('rpn_target_assign', **locals())
    # Assign target label to anchors
    loc_index = helper.create_variable_for_type_inference(dtype='int32')
    score_index = helper.create_variable_for_type_inference(dtype='int32')
    target_label = helper.create_variable_for_type_inference(dtype='int32')
    target_bbox = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    bbox_inside_weight = helper.create_variable_for_type_inference(
        dtype=anchor_box.dtype)
    helper.append_op(
        type="rpn_target_assign",
        inputs={
            'Anchor': anchor_box,
            'GtBoxes': gt_boxes,
            'IsCrowd': is_crowd,
            'ImInfo': im_info
        },
        outputs={
            'LocationIndex': loc_index,
            'ScoreIndex': score_index,
            'TargetLabel': target_label,
            'TargetBBox': target_bbox,
            'BBoxInsideWeight': bbox_inside_weight
        },
        attrs={
            'rpn_batch_size_per_im': rpn_batch_size_per_im,
            'rpn_straddle_thresh': rpn_straddle_thresh,
            'rpn_positive_overlap': rpn_positive_overlap,
            'rpn_negative_overlap': rpn_negative_overlap,
            'rpn_fg_fraction': rpn_fg_fraction,
            'use_random': use_random
        })

    loc_index.stop_gradient = True
    score_index.stop_gradient = True
    target_label.stop_gradient = True
    target_bbox.stop_gradient = True
    bbox_inside_weight.stop_gradient = True

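    # Flatten class logits to [N * M, 1] and box predictions to [N * M, 4], then
    # gather only the entries selected by the target-assign op, so that the loss
    # is computed on the sampled anchors.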
    cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
    bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
    predicted_cls_logits = nn.gather(cls_logits, score_index)
    predicted_bbox_pred = nn.gather(bbox_pred, loc_index)

    return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight


def sigmoid_focal_loss(x, label, fg_num, gamma=2, alpha=0.25):
    """
    **Sigmoid Focal Loss Operator.**

    Focal loss is used to address the foreground-background class imbalance that
    exists during the training phase of one-stage detectors. This operator computes
    the sigmoid value for each element in the input tensor, after which the focal
    loss is measured.

    The focal loss is given as follows:

    .. math::
        loss_j = (-label_j * alpha * {(1 - \\sigma(x_j))}^{gamma} * \\log(\\sigma(x_j)) -
        (1 - label_j) * (1 - alpha) * {\\sigma(x_j)}^{gamma} * \\log(1 - \\sigma(x_j)))
        / fg\_num, j = 1,...,K

    We know that
    
    .. math::
        \\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}

    Args:
        x(Variable): A 2-D tensor with shape [N, D], where N is the batch size and D is the number
            of classes (excluding background). This input is a tensor of logits computed by the
            previous operator.
        label(Variable): A 2-D tensor with shape [N, 1], which is the probabilistic labels.
        fg_num(Variable): A 1-D tensor with shape [1], which is the number of foreground examples.

        gamma(float): Hyper-parameter to balance the easy and hard examples. Default value is
            set to 2.0.
        alpha(float): Hyper-parameter to balance the positive and negative example. Default value
            is set to 0.25.

    Returns:
        out(Variable): A 2-D tensor with shape [N, D], which is the focal loss.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input = fluid.layers.data(
                name='data', shape=[10,80], append_batch_size=False, dtype='float32')
            label = fluid.layers.data(
                name='label', shape=[10,1], append_batch_size=False, dtype='int32')
            fg_num = fluid.layers.data(
                name='fg_num', shape=[1], append_batch_size=False, dtype='int32')
            loss = fluid.layers.sigmoid_focal_loss(x=input,
                                                   label=label,
                                                   fg_num=fg_num,
                                                   gamma=2.,
                                                   alpha=0.25)
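
        A rough per-element sketch of the formula above in plain Python
        (illustrative only, not the operator itself):

        .. code-block:: python

            import math

            # a single logit x with a positive label; fg_num = 1, gamma = 2, alpha = 0.25
            x, label, fg_num, gamma, alpha = 1.2, 1.0, 1.0, 2.0, 0.25
            p = 1.0 / (1.0 + math.exp(-x))  # sigma(x)
            loss = (-label * alpha * (1 - p)**gamma * math.log(p) -
                    (1 - label) * (1 - alpha) * p**gamma * math.log(1 - p)) / fg_num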
    """

    helper = LayerHelper("sigmoid_focal_loss", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="sigmoid_focal_loss",
        inputs={"X": x,
                "Label": label,
                "FgNum": fg_num},
        attrs={"gamma": gamma,
               'alpha': alpha},
        outputs={"Out": out})
    return out


def detection_output(loc,
                     scores,
                     prior_box,
                     prior_box_var,
                     background_label=0,
                     nms_threshold=0.3,
                     nms_top_k=400,
                     keep_top_k=200,
                     score_threshold=0.01,
                     nms_eta=1.0,
                     return_index=False):
    """
    **Detection Output Layer for Single Shot Multibox Detector (SSD).**

    This operation obtains the detection results by performing the following
    two steps:

    1. Decode input bounding box predictions according to the prior boxes.
    2. Get the final detection results by applying multi-class non maximum
       suppression (NMS).

    Please note, this operation doesn't clip the final output bounding boxes
    to the image window.

    Args:
        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
            predicted locations of M bounding bboxes. N is the batch size,
            and each bounding box has four coordinate values and the layout
            is [xmin, ymin, xmax, ymax].
        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
            predicted confidence predictions. N is the batch size, C is the
            class number, M is number of bounding boxes. For each category
            there are total M scores corresponding to the M bounding boxes.
        prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
            each box is represented as [xmin, ymin, xmax, ymax],
            [xmin, ymin] is the left top coordinate of the anchor box,
            if the input is image feature map, they are close to the origin
            of the coordinate system. [xmax, ymax] is the right bottom
            coordinate of the anchor box.
        prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
            of variance.
        background_label(int): The index of background label,
            the background label will be ignored. If set to -1, then all
            categories will be considered.
        nms_threshold(float): The threshold to be used in NMS.
        nms_top_k(int): Maximum number of detections to be kept according
            to the confidences after filtering detections based on
            score_threshold.
        keep_top_k(int): Number of total bboxes to be kept per image after
            NMS step. -1 means keeping all bboxes after NMS step.
        score_threshold(float): Threshold to filter out bounding boxes with
            low confidence score. If not provided, consider all boxes.
        nms_eta(float): The parameter for adaptive NMS.
        return_index(bool): Whether to return the selected index. Default: False

    Returns:

        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, a tuple with one Variable(Out) is returned.

        Out: The detection output is a LoDTensor with shape [No, 6]. Each row
        has six values: [label, confidence, xmin, ymin, xmax, ymax]. `No` is
        the total number of detections in this mini-batch. For each instance,
        the offsets in the first dimension are called LoD, the offset number is
        N + 1, N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
        detected results; if it is 0, the i-th image has no detected results.

        If no results are detected for any image, LoD will be set to {1}, and
        the output tensor only contains one value, which is -1.
        (After version 1.3, when no boxes are detected, the LoD is changed
        from {0} to {1}.)

        Index: Only returned when return_index is True. A 2-D LoDTensor with
        shape [No, 1] represents the selected index, whose data type is integer.
        The index is the absolute value across batches. No is the same number
        as Out. If the index is used to gather other attributes such as age,
        one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
        N is the batch size and M is the number of boxes.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            pb = fluid.layers.data(name='prior_box', shape=[10, 4],
                         append_batch_size=False, dtype='float32')
            pbv = fluid.layers.data(name='prior_box_var', shape=[10, 4],
                          append_batch_size=False, dtype='float32')
            loc = fluid.layers.data(name='target_box', shape=[2, 21, 4],
                          append_batch_size=False, dtype='float32')
            scores = fluid.layers.data(name='scores', shape=[2, 21, 10],
                          append_batch_size=False, dtype='float32')
            nmsed_outs, index = fluid.layers.detection_output(scores=scores,
                                       loc=loc,
                                       prior_box=pb,
                                       prior_box_var=pbv,
                                       return_index=True)
    """
    helper = LayerHelper("detection_output", **locals())
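    # Step 1: decode the predicted location offsets into boxes using the prior boxes.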
    decoded_box = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=loc,
        code_type='decode_center_size')
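    # Step 2: turn class scores into probabilities and run multi-class NMS on the decoded boxes.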
    scores = nn.softmax(input=scores)
    scores = nn.transpose(scores, perm=[0, 2, 1])
    scores.stop_gradient = True
    nmsed_outs = helper.create_variable_for_type_inference(
        dtype=decoded_box.dtype)
    if return_index:
        index = helper.create_variable_for_type_inference(dtype='int')
        helper.append_op(
            type="multiclass_nms2",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs,
                     'Index': index},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
        index.stop_gradient = True
    else:
        helper.append_op(
            type="multiclass_nms",
            inputs={'Scores': scores,
                    'BBoxes': decoded_box},
            outputs={'Out': nmsed_outs},
            attrs={
                'background_label': 0,
                'nms_threshold': nms_threshold,
                'nms_top_k': nms_top_k,
                'keep_top_k': keep_top_k,
                'score_threshold': score_threshold,
                'nms_eta': 1.0,
            })
    nmsed_outs.stop_gradient = True
    if return_index:
        return nmsed_outs, index
    return nmsed_outs


@templatedoc()
def iou_similarity(x, y, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        y(${y_type}): ${y_comment}

    Returns:
        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.layers.data(name='x', shape=[4], dtype='float32')
            y = fluid.layers.data(name='y', shape=[4], dtype='float32')
            iou = fluid.layers.iou_similarity(x=x, y=y)
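
        For intuition, the IoU of two boxes in [xmin, ymin, xmax, ymax] form can
        be computed in plain Python as below (an illustrative sketch, not the
        actual kernel):

        .. code-block:: python

            def iou(a, b):
                # overlap width/height, clamped at zero
                iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
                ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
                inter = iw * ih
                area_a = (a[2] - a[0]) * (a[3] - a[1])
                area_b = (b[2] - b[0]) * (b[3] - b[1])
                return inter / (area_a + area_b - inter)

            # iou([0, 0, 2, 2], [1, 1, 3, 3]) -> 1 / 7 ~= 0.143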
    """
    helper = LayerHelper("iou_similarity", **locals())
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)

    helper.append_op(
        type="iou_similarity",
        inputs={"X": x,
                "Y": y},
        attrs={},
        outputs={"Out": out})
    return out


@templatedoc()
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              name=None,
              axis=0):
    """
    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.
    
    The Encoding schema is described below:

    .. math::

        ox = (tx - px) / pw / pxv

        oy = (ty - py) / ph / pyv

        ow = \log(\abs(tw / pw)) / pwv 

        oh = \log(\abs(th / ph)) / phv 

    The Decoding schema is described below:

    .. math::

        ox = (pw * pxv * tx + px) - tw / 2

        oy = (ph * pyv * ty + py) - th / 2

        ow = \exp(pwv * tw) * pw + tw / 2

        oh = \exp(phv * th) * ph + th / 2   

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, 
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote 
    the priorbox's (anchor) center coordinates, width and height. `pxv`, 
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`, 
    `ow`, `oh` denote the encoded/decoded coordinates, width and height. 

    During Box Decoding, two modes for broadcast are supported. Say target 
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or 
    [M, 4]. Then prior box will broadcast to target box along the 
    assigned axis. 

    Args:
        prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
            [M, 4] which holds M boxes whose data type is float32 or float64.
            Each box is represented as [xmin, ymin, xmax, ymax], [xmin, ymin]
            is the left top coordinate of the anchor box, if the input is image
            feature map, they are close to the origin of the coordinate system.
            [xmax, ymax] is the right bottom coordinate of the anchor box.
        prior_box_var(List|Variable|None): prior_box_var supports three types
            of input. One is a Variable with shape [M, 4] which holds M groups
            of variances, with data type float32 or float64. The second is a
            list consisting of 4 elements shared by all boxes, with data type
            float32 or float64. The third is None, in which case no variance is
            involved in the calculation.
        target_box(Variable): This input can be a 2-D LoDTensor with shape
            [N, 4] when code_type is 'encode_center_size'. This input also can
            be a 3-D Tensor with shape [N, M, 4] when code_type is
            'decode_center_size'. Each box is represented as
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64.
            This tensor can contain LoD information to represent a batch of inputs.
        code_type(str): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size`
            by default.
        box_normalized(bool): Whether to treat the prior box as a normalized box.
            Set true by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
        axis(int): Which axis in PriorBox to broadcast for box decode,
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and 
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is 
            `decode_center_size`. Set 0 by default. 

    Returns:
        Variable:

        output_box(Variable): When code_type is 'encode_center_size', the
        output tensor of box_coder_op with shape [N, M, 4] representing the
        result of N target boxes encoded with M Prior boxes and variances.
        When code_type is 'decode_center_size', N represents the batch size
        and M represents the number of decoded boxes.

    Examples:
 
        .. code-block:: python
 
            import paddle.fluid as fluid
            # For encode
            prior_box_encode = fluid.layers.data(name='prior_box_encode',
                                  shape=[512, 4],
                                  dtype='float32',
                                  append_batch_size=False)
            target_box_encode = fluid.layers.data(name='target_box_encode',
                                   shape=[81,4],
                                   dtype='float32',
                                   append_batch_size=False)
            output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_encode,
                                    code_type="encode_center_size")
            # For decode
            prior_box_decode = fluid.layers.data(name='prior_box_decode',
                                  shape=[512, 4],
                                  dtype='float32',
                                  append_batch_size=False)
            target_box_decode = fluid.layers.data(name='target_box_decode',
                                   shape=[512,81,4],
                                   dtype='float32',
                                   append_batch_size=False)
            output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
                                    prior_box_var=[0.1,0.1,0.2,0.2],
                                    target_box=target_box_decode,
                                    code_type="decode_center_size",
                                    box_normalized=False,
                                    axis=1)
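
        A small numeric illustration of the encode schema above in plain Python
        (for intuition only; the px/py/pw/ph and tx/ty/tw/th values below denote
        centers and sizes as defined above):

        .. code-block:: python

            import math

            # prior box: center (0.5, 0.5), size 1.0 x 1.0, variances [0.1, 0.1, 0.2, 0.2]
            px, py, pw, ph = 0.5, 0.5, 1.0, 1.0
            pxv, pyv, pwv, phv = 0.1, 0.1, 0.2, 0.2
            # target box: center (0.6, 0.5), size 1.2 x 1.0
            tx, ty, tw, th = 0.6, 0.5, 1.2, 1.0

            ox = (tx - px) / pw / pxv          # 1.0
            oy = (ty - py) / ph / pyv          # 0.0
            ow = math.log(abs(tw / pw)) / pwv  # ~0.91
            oh = math.log(abs(th / ph)) / phv  # 0.0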
    """
    helper = LayerHelper("box_coder", **locals())

    if name is None:
        output_box = helper.create_variable_for_type_inference(
            dtype=prior_box.dtype)
    else:
        output_box = helper.create_variable(
            name=name, dtype=prior_box.dtype, persistable=False)

    inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis
    }
    if isinstance(prior_box_var, Variable):
        inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input variance of box_coder must be Variable or list")
    helper.append_op(
        type="box_coder",
        inputs=inputs,
        attrs=attrs,
        outputs={"OutputBox": output_box})
    return output_box


@templatedoc()
def polygon_box_transform(input, name=None):
    """
    ${comment}

    Args:
        input(Variable): The input with shape [batch_size, geometry_channels, height, width].
                         A Tensor with type float32, float64.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Variable: The output with the same shape as input. A Tensor with type float32, float64.

    Examples:
        .. code-block:: python
            
            import paddle.fluid as fluid
            input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
            out = fluid.layers.polygon_box_transform(input)
    """
    helper = LayerHelper("polygon_box_transform", **locals())
    if name is None:
        output = helper.create_variable_for_type_inference(dtype=input.dtype)
    else:
        output = helper.create_variable(
            name=name, dtype=input.dtype, persistable=False)

    helper.append_op(
        type="polygon_box_transform",
        inputs={"Input": input},
        attrs={},
        outputs={"Output": output})
    return output


@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
                gt_box,
                gt_label,
                anchors,
                anchor_mask,
                class_num,
                ignore_thresh,
                downsample_ratio,
                gt_score=None,
                use_label_smooth=True,
                name=None):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment}
        gt_box (Variable): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored.
                          x, y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in
                          an image.
        gt_label (Variable): class id of ground truth boxes, should be in shape
                            of [N, B].
        anchors (list|tuple): ${anchors_comment}
        anchor_mask (list|tuple): ${anchor_mask_comment}
        class_num (int): ${class_num_comment}
        ignore_thresh (float): ${ignore_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): the name of yolov3 loss. Default None.
        gt_score (Variable): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool): ${use_label_smooth_comment}

    Returns:
        Variable: A 1-D tensor with shape [N], the value of yolov3 loss

    Raises:
        TypeError: Input x of yolov3_loss must be Variable
        TypeError: Input gtbox of yolov3_loss must be Variable
        TypeError: Input gtlabel of yolov3_loss must be Variable
        TypeError: Input gtscore of yolov3_loss must be None or Variable
        TypeError: Attr anchors of yolov3_loss must be list or tuple
        TypeError: Attr class_num of yolov3_loss must be an integer
        TypeError: Attr ignore_thresh of yolov3_loss must be a float number
        TypeError: Attr use_label_smooth of yolov3_loss must be a bool value

    Examples:
      .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32')
          gt_box = fluid.layers.data(name='gt_box', shape=[6, 4], dtype='float32')
          gt_label = fluid.layers.data(name='gt_label', shape=[6], dtype='int32')
          gt_score = fluid.layers.data(name='gt_score', shape=[6], dtype='float32')
          anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
          anchor_mask = [0, 1, 2]
          loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
                                          gt_score=gt_score, anchors=anchors,
                                          anchor_mask=anchor_mask, class_num=80,
                                          ignore_thresh=0.7, downsample_ratio=32)
    """
    helper = LayerHelper('yolov3_loss', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolov3_loss must be Variable")
    if not isinstance(gt_box, Variable):
        raise TypeError("Input gtbox of yolov3_loss must be Variable")
    if not isinstance(gt_label, Variable):
        raise TypeError("Input gtlabel of yolov3_loss must be Variable")
    if gt_score is not None and not isinstance(gt_score, Variable):
        raise TypeError("Input gtscore of yolov3_loss must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
    if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
        raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolov3_loss must be an integer")
    if not isinstance(ignore_thresh, float):
        raise TypeError(
            "Attr ignore_thresh of yolov3_loss must be a float number")
    if not isinstance(use_label_smooth, bool):
        raise TypeError(
            "Attr use_label_smooth of yolov3_loss must be a bool value")

    if name is None:
        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        loss = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)

    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {
        "X": x,
        "GTBox": gt_box,
        "GTLabel": gt_label,
    }
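    # GTScore is optional; when provided it carries the mixup scores of the ground-truth boxes.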
    if gt_score:
        inputs["GTScore"] = gt_score

    attrs = {
        "anchors": anchors,
        "anchor_mask": anchor_mask,
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
        "downsample_ratio": downsample_ratio,
        "use_label_smooth": use_label_smooth,
    }

    helper.append_op(
        type='yolov3_loss',
        inputs=inputs,
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask
        },
        attrs=attrs)
    return loss


@templatedoc(op_type="yolo_box")
def yolo_box(x,
             img_size,
             anchors,
             class_num,
             conf_thresh,
             downsample_ratio,
             name=None):
    """
    ${comment}

    Args:
        x (Variable): ${x_comment}
        img_size (Variable): ${img_size_comment}
        anchors (list|tuple): ${anchors_comment}
        class_num (int): ${class_num_comment}
        conf_thresh (float): ${conf_thresh_comment}
        downsample_ratio (int): ${downsample_ratio_comment}
        name (string): the name of yolo box layer. Default None.

    Returns:
        Variable: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
        scores of boxes.

    Raises:
        TypeError: Input x of yolo_box must be Variable
        TypeError: Attr anchors of yolo box must be list or tuple
        TypeError: Attr class_num of yolo box must be an integer
        TypeError: Attr conf_thresh of yolo box must be a float number

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32')
        img_size = fluid.layers.data(name='img_size',shape=[2],dtype='int64')
        anchors = [10, 13, 16, 30, 33, 23]
        boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
                                        conf_thresh=0.01, downsample_ratio=32)
    """
    helper = LayerHelper('yolo_box', **locals())

    if not isinstance(x, Variable):
        raise TypeError("Input x of yolo_box must be Variable")
    if not isinstance(img_size, Variable):
        raise TypeError("Input img_size of yolo_box must be Variable")
    if not isinstance(anchors, list) and not isinstance(anchors, tuple):
        raise TypeError("Attr anchors of yolo_box must be list or tuple")
    if not isinstance(class_num, int):
        raise TypeError("Attr class_num of yolo_box must be an integer")
    if not isinstance(conf_thresh, float):
        raise TypeError("Attr conf_thresh of yolo_box must be a float number")

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
    }

    helper.append_op(
        type='yolo_box',
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs)
    return boxes, scores


@templatedoc()
def detection_map(detect_res,
                  label,
                  class_num,
                  background_label=0,
                  overlap_threshold=0.3,
                  evaluate_difficult=True,
                  has_state=None,
                  input_states=None,
                  out_states=None,
                  ap_version='integral'):
    """
    ${comment}

    Args:
        detect_res: ${detect_res_comment}
        label:  ${label_comment}
        class_num: ${class_num_comment}
        background_label: ${background_label_comment}
        overlap_threshold: ${overlap_threshold_comment}
        evaluate_difficult: ${evaluate_difficult_comment}
        has_state: ${has_state_comment}
        input_states: If not None, It contains 3 elements:
            1. pos_count ${pos_count_comment}.
            2. true_pos ${true_pos_comment}.
            3. false_pos ${false_pos_comment}.
        out_states: If not None, it contains 3 elements.
            1. accum_pos_count ${accum_pos_count_comment}.
            2. accum_true_pos ${accum_true_pos_comment}.
            3. accum_false_pos ${accum_false_pos_comment}.
        ap_version: ${ap_type_comment}

    Returns:
        ${map_comment}


    Examples:
          .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers import detection
            detect_res = fluid.layers.data(
                name='detect_res',
                shape=[10, 6],
                append_batch_size=False,
                dtype='float32')
            label = fluid.layers.data(
                name='label',
                shape=[10, 6],
                append_batch_size=False,
                dtype='float32')

            map_out = detection.detection_map(detect_res, label, 21)
    """
    helper = LayerHelper("detection_map", **locals())

    def __create_var(type):
        return helper.create_variable_for_type_inference(dtype=type)

    map_out = __create_var('float32')
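    # Reuse caller-provided accumulator states when given; otherwise create fresh output variables.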
    accum_pos_count_out = out_states[0] if out_states else __create_var('int32')
    accum_true_pos_out = out_states[1] if out_states else __create_var(
        'float32')
    accum_false_pos_out = out_states[2] if out_states else __create_var(
        'float32')

    pos_count = input_states[0] if input_states else None
    true_pos = input_states[1] if input_states else None
    false_pos = input_states[2] if input_states else None

    helper.append_op(
        type="detection_map",
        inputs={
            'Label': label,
            'DetectRes': detect_res,
            'HasState': has_state,
            'PosCount': pos_count,
            'TruePos': true_pos,
            'FalsePos': false_pos
        },
        outputs={
            'MAP': map_out,
            'AccumPosCount': accum_pos_count_out,
            'AccumTruePos': accum_true_pos_out,
            'AccumFalsePos': accum_false_pos_out
        },
        attrs={
            'overlap_threshold': overlap_threshold,
            'evaluate_difficult': evaluate_difficult,
            'ap_type': ap_version,
            'class_num': class_num,
        })
    return map_out


def bipartite_match(dist_matrix,
                    match_type=None,
                    dist_threshold=None,
                    name=None):
    """
    This operator implements a greedy bipartite matching algorithm, which is
    used to obtain the matching with the maximum distance based on the input
    distance matrix. For an input 2D matrix, the bipartite matching algorithm
    can find the matched column for each row (matched means the largest
    distance), and can also find the matched row for each column. This operator
    only calculates matched indices from column to row. For each instance,
    the number of matched indices is the column number of the input distance
    matrix. **The OP only supports CPU**.

    There are two outputs, matched indices and distance.
    In short, this algorithm matches the best (maximum distance)
    row entity to each column entity, and the matched indices are not duplicated
    in each row of ColToRowMatchIndices. If a column entity is not matched to
    any row entity, -1 is set in ColToRowMatchIndices.

    NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
    If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
    If Tensor, the height of ColToRowMatchIndices is 1.

    NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
    layer. Please consider using :code:`ssd_loss` instead.

    Args:
        dist_matrix(Variable): This input is a 2-D LoDTensor with shape
            [K, M]. The data type is float32 or float64. It is pair-wise
            distance matrix between the entities represented by each row and
            each column. For example, assumed one entity is A with shape [K],
            another entity is B with shape [M]. The dist_matrix[i][j] is the
            distance between A[i] and B[j]. The bigger the distance is, the
            better matching the pairs are. NOTE: This tensor can contain LoD
            information to represent a batch of inputs. One instance of this
            batch can contain different numbers of entities.
        match_type(str, optional): The type of matching method, should be
           'bipartite' or 'per_prediction'. None ('bipartite') by default.
        dist_threshold(float32, optional): If `match_type` is 'per_prediction',
            this threshold is to determine the extra matching bboxes based
            on the maximum distance, 0.5 by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
W
wangguanzhong 已提交
1155
        Tuple:
Y
yuyang18 已提交
1156

W
wangguanzhong 已提交
1157 1158
        matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
        type is int32. N is the batch size. If match_indices[i][j] is -1, it
Y
yuyang18 已提交
1159 1160 1161 1162 1163
        means B[j] does not match any entity in i-th instance.
        Otherwise, it means B[j] is matched to row
        match_indices[i][j] in i-th instance. The row number of
        i-th instance is saved in match_indices[i][j].

        matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
        type is float32. N is batch size. If match_indices[i][j] is -1,
        match_distance[i][j] is also -1.0. Otherwise, assumed
        match_distance[i][j] = d, and the row offsets of each instance
        are called LoD. Then match_distance[i][j] =
        dist_matrix[d+LoD[i]][j].

    Examples:

        >>> import paddle.fluid as fluid
        >>> x = fluid.layers.data(name='x', shape=[4], dtype='float32')
        >>> y = fluid.layers.data(name='y', shape=[4], dtype='float32')
        >>> iou = fluid.layers.iou_similarity(x=x, y=y)
        >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
    """
    helper = LayerHelper('bipartite_match', **locals())
    match_indices = helper.create_variable_for_type_inference(dtype='int32')
    match_distance = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype)
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
        outputs={
            'ColToRowMatchIndices': match_indices,
            'ColToRowMatchDist': match_distance
        })
    return match_indices, match_distance


def target_assign(input,
                  matched_indices,
                  negative_indices=None,
                  mismatch_value=None,
                  name=None):
    """
    Given the target bounding boxes or labels, this operator assigns
    classification and regression targets to each prediction, as well as
    weights to each prediction. The weights are used to specify which predictions
    do not contribute to the training loss.

    For each instance, the outputs `out` and `out_weight` are assigned based on
    `match_indices` and `negative_indices`.
    Assuming that the row offset for each instance in `input` is called lod,
    this operator assigns classification/regression targets by performing the
    following steps:

    1. Assigning all outputs based on `match_indices`:

    .. code-block:: text

        If id = match_indices[i][j] > 0,

            out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
            out_weight[i][j] = 1.

        Otherwise,

            out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
            out_weight[i][j] = 0.

    2. Assigning out_weight based on `neg_indices` if `neg_indices` is provided:

    Assuming that the row offset for each instance in `neg_indices` is called neg_lod,
    for i-th instance and each `id` of neg_indices in this instance:

    .. code-block:: text

        out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
        out_weight[i][id] = 1.0
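
    For example, following step 1 above with a single instance (lod = [0]),
    P = 2 and mismatch_value = 0, match_indices = [[2, -1]] gives:

    .. code-block:: text

        out[0][0][0 : K] = X[2][0][0 : K],    out_weight[0][0] = 1.
        out[0][1][0 : K] = {0, 0, ...},       out_weight[0][1] = 0.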

    Args:
       input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
       matched_indices (Variable): The input matched indices
           is a 2D Tensor<int32> with shape [N, P]. If MatchIndices[i][j] is -1,
           the j-th entity of column is not matched to any entity of row in
           i-th instance.
       negative_indices (Variable): The input negative example indices are
           an optional input with shape [Neg, 1] and int32 type, where Neg is
           the total number of negative example indices.
       mismatch_value (float32): Fill this value to the mismatched location.

    Returns:
        tuple:
               A tuple(out, out_weight) is returned. out is a 3D Tensor with
               shape [N, P, K], where N and P are the same as they are in
               `matched_indices` and K is the same as it is in the input of X.
               out_weight is the weight for output with
               the shape of [N, P, 1].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(
                name='x',
                shape=[4, 20, 4],
                dtype='float',
                lod_level=1,
                append_batch_size=False)
            matched_id = fluid.layers.data(
                name='indices',
                shape=[8, 20],
                dtype='int32',
                append_batch_size=False)
            trg, trg_weight = fluid.layers.target_assign(
                x,
                matched_id,
                mismatch_value=0)
    """
    helper = LayerHelper('target_assign', **locals())
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    out_weight = helper.create_variable_for_type_inference(dtype='float32')
    helper.append_op(
        type='target_assign',
        inputs={
            'X': input,
            'MatchIndices': matched_indices,
            'NegIndices': negative_indices
        },
        outputs={'Out': out,
                 'OutWeight': out_weight},
        attrs={'mismatch_value': mismatch_value})
    return out, out_weight


def ssd_loss(location,
             confidence,
             gt_box,
             gt_label,
             prior_box,
             prior_box_var=None,
             background_label=0,
             overlap_threshold=0.5,
             neg_pos_ratio=3.0,
             neg_overlap=0.5,
             loc_loss_weight=1.0,
             conf_loss_weight=1.0,
             match_type='per_prediction',
             mining_type='max_negative',
             normalize=True,
             sample_size=None):
    """
    **Multi-box loss layer for object detection algorithm of SSD**

    This layer computes the detection loss for SSD given the location offset
    predictions, confidence predictions, prior boxes and ground-truth bounding
    boxes and labels, and the type of hard example mining. The returned loss
    is a weighted sum of the localization loss (or regression loss) and
    confidence loss (or classification loss) by performing the following steps:

    1. Find matched bounding box by bipartite matching algorithm.

      1.1 Compute IOU similarity between ground-truth boxes and prior boxes.

      1.2 Compute matched bounding box by bipartite matching algorithm.

    2. Compute confidence for mining hard examples

      2.1. Get the target label based on matched indices.

      2.2. Compute confidence loss.

    3. Apply hard example mining to get the negative example indices and update
       the matched indices.

    4. Assign classification and regression targets

      4.1. Encode bbox according to the prior boxes.

      4.2. Assign regression targets.

      4.3. Assign classification targets.

    5. Compute the overall objective loss.

      5.1 Compute confidence loss.

      5.2 Compute localization loss.

      5.3 Compute the overall weighted loss.
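
    Concretely, step 5.3 combines the two losses per prior box, as in the
    implementation of this layer:

    .. code-block:: text

        loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss

    When `normalize` is True, the summed loss is further divided by the total
    number of matched prior boxes.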

    Args:
        location (Variable): The location predictions are a 3D Tensor with
            shape [N, Np, 4], N is the batch size, Np is total number of
            predictions for each instance. 4 is the number of coordinate values,
            the layout is [xmin, ymin, xmax, ymax].
        confidence (Variable): The confidence predictions are a 3D Tensor
            with shape [N, Np, C], N and Np are the same as they are in
            `location`, C is the class number.
        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
            bboxes of mini-batch input.
        gt_label (Variable): The ground-truth labels are a 2D LoDTensor
            with shape [Ng, 1].
        prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
        prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
            with shape [Np, 4].
        background_label (int): The index of background label, 0 by default.
        overlap_threshold (float): If match_type is 'per_prediction', use
            `overlap_threshold` to determine the extra matching bboxes when
             finding matched boxes. 0.5 by default.
        neg_pos_ratio (float): The ratio of the negative boxes to the positive
            boxes, used only when mining_type is 'max_negative', 3.0 by default.
        neg_overlap (float): The negative overlap upper bound for the unmatched
            predictions. Use only when mining_type is 'max_negative',
            0.5 by default.
        loc_loss_weight (float): Weight for localization loss, 1.0 by default.
        conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
        match_type (str): The type of matching method during training, should
            be 'bipartite' or 'per_prediction', 'per_prediction' by default.
        mining_type (str): The hard example mining type, should be 'hard_example'
            or 'max_negative'; currently only `max_negative` is supported.
        normalize (bool): Whether to normalize the SSD loss by the total number
            of output locations, True by default.
        sample_size (int): The max sample size of negative box, used only when
            mining_type is 'hard_example'.

    Returns:
        The weighted sum of the localization loss and confidence loss, with \
        shape [N * Np, 1], N and Np are the same as they are in `location`.

    Raises:
        ValueError: If mining_type is 'hard_example', since currently only \
        mining type `max_negative` is supported.

    Examples:
        >>> import paddle.fluid as fluid
        >>> pb = fluid.layers.data(
        >>>                   name='prior_box',
        >>>                   shape=[10, 4],
        >>>                   append_batch_size=False,
        >>>                   dtype='float32')
        >>> pbv = fluid.layers.data(
        >>>                   name='prior_box_var',
        >>>                   shape=[10, 4],
        >>>                   append_batch_size=False,
        >>>                   dtype='float32')
        >>> loc = fluid.layers.data(name='target_box', shape=[10, 4], dtype='float32')
        >>> scores = fluid.layers.data(name='scores', shape=[10, 21], dtype='float32')
        >>> gt_box = fluid.layers.data(
        >>>         name='gt_box', shape=[4], lod_level=1, dtype='float32')
        >>> gt_label = fluid.layers.data(
        >>>         name='gt_label', shape=[1], lod_level=1, dtype='float32')
        >>> loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
    """

    helper = LayerHelper('ssd_loss', **locals())
    if mining_type != 'max_negative':
        raise ValueError("Only support mining_type == max_negative now.")

    num, num_prior, num_class = confidence.shape
    conf_shape = nn.shape(confidence)
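    # conf_shape holds the runtime shape of `confidence`; its first two dims
    # ([N, Np]) are sliced out below and used as `actual_shape` when reshaping
    # the per-prior confidence loss.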

    def __reshape_to_2d(var):
        return nn.flatten(x=var, axis=2)

    # 1. Find matched bounding box by prior box.
    #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
    iou = iou_similarity(x=gt_box, y=prior_box)
    #   1.2 Compute matched bounding box by bipartite matching algorithm.
    matched_indices, matched_dist = bipartite_match(iou, match_type,
                                                    overlap_threshold)

    # 2. Compute confidence for mining hard examples
    # 2.1. Get the target label based on matched indices
    gt_label = nn.reshape(
        x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
    gt_label.stop_gradient = True
    target_label, _ = target_assign(
        gt_label, matched_indices, mismatch_value=background_label)
    # 2.2. Compute confidence loss.
    # Reshape confidence to 2D tensor.
    confidence = __reshape_to_2d(confidence)
    target_label = tensor.cast(x=target_label, dtype='int64')
    target_label = __reshape_to_2d(target_label)
    target_label.stop_gradient = True
    conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
    # 3. Mining hard examples
    actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
    actual_shape.stop_gradient = True
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    conf_loss = nn.reshape(
        x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
    conf_loss.stop_gradient = True
    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
    dtype = matched_indices.dtype
    updated_matched_indices = helper.create_variable_for_type_inference(
        dtype=dtype)
    helper.append_op(
        type='mine_hard_examples',
        inputs={
            'ClsLoss': conf_loss,
            'LocLoss': None,
            'MatchIndices': matched_indices,
            'MatchDist': matched_dist,
        },
        outputs={
            'NegIndices': neg_indices,
            'UpdatedMatchIndices': updated_matched_indices
        },
        attrs={
            'neg_pos_ratio': neg_pos_ratio,
            'neg_dist_threshold': neg_overlap,
            'mining_type': mining_type,
            'sample_size': sample_size,
        })

    # 4. Assign classification and regression targets
    # 4.1. Encoded bbox according to the prior boxes.
    encoded_bbox = box_coder(
        prior_box=prior_box,
        prior_box_var=prior_box_var,
        target_box=gt_box,
        code_type='encode_center_size')
    # 4.2. Assign regression targets
    target_bbox, target_loc_weight = target_assign(
        encoded_bbox, updated_matched_indices, mismatch_value=background_label)
    # 4.3. Assign classification targets
    target_label, target_conf_weight = target_assign(
        gt_label,
        updated_matched_indices,
        negative_indices=neg_indices,
        mismatch_value=background_label)

    # 5. Compute loss.
    # 5.1 Compute confidence loss.
    target_label = __reshape_to_2d(target_label)
    target_label = tensor.cast(x=target_label, dtype='int64')

    conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
    target_conf_weight = __reshape_to_2d(target_conf_weight)
    conf_loss = conf_loss * target_conf_weight

    # the target_label and target_conf_weight do not have gradient.
    target_label.stop_gradient = True
    target_conf_weight.stop_gradient = True

    # 5.2 Compute regression loss.
    location = __reshape_to_2d(location)
    target_bbox = __reshape_to_2d(target_bbox)

    loc_loss = nn.smooth_l1(location, target_bbox)
    target_loc_weight = __reshape_to_2d(target_loc_weight)
    loc_loss = loc_loss * target_loc_weight

    # the target_bbox and target_loc_weight do not have gradient.
    target_bbox.stop_gradient = True
    target_loc_weight.stop_gradient = True

    # 5.3 Compute overall weighted loss.
    loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
    # reshape to [N, Np], N is the batch size and Np is the prior box number.
    # shape=(-1, 0) is set for compile-time, the correct shape is set by
    # actual_shape in runtime.
    loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
    loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
    if normalize:
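        # target_loc_weight is 1 for matched (positive) priors and 0 otherwise,
        # so its sum gives the number of positive boxes used as the normalizer.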
        normalizer = nn.reduce_sum(target_loc_weight)
        loss = loss / normalizer

    return loss


def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              name=None,
              min_max_aspect_ratios_order=False):
    """
    **Prior Box Operator**

    Generate prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.
    Each position of the input produces N prior boxes, where N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of each
    box is in the (min_size, max_size) interval, and the boxes are generated in
    sequence according to the aspect_ratios.

    Args:
       input(Variable): The Input Variables, the format is NCHW.
       image(Variable): The input image data of PriorBoxOp,
            the layout is NCHW.
       min_sizes(list|tuple|float value): min sizes of generated prior boxes.
       max_sizes(list|tuple|None): max sizes of generated prior boxes.
            Default: None.
       aspect_ratios(list|tuple|float value): the aspect ratios of generated
            prior boxes. Default: [1.].
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] == 0.0 or steps[1] == 0.0, the prior boxes step across
            height/width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       name(str): Name of the prior box op. Default: None.
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weight order of the
            convolution layer that follows, and does not affect the final
            detection results. Default: False.

    Returns:
        tuple: A tuple with two Variable (boxes, variances)

        boxes: the output prior boxes of PriorBox.
        The layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input,
        num_priors is the total
        box count of each position of input.

        variances: the expanded variances of PriorBox.
        The layout is [H, W, num_priors, 4].
        H is the height of input, W is the width of input
        num_priors is the total
        box count of each position of input


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.layers.data(name="input", shape=[3,6,9])
            images = fluid.layers.data(name="images", shape=[3,9,12])
            box, var = fluid.layers.prior_box(
                input=input,
                image=images,
                min_sizes=[100.],
                flip=True,
                clip=True)
    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    attrs = {
        'min_sizes': min_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'flip': flip,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'min_max_aspect_ratios_order': min_max_aspect_ratios_order
    }
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        attrs['max_sizes'] = max_sizes

    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def density_prior_box(input,
                      image,
                      densities=None,
                      fixed_sizes=None,
                      fixed_ratios=None,
                      variance=[0.1, 0.1, 0.2, 0.2],
                      clip=False,
                      steps=[0.0, 0.0],
                      offset=0.5,
                      flatten_to_2d=False,
                      name=None):
    """
    **Density Prior Box Operator**

    Generate density prior boxes for the SSD (Single Shot MultiBox Detector)
    algorithm. Each position of the input produces N prior boxes, where N is
    determined by the count of densities, fixed_sizes and fixed_ratios.
    Boxes centered at grid points around each input position are generated by
    this operator; the grid points are determined by densities, and the count
    of density prior boxes is determined by fixed_sizes and fixed_ratios.
    The number of fixed_sizes must equal the number of densities.
    For each densities_i in densities:
    N_density_prior_box = sum(N_fixed_ratios * densities_i^2).
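
    For example, with densities = [4, 2, 1] and a single fixed ratio
    (N_fixed_ratios = 1), N = 1*4^2 + 1*2^2 + 1*1^2 = 21 density prior boxes
    are generated at every input position.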

    Args:
       input(Variable): The Input Variables, the format is NCHW.
       image(Variable): The input image data of PriorBoxOp,
            the layout is NCHW.
       densities(list|tuple|None): the densities of generated density prior 
            boxes, this attribute should be a list or tuple of integers. 
            Default: None.
       fixed_sizes(list|tuple|None): the fixed sizes of generated density
            prior boxes, this attribute should a list or tuple of same 
            length with :attr:`densities`. Default: None.
       fixed_ratios(list|tuple|None): the fixed ratios of generated density
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
       variance(list|tuple): the variances to be encoded in density prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       steps(list|tuple): Prior boxes step across width and height. If
            steps[0] == 0.0 or steps[1] == 0.0, the density prior boxes step across
            height/width of the input will be automatically calculated.
            Default: [0., 0.]
       offset(float): Prior boxes center offset. Default: 0.5
       flatten_to_2d(bool): Whether to flatten output prior boxes and variance
           to 2D shape, the second dim is 4. Default: False.
       name(str): Name of the density prior box op. Default: None.

    Returns:
        tuple: A tuple with two Variable (boxes, variances)

        boxes: the output density prior boxes of PriorBox.
            The layout is [H, W, num_priors, 4] when flatten_to_2d is False.
            The layout is [H * W * num_priors, 4] when flatten_to_2d is True.
            H is the height of input, W is the width of input,
            num_priors is the total box count of each position of input.

        variances: the expanded variances of PriorBox.
            The layout is [H, W, num_priors, 4] when flatten_to_2d is False.
            The layout is [H * W * num_priors, 4] when flatten_to_2d is True.
            H is the height of input, W is the width of input
            num_priors is the total box count of each position of input.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.layers.data(name="input", shape=[3,6,9])
            images = fluid.layers.data(name="images", shape=[3,9,12])
            box, var = fluid.layers.density_prior_box(
                input=input,
                image=images,
                densities=[4, 2, 1],
                fixed_sizes=[32.0, 64.0, 128.0],
                fixed_ratios=[1.],
                clip=True,
                flatten_to_2d=True)
    """
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(densities):
        raise TypeError('densities should be a list or a tuple or None.')
    if not _is_list_or_tuple_(fixed_sizes):
        raise TypeError('fixed_sizes should be a list or a tuple or None.')
    if not _is_list_or_tuple_(fixed_ratios):
        raise TypeError('fixed_ratios should be a list or a tuple or None.')
    if len(densities) != len(fixed_sizes):
        raise ValueError('densities and fixed_sizes length should be equal.')
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        raise ValueError('steps should be a list or tuple ',
                         'with length 2, (step_width, step_height).')

    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
    }
    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
        inputs={"Input": input,
                "Image": image},
        outputs={"Boxes": box,
                 "Variances": var},
        attrs=attrs, )
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var


def multi_box_head(inputs,
                   image,
                   base_size,
                   num_classes,
                   aspect_ratios,
                   min_ratio=None,
                   max_ratio=None,
                   min_sizes=None,
                   max_sizes=None,
                   steps=None,
                   step_w=None,
                   step_h=None,
                   offset=0.5,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   flip=True,
                   clip=False,
                   kernel_size=1,
                   pad=0,
                   stride=1,
                   name=None,
                   min_max_aspect_ratios_order=False):
    """
    Generate prior boxes for the SSD (Single Shot MultiBox Detector)
    algorithm. For details of this algorithm, please refer to
    section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector
    <https://arxiv.org/abs/1512.02325>`_ .

    Args:
       inputs(list|tuple): The list of input Variables, the format
            of all Variables is NCHW.
       image(Variable): The input image data of PriorBoxOp,
            the layout is NCHW.
       base_size(int): the base_size is used to get min_size
            and max_size according to min_ratio and max_ratio.
       num_classes(int): The number of classes.
       aspect_ratios(list|tuple): the aspect ratios of generated prior
            boxes. The length of input and aspect_ratios must be equal.
       min_ratio(int): the min ratio of generated prior boxes.
       max_ratio(int): the max ratio of generated prior boxes.
       min_sizes(list|tuple|None): If `len(inputs) <=2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       max_sizes(list|tuple|None): If `len(inputs) <=2`,
            max_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
       steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
       step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
       step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
       offset(float): Prior boxes center offset. Default: 0.5
       variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip(bool): Whether to flip aspect ratios. Default:False.
       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
       kernel_size(int): The kernel size of conv2d. Default: 1.
       pad(int|list|tuple): The padding of conv2d. Default:0.
       stride(int|list|tuple): The stride of conv2d. Default:1,
       name(str): Name of the prior box layer. Default: None.
       min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weight order of the
            convolution layer that follows, and does not affect the final
            detection results. Default: False.

    Returns:
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

        mbox_loc: The predicted boxes' location of the inputs. The layout
        is [N, H*W*Priors, 4]. where Priors is the number of predicted
        boxes each position of each input.

        mbox_conf: The predicted boxes' confidence of the inputs. The layout
        is [N, H*W*Priors, C]. where Priors is the number of predicted boxes
        each position of each input and C is the number of Classes.

        boxes: the output prior boxes of PriorBox. The layout is [num_priors, 4].
        num_priors is the total box count of each position of inputs.

        variances: the expanded variances of PriorBox. The layout is
        [num_priors, 4]. num_priors is the total box count of each position of inputs


    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          images = fluid.layers.data(name='data', shape=[3, 300, 300], dtype='float32')
          conv1 = fluid.layers.data(name='conv1', shape=[512, 19, 19], dtype='float32')
          conv2 = fluid.layers.data(name='conv2', shape=[1024, 10, 10], dtype='float32')
          conv3 = fluid.layers.data(name='conv3', shape=[512, 5, 5], dtype='float32')
          conv4 = fluid.layers.data(name='conv4', shape=[256, 3, 3], dtype='float32')
          conv5 = fluid.layers.data(name='conv5', shape=[256, 2, 2], dtype='float32')
          conv6 = fluid.layers.data(name='conv6', shape=[128, 1, 1], dtype='float32')

Q
update  
qiaolongfei 已提交
1885
          mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
1886
            inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
C
chengduoZH 已提交
1887 1888 1889 1890 1891 1892 1893 1894 1895
            image=images,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True)
    """

    def _reshape_with_axis_(input, axis=1):
        out = nn.flatten(x=input, axis=axis)
        return out

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    elif min_sizes is None and max_sizes is None:
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.)
            max_sizes.append(base_size * (ratio + step) / 100.)
        min_sizes = [base_size * .10] + min_sizes
        max_sizes = [base_size * .20] + max_sizes
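        # Illustrative note (using the settings of the docstring example): with
        # base_size=300, min_ratio=20, max_ratio=90 and six inputs, step is 17
        # and this branch yields min_sizes = [30, 60, 111, 162, 213, 264] and
        # max_sizes = [60, 111, 162, 213, 264, 315].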

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios, num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.')
    if step_h:
        _is_list_or_tuple_and_equal(
            step_h, num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.')
    if step_w:
        _is_list_or_tuple_and_equal(
            step_w, num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
    if steps:
        _is_list_or_tuple_and_equal(
            steps, num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'step_w should be the same.')
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]

        box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
                             variance, flip, clip, step, offset, None,
                             min_max_aspect_ratios_order)

        box_results.append(box)
        var_results.append(var)

        num_boxes = box.shape[2]
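        # `box` from prior_box has layout [H, W, num_priors, 4], so shape[2] is
        # the number of prior boxes generated at each position of this input.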

        # get loc
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)

        mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
        mbox_locs.append(mbox_loc_flatten)

        # get conf
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride)
        conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
        mbox_confs.append(conf_loc_flatten)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = nn.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes])

    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var


def anchor_generator(input,
                     anchor_sizes=None,
                     aspect_ratios=None,
                     variance=[0.1, 0.1, 0.2, 0.2],
                     stride=None,
                     offset=0.5,
                     name=None):
    """
    **Anchor generator operator**

    Generate anchors for Faster RCNN algorithm.
    Each position of the input produces N anchors, where N =
    size(anchor_sizes) * size(aspect_ratios). The generated anchors are ordered
    by looping over aspect_ratios first and then anchor_sizes.
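
    For example, with anchor_sizes = [64, 128, 256, 512] and
    aspect_ratios = [0.5, 1.0, 2.0] (the values used in the example below),
    each input position produces N = 4 * 3 = 12 anchors.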

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, the anchor size of 64 means the area of this anchor 
          equals to 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios 
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box 
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by 
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer 
           to :ref:`api_guide_Name`. Usually name is no need to set and None 
           by default. 
2061 2062

    Returns:
W
wangguanzhong 已提交
2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074
        Tuple:

        Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
        H is the height of input, W is the width of input,
        num_anchors is the box count of each position. 
        Each anchor is in (xmin, ymin, xmax, ymax) format and unnormalized.
 
        Variances(Variable): The expanded variances of anchors
        with a layout of [H, W, num_priors, 4].
        H is the height of input, W is the width of input
        num_anchors is the box count of each position.
        Each variance is in (xcenter, ycenter, w, h) format.


    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            conv1 = fluid.layers.data(name='conv1', shape=[48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
    """
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        raise ValueError('stride should be a list or tuple ',
                         'with length 2, (stride_width, stride_height).')

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor,
                 "Variances": var},
        attrs=attrs, )
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var


def roi_perspective_transform(input,
                              rois,
                              transformed_height,
                              transformed_width,
                              spatial_scale=1.0):
    """
    ROI perspective transform op.

    Args:
        input (Variable): The input of ROIPerspectiveTransformOp. The format of 
                          input tensor is NCHW. Where N is batch size, C is the
                          number of input channels, H is the height of the feature,
                          and W is the width of the feature.
        rois (Variable):  ROIs (Regions of Interest) to be transformed. It should be
                          a 2-D LoDTensor of shape (num_rois, 8). Given as 
                          [[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the 
                          top left coordinates, and (x2, y2) is the top right 
                          coordinates, and (x3, y3) is the bottom right coordinates, 
                          and (x4, y4) is the bottom left coordinates.
        transformed_height (integer): The height of transformed output.
        transformed_width (integer): The width of transformed output.
        spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0

    Returns:
            tuple: A tuple with three Variables. (out, mask, transform_matrix)

            out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, channels, transformed_h, transformed_w).

            mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
            (num_rois, 1, transformed_h, transformed_w).

            transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
            a 2-D tensor with shape (num_rois, 9).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.layers.data(name='x', shape=[256, 28, 28], dtype='float32')
            rois = fluid.layers.data(name='rois', shape=[8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
    """
    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input,
                "ROIs": rois},
        outputs={
            "Out": out,
            "Out2InIdx": out2in_idx,
            "Out2InWeights": out2in_w,
            "Mask": mask,
            "TransformMatrix": transform_matrix
        },
        attrs={
            "transformed_height": transformed_height,
            "transformed_width": transformed_width,
            "spatial_scale": spatial_scale
        })
    return out, mask, transform_matrix


def generate_proposal_labels(rpn_rois,
                             gt_classes,
                             is_crowd,
                             gt_boxes,
                             im_info,
                             batch_size_per_im=256,
                             fg_fraction=0.25,
                             fg_thresh=0.25,
                             bg_thresh_hi=0.5,
                             bg_thresh_lo=0.0,
                             bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                             class_nums=None,
                             use_random=True,
                             is_cls_agnostic=False,
                             is_cascade_rcnn=False):
    """
    **Generate Proposal Labels of Faster-RCNN**

    Given the GenerateProposalOp output bounding boxes and groundtruth, this operator
    samples foreground boxes and background boxes, and computes loss targets.

    RpnRois is the output boxes of RPN and was processed by generate_proposal_op; these boxes
    are combined with groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
    If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
    If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
    it is considered a background sample.
    After all foreground and background boxes are chosen (the so-called RoIs),
    random sampling is applied to make sure
    the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
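    For example, with the default batch_size_per_im = 256 and fg_fraction = 0.25,
    at most 64 of the sampled boxes per image are foreground.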

    For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
    Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.

    Args:
        rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
        is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
        gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
        im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.

        batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
        fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
        fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
        bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
        bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
        bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
        class_nums(int): Class number. The data type must be int32.
        use_random(bool): Use random sampling to choose foreground and background boxes.
        is_cls_agnostic(bool): bbox regression use class agnostic simply which only represent fg and bg boxes.
        is_cascade_rcnn(bool): it will filter some bbox crossing the image's boundary when setting True.

    Returns:
        tuple:
        A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.

        - **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
        - **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
        - **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
        - **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
        - **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
            gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
            is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
            gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
                           rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
                           class_nums=10)

    """

    helper = LayerHelper('generate_proposal_labels', **locals())

X
    labels_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    bbox_targets = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_inside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)
    bbox_outside_weights = helper.create_variable_for_type_inference(
        dtype=rpn_rois.dtype)

    helper.append_op(
        type="generate_proposal_labels",
        inputs={
            'RpnRois': rpn_rois,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtBoxes': gt_boxes,
            'ImInfo': im_info
        },
        outputs={
            'Rois': rois,
            'LabelsInt32': labels_int32,
            'BboxTargets': bbox_targets,
            'BboxInsideWeights': bbox_inside_weights,
            'BboxOutsideWeights': bbox_outside_weights
        },
        attrs={
            'batch_size_per_im': batch_size_per_im,
            'fg_fraction': fg_fraction,
            'fg_thresh': fg_thresh,
            'bg_thresh_hi': bg_thresh_hi,
            'bg_thresh_lo': bg_thresh_lo,
            'bbox_reg_weights': bbox_reg_weights,
            'class_nums': class_nums,
            'use_random': use_random,
            'is_cls_agnostic': is_cls_agnostic,
            'is_cascade_rcnn': is_cascade_rcnn
        })

    rois.stop_gradient = True
    labels_int32.stop_gradient = True
    bbox_targets.stop_gradient = True
    bbox_inside_weights.stop_gradient = True
    bbox_outside_weights.stop_gradient = True

    return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights


def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
                         labels_int32, num_classes, resolution):
    """
    **Generate Mask Labels for Mask-RCNN**

    Given the RoIs and their corresponding labels, this operator samples
    foreground RoIs. For each foreground RoI, the mask branch produces
    a :math:`K \\times M^{2}` dimensional target, which encodes K binary masks
    of resolution M x M, one for each of the K classes. These mask targets
    are used to compute the loss of the mask branch.

    Please note the data format of the ground-truth segmentations, which is
    assumed to be as follows. The first instance has two gt objects.
    The second instance has one gt object, and this object has two gt segmentations.

        .. code-block:: python

            #[
            #  [[[229.14, 370.9, 229.14, 370.9, ...]],
            #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
            #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
            #]

            # Conversion is done in the user's reader. Here numpy is assumed
            # to be imported as np, fluid as paddle.fluid, and `feeds` is the
            # list of feed variables of the program.
            batch_masks = []
            for semgs in batch_semgs:
                gt_masks = []
                for semg in semgs:
                    gt_segm = []
                    for polys in semg:
                        # each polygon becomes an array of (x, y) points
                        gt_segm.append(np.array(polys).reshape(-1, 2))
                    gt_masks.append(gt_segm)
                batch_masks.append(gt_masks)

            place = fluid.CPUPlace()
            feeder = fluid.DataFeeder(place=place, feed_list=feeds)
            feeder.feed(batch_masks)

    Args:
        im_info(Variable): A 2-D Tensor with shape [N, 3]. N is the batch size,
            each element is [height, width, scale] of an image. Image scale is
            target_size / original_size.
        gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the total
            number of ground-truth boxes, each element is a class label.
        is_crowd(Variable): A 2-D LoDTensor with the same shape as gt_classes,
            each element is a flag indicating whether a ground-truth is crowd.
        gt_segms(Variable): This input is a 2-D LoDTensor with shape [S, 2],
            its LoD level is 3. Usually users do not need to understand LoD;
            they only need to return the correct data format in the reader.

            LoD[0] represents the number of gt objects of
            each instance. LoD[1] represents the number of segmentations of each
            object. LoD[2] represents the number of polygons of each segmentation.
            S is the total number of polygon coordinate points. Each element is
            an (x, y) coordinate point.
        rois(Variable): A 2-D LoDTensor with shape [R, 4]. R is the total
            number of RoIs, each element is a bounding box with
            (xmin, ymin, xmax, ymax) format in the range of original image.
        labels_int32(Variable): A 2-D LoDTensor in shape of [R, 1] with type
            of int32. R is the same as in `rois`. Each element represents
            a class label of a RoI.
        num_classes(int): Class number.
        resolution(int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable): A 2D LoDTensor with shape [P, 4]. P is the total
            number of sampled RoIs. Each element is a bounding box with
            [xmin, ymin, xmax, ymax] format in range of the original image size.
        mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1],
            each element represents the output mask RoI index with regard to
            the input RoIs.
        mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M],
            K is the classes number and M is the resolution of mask predictions.
            Each element represents the binary mask targets.

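    For instance, ``mask_int32`` can be reshaped to pick out the mask target
    of a RoI's own class (a NumPy sketch with illustrative shapes only):

    .. code-block:: python

        import numpy as np

        P, K, M = 8, 81, 14                       # sampled RoIs, classes, resolution
        mask_int32 = np.zeros((P, K * M * M), dtype=np.int32)
        labels = np.random.randint(1, K, size=P)  # class label of each RoI

        masks = mask_int32.reshape(P, K, M, M)
        roi0_target = masks[0, labels[0]]         # M x M binary mask targets of RoI 0
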
    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          im_info = fluid.layers.data(name="im_info", shape=[3],
              dtype="float32")
          gt_classes = fluid.layers.data(name="gt_classes", shape=[1],
              dtype="float32", lod_level=1)
          is_crowd = fluid.layers.data(name="is_crowd", shape=[1],
              dtype="float32", lod_level=1)
          gt_masks = fluid.layers.data(name="gt_masks", shape=[2],
              dtype="float32", lod_level=3)
          # rois, roi_labels can be the output of
          # fluid.layers.generate_proposal_labels.
          rois = fluid.layers.data(name="rois", shape=[4],
              dtype="float32", lod_level=1)
          roi_labels = fluid.layers.data(name="roi_labels", shape=[1],
              dtype="int32", lod_level=1)
          mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
              im_info=im_info,
              gt_classes=gt_classes,
              is_crowd=is_crowd,
              gt_segms=gt_masks,
              rois=rois,
              labels_int32=roi_labels,
              num_classes=81,
              resolution=14)
    """

    helper = LayerHelper('generate_mask_labels', **locals())

    mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_has_mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)
    mask_int32 = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype)

    helper.append_op(
        type="generate_mask_labels",
        inputs={
            'ImInfo': im_info,
            'GtClasses': gt_classes,
            'IsCrowd': is_crowd,
            'GtSegms': gt_segms,
            'Rois': rois,
            'LabelsInt32': labels_int32
        },
        outputs={
            'MaskRois': mask_rois,
            'RoiHasMaskInt32': roi_has_mask_int32,
            'MaskInt32': mask_int32
        },
        attrs={'num_classes': num_classes,
               'resolution': resolution})

    mask_rois.stop_gradient = True
    roi_has_mask_int32.stop_gradient = True
    mask_int32.stop_gradient = True

    return mask_rois, roi_has_mask_int32, mask_int32


def generate_proposals(scores,
                       bbox_deltas,
                       im_info,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       name=None):
    """
    **Generate proposals for Faster-RCNN**

    This operation proposes RoIs according to each box's probability of
    being a foreground object, where the boxes are computed from the anchors.
    bbox_deltas and objectness scores are the outputs of the RPN. The final
    proposals can be used to train the detection net.

    For generating proposals, this operation performs the following steps:

    1. Transpose and resize scores and bbox_deltas to shapes
       (H * W * A, 1) and (H * W * A, 4).
    2. Calculate box locations as proposal candidates.
    3. Clip boxes to the image.
    4. Remove predicted boxes with small area.
    5. Apply NMS to get the final proposals as output (see the sketch below).

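    A minimal NumPy sketch of steps 2 and 3 for a single anchor, assuming the
    usual center-offset / log-size delta encoding (names and values here are
    illustrative only, not the exact op implementation):

    .. code-block:: python

        import numpy as np

        anchor = np.array([20., 30., 80., 90.])   # (xmin, ymin, xmax, ymax)
        delta = np.array([0.1, -0.2, 0.05, 0.0])  # (dx, dy, dw, dh)
        im_h, im_w = 100., 100.

        # anchor width/height and center
        aw, ah = anchor[2] - anchor[0], anchor[3] - anchor[1]
        acx, acy = anchor[0] + 0.5 * aw, anchor[1] + 0.5 * ah

        # step 2: decode the deltas into a candidate box
        cx, cy = acx + delta[0] * aw, acy + delta[1] * ah
        w, h = aw * np.exp(delta[2]), ah * np.exp(delta[3])
        box = np.array([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h])

        # step 3: clip the candidate box to the image boundary
        box[0::2] = box[0::2].clip(0, im_w - 1)
        box[1::2] = box[1::2].clip(0, im_h - 1)
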
    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is the number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between the predicted box location and the
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Info contains height, width and scale
            between origin image size and the size of feature map.
            The data type must be float32.
        anchors(Variable): A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            A is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(int): Number of total bboxes to be kept per
            image before NMS. `6000` by default.
        post_nms_top_n(int): Number of total bboxes to be kept per
            image after NMS. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS: if `adaptive_threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                         im_info, anchors, variances)

    """
    helper = LayerHelper('generate_proposals', **locals())

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype)
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype)
    helper.append_op(
        type="generate_proposals",
        inputs={
            'Scores': scores,
            'BboxDeltas': bbox_deltas,
            'ImInfo': im_info,
            'Anchors': anchors,
            'Variances': variances
        },
        attrs={
            'pre_nms_topN': pre_nms_top_n,
            'post_nms_topN': post_nms_top_n,
            'nms_thresh': nms_thresh,
            'min_size': min_size,
            'eta': eta
        },
        outputs={'RpnRois': rpn_rois,
                 'RpnRoiProbs': rpn_roi_probs})
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True

    return rpn_rois, rpn_roi_probs


def box_clip(input, im_info, name=None):
    """
    Clip the box into the size given by im_info
    For each input box, the formula is given as follows:
        
    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0) 
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)
    
    where im_w and im_h are computed from im_info:
 
    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(width / scale)
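
    For instance, a minimal NumPy sketch of this clipping rule (the numbers
    are illustrative only):

    .. code-block:: python

        import numpy as np

        # im_info = (height, width, scale)
        height, width, scale = 512., 512., 2.
        im_h, im_w = round(height / scale), round(width / scale)  # 256, 256

        box = np.array([-10., 30., 300., 290.])      # (xmin, ymin, xmax, ymax)
        box[0::2] = np.clip(box[0::2], 0, im_w - 1)  # clip x coordinates
        box[1::2] = np.clip(box[1::2], 0, im_h - 1)  # clip y coordinates
        # box is now [0., 30., 255., 255.]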

    Args:
        input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
            the last dimension is 4 and data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout 
            (height, width, scale) representing the information of the image. 
            Height and width are the input size and scale is the ratio of input
            size and original size. The data type is float32 or float64.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            parameter; it is None by default.
    
    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or float64. 
        The shape is the same as the input.

        
    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid
            boxes = fluid.layers.data(
                name='boxes', shape=[8, 4], dtype='float32', lod_level=1)
            im_info = fluid.layers.data(name='im_info', shape=[3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
    """

    helper = LayerHelper("box_clip", **locals())
    output = helper.create_variable_for_type_inference(dtype=input.dtype)
    inputs = {"Input": input, "ImInfo": im_info}
    helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})

    return output


def retinanet_detection_output(bboxes,
                               scores,
                               anchors,
                               im_info,
                               score_threshold=0.05,
                               nms_top_k=1000,
                               keep_top_k=100,
                               nms_threshold=0.3,
                               nms_eta=1.):
    """
    **Detection Output Layer for Retinanet.**

    This operation is to get the detection results by performing the following
    steps:

    1. Decode top-scoring bounding box predictions per FPN level according 
       to the anchor boxes.
    2. Merge top predictions from all levels and apply multi-class non 
       maximum suppression (NMS) on them to get the final detections.

    Args:
        bboxes(List): A list of tensors from multiple FPN levels. Each
            element is a 3-D Tensor with shape [N, Mi, 4] representing the
            predicted locations of Mi bounding boxes. N is the batch size,
            Mi is the number of bounding boxes from i-th FPN level and each 
            bounding box has four coordinate values and the layout is
            [xmin, ymin, xmax, ymax].
        scores(List): A list of tensors from multiple FPN levels. Each
            element is a 3-D Tensor with shape [N, Mi, C] representing the
            predicted confidence scores. N is the batch size, C is the
            class number (excluding background), Mi is the number of bounding
            boxes from the i-th FPN level. For each bounding box, there are in total
            C scores.
        anchors(List): A 2-D Tensor with shape [Mi, 4] represents the locations
            of Mi anchor boxes from all FPN level. Each bounding box has four
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
        im_info(Variable): A 2-D LoDTensor with shape [N, 3] represents the
            image information. N is the batch size, each image information
            includes height, width and scale.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score lower than the threshold.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. -1 means keeping all bounding boxes after NMS step.
        nms_threshold(float): The threshold to be used in NMS.
        nms_eta(float): The parameter for adaptive NMS.

    Returns:
        Variable:
            The detection output is a LoDTensor with shape [No, 6].
            Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
            `No` is the total number of detections in this mini-batch. For each
            instance, the offsets in first dimension are called LoD, the offset
            number is N + 1, N is the batch size. The i-th image has
            `LoD[i + 1] - LoD[i]` detected results, if it is 0, the i-th image
            has no detected results. If all images have no detected results,
            LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python
        
            import paddle.fluid as fluid

            bboxes = fluid.layers.data(name='bboxes', shape=[1, 21, 4],
                append_batch_size=False, dtype='float32')
            scores = fluid.layers.data(name='scores', shape=[1, 21, 10],
                append_batch_size=False, dtype='float32')
            anchors = fluid.layers.data(name='anchors', shape=[21, 4],
                append_batch_size=False, dtype='float32')
            im_info = fluid.layers.data(name="im_info", shape=[1, 3],
                append_batch_size=False, dtype='float32')
            nmsed_outs = fluid.layers.retinanet_detection_output(
                                                    bboxes=[bboxes, bboxes],
                                                    scores=[scores, scores],
                                                    anchors=[anchors, anchors],
                                                    im_info=im_info,
                                                    score_threshold=0.05,
                                                    nms_top_k=1000,
                                                    keep_top_k=100,
                                                    nms_threshold=0.3,
                                                    nms_eta=1.)
    """

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores'))
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
        },
        outputs={'Out': output})
    output.stop_gradient = True
    return output


def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=0,
                   name=None):
    """
    **Multiclass NMS**
    
    This operator is to do multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes whose scores are larger than score_threshold, if this threshold is
    provided, and then keeps the top nms_top_k candidates by confidence score if
    nms_top_k is larger than -1. Then this operator prunes away boxes that have a
    high IOU (intersection over union) overlap with already selected boxes by
    adaptive threshold NMS based on the parameters nms_threshold and nms_eta.
    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4)  which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)

            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.score = (0.3, 0.3, 0.1)

            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0


        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
                        [2, 0.4, 2.0, 3.0, 7.0, 5.0]]
                         
            Out format is (label, confidence, xmin, ymin, xmax, ymax)
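
    A quick NumPy check of the IoU value used in the example above (treating
    the coordinates as continuous, without a +1 pixel convention):

    .. code-block:: python

        import numpy as np

        box1 = np.array([2.0, 3.0, 7.0, 5.0])  # (xmin, ymin, xmax, ymax)
        box2 = np.array([3.0, 4.0, 8.0, 5.0])

        # intersection rectangle
        iw = max(0.0, min(box1[2], box2[2]) - max(box1[0], box2[0]))  # 4.0
        ih = max(0.0, min(box1[3], box2[3]) - max(box1[1], box2[1]))  # 1.0
        inter = iw * ih                                               # 4.0

        area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])             # 10.0
        area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])             # 5.0
        iou = inter / (area1 + area2 - inter)                         # 4/11 > 0.3
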
    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is 
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the 
                           class number   
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence scores.
                           N is the batch size, C is the class number, M is 
                           number of bounding boxes. For each category there
                           are in total M scores corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes.
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4].
        background_label (int): The index of background label, the background 
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided, 
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Out(Variable): A 2-D LoDTensor with shape [No, 6] represents the detections.
             Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
             or A 2-D LoDTensor with shape [No, 10] represents the detections.
             Each row has 10 values: 
             [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the 
             total number of detections. If no boxes are detected for any
             image, lod will be set to {1} and Out only contains one value
             which is -1.
             (After version 1.3, when no boxes detected, the lod is changed 
             from {0} to {1}) 


    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.layers.data(name='bboxes', shape=[81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.layers.data(name='scores', shape=[81],
                                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    helper = LayerHelper('multiclass_nms', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output})
    output.stop_gradient = True

    return output


def multiclass_nms2(bboxes,
                    scores,
                    score_threshold,
                    nms_top_k,
                    keep_top_k,
                    nms_threshold=0.3,
                    normalized=True,
                    nms_eta=1.,
                    background_label=0,
                    return_index=False,
                    name=None):
    """
    **Multiclass NMS2**
    
    This operator is to do multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes whose scores are larger than score_threshold, if this threshold is
    provided, and then keeps the top nms_top_k candidates by confidence score if
    nms_top_k is larger than -1. Then this operator prunes away boxes that have a
    high IOU (intersection over union) overlap with already selected boxes by
    adaptive threshold NMS based on the parameters nms_threshold and nms_eta.

    After the NMS step, at most keep_top_k bboxes in total are kept
    per image if keep_top_k is larger than -1.

    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is 
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the 
                           class number   
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence scores.
                           N is the batch size, C is the class number, M is 
                           number of bounding boxes. For each category there
                           are in total M scores corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes.
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4].
        background_label (int): The index of background label, the background 
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided, 
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The parameter for adaptive NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        return_index(bool): Whether return selected index. Default: False
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        A tuple with two Variables: (Out, Index) if return_index is True,
        otherwise, only one Variable (Out) is returned.

        Out: A 2-D LoDTensor with shape [No, 6] represents the detections. 
        Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax] 
        or A 2-D LoDTensor with shape [No, 10] represents the detections. 
        Each row has 10 values: [label, confidence, x1, y1, x2, y2, x3, y3, 
        x4, y4]. No is the total number of detections. 

        If no results are detected for any image, all elements in LoD will be
        0, and the output tensor is empty (None).

        Index: Only return when return_index is True. A 2-D LoDTensor with 
        shape [No, 1] represents the selected index, whose type is integer. 
        The index is the absolute value across batches. No is the same number 
        as Out. If the index is used to gather other attributes such as age, 
        one needs to reshape the input (N, M, 1) to (N * M, 1) first, where 
        N is the batch size and M is the number of boxes.

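    A small NumPy sketch of using ``Index`` to gather another per-box
    attribute, as described above (names and values are illustrative only):

    .. code-block:: python

        import numpy as np

        N, M = 2, 3                       # batch size, boxes per image
        age = np.random.rand(N, M, 1)     # some per-box attribute
        index = np.array([[1], [4]])      # indices as returned by the op

        flat_age = age.reshape(N * M, 1)  # reshape (N, M, 1) to (N * M, 1) first
        kept_age = flat_age[index[:, 0]]  # attributes of the kept boxes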

    Examples:
        .. code-block:: python


            import paddle.fluid as fluid
            boxes = fluid.layers.data(name='bboxes', shape=[81, 4],
                                      dtype='float32', lod_level=1)
            scores = fluid.layers.data(name='scores', shape=[81],
                                      dtype='float32', lod_level=1)
            out, index = fluid.layers.multiclass_nms2(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False,
                                              return_index=True)
    """
    helper = LayerHelper('multiclass_nms2', **locals())

    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int')
    helper.append_op(
        type="multiclass_nms2",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs={'Out': output,
                 'Index': index})
    output.stop_gradient = True
    index.stop_gradient = True

    if return_index:
        return output, index
    return output


def distribute_fpn_proposals(fpn_rois,
                             min_level,
                             max_level,
                             refer_level,
                             refer_scale,
                             name=None):
    """
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks 
    (FPN) models, all proposals need to be distributed to different FPN 
    levels according to the scale of the proposals, the referring scale and the 
    referring level. Besides, to restore the order of proposals, we return an 
    array which indicates the original index of the rois in the current proposals. 
    To compute the FPN level for each roi, the formula is given as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}

        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
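
    For instance, a small Python sketch of this rule, assuming a base-2
    logarithm as in common FPN implementations (numbers are illustrative
    only):

    .. code-block:: python

        import math

        refer_scale, refer_level = 224, 4
        min_level, max_level = 2, 5

        # an RoI of roughly 112 x 112 pixels
        roi_scale = math.sqrt(112.0 * 112.0)
        level = math.floor(math.log(roi_scale / refer_scale, 2) + refer_level)
        level = min(max(level, min_level), max_level)  # clamp to valid levels
        # level == 3, so this RoI is assigned to FPN level 3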

    Args:

        fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is 
            float32 or float64. The input fpn_rois.
        min_level(int32): The lowest level of FPN layer where the proposals come 
            from.
        max_level(int32): The highest level of FPN layer where the proposals
            come from.
        refer_level(int32): The referring level of FPN layer with specified scale.
        refer_scale(int32): The referring scale of FPN layer with specified level.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            parameter; it is None by default.

    Returns:
        Tuple:

        multi_rois(List) : A list of 2-D LoDTensors with shape [M, 4] 
        and data type float32 or float64. The length is 
        max_level-min_level+1. The proposals in each FPN level.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is 
        the number of total rois. The data type is int32. It is
        used to restore the order of fpn_rois.


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            fpn_rois = fluid.layers.data(
                name='data', shape=[4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """

    helper = LayerHelper('distribute_fpn_proposals', **locals())
    dtype = helper.input_dtype('fpn_rois')
    num_lvl = max_level - min_level + 1
    multi_rois = [
        helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
    ]
    restore_ind = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='distribute_fpn_proposals',
        inputs={'FpnRois': fpn_rois},
        outputs={'MultiFpnRois': multi_rois,
                 'RestoreIndex': restore_ind},
        attrs={
            'min_level': min_level,
            'max_level': max_level,
            'refer_level': refer_level,
            'refer_scale': refer_scale
        })
    return multi_rois, restore_ind


@templatedoc()
def box_decoder_and_assign(prior_box,
                           prior_box_var,
                           target_box,
                           box_score,
                           box_clip,
                           name=None):
    """
    ${comment}
    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            parameter; it is None by default.

    Returns:
        Tuple:

        decode_box(${decode_box_type}): ${decode_box_comment}

        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}


    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            pb = fluid.layers.data(
                name='prior_box', shape=[4], dtype='float32')
            pbv = fluid.layers.data(
                name='prior_box_var', shape=[4], 
                dtype='float32', append_batch_size=False)
            loc = fluid.layers.data(
                name='target_box', shape=[4*81], dtype='float32')
            scores = fluid.layers.data(
                name='scores', shape=[81], dtype='float32')
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)

    """
    helper = LayerHelper("box_decoder_and_assign", **locals())

    decoded_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)
    output_assign_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    helper.append_op(
        type="box_decoder_and_assign",
        inputs={
            "PriorBox": prior_box,
            "PriorBoxVar": prior_box_var,
            "TargetBox": target_box,
            "BoxScore": box_score
        },
        attrs={"box_clip": box_clip},
        outputs={
            "DecodeBox": decoded_box,
            "OutputAssignBox": output_assign_box
        })
    return decoded_box, output_assign_box


def collect_fpn_proposals(multi_rois,
                          multi_scores,
                          min_level,
                          max_level,
                          post_nms_top_n,
                          name=None):
    """
    **This OP only supports LoDTensor as input**. Concat multi-level RoIs 
    (Region of Interest) and select N RoIs with respect to multi_scores. 
    This operation performs the following steps:

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id (see the sketch of steps 2-4 below)

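    A minimal NumPy sketch of the selection logic above (ignoring LoD/batch
    bookkeeping and the final re-sort; names and shapes are illustrative
    only):

    .. code-block:: python

        import numpy as np

        # two FPN levels, each with a few RoIs and their scores
        multi_rois = [np.random.rand(6, 4), np.random.rand(4, 4)]
        multi_scores = [np.random.rand(6, 1), np.random.rand(4, 1)]
        post_nms_top_n = 5

        rois = np.concatenate(multi_rois, axis=0)                 # step 2
        scores = np.concatenate(multi_scores, axis=0).reshape(-1)

        # steps 3-4: keep the post_nms_top_n highest-scoring RoIs
        keep = np.argsort(-scores)[:post_nms_top_n]
        fpn_rois = rois[keep]
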
    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D 
            LoDTensor with shape [N, 4] and data type is float32 or float64, 
            N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Element in list 
            is 2-D LoDTensor with shape [N, 1] and data type is float32 or
            float64, N is the number of RoIs.
        min_level(int): The lowest level of FPN layer to collect
        max_level(int): The highest level of FPN layer to collect
        post_nms_top_n(int): The number of selected RoIs
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            parameter; it is None by default.

    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is 
        float32 or float64. Selected RoIs. 


    Examples:
        .. code-block:: python
           
            import paddle.fluid as fluid
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.layers.data(
                    name='roi_'+str(i), shape=[4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.layers.data(
                    name='score_'+str(i), shape=[1], dtype='float32', lod_level=1))

            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois, 
                multi_scores=multi_scores,
                min_level=2, 
                max_level=5, 
                post_nms_top_n=2000)
    """

    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    num_lvl = max_level - min_level + 1
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True
    helper.append_op(
        type='collect_fpn_proposals',
        inputs={
            'MultiLevelRois': input_rois,
            'MultiLevelScores': input_scores
        },
        outputs={'FpnRois': output_rois},
        attrs={'post_nms_topN': post_nms_top_n})
    return output_rois