#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

from paddle import _C_ops, _legacy_C_ops
from paddle.tensor.math import _add_with_axis

from ..fluid.data_feeder import check_type, check_variable_and_dtype
from ..fluid.framework import (
    Variable,
    _in_legacy_dygraph,
    _non_static_mode,
    in_dygraph_mode,
)
from ..fluid.initializer import Normal
from ..fluid.layer_helper import LayerHelper
from ..fluid.layers import utils
from ..framework import _current_expected_place
from ..nn import BatchNorm2D, Conv2D, Layer, ReLU, Sequential

__all__ = [  # noqa
    'yolo_loss',
    'yolo_box',
    'prior_box',
    'box_coder',
    'deform_conv2d',
    'DeformConv2D',
    'distribute_fpn_proposals',
    'generate_proposals',
    'read_file',
    'decode_jpeg',
    'roi_pool',
    'RoIPool',
    'psroi_pool',
    'PSRoIPool',
    'roi_align',
    'RoIAlign',
    'nms',
    'matrix_nms',
]


def yolo_loss(
    x,
    gt_box,
    gt_label,
    anchors,
    anchor_mask,
    class_num,
    ignore_thresh,
    downsample_ratio,
    gt_score=None,
    use_label_smooth=True,
    name=None,
    scale_x_y=1.0,
):
    r"""

    This operator generates YOLOv3 loss based on given predict result and ground
    truth boxes.

    The output of the previous network is in shape [N, C, H, W], where H and W
    should be the same and specify the grid size. Each grid point predicts a
    given number of bounding boxes; this number, denoted S below, is specified
    by the number of anchor clusters in each scale. In the second (channel)
    dimension, C should be equal to S * (class_num + 5), where class_num is the
    object category number of the source dataset (such as 80 in the COCO
    dataset), so in the second (channel) dimension, apart from the 4 box
    location coordinates x, y, w, h, each anchor box also includes the
    confidence score of the box and its class one-hot key.
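
    For example, with 2 anchors selected by :attr:`anchor_mask` (S = 2) and
    :attr:`class_num` = 2, C = 2 * (2 + 5) = 14, which matches the input shape
    used in the example below.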

    Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box predictions
    should be as follows:

    $$
    b_x = \sigma(t_x) + c_x
    $$
    $$
    b_y = \sigma(t_y) + c_y
    $$
    $$
    b_w = p_w e^{t_w}
    $$
    $$
    b_h = p_h e^{t_h}
    $$

    In the equation above, :math:`c_x, c_y` is the left top corner of current grid
    and :math:`p_w, p_h` is specified by anchors.

    As for confidence score, it is the logistic regression value of IoU between
    anchor boxes and ground truth boxes, the score of the anchor box which has
    the max IoU should be 1, and if the anchor box has an IoU bigger than the
    ignore thresh, the confidence score loss of this anchor box will be ignored.

    Therefore, the YOLOv3 loss consists of three major parts: box location loss,
    objectness loss and classification loss. The L1 loss is used for
    box coordinates (w, h), while sigmoid cross entropy loss is used for box
    coordinates (x, y), objectness loss and classification loss.
    Each ground truth box finds a best matching anchor box in all anchors.
    Prediction of this anchor box will incur all three parts of losses, and
    prediction of anchor boxes with no GT box matched will only incur objectness
    loss.

    In order to trade off box coordinate losses between big boxes and small
    boxes, box coordinate losses will be multiplied by a scale weight, which is
    calculated as follows.

    $$
    weight_{box} = 2.0 - t_w * t_h
    $$

    Final loss will be represented as follows.

    $$
    loss = (loss_{xy} + loss_{wh}) * weight_{box} + loss_{conf} + loss_{class}
    $$

    When :attr:`use_label_smooth` is set to :attr:`True`, the classification
    target will be smoothed when calculating classification loss, target of
    positive samples will be smoothed to :math:`1.0 - 1.0 / class\_num` and target of
    negative samples will be smoothed to :math:`1.0 / class\_num`.

    When :attr:`gt_score` is given, which means the mixup score of ground truth
    boxes, all losses incurred by a ground truth box will be multiplied by its
    mixup score.

    Args:
        x (Tensor): The input tensor of YOLOv3 loss operator. This is a 4-D
                      tensor with shape of [N, C, H, W]. H and W should be same,
                      and the second dimension(C) stores box locations, confidence
                      score and classification one-hot keys of each anchor box.
                      The data type is float32 or float64.
        gt_box (Tensor): ground truth boxes, should be in shape of [N, B, 4],
                          in the third dimension, x, y, w, h should be stored.
                          x, y is the center coordinate of boxes, w, h are the
                          width and height, x, y, w, h should be divided by
                          input image height to scale to [0, 1].
                          N is the batch number and B is the max box number in
                          an image. The data type is float32 or float64.
        gt_label (Tensor): class id of ground truth boxes, should be in shape
                            of [N, B]. The data type is int32.
        anchors (list|tuple): The anchor width and height, it will be parsed
                              pair by pair.
        anchor_mask (list|tuple): The mask index of anchors used in current
                                  YOLOv3 loss calculation.
        class_num (int): The number of classes.
        ignore_thresh (float): The ignore threshold to ignore confidence loss.
        downsample_ratio (int): The downsample ratio from network input to YOLOv3
                                loss input, so 32, 16, 8 should be set for the
                                 first, second, and third YOLOv3 loss operators.
        gt_score (Tensor, optional): mixup score of ground truth boxes, should be in shape
                            of [N, B]. Default None.
        use_label_smooth (bool, optional): Whether to use label smooth. Default True.
        name (str, optional): The default value is None. Normally there is no need
                       for user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`
        scale_x_y (float, optional): Scale the center point of decoded bounding box.
                           Default 1.0.

    Returns:
        Tensor: A 1-D tensor with shape [N], the value of yolov3 loss

    Examples:
      .. code-block:: python

          import paddle

          x = paddle.rand([2, 14, 8, 8]).astype('float32')
          gt_box = paddle.rand([2, 10, 4]).astype('float32')
          gt_label = paddle.rand([2, 10]).astype('int32')


          loss = paddle.vision.ops.yolo_loss(x,
                                             gt_box=gt_box,
                                             gt_label=gt_label,
                                             anchors=[10, 13, 16, 30],
                                             anchor_mask=[0, 1],
                                             class_num=2,
                                             ignore_thresh=0.7,
                                             downsample_ratio=8,
                                             use_label_smooth=True,
                                             scale_x_y=1.)
    """

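    # Like most ops in this module, yolo_loss dispatches across three execution
    # paths: the new dygraph C++ API (_C_ops), the legacy dygraph API
    # (_legacy_C_ops), and static-graph op construction via LayerHelper.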
    if in_dygraph_mode():
        loss, _, _ = _C_ops.yolo_loss(
            x,
            gt_box,
            gt_label,
            gt_score,
            anchors,
            anchor_mask,
            class_num,
            ignore_thresh,
            downsample_ratio,
            use_label_smooth,
            scale_x_y,
        )
        return loss

    if _non_static_mode():
        loss, _, _ = _legacy_C_ops.yolov3_loss(
            x,
            gt_box,
            gt_label,
            gt_score,
            'anchors',
            anchors,
            'anchor_mask',
            anchor_mask,
            'class_num',
            class_num,
            'ignore_thresh',
            ignore_thresh,
            'downsample_ratio',
            downsample_ratio,
            'use_label_smooth',
            use_label_smooth,
            'scale_x_y',
            scale_x_y,
        )
        return loss

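    # Static-graph path: validate inputs and attrs, then append a yolov3_loss
    # op to the current program.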
    helper = LayerHelper('yolov3_loss', **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss')
    check_variable_and_dtype(
        gt_box, 'gt_box', ['float32', 'float64'], 'yolo_loss'
    )
    check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss')
    check_type(anchors, 'anchors', (list, tuple), 'yolo_loss')
    check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss')
    check_type(class_num, 'class_num', int, 'yolo_loss')
    check_type(ignore_thresh, 'ignore_thresh', float, 'yolo_loss')
    check_type(use_label_smooth, 'use_label_smooth', bool, 'yolo_loss')

    loss = helper.create_variable_for_type_inference(dtype=x.dtype)

    objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
    gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')

    inputs = {
        "X": x,
        "GTBox": gt_box,
        "GTLabel": gt_label,
    }
    if gt_score is not None:
        inputs["GTScore"] = gt_score

    attrs = {
        "anchors": anchors,
        "anchor_mask": anchor_mask,
        "class_num": class_num,
        "ignore_thresh": ignore_thresh,
        "downsample_ratio": downsample_ratio,
        "use_label_smooth": use_label_smooth,
        "scale_x_y": scale_x_y,
    }

    helper.append_op(
        type='yolov3_loss',
        inputs=inputs,
        outputs={
            'Loss': loss,
            'ObjectnessMask': objectness_mask,
            'GTMatchMask': gt_match_mask,
        },
        attrs=attrs,
    )
    return loss


def yolo_box(
    x,
    img_size,
    anchors,
    class_num,
    conf_thresh,
    downsample_ratio,
    clip_bbox=True,
    name=None,
    scale_x_y=1.0,
    iou_aware=False,
    iou_aware_factor=0.5,
):
    r"""

    This operator generates YOLO detection boxes from the output of a YOLOv3
    network.

    The output of the previous network is in shape [N, C, H, W], where H and W
    should be the same and specify the grid size. Each grid point predicts a
    given number of boxes; this number, denoted S below, is specified by the
    number of anchors. In the second (channel) dimension, C should be equal to
    S * (5 + class_num) if :attr:`iou_aware` is false, otherwise C should be
    equal to S * (6 + class_num). class_num is the object category number of
    the source dataset (such as 80 in the COCO dataset), so in the second
    (channel) dimension, apart from the 4 box location coordinates x, y, w, h,
    each anchor box also includes the confidence score of the box and its
    class one-hot key.
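
    For example, with 2 anchors (S = 2), :attr:`class_num` = 2 and
    :attr:`iou_aware` set to false, C = 2 * (5 + 2) = 14, which matches the
    input shape used in the example below.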

    Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box
    predictions should be as follows:

    $$
    b_x = \sigma(t_x) + c_x
    $$
    $$
    b_y = \sigma(t_y) + c_y
    $$
    $$
    b_w = p_w e^{t_w}
    $$
    $$
    b_h = p_h e^{t_h}
    $$

    In the equation above, :math:`c_x, c_y` is the left top corner of the current
    grid and :math:`p_w, p_h` is specified by anchors.

    The logistic regression value of the 5th channel of each anchor prediction
    box represents the confidence score of that prediction box, and the logistic
    regression value of the last :attr:`class_num` channels of each anchor prediction
    box represents the classification scores. Boxes with confidence scores less than
    :attr:`conf_thresh` should be ignored, and the final box score is the product of
    the confidence score and the classification score.

    $$
    score_{pred} = score_{conf} * score_{class}
    $$


    Args:
        x (Tensor): The input tensor of YoloBox operator is a 4-D tensor with
                      shape of [N, C, H, W]. The second dimension(C) stores box
                      locations, confidence score and classification one-hot keys
                      of each anchor box. Generally, X should be the output of
                      YOLOv3 network. The data type is float32 or float64.
        img_size (Tensor): The image size tensor of YoloBox operator. This is a
                           2-D tensor with shape of [N, 2]. This tensor holds
                           height and width of each input image used for resizing
                           output box in input image scale. The data type is int32.
        anchors (list|tuple): The anchor width and height, it will be parsed pair
                              by pair.
        class_num (int): The number of classes.
        conf_thresh (float): The confidence scores threshold of detection boxes.
                             Boxes with confidence scores under threshold should
                             be ignored.
        downsample_ratio (int): The downsample ratio from network input to
                                :attr:`yolo_box` operator input, so 32, 16, 8
                                should be set for the first, second, and third
                                :attr:`yolo_box` layer.
        clip_bbox (bool, optional): Whether to clip the output bounding box in
                          :attr:`img_size` boundary. Default true.
        name (str, optional): The default value is None. Normally there is no need
                       for user to set this property. For more information,
                       please refer to :ref:`api_guide_Name`.
        scale_x_y (float, optional): Scale the center point of decoded bounding box. Default 1.0
        iou_aware (bool, optional): Whether use iou aware. Default false.
        iou_aware_factor (float, optional): iou aware factor. Default 0.5.

    Returns:
        Tensor: A 3-D tensor with shape [N, M, 4], the coordinates of boxes,
        and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
        scores of boxes.

    Examples:

    .. code-block:: python

        import paddle

        x = paddle.rand([2, 14, 8, 8]).astype('float32')
        img_size = paddle.ones((2, 2)).astype('int32')

        boxes, scores = paddle.vision.ops.yolo_box(x,
                                                   img_size=img_size,
                                                   anchors=[10, 13, 16, 30],
                                                   class_num=2,
                                                   conf_thresh=0.01,
                                                   downsample_ratio=8,
                                                   clip_bbox=True,
                                                   scale_x_y=1.)
    """
    if in_dygraph_mode():
        boxes, scores = _C_ops.yolo_box(
            x,
            img_size,
            anchors,
            class_num,
            conf_thresh,
            downsample_ratio,
            clip_bbox,
            scale_x_y,
            iou_aware,
            iou_aware_factor,
        )
        return boxes, scores

    if _non_static_mode():
        boxes, scores = _legacy_C_ops.yolo_box(
            x,
            img_size,
            'anchors',
            anchors,
            'class_num',
            class_num,
            'conf_thresh',
            conf_thresh,
            'downsample_ratio',
            downsample_ratio,
            'clip_bbox',
            clip_bbox,
            'scale_x_y',
            scale_x_y,
            'iou_aware',
            iou_aware,
            'iou_aware_factor',
            iou_aware_factor,
        )
        return boxes, scores

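    # Static-graph path: validate inputs and attrs, then append a yolo_box op
    # to the current program.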
    helper = LayerHelper('yolo_box', **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_box')
    check_variable_and_dtype(img_size, 'img_size', 'int32', 'yolo_box')
    check_type(anchors, 'anchors', (list, tuple), 'yolo_box')
    check_type(conf_thresh, 'conf_thresh', float, 'yolo_box')

    boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
    scores = helper.create_variable_for_type_inference(dtype=x.dtype)

    attrs = {
        "anchors": anchors,
        "class_num": class_num,
        "conf_thresh": conf_thresh,
        "downsample_ratio": downsample_ratio,
        "clip_bbox": clip_bbox,
        "scale_x_y": scale_x_y,
        "iou_aware": iou_aware,
        "iou_aware_factor": iou_aware_factor,
    }

    helper.append_op(
        type='yolo_box',
        inputs={
            "X": x,
            "ImgSize": img_size,
        },
        outputs={
            'Boxes': boxes,
            'Scores': scores,
        },
        attrs=attrs,
    )
    return boxes, scores


def prior_box(
    input,
    image,
    min_sizes,
    max_sizes=None,
    aspect_ratios=[1.0],
    variance=[0.1, 0.1, 0.2, 0.2],
    flip=False,
    clip=False,
    steps=[0.0, 0.0],
    offset=0.5,
    min_max_aspect_ratios_order=False,
    name=None,
):
    r"""

    This op generates prior boxes for the SSD (Single Shot MultiBox Detector) algorithm.

    Each position of the input produces N prior boxes, where N is determined by
    the count of min_sizes, max_sizes and aspect_ratios. The size of each box
    is in the [min_size, max_size] interval, generated in sequence according to
    the aspect_ratios.

    Args:
       input (Tensor): 4-D tensor(NCHW), the data type should be float32 or float64.
       image (Tensor): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
       min_sizes (list|tuple|float): the min sizes of generated prior boxes.
       max_sizes (list|tuple|None, optional): the max sizes of generated prior boxes.
            Default: None, means [] and will not be used.
       aspect_ratios (list|tuple|float, optional): the aspect ratios of generated
            prior boxes. Default: [1.0].
       variance (list|tuple, optional): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
       flip (bool, optional): Whether to flip aspect ratios. Default: False.
       clip (bool, optional): Whether to clip out-of-boundary boxes. Default: False.
       steps (list|tuple, optional): Prior boxes steps across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the prior boxes step across
            height or width of the input will be automatically calculated.
            Default: [0., 0.]
       offset (float, optional): Prior boxes center offset. Default: 0.5
       min_max_aspect_ratios_order (bool, optional): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.
       name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tensor: the output prior boxes and the expanded variances of PriorBox.
            The prior boxes are a 4-D tensor with layout [H, W, num_priors, 4],
            where num_priors is the total box count at each position of the input.
            The expanded variances are a 4-D tensor with the same shape as the
            prior boxes.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand((1, 3, 6, 9), dtype=paddle.float32)
            image = paddle.rand((1, 3, 9, 12), dtype=paddle.float32)

            box, var = paddle.vision.ops.prior_box(
                input=input,
                image=image,
                min_sizes=[2.0, 4.0],
                clip=True,
                flip=True)

    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(
        input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box'
    )

    def _is_list_or_tuple_(data):
        return isinstance(data, (list, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not _is_list_or_tuple_(steps):
        steps = [steps]
    if len(steps) != 2:
        raise ValueError('steps should be (step_w, step_h)')

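    # A step of 0.0 is a sentinel: the prior_box op then computes that step
    # automatically (see the `steps` argument docs), typically as the ratio of
    # the image size to the feature map size along that axis.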
    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

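    # Forward max_sizes only when it is non-empty and positive; otherwise the
    # attribute is omitted (legacy/static paths) or passed as [] (_C_ops path).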
    cur_max_sizes = None
    if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        cur_max_sizes = max_sizes

    if in_dygraph_mode():
        step_w, step_h = steps
        if max_sizes is None:
            max_sizes = []
        box, var = _C_ops.prior_box(
            input,
            image,
            min_sizes,
            aspect_ratios,
            variance,
            max_sizes,
            flip,
            clip,
            step_w,
            step_h,
            offset,
            min_max_aspect_ratios_order,
        )
        return box, var

    if _in_legacy_dygraph():
        attrs = (
            'min_sizes',
            min_sizes,
            'aspect_ratios',
            aspect_ratios,
            'variances',
            variance,
            'flip',
            flip,
            'clip',
            clip,
            'step_w',
            steps[0],
            'step_h',
            steps[1],
            'offset',
            offset,
            'min_max_aspect_ratios_order',
            min_max_aspect_ratios_order,
        )
        if cur_max_sizes is not None:
            attrs += ('max_sizes', cur_max_sizes)
        box, var = _legacy_C_ops.prior_box(input, image, *attrs)
        return box, var
    else:
        attrs = {
            'min_sizes': min_sizes,
            'aspect_ratios': aspect_ratios,
            'variances': variance,
            'flip': flip,
            'clip': clip,
            'step_w': steps[0],
            'step_h': steps[1],
            'offset': offset,
            'min_max_aspect_ratios_order': min_max_aspect_ratios_order,
        }
        if cur_max_sizes is not None:
            attrs['max_sizes'] = cur_max_sizes

        box = helper.create_variable_for_type_inference(dtype)
        var = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="prior_box",
            inputs={"Input": input, "Image": image},
            outputs={"Boxes": box, "Variances": var},
            attrs=attrs,
        )
        box.stop_gradient = True
        var.stop_gradient = True
        return box, var


def box_coder(
    prior_box,
    prior_box_var,
    target_box,
    code_type="encode_center_size",
    box_normalized=True,
    axis=0,
    name=None,
):
    r"""
    Encode/Decode the target bounding box with the priorbox information.

    The Encoding schema described below:

    .. math::

        ox &= (tx - px) / pw / pxv

        oy &= (ty - py) / ph / pyv

        ow &= log(abs(tw / pw)) / pwv

        oh &= log(abs(th / ph)) / phv

    The Decoding schema described below:

    .. math::

        ox &= (pw * pxv * tx + px) - tw / 2

        oy &= (ph * pyv * ty + py) - th / 2

        ow &= exp(pwv * tw) * pw + tw / 2

        oh &= exp(phv * th) * ph + th / 2

    where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
    width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
    the priorbox's (anchor) center coordinates, width and height. `pxv`,
    `pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
    `ow`, `oh` denote the encoded/decoded coordinates, width and height.
    During Box Decoding, two modes for broadcast are supported. Say target
    box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
    [M, 4]. Then prior box will broadcast to target box along the
    assigned axis.

    Args:
        prior_box (Tensor): Box list prior_box is a 2-D Tensor with shape
            [M, 4] holds M boxes and data type is float32 or float64. Each box
            is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
            left top coordinate of the anchor box, if the input is image feature
            map, they are close to the origin of the coordinate system.
            [xmax, ymax] is the right bottom coordinate of the anchor box.
        prior_box_var (List|Tensor|None): prior_box_var supports three types
            of input. One is a Tensor with shape [M, 4] which holds M groups
            and whose data type is float32 or float64. The second is a list of
            4 elements shared by all boxes, with data type float32 or float64.
            The third is None, in which case the variance is not involved in
            the calculation.
        target_box (Tensor): This input can be a 2-D LoDTensor with shape
            [N, 4] when code_type is 'encode_center_size'. This input also can
            be a 3-D Tensor with shape [N, M, 4] when code_type is
            'decode_center_size'. Each box is represented as
            [xmin, ymin, xmax, ymax]. The data type is float32 or float64.
        code_type (str, optional): The code type used with the target box. It can be
            `encode_center_size` or `decode_center_size`. `encode_center_size`
            by default.
        box_normalized (bool, optional): Whether treat the priorbox as a normalized box.
            Set true by default.
        axis (int, optional): Which axis in PriorBox to broadcast for box decode,
            for example, if axis is 0 and TargetBox has shape [N, M, 4] and
            PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
            for decoding. It is only valid when code type is
            `decode_center_size`. Set 0 by default.
        name (str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Tensor: output boxes, when code_type is 'encode_center_size', the
            output tensor of box_coder_op with shape [N, M, 4] representing the
            result of N target boxes encoded with M Prior boxes and variances.
            When code_type is 'decode_center_size', N represents the batch size
            and M represents the number of decoded boxes.

    Examples:
        .. code-block:: python

            import paddle

            # For encode
            prior_box_encode = paddle.rand((80, 4), dtype=paddle.float32)
            prior_box_var_encode = paddle.rand((80, 4), dtype=paddle.float32)
            target_box_encode = paddle.rand((20, 4), dtype=paddle.float32)
            output_encode = paddle.vision.ops.box_coder(
                prior_box=prior_box_encode,
                prior_box_var=prior_box_var_encode,
                target_box=target_box_encode,
                code_type="encode_center_size")

            # For decode
            prior_box_decode = paddle.rand((80, 4), dtype=paddle.float32)
            prior_box_var_decode = paddle.rand((80, 4), dtype=paddle.float32)
            target_box_decode = paddle.rand((20, 80, 4), dtype=paddle.float32)
            output_decode = paddle.vision.ops.box_coder(
                prior_box=prior_box_decode,
                prior_box_var=prior_box_var_decode,
                target_box=target_box_decode,
                code_type="decode_center_size",
                box_normalized=False)

    """
    check_variable_and_dtype(
        prior_box, 'prior_box', ['float32', 'float64'], 'box_coder'
    )
    check_variable_and_dtype(
        target_box, 'target_box', ['float32', 'float64'], 'box_coder'
    )

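    # prior_box_var may be a per-box Tensor or a 4-element Python list; the two
    # cases map to different op inputs/attributes in the branches below.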
    if in_dygraph_mode():
        if isinstance(prior_box_var, Variable):
            output_box = _C_ops.box_coder(
                prior_box,
                prior_box_var,
                target_box,
                code_type,
                box_normalized,
                axis,
                [],
            )
        elif isinstance(prior_box_var, list):
            output_box = _C_ops.box_coder(
                prior_box,
                None,
                target_box,
                code_type,
                box_normalized,
                axis,
                prior_box_var,
            )
        else:
            raise TypeError("Input prior_box_var must be Variable or list")
        return output_box

    if _in_legacy_dygraph():
        if isinstance(prior_box_var, Variable):
            output_box = _legacy_C_ops.box_coder(
                prior_box,
                prior_box_var,
                target_box,
                "code_type",
                code_type,
                "box_normalized",
                box_normalized,
                "axis",
                axis,
            )
        elif isinstance(prior_box_var, list):
            output_box = _legacy_C_ops.box_coder(
                prior_box,
                None,
                target_box,
                "code_type",
                code_type,
                "box_normalized",
                box_normalized,
                "axis",
                axis,
                "variance",
                prior_box_var,
            )
        else:
            raise TypeError("Input prior_box_var must be Variable or list")
        return output_box
    else:
        helper = LayerHelper("box_coder", **locals())

        output_box = helper.create_variable_for_type_inference(
            dtype=prior_box.dtype
        )

        inputs = {"PriorBox": prior_box, "TargetBox": target_box}
        attrs = {
            "code_type": code_type,
            "box_normalized": box_normalized,
            "axis": axis,
        }
        if isinstance(prior_box_var, Variable):
            inputs['PriorBoxVar'] = prior_box_var
        elif isinstance(prior_box_var, list):
            attrs['variance'] = prior_box_var
        else:
            raise TypeError("Input prior_box_var must be Variable or list")
        helper.append_op(
            type="box_coder",
            inputs=inputs,
            attrs=attrs,
            outputs={"OutputBox": output_box},
        )
        return output_box


def deform_conv2d(
    x,
    offset,
    weight,
    bias=None,
    stride=1,
    padding=0,
    dilation=1,
    deformable_groups=1,
    groups=1,
    mask=None,
    name=None,
):
    r"""
    Compute 2-D deformable convolution on 4-D input.
    Given input image x and output feature map y, the deformable convolution operation can be expressed as follows:


    Deformable Convolution v2:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

    Deformable Convolution v1:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

    where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    and :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
    <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    Example:
        - Input:

          x shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          weight shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

          offset shape: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`

          mask shape: :math:`(N, H_f * W_f, H_{out}, W_{out})`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

    Args:
        x (Tensor): The input image with [N, C, H, W] format. A Tensor with type
            float32, float64.
        offset (Tensor): The input coordinate offset of deformable convolution layer.
            A Tensor with type float32, float64.
        weight (Tensor): The convolution kernel with shape [M, C/g, kH, kW], where M is
            the number of output channels, g is the number of groups, kH is the filter's
            height, kW is the filter's width.
        bias (Tensor, optional): The bias with shape [M,]. Default: None.
        stride (int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. Default: 1.
        padding (int|list|tuple, optional): The padding size. If padding is a list/tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: 0.
        dilation (int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. Default: 1.
        deformable_groups (int, optional): The number of deformable group partitions.
            Default: 1.
        groups (int, optional): The groups number of the deformable conv layer. According to
            grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: 1.
        mask (Tensor, optional): The input mask of deformable convolution layer.
            A Tensor with type float32, float64. It should be None when you use
            deformable convolution v1. Default: None.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.

    Returns:
        Tensor: 4-D Tensor storing the deformable convolution result.
            A Tensor with type float32, float64.

    Examples:
        .. code-block:: python

          #deformable conv v2:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          weight = paddle.rand((16, 1, kh, kw))
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # mask shape should be [bs, kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          mask = paddle.rand((8, kh * kw, 26, 26))
          out = paddle.vision.ops.deform_conv2d(input, offset, weight, mask=mask)
          print(out.shape)
          # returns
          [8, 16, 26, 26]

          #deformable conv v1:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          weight = paddle.rand((16, 1, kh, kw))
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          out = paddle.vision.ops.deform_conv2d(input, offset, weight)
          print(out.shape)
          # returns
          [8, 16, 26, 26]
    """
    stride = utils.convert_to_list(stride, 2, 'stride')
    padding = utils.convert_to_list(padding, 2, 'padding')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

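    # Without a mask this falls back to deformable conv v1 (no modulation);
    # with a mask the modulated v2 form is used.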
    use_deform_conv2d_v1 = mask is None

    if in_dygraph_mode():
        pre_bias = _C_ops.deformable_conv(
            x,
            offset,
            weight,
            mask,
            stride,
            padding,
            dilation,
            deformable_groups,
            groups,
            1,
        )
        if bias is not None:
            out = _add_with_axis(pre_bias, bias, axis=1)
        else:
            out = pre_bias
    elif _in_legacy_dygraph():
        attrs = (
            'strides',
            stride,
            'paddings',
            padding,
            'dilations',
            dilation,
            'deformable_groups',
            deformable_groups,
            'groups',
            groups,
            'im2col_step',
            1,
        )
        if use_deform_conv2d_v1:
            op_type = 'deformable_conv_v1'
            pre_bias = getattr(_legacy_C_ops, op_type)(
                x, offset, weight, *attrs
            )
        else:
            op_type = 'deformable_conv'
            pre_bias = getattr(_legacy_C_ops, op_type)(
                x, offset, mask, weight, *attrs
            )
        if bias is not None:
            out = _add_with_axis(pre_bias, bias, axis=1)
        else:
            out = pre_bias
    else:
        check_variable_and_dtype(
            x, "x", ['float32', 'float64'], 'deform_conv2d'
        )
        check_variable_and_dtype(
            offset, "offset", ['float32', 'float64'], 'deform_conv2d'
        )

        num_channels = x.shape[1]

        helper = LayerHelper('deformable_conv', **locals())
        dtype = helper.input_dtype()

        stride = utils.convert_to_list(stride, 2, 'stride')
        padding = utils.convert_to_list(padding, 2, 'padding')
        dilation = utils.convert_to_list(dilation, 2, 'dilation')

        pre_bias = helper.create_variable_for_type_inference(dtype)

        if use_deform_conv2d_v1:
            op_type = 'deformable_conv_v1'
            inputs = {
                'Input': x,
                'Filter': weight,
                'Offset': offset,
            }
        else:
            op_type = 'deformable_conv'
            inputs = {
                'Input': x,
                'Filter': weight,
                'Offset': offset,
                'Mask': mask,
            }

        outputs = {"Output": pre_bias}
        attrs = {
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'deformable_groups': deformable_groups,
            'im2col_step': 1,
        }
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
        )

        if bias is not None:
            out = helper.create_variable_for_type_inference(dtype)
            helper.append_op(
                type='elementwise_add',
                inputs={'X': [pre_bias], 'Y': [bias]},
                outputs={'Out': [out]},
                attrs={'axis': 1},
            )
        else:
            out = pre_bias
    return out


class DeformConv2D(Layer):
    r"""
    Compute 2-D deformable convolution on 4-D input.
    Given input image x and output feature map y, the deformable convolution operation can be expressed as follows:


    Deformable Convolution v2:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

    Deformable Convolution v1:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

    where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    and :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
    <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

    Example:
        - Input:

          x shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          weight shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

          offset shape: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`

          mask shape: :math:`(N, H_f * W_f, H_{out}, W_{out})`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1


    Parameters:
        in_channels(int): The number of input channels in the input image.
        out_channels(int): The number of output channels produced by the convolution.
        kernel_size(int|list|tuple): The size of the convolving kernel.
        stride(int|list|tuple, optional): The stride size. If stride is a list/tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. The default value is 1.
        padding (int|list|tuple, optional): The padding size. If padding is a list/tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: padding = 0.
        dilation(int|list|tuple, optional): The dilation size. If dilation is a list/tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. The default value is 1.
        deformable_groups (int, optional): The number of deformable group partitions.
            Default: deformable_groups = 1.
        groups(int, optional): The groups number of the DeformConv2D layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. The default value is 1.
        weight_attr(ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. The default value is None.
        bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. The default value is None.
    Attribute:
        **weight** (Parameter): the learnable weights of filter of this layer.
        **bias** (Parameter or None): the learnable bias of this layer.
    Shape:
        - x: :math:`(N, C_{in}, H_{in}, W_{in})`
        - offset: :math:`(N, 2 * H_f * W_f, H_{out}, W_{out})`
        - mask: :math:`(N, H_f * W_f, H_{out}, W_{out})`
        - output: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        ..  math::

            H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (kernel\_size[0] - 1) + 1))}{strides[0]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (kernel\_size[1] - 1) + 1))}{strides[1]} + 1

    Examples:
        .. code-block:: python

          #deformable conv v2:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # mask shape should be [bs, kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          mask = paddle.rand((8, kh * kw, 26, 26))
          deform_conv = paddle.vision.ops.DeformConv2D(
              in_channels=1,
              out_channels=16,
              kernel_size=[kh, kw])
          out = deform_conv(input, offset, mask)
          print(out.shape)
          # returns
          [8, 16, 26, 26]

          #deformable conv v1:

          import paddle
          input = paddle.rand((8, 1, 28, 28))
          kh, kw = 3, 3
          # offset shape should be [bs, 2 * kh * kw, out_h, out_w]
          # In this case, for an input of 28, stride of 1
          # and kernel size of 3, without padding, the output size is 26
          offset = paddle.rand((8, 2 * kh * kw, 26, 26))
          deform_conv = paddle.vision.ops.DeformConv2D(
              in_channels=1,
              out_channels=16,
              kernel_size=[kh, kw])
          out = deform_conv(input, offset)
          print(out.shape)
          # returns
          [8, 16, 26, 26]
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        deformable_groups=1,
        groups=1,
        weight_attr=None,
        bias_attr=None,
    ):
        super().__init__()
        assert (
            weight_attr is not False
        ), "weight_attr should not be False in Conv."
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr
        self._deformable_groups = deformable_groups
        self._groups = groups
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._channel_dim = 1

        self._stride = utils.convert_to_list(stride, 2, 'stride')
        self._dilation = utils.convert_to_list(dilation, 2, 'dilation')
        self._kernel_size = utils.convert_to_list(kernel_size, 2, 'kernel_size')

        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups.")

        self._padding = utils.convert_to_list(padding, 2, 'padding')

        filter_shape = [out_channels, in_channels // groups] + self._kernel_size

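        # Default weight init: zero-mean Normal with std = sqrt(2 / fan_in),
        # where fan_in = kh * kw * in_channels (see the `weight_attr` docs).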
        def _get_default_param_initializer():
            filter_elem_num = np.prod(self._kernel_size) * self._in_channels
            std = (2.0 / filter_elem_num) ** 0.5
            return Normal(0.0, std, 0)

        self.weight = self.create_parameter(
            shape=filter_shape,
            attr=self._weight_attr,
            default_initializer=_get_default_param_initializer(),
        )
        self.bias = self.create_parameter(
            attr=self._bias_attr, shape=[self._out_channels], is_bias=True
        )

    def forward(self, x, offset, mask=None):
        out = deform_conv2d(
            x=x,
            offset=offset,
            weight=self.weight,
            bias=self.bias,
            stride=self._stride,
            padding=self._padding,
            dilation=self._dilation,
            deformable_groups=self._deformable_groups,
            groups=self._groups,
            mask=mask,
        )
        return out


def distribute_fpn_proposals(
    fpn_rois,
    min_level,
    max_level,
    refer_level,
    refer_scale,
    pixel_offset=False,
    rois_num=None,
    name=None,
):
    r"""

    In Feature Pyramid Networks (FPN) models, it is needed to distribute
    all proposals into different FPN levels, with respect to the scale of the
    proposals, the referring scale and the referring level. Besides, to restore
    the order of proposals, we return an array which indicates the original
    index of rois in current proposals. To compute the FPN level for each roi,
    the formula is given as follows:

    .. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\
        level &= floor(\log(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function to compute the area of each roi.
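
    For example, with :attr:`refer_level` = 4 and :attr:`refer_scale` = 224, a
    RoI whose area is :math:`112 \times 112` is assigned to level
    :math:`floor(\log_2(112 / 224) + 4) = 3` (the logarithm here is base 2, as
    in the FPN paper).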

    Args:
        fpn_rois (Tensor): The input fpn_rois. 2-D Tensor with shape [N, 4] and data type can be
            float32 or float64.
        min_level (int): The lowest level of FPN layer where the proposals come
            from.
        max_level (int): The highest level of FPN layer where the proposals
            come from.
        refer_level (int): The referring level of FPN layer with specified scale.
        refer_scale (int): The referring scale of FPN layer with specified level.
        pixel_offset (bool, optional): Whether there is pixel offset. If True, the offset of
            image shape will be 1. 'False' by default.
        rois_num (Tensor, optional): 1-D Tensor which contains the number of RoIs in each image.
            The shape is [B] and data type is int32. B is the number of images.
            If rois_num is not None, it will return a list of 1-D Tensor. Each element
            is the output RoIs' number of each image on the corresponding level
            and the shape is [B]. None by default.
        name (str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        - multi_rois (List), The proposals in each FPN level. It is a list of 2-D Tensors
          with shape [M, 4], where M is the number of proposals in the corresponding level
          and the data type is the same as `fpn_rois`. The length is max_level-min_level+1.
        - restore_ind (Tensor), The index used to restore the order of fpn_rois. It is a 2-D
          Tensor with shape [N, 1], where N is the number of total rois. The data type is int32.
        - rois_num_per_level (List), A list of 1-D Tensors and each Tensor is
          the RoIs' number in each image on the corresponding level. The shape
          is [B] and data type of int32, where B is the number of images.

    Examples:
        .. code-block:: python

            import paddle

            fpn_rois = paddle.rand((10, 4))
            rois_num = paddle.to_tensor([3, 1, 4, 2], dtype=paddle.int32)

            multi_rois, restore_ind, rois_num_per_level = paddle.vision.ops.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224,
                rois_num=rois_num)

    """
    num_lvl = max_level - min_level + 1

    if in_dygraph_mode():
        assert (
            rois_num is not None
        ), "rois_num should not be None in dygraph mode."
        (
            multi_rois,
            rois_num_per_level,
            restore_ind,
        ) = _C_ops.distribute_fpn_proposals(
            fpn_rois,
            rois_num,
            min_level,
            max_level,
            refer_level,
            refer_scale,
            pixel_offset,
        )
        return multi_rois, restore_ind, rois_num_per_level

    if _non_static_mode():
        assert (
            rois_num is not None
        ), "rois_num should not be None in dygraph mode."
        attrs = (
            'min_level',
            min_level,
            'max_level',
            max_level,
            'refer_level',
            refer_level,
            'refer_scale',
            refer_scale,
            'pixel_offset',
            pixel_offset,
        )
        (
            multi_rois,
            restore_ind,
            rois_num_per_level,
        ) = _legacy_C_ops.distribute_fpn_proposals(
            fpn_rois, rois_num, num_lvl, num_lvl, *attrs
        )
        return multi_rois, restore_ind, rois_num_per_level

    else:
        check_variable_and_dtype(
            fpn_rois,
            'fpn_rois',
            ['float32', 'float64'],
            'distribute_fpn_proposals',
        )
        helper = LayerHelper('distribute_fpn_proposals', **locals())
        dtype = helper.input_dtype('fpn_rois')
        multi_rois = [
            helper.create_variable_for_type_inference(dtype)
            for i in range(num_lvl)
        ]

        restore_ind = helper.create_variable_for_type_inference(dtype='int32')

        inputs = {'FpnRois': fpn_rois}
        outputs = {
            'MultiFpnRois': multi_rois,
            'RestoreIndex': restore_ind,
        }

        if rois_num is not None:
            inputs['RoisNum'] = rois_num
            rois_num_per_level = [
                helper.create_variable_for_type_inference(dtype='int32')
                for i in range(num_lvl)
            ]
            outputs['MultiLevelRoIsNum'] = rois_num_per_level
        else:
            rois_num_per_level = None

        helper.append_op(
            type='distribute_fpn_proposals',
            inputs=inputs,
            outputs=outputs,
            attrs={
                'min_level': min_level,
                'max_level': max_level,
                'refer_level': refer_level,
                'refer_scale': refer_scale,
                'pixel_offset': pixel_offset,
            },
        )
        return multi_rois, restore_ind, rois_num_per_level


def read_file(filename, name=None):
    """
    Reads a file and outputs its byte contents as a one-dimensional
    uint8 Tensor.

    Args:
        filename (str): Path of the file to be read.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        A uint8 tensor.

    Examples:
        .. code-block:: python

            import cv2
            import paddle

            fake_img = (paddle.rand((400, 300, 3)).numpy() * 255).astype('uint8')

            cv2.imwrite('fake.jpg', fake_img)

            img_bytes = paddle.vision.ops.read_file('fake.jpg')

            print(img_bytes.shape)
            # [142915]
    """

    if _non_static_mode():
        return _legacy_C_ops.read_file('filename', filename)

    inputs = dict()
    attrs = {'filename': filename}

    helper = LayerHelper("read_file", **locals())
    out = helper.create_variable_for_type_inference('uint8')
    helper.append_op(
        type="read_file", inputs=inputs, attrs=attrs, outputs={"Out": out}
    )

    return out


def decode_jpeg(x, mode='unchanged', name=None):
    """
    Decodes a JPEG image into a 3 dimensional RGB Tensor or 1 dimensional Gray Tensor.
    Optionally converts the image to the desired format.
    The values of the output tensor are uint8 between 0 and 255.

    Args:
        x (Tensor): A one dimensional uint8 tensor containing the raw bytes
            of the JPEG image.
        mode (str, optional): The read mode used for optionally converting the image.
            Default: 'unchanged'.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: A decoded image tensor with shape (image_channels, image_height, image_width)

    Examples:
        .. code-block:: python

            # required: gpu
            import cv2
            import numpy as np
            import paddle

            fake_img = (np.random.random(
                        (400, 300, 3)) * 255).astype('uint8')

            cv2.imwrite('fake.jpg', fake_img)

            img_bytes = paddle.vision.ops.read_file('fake.jpg')
            img = paddle.vision.ops.decode_jpeg(img_bytes)

            print(img.shape)
    """
    if in_dygraph_mode():
        return _C_ops.decode_jpeg(x, mode, _current_expected_place())
    elif _non_static_mode():
        return _legacy_C_ops.decode_jpeg(x, "mode", mode)

    inputs = {'X': x}
    attrs = {"mode": mode}

    helper = LayerHelper("decode_jpeg", **locals())
    out = helper.create_variable_for_type_inference('uint8')
    helper.append_op(
        type="decode_jpeg", inputs=inputs, attrs=attrs, outputs={"Out": out}
    )

    return out


def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
    """
    Position sensitive region of interest pooling (also known as PSROIPooling) is to perform
    position-sensitive average pooling on regions of interest specified by input. It performs
    on inputs of nonuniform sizes to obtain fixed-size feature maps.

    PSROIPooling is proposed by R-FCN. Please refer to https://arxiv.org/abs/1605.06409 for more details.

    Args:
        x (Tensor): Input features with shape (N, C, H, W). The data type can be float32 or float64.
        boxes (Tensor): Box coordinates of ROIs (Regions of Interest) to pool over. It should be
                         a 2-D Tensor with shape (num_rois, 4). Given as [[x1, y1, x2, y2], ...],
                         (x1, y1) is the top left coordinates, and (x2, y2) is the bottom
                         right coordinates.
        boxes_num (Tensor): The number of boxes contained in each picture in the batch.
        output_size (int|Tuple(int, int)): The pooled output size (H, W), data type
                               is int32. If int, H and W are both equal to output_size.
        spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their
                               input scale to the scale used when pooling. Default: 1.0
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        4-D Tensor. The pooled ROIs with shape (num_rois, output_channels, pooled_h, pooled_w).
        The output_channels equal to C / (pooled_h * pooled_w), where C is the channels of input.

    Examples:
        .. code-block:: python

            import paddle
            x = paddle.uniform([2, 490, 28, 28], dtype='float32')
            boxes = paddle.to_tensor([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], dtype='float32')
            boxes_num = paddle.to_tensor([1, 2], dtype='int32')
            pool_out = paddle.vision.ops.psroi_pool(x, boxes, boxes_num, 7, 1.0)
            print(pool_out.shape)
            # [3, 10, 7, 7]
    """

    check_type(output_size, 'output_size', (int, tuple, list), 'psroi_pool')
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    pooled_height, pooled_width = output_size
    assert len(x.shape) == 4, "Input features should be of shape (N, C, H, W)"
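    # PSRoIPool requires C == output_channels * pooled_h * pooled_w: each output
    # channel is pooled from its own group of position-sensitive score maps.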
    output_channels = int(x.shape[1] / (pooled_height * pooled_width))
    if in_dygraph_mode():
        return _C_ops.psroi_pool(
            x,
            boxes,
            boxes_num,
            pooled_height,
            pooled_width,
            output_channels,
            spatial_scale,
        )
    if _in_legacy_dygraph():
        return _legacy_C_ops.psroi_pool(
            x,
            boxes,
            boxes_num,
            "output_channels",
            output_channels,
            "spatial_scale",
            spatial_scale,
            "pooled_height",
            pooled_height,
            "pooled_width",
            pooled_width,
        )

    helper = LayerHelper('psroi_pool', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='psroi_pool',
        inputs={'X': x, 'ROIs': boxes},
        outputs={'Out': out},
        attrs={
            'output_channels': output_channels,
            'spatial_scale': spatial_scale,
            'pooled_height': pooled_height,
            'pooled_width': pooled_width,
        },
    )
    return out


class PSRoIPool(Layer):
    """
    This interface is used to construct a callable object of the ``PSRoIPool`` class. Please
    refer to :ref:`api_paddle_vision_ops_psroi_pool`.

    Args:
        output_size (int|Tuple(int, int)): The pooled output size (H, W), data type
                               is int32. If int, H and W are both equal to output_size.
        spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their
                               input scale to the scale used when pooling. Default: 1.0.

    Shape:
        - x: 4-D Tensor with shape (N, C, H, W).
        - boxes: 2-D Tensor with shape (num_rois, 4).
        - boxes_num: 1-D Tensor.
        - output: 4-D tensor with shape (num_rois, output_channels, pooled_h, pooled_w).
              The output_channels equal to C / (pooled_h * pooled_w), where C is the channels of input.

    Returns:
        None.

    Examples:
        .. code-block:: python

            import paddle

            psroi_module = paddle.vision.ops.PSRoIPool(7, 1.0)
            x = paddle.uniform([2, 490, 28, 28], dtype='float32')
            boxes = paddle.to_tensor([[1, 5, 8, 10], [4, 2, 6, 7], [12, 12, 19, 21]], dtype='float32')
            boxes_num = paddle.to_tensor([1, 2], dtype='int32')
            pool_out = psroi_module(x, boxes, boxes_num)
            print(pool_out.shape) # [3, 10, 7, 7]
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super().__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, x, boxes, boxes_num):
        return psroi_pool(
            x, boxes, boxes_num, self.output_size, self.spatial_scale
        )


def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
    """
    This operator implements the roi_pooling layer.
    Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
    The operator has three steps:

    1. Dividing each region proposal into equal-sized sections with output_size(h, w);
    2. Finding the largest value in each section;
    3. Copying these max values to the output buffer.
    For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn.

    Args:
        x (Tensor): input feature, 4D-Tensor with the shape of [N,C,H,W],
            where N is the batch size, C is the number of input channels, H is the height and W is the width.
            The data type is float32 or float64.
        boxes (Tensor): boxes (Regions of Interest) to pool over.
            2D-Tensor with the shape of [num_boxes,4].
            Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates,
            and (x2, y2) is the bottom right coordinates.
        boxes_num (Tensor): the number of RoIs in each image, data type is int32.
        output_size (int or tuple[int, int]): the pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float, optional): multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0.
        name(str, optional): for detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Default: None.

    Returns:
        pool_out (Tensor): the pooled feature, 4D-Tensor with the shape of [num_boxes, C, output_size[0], output_size[1]].

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import roi_pool

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            pool_out = roi_pool(data, boxes, boxes_num=boxes_num, output_size=3)
            assert pool_out.shape == [3, 256, 3, 3], ''
    """

    check_type(output_size, 'output_size', (int, tuple), 'roi_pool')
    if isinstance(output_size, int):
        output_size = (output_size, output_size)

    pooled_height, pooled_width = output_size
    if in_dygraph_mode():
        assert (
            boxes_num is not None
        ), "boxes_num should not be None in dygraph mode."
        return _C_ops.roi_pool(
            x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale
        )
    if _in_legacy_dygraph():
        assert (
            boxes_num is not None
        ), "boxes_num should not be None in dygraph mode."
        pool_out, argmaxes = _legacy_C_ops.roi_pool(
            x,
            boxes,
            boxes_num,
            "pooled_height",
            pooled_height,
            "pooled_width",
            pooled_width,
            "spatial_scale",
            spatial_scale,
        )
        return pool_out

    else:
        check_variable_and_dtype(x, 'x', ['float32'], 'roi_pool')
        check_variable_and_dtype(boxes, 'boxes', ['float32'], 'roi_pool')
        helper = LayerHelper('roi_pool', **locals())
        dtype = helper.input_dtype()
        pool_out = helper.create_variable_for_type_inference(dtype)
        argmaxes = helper.create_variable_for_type_inference(dtype='int32')

        inputs = {
            "X": x,
            "ROIs": boxes,
        }
        if boxes_num is not None:
            inputs['RoisNum'] = boxes_num
        helper.append_op(
            type="roi_pool",
            inputs=inputs,
            outputs={"Out": pool_out, "Argmax": argmaxes},
            attrs={
                "pooled_height": pooled_height,
                "pooled_width": pooled_width,
                "spatial_scale": spatial_scale,
            },
        )
        return pool_out


class RoIPool(Layer):
    """
    This interface is used to construct a callable object of the `RoIPool` class. Please
    refer to :ref:`api_paddle_vision_ops_roi_pool`.

    Args:
        output_size (int or tuple[int, int]): the pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float, optional): multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0.

    Returns:
        pool_out (Tensor): the pooled feature, 4D-Tensor with the shape of [num_boxes, C, output_size[0], output_size[1]].

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import RoIPool

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            roi_pool = RoIPool(output_size=(4, 3))
            pool_out = roi_pool(data, boxes, boxes_num)
            assert pool_out.shape == [3, 256, 4, 3], ''
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super().__init__()
        self._output_size = output_size
        self._spatial_scale = spatial_scale

    def forward(self, x, boxes, boxes_num):
        return roi_pool(
            x=x,
            boxes=boxes,
            boxes_num=boxes_num,
            output_size=self._output_size,
            spatial_scale=self._spatial_scale,
        )

    def extra_repr(self):
        main_str = 'output_size={_output_size}, spatial_scale={_spatial_scale}'
        return main_str.format(**self.__dict__)


def roi_align(
    x,
    boxes,
    boxes_num,
    output_size,
    spatial_scale=1.0,
    sampling_ratio=-1,
    aligned=True,
    name=None,
):
    """
    This operator implements the roi_align layer.
    Region of Interest (RoI) Align operator (also known as RoI Align) is to
    perform bilinear interpolation on inputs of nonuniform sizes to obtain
    fixed-size feature maps (e.g. 7*7), as described in Mask R-CNN.

    Each region proposal is divided into equal-sized sections of pooled_width
    by pooled_height; sampling locations keep their original fractional
    coordinates rather than being quantized.

    In each ROI bin, the values at the four regularly sampled locations are
    computed directly through bilinear interpolation. The output is the mean of
    the four locations, which avoids the misalignment problem of quantized pooling.
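
    As a sketch, a bilinear sample at fractional position (x, y) is computed from
    its four integer neighbors as
    v = (1-a)(1-b) * v(x0, y0) + a(1-b) * v(x1, y0) + (1-a)b * v(x0, y1) + ab * v(x1, y1),
    where x0 = floor(x), x1 = x0 + 1, a = x - x0, and likewise for y.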

    Args:
        x (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],
            where N is the batch size, C is the number of input channels, H is the
            height and W is the width. The data type is float32 or float64.
        boxes (Tensor): Boxes (RoIs, Regions of Interest) to pool over. It
            should be a 2-D Tensor of shape (num_boxes, 4). The data type is
            float32 or float64. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
            the top left coordinates, and (x2, y2) is the bottom right coordinates.
        boxes_num (Tensor): The number of boxes contained in each picture in
            the batch, the data type is int32.
        output_size (int or Tuple[int, int]): The pooled output size(h, w), data
            type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float32, optional): Multiplicative spatial scale factor to translate
            ROI coords from their input scale to the scale used when pooling.
            Default: 1.0.
        sampling_ratio (int32, optional): number of sampling points in the interpolation
            grid used to compute the output value of each pooled output bin.
            If > 0, then exactly ``sampling_ratio x sampling_ratio`` sampling
            points per bin are used.
            If <= 0, then an adaptive number of grid points are used (computed
            as ``ceil(roi_width / output_width)``, and likewise for height).
            Default: -1.
        aligned (bool, optional): If False, use the legacy implementation. If True,
            shift the box coordinates by -0.5 pixel for a better alignment with the
            two neighboring pixel indices. This version is used in Detectron2.
            Default: True.
        name(str, optional): For detailed information, please refer to
            :ref:`api_guide_Name`. Usually name does not need to be set.
            Default: None.

    Returns:
        The output of ROIAlignOp is a 4-D tensor with shape (num_boxes,\
            channels, pooled_h, pooled_w). The data type is float32 or float64.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import roi_align

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            align_out = roi_align(data, boxes, boxes_num, output_size=3)
            assert align_out.shape == [3, 256, 3, 3]
    """

    check_type(output_size, 'output_size', (int, tuple), 'roi_align')
    if isinstance(output_size, int):
        output_size = (output_size, output_size)

    pooled_height, pooled_width = output_size
    if in_dygraph_mode():
        assert (
            boxes_num is not None
        ), "boxes_num should not be None in dygraph mode."
        return _C_ops.roi_align(
            x,
            boxes,
            boxes_num,
            pooled_height,
            pooled_width,
            spatial_scale,
            sampling_ratio,
            aligned,
        )
    if _in_legacy_dygraph():
        assert (
            boxes_num is not None
        ), "boxes_num should not be None in dygraph mode."
        align_out = _legacy_C_ops.roi_align(
            x,
            boxes,
            boxes_num,
            "pooled_height",
            pooled_height,
            "pooled_width",
            pooled_width,
            "spatial_scale",
            spatial_scale,
            "sampling_ratio",
            sampling_ratio,
            "aligned",
            aligned,
        )
        return align_out

    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'roi_align')
        check_variable_and_dtype(
            boxes, 'boxes', ['float32', 'float64'], 'roi_align'
        )
        helper = LayerHelper('roi_align', **locals())
        dtype = helper.input_dtype()
        align_out = helper.create_variable_for_type_inference(dtype)
        inputs = {
            "X": x,
            "ROIs": boxes,
        }
        if boxes_num is not None:
            inputs['RoisNum'] = boxes_num
        helper.append_op(
            type="roi_align",
            inputs=inputs,
            outputs={"Out": align_out},
            attrs={
                "pooled_height": pooled_height,
                "pooled_width": pooled_width,
                "spatial_scale": spatial_scale,
                "sampling_ratio": sampling_ratio,
                "aligned": aligned,
            },
        )
        return align_out


class RoIAlign(Layer):
    """
    This interface is used to construct a callable object of the `RoIAlign` class.
    Please refer to :ref:`api_paddle_vision_ops_roi_align`.

    Args:
        output_size (int or tuple[int, int]): The pooled output size(h, w),
            data type is int32. If int, h and w are both equal to output_size.
        spatial_scale (float32, optional): Multiplicative spatial scale factor
            to translate ROI coords from their input scale to the scale used
            when pooling. Default: 1.0.

    Returns:
        The output of ROIAlign operator is a 4-D tensor with \
            shape (num_boxes, channels, pooled_h, pooled_w).

    Examples:
        ..  code-block:: python

            import paddle
            from paddle.vision.ops import RoIAlign

            data = paddle.rand([1, 256, 32, 32])
            boxes = paddle.rand([3, 4])
            boxes[:, 2] += boxes[:, 0] + 3
            boxes[:, 3] += boxes[:, 1] + 4
            boxes_num = paddle.to_tensor([3]).astype('int32')
            roi_align = RoIAlign(output_size=(4, 3))
            align_out = roi_align(data, boxes, boxes_num)
            assert align_out.shape == [3, 256, 4, 3]
    """

    def __init__(self, output_size, spatial_scale=1.0):
        super().__init__()
        self._output_size = output_size
        self._spatial_scale = spatial_scale

    def forward(self, x, boxes, boxes_num, aligned=True):
        return roi_align(
            x=x,
            boxes=boxes,
            boxes_num=boxes_num,
            output_size=self._output_size,
            spatial_scale=self._spatial_scale,
            aligned=aligned,
        )


class ConvNormActivation(Sequential):
    """
    Configurable block used for Convolution-Normalization-Activation blocks.
    This code is based on the torchvision code with modifications.
    You can also see at https://github.com/pytorch/vision/blob/main/torchvision/ops/misc.py#L68
    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size: (int|list|tuple, optional): Size of the convolving kernel. Default: 3
        stride (int|list|tuple, optional): Stride of the convolution. Default: 1
        padding (int|str|tuple|list, optional): Padding added to all four sides of the input. Default: None,
            in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., paddle.nn.Layer], optional): Norm layer that will be stacked on top of the convolution layer.
            If ``None`` this layer won't be used. Default: ``paddle.nn.BatchNorm2D``
        activation_layer (Callable[..., paddle.nn.Layer], optional): Activation function which will be stacked on top of the normalization
            layer (if not ``None``), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``paddle.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
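
    A minimal usage sketch (assuming the block is accessed directly from
    paddle.vision.ops):

    .. code-block:: python

        import paddle
        from paddle.vision.ops import ConvNormActivation

        block = ConvNormActivation(3, 16, kernel_size=3, stride=2)
        x = paddle.rand([1, 3, 32, 32])
        y = block(x)  # [1, 16, 16, 16]: Conv2D -> BatchNorm2D -> ReLU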
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        padding=None,
        groups=1,
        norm_layer=BatchNorm2D,
        activation_layer=ReLU,
        dilation=1,
        bias=None,
    ):
        if padding is None:
            padding = (kernel_size - 1) // 2 * dilation
        if bias is None:
            bias = norm_layer is None
        layers = [
            Conv2D(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                dilation=dilation,
                groups=groups,
                bias_attr=bias,
            )
        ]
        if norm_layer is not None:
            layers.append(norm_layer(out_channels))
        if activation_layer is not None:
            layers.append(activation_layer())
        super().__init__(*layers)


def nms(
    boxes,
    iou_threshold=0.3,
    scores=None,
    category_idxs=None,
    categories=None,
    top_k=None,
):
    r"""
    This operator implements non-maximum suppression. Non-maximum suppression (NMS)
    is used to select one bounding box out of many overlapping bounding boxes in object detection.
    Boxes with IoU > iou_threshold are considered overlapping, and
    only the one with the highest score is kept. Here IoU is Intersection Over Union,
    which can be computed by:

    ..  math::

        IoU = \frac{intersection\_area(box1, box2)}{union\_area(box1, box2)}

    If scores are provided, input boxes will be sorted by their scores firstly.

    If category_idxs and categories are provided, NMS will be performed with a batched style,
    which means NMS will be applied to each category respectively, and the results of all categories
    will be concatenated and sorted by scores.

    If top_k is provided, only the top_k highest-scoring box indices will be returned. Otherwise, all box indices sorted by scores will be returned.

    Args:
        boxes(Tensor): The input boxes data to be computed, it's a 2D-Tensor with
            the shape of [num_boxes, 4]. The data type is float32 or float64.
            Given as [[x1, y1, x2, y2], …],  (x1, y1) is the top left coordinates,
            and (x2, y2) is the bottom right coordinates.
            Their relation should be ``0 <= x1 < x2 && 0 <= y1 < y2``.
        iou_threshold(float32, optional): IoU threshold for determine overlapping boxes. Default value: 0.3.
        scores(Tensor, optional): Scores corresponding to boxes, it's a 1D-Tensor with
            shape of [num_boxes]. The data type is float32 or float64. Default: None.
        category_idxs(Tensor, optional): Category indices corresponding to boxes.
            it's a 1D-Tensor with shape of [num_boxes]. The data type is int64. Default: None.
        categories(List, optional): A list of unique id of all categories. The data type is int64. Default: None.
        top_k(int64, optional): The number of top-scoring boxes kept by NMS to
            return. top_k should be less than or equal to num_boxes. Default: None.

    Returns:
        Tensor: 1D-Tensor with the shape of [num_boxes]. Indices of boxes kept by NMS.

    Examples:
        .. code-block:: python

            import paddle

            boxes = paddle.rand([4, 4]).astype('float32')
            boxes[:, 2] = boxes[:, 0] + boxes[:, 2]
            boxes[:, 3] = boxes[:, 1] + boxes[:, 3]
            print(boxes)
            # Tensor(shape=[4, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
            #        [[0.64811575, 0.89756244, 0.86473107, 1.48552322],
            #         [0.48085716, 0.84799081, 0.54517937, 0.86396021],
            #         [0.62646860, 0.72901905, 1.17392159, 1.69691563],
            #         [0.89729202, 0.46281594, 1.88733089, 0.98588502]])

            out = paddle.vision.ops.nms(boxes, 0.1)
            print(out)
            # Tensor(shape=[3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
            #        [0, 1, 3])

            scores = paddle.to_tensor([0.6, 0.7, 0.4, 0.233])

            categories = [0, 1, 2, 3]
            category_idxs = paddle.to_tensor([2, 0, 0, 3], dtype="int64")

            out = paddle.vision.ops.nms(boxes,
                                        0.1,
                                        paddle.to_tensor(scores),
                                        paddle.to_tensor(category_idxs),
                                        categories,
                                        4)
            print(out)
            # Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
            #        [1, 0, 2, 3])
    """

    def _nms(boxes, iou_threshold):
        if in_dygraph_mode():
            return _C_ops.nms(boxes, iou_threshold)

        if _non_static_mode():
            return _legacy_C_ops.nms(boxes, 'iou_threshold', iou_threshold)

        helper = LayerHelper('nms', **locals())
        out = helper.create_variable_for_type_inference('int64')
        helper.append_op(
            type='nms',
            inputs={'Boxes': boxes},
            outputs={'KeepBoxesIdxs': out},
            attrs={'iou_threshold': iou_threshold},
        )
        return out

    if scores is None:
        return _nms(boxes, iou_threshold)

    import paddle

    if category_idxs is None:
        sorted_global_indices = paddle.argsort(scores, descending=True)
        sorted_keep_boxes_indices = _nms(
            boxes[sorted_global_indices], iou_threshold
        )
        return sorted_global_indices[sorted_keep_boxes_indices]

    if top_k is not None:
        assert (
            top_k <= scores.shape[0]
        ), "top_k should be smaller equal than the number of boxes"
    assert (
        categories is not None
    ), "if category_idxs is given, categories which is a list of unique id of all categories is necessary"

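    # Batched NMS: run NMS within each category and mark kept boxes with 1 in a
    # mask over all boxes, so the per-category results can be merged afterwards.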
    mask = paddle.zeros_like(scores, dtype=paddle.int32)

    for category_id in categories:
        cur_category_boxes_idxs = paddle.where(category_idxs == category_id)[0]
        shape = cur_category_boxes_idxs.shape[0]
        cur_category_boxes_idxs = paddle.reshape(
            cur_category_boxes_idxs, [shape]
        )
        if shape == 0:
            continue
        elif shape == 1:
            mask[cur_category_boxes_idxs] = 1
            continue
        cur_category_boxes = boxes[cur_category_boxes_idxs]
        cur_category_scores = scores[cur_category_boxes_idxs]
        cur_category_sorted_indices = paddle.argsort(
            cur_category_scores, descending=True
        )
        cur_category_sorted_boxes = cur_category_boxes[
            cur_category_sorted_indices
        ]

        cur_category_keep_boxes_sub_idxs = cur_category_sorted_indices[
            _nms(cur_category_sorted_boxes, iou_threshold)
        ]

        updates = paddle.ones_like(
            cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs],
            dtype=paddle.int32,
        )
        mask = paddle.scatter(
            mask,
            cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs],
            updates,
            overwrite=True,
        )
    keep_boxes_idxs = paddle.where(mask)[0]
    shape = keep_boxes_idxs.shape[0]
    keep_boxes_idxs = paddle.reshape(keep_boxes_idxs, [shape])
    sorted_sub_indices = paddle.argsort(
        scores[keep_boxes_idxs], descending=True
    )

    if top_k is None:
        return keep_boxes_idxs[sorted_sub_indices]

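    # In dynamic mode the number of kept boxes is concrete, so clamp top_k and
    # use paddle.topk; in static mode, slice the score-sorted indices instead.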
    if _non_static_mode():
        top_k = shape if shape < top_k else top_k
        _, topk_sub_indices = paddle.topk(scores[keep_boxes_idxs], top_k)
        return keep_boxes_idxs[topk_sub_indices]

    return keep_boxes_idxs[sorted_sub_indices][:top_k]


def generate_proposals(
    scores,
    bbox_deltas,
    img_size,
    anchors,
    variances,
    pre_nms_top_n=6000,
    post_nms_top_n=1000,
    nms_thresh=0.5,
    min_size=0.1,
    eta=1.0,
    pixel_offset=False,
    return_rois_num=False,
    name=None,
):
    """
    This operation proposes RoIs according to each box's probability of being
    a foreground object. The proposals of the RPN output are calculated from the
    anchors, bbox_deltas and scores. The final proposals
    can be used to train the detection net.

    For generating proposals, this operation performs the following steps:

    1. Transpose and resize scores and bbox_deltas in size of
       (H * W * A, 1) and (H * W * A, 4)
    2. Calculate box locations as proposal candidates (sketched after this list).
    3. Clip boxes to image
    4. Remove predicted boxes with small area.
    5. Apply non-maximum suppression (NMS) to get final proposals as output.
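
    As a sketch of step 2 (assuming the standard R-CNN box parameterization with
    per-anchor variances; the exact decoding of this op may differ in detail), a
    delta (dx, dy, dw, dh) with variance (vx, vy, vw, vh) moves an anchor with
    center (cx, cy) and size (w, h) to a proposal with center
    (cx + dx * vx * w, cy + dy * vy * h) and size (w * exp(dw * vw), h * exp(dh * vh)).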

    Args:
        scores (Tensor): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas (Tensor): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        img_size (Tensor): A 2-D Tensor with shape [N, 2] represents origin
            image shape information for N batch, including height and width of the input sizes.
            The data type can be float32 or float64.
        anchors (Tensor):   A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format and unnormalized. The data type must be float32.
        variances (Tensor): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n (float, optional): Number of total bboxes to be kept per
            image before NMS. `6000` by default.
        post_nms_top_n (float, optional): Number of total bboxes to be kept per
            image after NMS. `1000` by default.
        nms_thresh (float, optional): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size (float, optional): Remove predicted boxes with either height or
            width less than this value. `0.1` by default.
        eta(float, optional): Apply in adaptive NMS, only works if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration. 1.0 by default.
        pixel_offset (bool, optional): Whether there is pixel offset. If True, the offset of `img_size` will be 1. 'False' by default.
        return_rois_num (bool, optional): Whether to return `rpn_rois_num` . When setting True, it will return a 1D Tensor with shape [N, ] that includes Rois's
            num of each image in one batch. 'False' by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name does not need to be set.
            None by default.

    Returns:
        - rpn_rois (Tensor): The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - rpn_roi_probs (Tensor): The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - rpn_rois_num (Tensor): Rois's num of each image in one batch. 1-D Tensor with shape ``[B,]`` while ``B`` is the batch size. And its sum equals to RoIs number ``N`` .

    Examples:
        .. code-block:: python

            import paddle

            scores = paddle.rand((2,4,5,5), dtype=paddle.float32)
            bbox_deltas = paddle.rand((2, 16, 5, 5), dtype=paddle.float32)
            img_size = paddle.to_tensor([[224.0, 224.0], [224.0, 224.0]])
            anchors = paddle.rand((2,5,4,4), dtype=paddle.float32)
            variances = paddle.rand((2,5,10,4), dtype=paddle.float32)
            rois, roi_probs, roi_nums = paddle.vision.ops.generate_proposals(scores, bbox_deltas,
                         img_size, anchors, variances, return_rois_num=True)
            print(rois, roi_probs, roi_nums)
    """

    if in_dygraph_mode():
        assert (
            return_rois_num
        ), "return_rois_num should be True in dygraph mode."
        attrs = (
            pre_nms_top_n,
            post_nms_top_n,
            nms_thresh,
            min_size,
            eta,
            pixel_offset,
        )
        rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals(
2330 2331
            scores, bbox_deltas, img_size, anchors, variances, *attrs
        )

        return rpn_rois, rpn_roi_probs, rpn_rois_num
    elif _non_static_mode():
        assert (
            return_rois_num
        ), "return_rois_num should be True in dygraph mode."
        attrs = (
            'pre_nms_topN',
            pre_nms_top_n,
            'post_nms_topN',
            post_nms_top_n,
            'nms_thresh',
            nms_thresh,
            'min_size',
            min_size,
            'eta',
            eta,
            'pixel_offset',
            pixel_offset,
        )
        (
            rpn_rois,
            rpn_roi_probs,
            rpn_rois_num,
        ) = _legacy_C_ops.generate_proposals_v2(
            scores, bbox_deltas, img_size, anchors, variances, *attrs
        )

        return rpn_rois, rpn_roi_probs, rpn_rois_num

    helper = LayerHelper('generate_proposals_v2', **locals())

    check_variable_and_dtype(
        scores, 'scores', ['float32'], 'generate_proposals_v2'
    )
    check_variable_and_dtype(
        bbox_deltas, 'bbox_deltas', ['float32'], 'generate_proposals_v2'
    )
    check_variable_and_dtype(
        img_size, 'img_size', ['float32', 'float64'], 'generate_proposals_v2'
    )
    check_variable_and_dtype(
        anchors, 'anchors', ['float32'], 'generate_proposals_v2'
    )
    check_variable_and_dtype(
        variances, 'variances', ['float32'], 'generate_proposals_v2'
    )

    rpn_rois = helper.create_variable_for_type_inference(
        dtype=bbox_deltas.dtype
    )
    rpn_roi_probs = helper.create_variable_for_type_inference(
        dtype=scores.dtype
    )
    outputs = {
        'RpnRois': rpn_rois,
        'RpnRoiProbs': rpn_roi_probs,
    }
    if return_rois_num:
        rpn_rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rpn_rois_num.stop_gradient = True
        outputs['RpnRoisNum'] = rpn_rois_num

    helper.append_op(
        type="generate_proposals_v2",
        inputs={
            'Scores': scores,
            'BboxDeltas': bbox_deltas,
            'ImShape': img_size,
            'Anchors': anchors,
            'Variances': variances,
        },
        attrs={
            'pre_nms_topN': pre_nms_top_n,
            'post_nms_topN': post_nms_top_n,
            'nms_thresh': nms_thresh,
            'min_size': min_size,
            'eta': eta,
            'pixel_offset': pixel_offset,
        },
        outputs=outputs,
    )
    rpn_rois.stop_gradient = True
    rpn_roi_probs.stop_gradient = True
    if not return_rois_num:
        rpn_rois_num = None

    return rpn_rois, rpn_roi_probs, rpn_rois_num


def matrix_nms(
    bboxes,
    scores,
    score_threshold,
    post_threshold,
    nms_top_k,
    keep_top_k,
    use_gaussian=False,
    gaussian_sigma=2.0,
    background_label=0,
    normalized=True,
    return_index=False,
    return_rois_num=True,
    name=None,
):
    """

    This operator does matrix non-maximum suppression (NMS).
    It first selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k of the total bboxes are kept
    per image if keep_top_k is larger than -1.
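
    As a sketch of the decay step (following the Matrix NMS formulation in the
    SOLOv2 paper), the score of box j is multiplied by the minimum, over all
    higher-scoring boxes i, of f(iou_ij) / f(iou_i*), where f(iou) = 1 - iou in
    the linear case, f(iou) = exp(-iou^2 / gaussian_sigma) in the Gaussian case,
    and iou_i* is the largest IoU between box i and any box scored above it.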

    Args:
        bboxes (Tensor): A 3-D Tensor with shape [N, M, 4] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Tensor): A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           number of bounding boxes. For each category there
                           are M scores corresponding to the M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes. The data type is float32 or float64.
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score.
        post_threshold (float): Threshold to filter out bounding boxes with
                                low confidence score AFTER decaying.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        use_gaussian (bool, optional): Use Gaussian as the decay function. Default: False
        gaussian_sigma (float, optional): Sigma for Gaussian decay function. Default: 2.0
        background_label (int, optional): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        normalized (bool, optional): Whether detections are normalized. Default: True
        return_index(bool, optional): Whether return selected index. Default: False
        return_rois_num(bool, optional): whether return rois_num. Default: True
        name(str, optional): Name of the matrix nms op. Default: None.
    Returns:
        - A tuple with three Tensor, (Out, Index, RoisNum) if return_index is True,
          otherwise, a tuple with two Tensor (Out, RoisNum) is returned.
        - Out (Tensor), A 2-D Tensor with shape [No, 6] containing the
          detection results.
          Each row has 6 values, [label, confidence, xmin, ymin, xmax, ymax]
        - Index (Tensor), A 2-D Tensor with shape [No, 1] containing the
          selected indices, which are absolute values cross batches.
        - rois_num (Tensor), A 1-D Tensor with shape [N] containing
          the number of detected boxes in each image.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.ops import matrix_nms

            boxes = paddle.rand([4, 1, 4])
            boxes[..., 2] = boxes[..., 0] + boxes[..., 2]
            boxes[..., 3] = boxes[..., 1] + boxes[..., 3]
            scores = paddle.rand([4, 80, 1])
            out = matrix_nms(bboxes=boxes, scores=scores, background_label=0,
                                 score_threshold=0.5, post_threshold=0.1,
                                 nms_top_k=400, keep_top_k=200, normalized=False)

    """
    check_variable_and_dtype(
        bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
    )
    check_variable_and_dtype(
        scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
    )
    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
    check_type(normalized, 'normalized', bool, 'matrix_nms')
    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
    check_type(background_label, 'background_label', int, 'matrix_nms')

    if in_dygraph_mode():
        out, index, rois_num = _C_ops.matrix_nms(
            bboxes,
            scores,
            score_threshold,
            nms_top_k,
            keep_top_k,
            post_threshold,
            use_gaussian,
            gaussian_sigma,
            background_label,
            normalized,
        )
        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return out, rois_num, index
    elif _in_legacy_dygraph():
        attrs = (
            'background_label',
            background_label,
            'score_threshold',
            score_threshold,
            'post_threshold',
            post_threshold,
            'nms_top_k',
            nms_top_k,
            'gaussian_sigma',
            gaussian_sigma,
            'use_gaussian',
            use_gaussian,
            'keep_top_k',
            keep_top_k,
            'normalized',
            normalized,
        )
        out, index, rois_num = _legacy_C_ops.matrix_nms(bboxes, scores, *attrs)
        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return out, rois_num, index
    else:
        helper = LayerHelper('matrix_nms', **locals())
        output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
        index = helper.create_variable_for_type_inference(dtype='int32')
        outputs = {'Out': output, 'Index': index}
        if return_rois_num:
            rois_num = helper.create_variable_for_type_inference(dtype='int32')
            outputs['RoisNum'] = rois_num

        helper.append_op(
            type="matrix_nms",
            inputs={'BBoxes': bboxes, 'Scores': scores},
            attrs={
                'background_label': background_label,
                'score_threshold': score_threshold,
                'post_threshold': post_threshold,
                'nms_top_k': nms_top_k,
                'gaussian_sigma': gaussian_sigma,
                'use_gaussian': use_gaussian,
                'keep_top_k': keep_top_k,
                'normalized': normalized,
            },
            outputs=outputs,
        )
        output.stop_gradient = True

        if not return_index:
            index = None
        if not return_rois_num:
            rois_num = None
        return output, rois_num, index