未验证 提交 e577040e 编写于 作者: H HydrogenSulfate 提交者: GitHub

Remove/move 16 fluid APIs (#48377)

* remove density_prior_box

* remove anchor_generator

* remove roi_perspective_transform

* remove generate_proposal_labels

* remove generate_mask_labels

* remove generate_proposals

* remove box_clip

* remove retinanet_detection_output

* remove multiclass_nms

* remove locality_aware_nms

* remove matrix_nms

* remove distribute_fpn_proposals

* remove box_decoder_and_assign

* remove collect_fpn_proposals

* remove 2 trt files

* move prior_box to static/nn/common.py

* move multi_box_head to static/nn/common.py

* fix for CI/CE

* remove retinanet_detection_output

* restore compile_vs_runtime_white_list.py

* restore test_retinanet_detection_output to white list

* replace nn.flatten by paddle.flatten, and fix doc for retinanet_target_assign

* add enable_static in demo and fix bug

* remove roi_perspective_transform in test_layers

* remove multi_box_head

* change self.multiclass_nms to _legacy_C_ops.multiclass_nms

* empty commit

* empty commit

* check code style

* fix prior_box

* fix CI

* remove redundant prior_box in detection.py

* fix docs

* remove detection

* fix prior_box en doc

* delete prior_box in common

* remove prior_box from __init__.py
上级 04dd2861
......@@ -21,7 +21,6 @@ from .framework import Program, Variable, program_guard
from . import unique_name
from .layer_helper import LayerHelper
from .initializer import Constant
from .layers import detection
def _clone_var_(block, var):
......
......@@ -24,8 +24,6 @@ from . import math_op_patch
from .math_op_patch import *
from . import loss
from .loss import *
from . import detection
from .detection import *
from .learning_rate_scheduler import *
from .collective import *
from .sequence_lod import *
......@@ -36,7 +34,6 @@ __all__ += nn.__all__
__all__ += io.__all__
__all__ += tensor.__all__
__all__ += control_flow.__all__
__all__ += detection.__all__
__all__ += learning_rate_scheduler.__all__
__all__ += sequence_lod.__all__
__all__ += loss.__all__
......
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
import paddle
from .layer_function_generator import templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable, _non_static_mode, static_only, in_dygraph_mode
from .. import core
from paddle.fluid.layers import softmax_with_cross_entropy
from . import tensor
from . import nn
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import numpy as np
from functools import reduce
from ..data_feeder import (
convert_dtype,
check_variable_and_dtype,
check_type,
check_dtype,
)
from paddle.utils import deprecated
from paddle import _C_ops, _legacy_C_ops
from ..framework import in_dygraph_mode
__all__ = [
'density_prior_box',
'multi_box_head',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def density_prior_box(
    input,
    image,
    densities=None,
    fixed_sizes=None,
    fixed_ratios=None,
    variance=[0.1, 0.1, 0.2, 0.2],
    clip=False,
    steps=[0.0, 0.0],
    offset=0.5,
    flatten_to_2d=False,
    name=None,
):
    r"""
    This op generates density prior boxes for SSD (Single Shot MultiBox
    Detector) algorithm. Each position of the input produces N prior boxes,
    where N is determined by the count of densities, fixed_sizes and
    fixed_ratios. Boxes centered at grid points around each input position are
    generated by this operator; the grid points are determined by densities,
    and the count of density prior boxes is determined by fixed_sizes and
    fixed_ratios. Obviously, the number of fixed_sizes is equal to the number
    of densities.

    For densities_i in densities:

    .. math::

        N\_density_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)

    N_density_prior_box is the number of density_prior_box and N_fixed_ratios
    is the number of fixed_ratios.

    Parameters:
        input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
        image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp, the data type should be float32 or float64.
            the layout is NCHW.
        densities(list|tuple|None): The densities of generated density prior
            boxes, this attribute should be a list or tuple of integers.
            Default: None.
        fixed_sizes(list|tuple|None): The fixed sizes of generated density
            prior boxes, this attribute should a list or tuple of same
            length with :attr:`densities`. Default: None.
        fixed_ratios(list|tuple|None): The fixed ratios of generated density
            prior boxes, if this attribute is not set and :attr:`densities`
            and :attr:`fix_sizes` is set, :attr:`aspect_ratios` will be used
            to generate density prior boxes.
        variance(list|tuple): The variances to be encoded in density prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
        clip(bool): Whether to clip out of boundary boxes. Default: False.
        steps(list|tuple): Prior boxes step across width and height. If
            steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes
            step across height or width of the input will be automatically
            calculated. Default: [0., 0.]
        offset(float): Prior boxes center offset. Default: 0.5
        flatten_to_2d(bool): Whether to flatten output prior boxes and variance
            to 2D shape, the second dim is 4. Default: False.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tuple: A tuple with two Variable (boxes, variances)

        boxes: the output density prior boxes of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
            2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
            H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.

        variances: the expanded variances of PriorBox.
            4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
            2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
            H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.

    Raises:
        ValueError: If ``densities`` and ``fixed_sizes`` differ in length, or
            ``steps`` is not a length-2 list/tuple.

    Examples:
        .. code-block:: python

            #declarative mode
            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()
            input = fluid.data(name="input", shape=[None,3,6,9])
            image = fluid.data(name="image", shape=[None,3,9,12])
            box, var = fluid.layers.density_prior_box(
                 input=input,
                 image=image,
                 densities=[4, 2, 1],
                 fixed_sizes=[32.0, 64.0, 128.0],
                 fixed_ratios=[1.],
                 clip=True,
                 flatten_to_2d=True)

            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            # prepare a batch of data
            input_data = np.random.rand(1,3,6,9).astype("float32")
            image_data = np.random.rand(1,3,9,12).astype("float32")

            box_out, var_out = exe.run(
                fluid.default_main_program(),
                feed={"input":input_data,
                      "image":image_data},
                fetch_list=[box,var],
                return_numpy=True)

            # print(box_out.shape)
            # (1134, 4)
            # print(var_out.shape)
            # (1134, 4)

            #imperative mode
            import paddle.fluid.dygraph as dg
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                image = dg.to_variable(image_data)
                box, var = fluid.layers.density_prior_box(
                    input=input,
                    image=image,
                    densities=[4, 2, 1],
                    fixed_sizes=[32.0, 64.0, 128.0],
                    fixed_ratios=[1.],
                    clip=True)

                # print(box.shape)
                # [6L, 9L, 21L, 4L]
                # print(var.shape)
                # [6L, 9L, 21L, 4L]
    """
    # NOTE: LayerHelper(**locals()) must be called before any extra locals are
    # introduced, so only the user-supplied arguments are captured.
    helper = LayerHelper("density_prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64'], 'density_prior_box'
    )

    def _is_list_or_tuple_(data):
        return isinstance(data, list) or isinstance(data, tuple)

    check_type(densities, 'densities', (list, tuple), 'density_prior_box')
    check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
    check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
    if len(densities) != len(fixed_sizes):
        # Each density must be paired with exactly one fixed size.
        # (fixed typo: 'euqal' -> 'equal')
        raise ValueError('densities and fixed_sizes length should be equal.')
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        # Single concatenated message; the original passed two arguments to
        # ValueError, which prints as a tuple instead of a sentence.
        raise ValueError(
            'steps should be a list or tuple '
            'with length 2, (step_width, step_height).'
        )

    # Normalize attribute element types for the C++ operator.
    densities = list(map(int, densities))
    fixed_sizes = list(map(float, fixed_sizes))
    fixed_ratios = list(map(float, fixed_ratios))
    steps = list(map(float, steps))

    attrs = {
        'variances': variance,
        'clip': clip,
        'step_w': steps[0],
        'step_h': steps[1],
        'offset': offset,
        'densities': densities,
        'fixed_sizes': fixed_sizes,
        'fixed_ratios': fixed_ratios,
        'flatten_to_2d': flatten_to_2d,
    }

    box = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="density_prior_box",
        inputs={"Input": input, "Image": image},
        outputs={"Boxes": box, "Variances": var},
        attrs=attrs,
    )
    # Prior boxes are deterministic w.r.t. the input shapes; no gradients flow.
    box.stop_gradient = True
    var.stop_gradient = True
    return box, var
@static_only
def multi_box_head(
    inputs,
    image,
    base_size,
    num_classes,
    aspect_ratios,
    min_ratio=None,
    max_ratio=None,
    min_sizes=None,
    max_sizes=None,
    steps=None,
    step_w=None,
    step_h=None,
    offset=0.5,
    variance=[0.1, 0.1, 0.2, 0.2],
    flip=True,
    clip=False,
    kernel_size=1,
    pad=0,
    stride=1,
    name=None,
    min_max_aspect_ratios_order=False,
):
    """
    :api_attr: Static Graph

    Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior
    boxes, regression location and classification confidence on multiple input
    feature maps, then output the concatenated results. For the details of this
    algorithm, please refer to section 2.2 of the SSD paper `SSD: Single Shot
    MultiBox Detector <https://arxiv.org/abs/1512.02325>`_ .

    Args:
        inputs (list(Variable)|tuple(Variable)): The list of input variables,
            the format of all Variables are 4-D Tensor, layout is NCHW.
            Data type should be float32 or float64.
        image (Variable): The input image, layout is NCHW. Data type should be
            the same as inputs.
        base_size(int): the base_size is input image size. When len(inputs) > 2
            and `min_size` and `max_size` are None, the `min_size` and `max_size`
            are calculated by `base_size`, `min_ratio` and `max_ratio`. The
            formula is as follows:

            .. code-block:: text

                min_sizes = []
                max_sizes = []
                step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
                for ratio in range(min_ratio, max_ratio + 1, step):
                    min_sizes.append(base_size * ratio / 100.)
                    max_sizes.append(base_size * (ratio + step) / 100.)
                min_sizes = [base_size * .10] + min_sizes
                max_sizes = [base_size * .20] + max_sizes

        num_classes(int): The number of classes.
        aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
            prior boxes. The length of input and aspect_ratios must be equal.
        min_ratio(int): the min ratio of generated prior boxes.
        max_ratio(int): the max ratio of generated prior boxes.
        min_sizes(list|tuple|None): If `len(inputs) <=2`,
            min_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
        max_sizes(list|tuple|None): If `len(inputs) <=2`,
            max_sizes must be set up, and the length of min_sizes
            should equal to the length of inputs. Default: None.
        steps(list|tuple): If step_w and step_h are the same,
            step_w and step_h can be replaced by steps.
        step_w(list|tuple): Prior boxes step
            across width. If step_w[i] == 0.0, the prior boxes step
            across width of the inputs[i] will be automatically
            calculated. Default: None.
        step_h(list|tuple): Prior boxes step across height, If
            step_h[i] == 0.0, the prior boxes step across height of
            the inputs[i] will be automatically calculated. Default: None.
        offset(float): Prior boxes center offset. Default: 0.5
        variance(list|tuple): the variances to be encoded in prior boxes.
            Default:[0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default:False.
        clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        kernel_size(int): The kernel size of conv2d. Default: 1.
        pad(int|list|tuple): The padding of conv2d. Default:0.
        stride(int|list|tuple): The stride of conv2d. Default:1,
        name(str): The default value is None. Normally there is no need
            for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
        min_max_aspect_ratios_order(bool): If set True, the output prior box is
            in order of [min, max, aspect_ratios], which is consistent with
            Caffe. Please note, this order affects the weights order of
            convolution layer followed by and does not affect the final
            detection results. Default: False.

    Returns:
        tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)

        mbox_loc (Variable): The predicted boxes' location of the inputs. The
        layout is [N, num_priors, 4], where N is batch size, ``num_priors``
        is the number of prior boxes. Data type is the same as input.

        mbox_conf (Variable): The predicted boxes' confidence of the inputs.
        The layout is [N, num_priors, C], where ``N`` and ``num_priors``
        has the same meaning as above. C is the number of Classes.
        Data type is the same as input.

        boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
        The meaning of num_priors is the same as above.
        Data type is the same as input.

        variances (Variable): the expanded variances for prior boxes.
        The layout is [num_priors, 4]. Data type is the same as input.

    Examples 1: set min_ratio and max_ratio:
        .. code-block:: python

            import paddle
            paddle.enable_static()

            images = paddle.static.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
            conv1 = paddle.static.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
            conv2 = paddle.static.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
            conv3 = paddle.static.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
            conv4 = paddle.static.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
            conv5 = paddle.static.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
            conv6 = paddle.static.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

            mbox_locs, mbox_confs, box, var = paddle.static.nn.multi_box_head(
              inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
              image=images,
              num_classes=21,
              min_ratio=20,
              max_ratio=90,
              aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
              base_size=300,
              offset=0.5,
              flip=True,
              clip=True)

    Examples 2: set min_sizes and max_sizes:
        .. code-block:: python

            import paddle
            paddle.enable_static()

            images = paddle.static.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
            conv1 = paddle.static.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
            conv2 = paddle.static.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
            conv3 = paddle.static.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
            conv4 = paddle.static.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
            conv5 = paddle.static.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
            conv6 = paddle.static.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')

            mbox_locs, mbox_confs, box, var = paddle.static.nn.multi_box_head(
              inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
              image=images,
              num_classes=21,
              min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
              max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
              aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
              base_size=300,
              offset=0.5,
              flip=True,
              clip=True)

    """

    def _reshape_with_axis_(input, axis=1):
        # Flatten a 4-D prior-box tensor to 2-D: collapse dims [axis:] into
        # one, then collapse dims [:axis] into one.
        # Note : axis!=0 in current references to this func
        # if axis == 0:
        #     x = paddle.flatten(input, 0, -1)
        #     x = paddle.unsqueeze(x, 0)
        #     return x
        # else:
        x = paddle.flatten(input, axis, -1)
        x = paddle.flatten(x, 0, axis - 1)
        return x

    def _is_list_or_tuple_(data):
        return isinstance(data, list) or isinstance(data, tuple)

    def _is_list_or_tuple_and_equal(data, length, err_info):
        if not (_is_list_or_tuple_(data) and len(data) == length):
            raise ValueError(err_info)

    if not _is_list_or_tuple_(inputs):
        raise ValueError('inputs should be a list or tuple.')

    num_layer = len(inputs)

    if num_layer <= 2:
        # With at most two feature maps, the ratio interpolation below has no
        # room to work; sizes must be given explicitly.
        assert min_sizes is not None and max_sizes is not None
        assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
    elif min_sizes is None and max_sizes is None:
        # Derive per-layer sizes by linearly spacing ratios between
        # min_ratio and max_ratio (see the docstring formula).
        min_sizes = []
        max_sizes = []
        step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
        for ratio in range(min_ratio, max_ratio + 1, step):
            min_sizes.append(base_size * ratio / 100.0)
            max_sizes.append(base_size * (ratio + step) / 100.0)
        min_sizes = [base_size * 0.10] + min_sizes
        max_sizes = [base_size * 0.20] + max_sizes

    if aspect_ratios:
        _is_list_or_tuple_and_equal(
            aspect_ratios,
            num_layer,
            'aspect_ratios should be list or tuple, and the length of inputs '
            'and aspect_ratios should be the same.',
        )
    if step_h is not None:
        _is_list_or_tuple_and_equal(
            step_h,
            num_layer,
            'step_h should be list or tuple, and the length of inputs and '
            'step_h should be the same.',
        )
    if step_w is not None:
        _is_list_or_tuple_and_equal(
            step_w,
            num_layer,
            'step_w should be list or tuple, and the length of inputs and '
            'step_w should be the same.',
        )
    if steps is not None:
        # Fixed error message: it previously referred to 'step_w'.
        _is_list_or_tuple_and_equal(
            steps,
            num_layer,
            'steps should be list or tuple, and the length of inputs and '
            'steps should be the same.',
        )
        # steps is shorthand for identical step_w and step_h.
        step_w = steps
        step_h = steps

    mbox_locs = []
    mbox_confs = []
    box_results = []
    var_results = []
    for i, input in enumerate(inputs):
        min_size = min_sizes[i]
        max_size = max_sizes[i]

        if not _is_list_or_tuple_(min_size):
            min_size = [min_size]
        if not _is_list_or_tuple_(max_size):
            max_size = [max_size]

        aspect_ratio = []
        if aspect_ratios is not None:
            aspect_ratio = aspect_ratios[i]
            if not _is_list_or_tuple_(aspect_ratio):
                aspect_ratio = [aspect_ratio]
        # BUGFIX: the height step previously tested `step_w` instead of
        # `step_h`, so passing only step_h crashed (step_w is None) and
        # passing only step_w silently dropped the height step.
        step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]

        box, var = paddle.vision.ops.prior_box(
            input,
            image,
            min_size,
            max_size,
            aspect_ratio,
            variance,
            flip,
            clip,
            step,
            offset,
            min_max_aspect_ratios_order,
            None,
        )

        box_results.append(box)
        var_results.append(var)

        num_boxes = box.shape[2]

        # get loc: one conv predicting 4 coordinates per prior box.
        num_loc_output = num_boxes * 4
        mbox_loc = nn.conv2d(
            input=input,
            num_filters=num_loc_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride,
        )

        mbox_loc = paddle.transpose(mbox_loc, perm=[0, 2, 3, 1])
        mbox_loc_flatten = paddle.flatten(mbox_loc, 1, -1)
        mbox_locs.append(mbox_loc_flatten)

        # get conf: one conv predicting num_classes scores per prior box.
        num_conf_output = num_boxes * num_classes
        conf_loc = nn.conv2d(
            input=input,
            num_filters=num_conf_output,
            filter_size=kernel_size,
            padding=pad,
            stride=stride,
        )
        conf_loc = paddle.transpose(conf_loc, perm=[0, 2, 3, 1])
        conf_loc_flatten = paddle.flatten(conf_loc, 1, -1)
        mbox_confs.append(conf_loc_flatten)

    if len(box_results) == 1:
        box = box_results[0]
        var = var_results[0]
        mbox_locs_concat = mbox_locs[0]
        mbox_confs_concat = mbox_confs[0]
    else:
        # Flatten every layer's priors to 2-D and concatenate along dim 0;
        # predictions are concatenated along the prior dimension.
        reshaped_boxes = []
        reshaped_vars = []
        for i in range(len(box_results)):
            reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
            reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))

        box = tensor.concat(reshaped_boxes)
        var = tensor.concat(reshaped_vars)
        mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
        mbox_locs_concat = paddle.reshape(mbox_locs_concat, shape=[0, -1, 4])
        mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
        mbox_confs_concat = paddle.reshape(
            mbox_confs_concat, shape=[0, -1, num_classes]
        )

    # Prior boxes carry no gradients.
    box.stop_gradient = True
    var.stop_gradient = True
    return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(
    input,
    anchor_sizes=None,
    aspect_ratios=None,
    variance=[0.1, 0.1, 0.2, 0.2],
    stride=None,
    offset=0.5,
    name=None,
):
    """
    **Anchor generator operator**

    Generate anchors for the Faster RCNN algorithm. Each position of the input
    produces N anchors, N = size(anchor_sizes) * size(aspect_ratios). The order
    of generated anchors is firstly the aspect_ratios loop, then the
    anchor_sizes loop.

    Args:
       input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
       anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
          anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
          For instance, the anchor size of 64 means the area of this anchor
          equals to 64**2. None by default.
       aspect_ratios(float32|list|tuple, optional): The height / width ratios
           of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
       variance(list|tuple, optional): The variances to be used in box
           regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
           default.
       stride(list|tuple, optional): The anchors stride across width and height.
           The data type is float32. e.g. [16.0, 16.0]. None by default.
       offset(float32, optional): Prior boxes center offset. 0.5 by default.
       name(str, optional): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name is no need to set and None
           by default.

    Returns:
       Tuple:

       Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
       H is the height of input, W is the width of input,
       num_anchors is the box count of each position.
       Each anchor is in (xmin, ymin, xmax, ymax) format an unnormalized.

       Variances(Variable): The expanded variances of anchors
       with a layout of [H, W, num_priors, 4].
       H is the height of input, W is the width of input
       num_anchors is the box count of each position.
       Each variance is in (xcenter, ycenter, w, h) format.

    Raises:
        ValueError: If ``stride`` is not a length-2 list/tuple.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
            anchor, var = fluid.layers.anchor_generator(
                input=conv1,
                anchor_sizes=[64, 128, 256, 512],
                aspect_ratios=[0.5, 1.0, 2.0],
                variance=[0.1, 0.1, 0.2, 0.2],
                stride=[16.0, 16.0],
                offset=0.5)
    """
    # Capture only the user-supplied arguments; call before adding locals.
    helper = LayerHelper("anchor_generator", **locals())
    dtype = helper.input_dtype()

    def _is_list_or_tuple_(data):
        return isinstance(data, list) or isinstance(data, tuple)

    # Scalars are promoted to singleton lists so the operator always
    # receives list-valued attributes.
    if not _is_list_or_tuple_(anchor_sizes):
        anchor_sizes = [anchor_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(stride) and len(stride) == 2):
        # Single concatenated message; the original passed two arguments to
        # ValueError, which prints as a tuple instead of a sentence.
        raise ValueError(
            'stride should be a list or tuple '
            'with length 2, (stride_width, stride_height).'
        )

    anchor_sizes = list(map(float, anchor_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    stride = list(map(float, stride))

    attrs = {
        'anchor_sizes': anchor_sizes,
        'aspect_ratios': aspect_ratios,
        'variances': variance,
        'stride': stride,
        'offset': offset,
    }

    anchor = helper.create_variable_for_type_inference(dtype)
    var = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="anchor_generator",
        inputs={"Input": input},
        outputs={"Anchors": anchor, "Variances": var},
        attrs=attrs,
    )
    # Anchors depend only on the input shape; no gradients flow through them.
    anchor.stop_gradient = True
    var.stop_gradient = True
    return anchor, var
def roi_perspective_transform(
    input,
    rois,
    transformed_height,
    transformed_width,
    spatial_scale=1.0,
    name=None,
):
    """
    **The** `rois` **of this op should be a LoDTensor.**

    Apply a perspective transform that maps each ROI onto a fixed-size
    rectangular region. Perspective transform is a type of transformation in
    linear algebra.

    Parameters:
        input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp in
                          NCHW format, where N is the batch size, C the number
                          of input channels, H the feature height and W the
                          feature width. The data type is float32.
        rois (Variable): 2-D LoDTensor of shape (num_rois, 8) holding the ROIs
                         (Regions of Interest) to be transformed, given as
                         [[x1, y1, x2, y2, x3, y3, x4, y4], ...] where
                         (x1, y1) is the top left, (x2, y2) the top right,
                         (x3, y3) the bottom right and (x4, y4) the bottom
                         left corner. The data type is the same as `input`.
        transformed_height (int): The height of transformed output.
        transformed_width (int): The width of transformed output.
        spatial_scale (float): Spatial scale factor to scale ROI coords.
                               Default: 1.0
        name(str, optional): The default value is None. Normally there is no
                             need for user to set this property. For more
                             information, please refer to
                             :ref:`api_guide_Name`

    Returns:
        A tuple with three Variables. (out, mask, transform_matrix)

        out: 4-D tensor of shape (num_rois, channels, transformed_h,
        transformed_w); same data type as `input`.

        mask: 4-D int32 tensor of shape (num_rois, 1, transformed_h,
        transformed_w).

        transform_matrix: 2-D tensor of shape (num_rois, 9); same data type
        as `input`.

    Return Type:
        tuple

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
            out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
    """
    # Validate tensors first, then the scalar arguments; the order is kept so
    # callers see the same error for the same bad argument.
    check_variable_and_dtype(
        input, 'input', ['float32'], 'roi_perspective_transform'
    )
    check_variable_and_dtype(
        rois, 'rois', ['float32'], 'roi_perspective_transform'
    )
    check_type(
        transformed_height,
        'transformed_height',
        int,
        'roi_perspective_transform',
    )
    check_type(
        transformed_width, 'transformed_width', int, 'roi_perspective_transform'
    )
    check_type(
        spatial_scale, 'spatial_scale', float, 'roi_perspective_transform'
    )

    helper = LayerHelper('roi_perspective_transform', **locals())
    dtype = helper.input_dtype()

    # Output variables are created in the original order so auto-generated
    # variable names in the program stay identical.
    out = helper.create_variable_for_type_inference(dtype)
    mask = helper.create_variable_for_type_inference(dtype="int32")
    transform_matrix = helper.create_variable_for_type_inference(dtype)
    out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
    out2in_w = helper.create_variable_for_type_inference(dtype)

    op_outputs = {
        "Out": out,
        "Out2InIdx": out2in_idx,
        "Out2InWeights": out2in_w,
        "Mask": mask,
        "TransformMatrix": transform_matrix,
    }
    op_attrs = {
        "transformed_height": transformed_height,
        "transformed_width": transformed_width,
        "spatial_scale": spatial_scale,
    }
    helper.append_op(
        type="roi_perspective_transform",
        inputs={"X": input, "ROIs": rois},
        outputs=op_outputs,
        attrs=op_attrs,
    )
    # Out2InIdx / Out2InWeights are internal to the backward pass and are not
    # returned to the caller.
    return out, mask, transform_matrix
def generate_proposal_labels(
rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False,
max_overlap=None,
return_max_overlap=False,
):
"""
**Generate Proposal Labels of Faster-RCNN**
This operator can be, for given the GenerateProposalOp output bounding boxes and groundtruth,
to sample foreground boxes and background boxes, and compute loss target.
RpnRois is the output boxes of RPN and was processed by generate_proposal_op, these boxes
were combined with groundtruth boxes and sampled according to batch_size_per_im and fg_fraction,
If an instance with a groundtruth overlap greater than fg_thresh, then it was considered as a foreground sample.
If an instance with a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
then it was considered as a background sample.
After all foreground and background boxes are chosen (so called Rois),
then we apply random sampling to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in Rois, we assign the classification (class label) and regression targets (box label) to it.
Finally BboxInsideWeights and BboxOutsideWeights are used to specify whether it would contribute to training loss.
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to chose foreground sample. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to chose background sample. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to chose background sample. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): bbox regression use class agnostic simply which only represent fg and bg boxes.
is_cascade_rcnn(bool): it will filter some bbox crossing the image's boundary when setting True.
max_overlap(Variable): Maximum overlap between each proposal box and ground-truth.
return_max_overlap(bool): Whether return the maximum overlap between each sampled RoI and ground-truth.
Returns:
tuple:
A tuple with format``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights, max_overlap)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
- **max_overlap**: 1-D LoDTensor with shape ``[P]``. P is the number of output ``rois``. The maximum overlap between each sampled RoI and ground-truth.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='int32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(
rpn_rois, 'rpn_rois', ['float32', 'float64'], 'generate_proposal_labels'
)
check_variable_and_dtype(
gt_classes, 'gt_classes', ['int32'], 'generate_proposal_labels'
)
check_variable_and_dtype(
is_crowd, 'is_crowd', ['int32'], 'generate_proposal_labels'
)
if is_cascade_rcnn:
assert (
max_overlap is not None
), "Input max_overlap of generate_proposal_labels should not be None if is_cascade_rcnn is True"
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype
)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype
)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype
)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype
)
max_overlap_with_gt = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype
)
inputs = {
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info,
}
if max_overlap is not None:
inputs['MaxOverlap'] = max_overlap
helper.append_op(
type="generate_proposal_labels",
inputs=inputs,
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights,
'MaxOverlapWithGT': max_overlap_with_gt,
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn,
},
)
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
max_overlap_with_gt.stop_gradient = True
if return_max_overlap:
return (
rois,
labels_int32,
bbox_targets,
bbox_inside_weights,
bbox_outside_weights,
max_overlap_with_gt,
)
return (
rois,
labels_int32,
bbox_targets,
bbox_inside_weights,
bbox_outside_weights,
)
def generate_mask_labels(
    im_info,
    gt_classes,
    is_crowd,
    gt_segms,
    rois,
    labels_int32,
    num_classes,
    resolution,
):
    r"""
    **Generate Mask Labels for Mask-RCNN**

    Given RoIs and their labels, sample foreground RoIs and build a
    :math:`K \times M^{2}` dimensional target for each of them, encoding K
    binary masks of resolution M x M (K = ``num_classes``,
    M = ``resolution``). These targets are used to compute the loss of the
    mask branch.

    Please note the expected data format of the ground-truth segmentation.
    Assume the first instance has two gt objects and the second instance has
    one gt object with two gt segmentations:

    .. code-block:: python

        #[
        #  [[[229.14, 370.9, 229.14, 370.9, ...]],
        #   [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
        #  [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
        #]
        batch_masks = []
        for semgs in batch_semgs:
            gt_masks = []
            for semg in semgs:
                gt_segm = []
                for polys in semg:
                    gt_segm.append(np.array(polys).reshape(-1, 2))
                gt_masks.append(gt_segm)
            batch_masks.append(gt_masks)

        place = fluid.CPUPlace()
        feeder = fluid.DataFeeder(place=place, feed_list=feeds)
        feeder.feed(batch_masks)

    Args:
        im_info (Variable): A 2-D Tensor with shape [N, 3] and float32 data
            type. N is the batch size; each element is
            [height, width, scale] of an image, where scale is
            target_size / original_size.
        gt_classes (Variable): A 2-D LoDTensor with shape [M, 1] and int data
            type. M is the total number of ground-truth objects; each element
            is a class label.
        is_crowd (Variable): A 2-D LoDTensor with the same shape and data type
            as ``gt_classes``; each element flags whether a ground-truth is
            crowd.
        gt_segms (Variable): A 2-D LoDTensor with shape [S, 2], float32 data
            type and LoD level 3. The reader should supply the correct LoD:
            LoD[0] is the number of ground-truth objects per instance, LoD[1]
            the number of segmentations per object, LoD[2] the number of
            polygons per segmentation. S is the total number of polygon
            coordinate points; each element is an (x, y) point.
        rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
            type. R is the total number of RoIs; each element is a bounding
            box in (xmin, ymin, xmax, ymax) format in the original image.
        labels_int32 (Variable): A 2-D LoDTensor with shape [R, 1] and int32
            data type; the class label of each RoI.
        num_classes (int): Class number.
        resolution (int): Resolution of mask predictions.

    Returns:
        mask_rois (Variable): A 2-D LoDTensor with shape [P, 4] and the same
            data type as ``rois``. P is the total number of sampled RoIs;
            each element is a bounding box in [xmin, ymin, xmax, ymax] format
            in the original image size.
        mask_rois_has_mask_int32 (Variable): A 2-D LoDTensor with shape
            [P, 1] and int data type; each element is the output mask RoI
            index with regard to the input RoIs.
        mask_int32 (Variable): A 2-D LoDTensor with shape [P, K * M * M] and
            int data type; each element is a binary mask target.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            im_info = fluid.data(name="im_info", shape=[None, 3],
                dtype="float32")
            gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
                dtype="float32", lod_level=1)
            is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
                dtype="float32", lod_level=1)
            gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
                dtype="float32", lod_level=3)
            # rois, roi_labels can be the output of
            # fluid.layers.generate_proposal_labels.
            rois = fluid.data(name="rois", shape=[None, 4],
                dtype="float32", lod_level=1)
            roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
                dtype="int32", lod_level=1)
            mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
                im_info=im_info,
                gt_classes=gt_classes,
                is_crowd=is_crowd,
                gt_segms=gt_masks,
                rois=rois,
                labels_int32=roi_labels,
                num_classes=81,
                resolution=14)
    """
    # NOTE: the helper must be created before any extra locals exist, so the
    # **locals() kwargs contain only the public arguments of this function.
    helper = LayerHelper('generate_mask_labels', **locals())

    # Output variables; dtypes mirror the corresponding input variables.
    sampled_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
    roi_mask_index = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype
    )
    mask_targets = helper.create_variable_for_type_inference(
        dtype=gt_classes.dtype
    )

    op_inputs = {
        'ImInfo': im_info,
        'GtClasses': gt_classes,
        'IsCrowd': is_crowd,
        'GtSegms': gt_segms,
        'Rois': rois,
        'LabelsInt32': labels_int32,
    }
    op_outputs = {
        'MaskRois': sampled_rois,
        'RoiHasMaskInt32': roi_mask_index,
        'MaskInt32': mask_targets,
    }
    helper.append_op(
        type="generate_mask_labels",
        inputs=op_inputs,
        outputs=op_outputs,
        attrs={'num_classes': num_classes, 'resolution': resolution},
    )

    # Generated labels are training targets, not learnable outputs.
    for target_var in (sampled_rois, roi_mask_index, mask_targets):
        target_var.stop_gradient = True

    return sampled_rois, roi_mask_index, mask_targets
def generate_proposals(
    scores,
    bbox_deltas,
    im_info,
    anchors,
    variances,
    pre_nms_top_n=6000,
    post_nms_top_n=1000,
    nms_thresh=0.5,
    min_size=0.1,
    eta=1.0,
    return_rois_num=False,
    name=None,
):
    """
    **Generate proposal Faster-RCNN**

    This operation proposes RoIs according to each box with their
    probability to be a foreground object and
    the box can be calculated by anchors. Bbox_deltas and scores
    to be an object are the output of RPN. Final proposals
    could be used to train detection net.

    For generating proposals, this operation performs following steps:

    1. Transposes and resizes scores and bbox_deltas in size of
       (H*W*A, 1) and (H*W*A, 4)
    2. Calculate box locations as proposals candidates.
    3. Clip boxes to image
    4. Remove predicted boxes with small area.
    5. Apply NMS to get final proposals as output.

    Args:
        scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
            the probability for each box to be an object.
            N is batch size, A is number of anchors, H and W are height and
            width of the feature map. The data type must be float32.
        bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
            represents the difference between predicted box location and
            anchor location. The data type must be float32.
        im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes
            and scale is the ratio of network input size and original size.
            The data type can be float32 or float64.
        anchors(Variable): A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
            in (xmin, ymin, xmax, ymax) format an unnormalized. The data type must be float32.
        variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
            [H, W, num_priors, 4]. Each variance is in
            (xcenter, ycenter, w, h) format. The data type must be float32.
        pre_nms_top_n(float): Number of total bboxes to be kept per
            image before NMS. The data type must be float32. `6000` by default.
        post_nms_top_n(float): Number of total bboxes to be kept per
            image after NMS. The data type must be float32. `1000` by default.
        nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
        min_size(float): Remove predicted boxes with either height or
            width < min_size. The data type must be float32. `0.1` by default.
        eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
            `adaptive_threshold = adaptive_threshold * eta` in each iteration.
        return_rois_num(bool): When setting True, it will return a 1D Tensor with shape [N, ] that includes Rois's
            num of each image in one batch. The N is the image's num. For example, the tensor has values [4,5] that represents
            the first image has 4 Rois, the second image has 5 Rois. It only used in rcnn model.
            'False' by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        tuple:
        A tuple with format ``(rpn_rois, rpn_roi_probs)``.

        - **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
        - **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
            anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
                im_info, anchors, variances)
    """
    # paddle.vision.ops.generate_proposals expects ``img_size`` of shape
    # [N, 2] holding (height, width) per image. ``im_info`` has shape [N, 3]
    # as (height, width, scale), so select the first two *columns*.
    # ``im_info[:2]`` would wrongly slice the first two rows (images).
    return paddle.vision.ops.generate_proposals(
        scores=scores,
        bbox_deltas=bbox_deltas,
        img_size=im_info[:, :2],
        anchors=anchors,
        variances=variances,
        pre_nms_top_n=pre_nms_top_n,
        post_nms_top_n=post_nms_top_n,
        nms_thresh=nms_thresh,
        min_size=min_size,
        eta=eta,
        return_rois_num=return_rois_num,
        name=name,
    )
def box_clip(input, im_info, name=None):
    """
    Clip each input box into the image extent given by ``im_info``.

    For every box the following formula is applied:

    .. code-block:: text

        xmin = max(min(xmin, im_w - 1), 0)
        ymin = max(min(ymin, im_h - 1), 0)
        xmax = max(min(xmax, im_w - 1), 0)
        ymax = max(min(ymax, im_h - 1), 0)

    where im_w and im_h are computed from im_info:

    .. code-block:: text

        im_h = round(height / scale)
        im_w = round(weight / scale)

    Args:
        input(Variable): The input Tensor with shape
            :math:`[N_1, N_2, ..., N_k, 4]`; the last dimension is 4 and the
            data type is float32 or float64.
        im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
            (height, width, scale) representing the information of the image.
            Height and width are the input sizes and scale is the ratio of
            network input size and original size. The data type is float32
            or float64.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Variable:

        output(Variable): The clipped tensor with data type float32 or
        float64. The shape is the same as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(
                name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
            im_info = fluid.data(name='im_info', shape=[-1 ,3])
            out = fluid.layers.box_clip(
                input=boxes, im_info=im_info)
    """
    # Validate dtypes before wiring the op into the program.
    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
    check_variable_and_dtype(
        im_info, 'im_info', ['float32', 'float64'], 'box_clip'
    )

    helper = LayerHelper("box_clip", **locals())
    clipped = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type="box_clip",
        inputs={"Input": input, "ImInfo": im_info},
        outputs={"Output": clipped},
    )
    return clipped
def retinanet_detection_output(
    bboxes,
    scores,
    anchors,
    im_info,
    score_threshold=0.05,
    nms_top_k=1000,
    keep_top_k=100,
    nms_threshold=0.3,
    nms_eta=1.0,
):
    """
    **Detection Output Layer for the detector RetinaNet.**

    In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
    `FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
    and location predictions, this OP is to get the detection results by
    performing following steps:

    1. For each FPN level, decode box predictions according to the anchor
       boxes from at most :attr:`nms_top_k` top-scoring predictions after
       thresholding detector confidence at :attr:`score_threshold`.
    2. Merge top predictions from all levels and apply multi-class non
       maximum suppression (NMS) on them to get the final detections.

    Args:
        bboxes(List): A list of Tensors from multiple FPN levels represents
            the location prediction for all anchor boxes. Each element is
            a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
            batch size, :math:`Mi` is the number of bounding boxes from
            :math:`i`-th FPN level and each bounding box has four coordinate
            values and the layout is [xmin, ymin, xmax, ymax]. The data type
            of each element is float32 or float64.
        scores(List): A list of Tensors from multiple FPN levels represents
            the category prediction for all anchor boxes. Each element is a
            3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
            size, :math:`C` is the class number (**excluding background**),
            :math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
            level. The data type of each element is float32 or float64.
        anchors(List): A list of Tensors from multiple FPN levels represents
            the locations of all anchor boxes. Each element is a 2-D Tensor
            with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
            boxes from :math:`i`-th FPN level, and each bounding box has four
            coordinate values and the layout is [xmin, ymin, xmax, ymax].
            The data type of each element is float32 or float64.
        im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
            information of input images. :math:`N` is the batch size, the size
            information of each image is a 3-vector which are the height and width
            of the network input along with the factor scaling the origin image to
            the network input. The data type of :attr:`im_info` is float32.
        score_threshold(float): Threshold to filter out bounding boxes
            with a confidence score before NMS, default value is set to 0.05.
        nms_top_k(int): Maximum number of detections per FPN layer to be
            kept according to the confidences before NMS, default value is set to
            1000.
        keep_top_k(int): Number of total bounding boxes to be kept per image after
            NMS step. Default value is set to 100, -1 means keeping all bounding
            boxes after NMS step.
        nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
            filter out boxes in NMS.
        nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
            Default value is set to 1., which represents the value of
            :attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
            to be lower than 1. and the value of :attr:`nms_threshold` is set to
            be higher than 0.5, everytime a bounding box is filtered out,
            the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
            = :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
            the actual value of :attr:`nms_threshold` is lower than or equal to
            0.5.

    **Notice**: In some cases where the image sizes are very small, it's possible
    that there is no detection if :attr:`score_threshold` are used at all
    levels. Hence, this OP do not filter out anchors from the highest FPN level
    before NMS. And the last element in :attr:`bboxes`:, :attr:`scores` and
    :attr:`anchors` is required to be from the highest FPN level.

    Returns:
        Variable(The data type is float32 or float64):
        The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
        Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
        :math:`No` is the total number of detections in this mini-batch.
        The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
        results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
        has no detected results. If all images have no detected results,
        LoD will be set to 0, and the output tensor is empty (None).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            bboxes_low = fluid.data(
                name='bboxes_low', shape=[1, 44, 4], dtype='float32')
            bboxes_high = fluid.data(
                name='bboxes_high', shape=[1, 11, 4], dtype='float32')
            scores_low = fluid.data(
                name='scores_low', shape=[1, 44, 10], dtype='float32')
            scores_high = fluid.data(
                name='scores_high', shape=[1, 11, 10], dtype='float32')
            anchors_low = fluid.data(
                name='anchors_low', shape=[44, 4], dtype='float32')
            anchors_high = fluid.data(
                name='anchors_high', shape=[11, 4], dtype='float32')
            im_info = fluid.data(
                name="im_info", shape=[1, 3], dtype='float32')
            nmsed_outs = fluid.layers.retinanet_detection_output(
                bboxes=[bboxes_low, bboxes_high],
                scores=[scores_low, scores_high],
                anchors=[anchors_low, anchors_high],
                im_info=im_info,
                score_threshold=0.05,
                nms_top_k=1000,
                keep_top_k=100,
                nms_threshold=0.45,
                nms_eta=1.0)
    """
    # Validate the list inputs and the dtype of every tensor in them.
    check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
    for i, bbox in enumerate(bboxes):
        check_variable_and_dtype(
            bbox,
            'bbox{}'.format(i),
            ['float32', 'float64'],
            'retinanet_detection_output',
        )
    check_type(scores, 'scores', (list), 'retinanet_detection_output')
    for i, score in enumerate(scores):
        check_variable_and_dtype(
            score,
            'score{}'.format(i),
            ['float32', 'float64'],
            'retinanet_detection_output',
        )
    check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
    for i, anchor in enumerate(anchors):
        check_variable_and_dtype(
            anchor,
            'anchor{}'.format(i),
            ['float32', 'float64'],
            'retinanet_detection_output',
        )
    check_variable_and_dtype(
        im_info, 'im_info', ['float32', 'float64'], 'retinanet_detection_output'
    )

    helper = LayerHelper('retinanet_detection_output', **locals())
    output = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('scores')
    )
    helper.append_op(
        type="retinanet_detection_output",
        inputs={
            'BBoxes': bboxes,
            'Scores': scores,
            'Anchors': anchors,
            'ImInfo': im_info,
        },
        attrs={
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            # Pass the user-supplied value through; it was previously
            # hard-coded to 1.0, silently ignoring the ``nms_eta`` argument.
            'nms_eta': nms_eta,
        },
        outputs={'Out': output},
    )
    # Detection results are post-processing outputs, not trainable.
    output.stop_gradient = True
    return output
def multiclass_nms(
    bboxes,
    scores,
    score_threshold,
    nms_top_k,
    keep_top_k,
    nms_threshold=0.3,
    normalized=True,
    nms_eta=1.0,
    background_label=0,
    name=None,
):
    """
    **Multiclass NMS**

    This operator is to do multi-class non maximum suppression (NMS) on
    boxes and scores.

    In the NMS step, this operator greedily selects a subset of detection bounding
    boxes that have high scores larger than score_threshold, if providing this
    threshold, then selects the largest nms_top_k confidences scores if nms_top_k
    is larger than -1. Then this operator pruns away boxes that have high IOU
    (intersection over union) overlap with already selected boxes by adaptive
    threshold NMS based on parameters of nms_threshold and nms_eta.

    Aftern NMS step, at most keep_top_k number of total bboxes are to be kept
    per image if keep_top_k is larger than -1.

    See below for an example:

    .. code-block:: text

        if:
            box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
            box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.cores=0.4)
            box2.data = (3.0, 4.0, 8.0, 5.0)
            box2.score = (0.3, 0.3, 0.1)
            nms_threshold = 0.3
            background_label = 0
            score_threshold = 0
        Then:
            iou = 4/11 > 0.3
            out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
                        [2, 0.4, 2.0, 3.0, 7.0, 5.0]]
            Out format is (label, confidence, xmin, ymin, xmax, ymax)

    Args:
        bboxes (Variable): Two types of bboxes are supported:
                           1. (Tensor) A 3-D Tensor with shape
                           [N, M, 4 or 8 16 24 32] represents the
                           predicted locations of M bounding bboxes,
                           N is the batch size. Each bounding box has four
                           coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
                           2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
                           M is the number of bounding boxes, C is the
                           class number. The data type is float32 or float64.
        scores (Variable): Two types of scores are supported:
                           1. (Tensor) A 3-D Tensor with shape [N, C, M]
                           represents the predicted confidence predictions.
                           N is the batch size, C is the class number, M is
                           number of bounding boxes. For each category there
                           are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension
                           of BBoxes.The data type is float32 or float64.
                           2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
                           M is the number of bbox, C is the class number.
                           In this case, input BBoxes should be the second
                           case with shape [M, C, 4].The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: 0
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The threshold to be used in NMS. Default: 1.0
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the multiclass nms op. Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
        Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
        or A 2-D LoDTensor with shape [No, 10] represents the detections.
        Each row has 10 values:
        [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
        total number of detections. If there is no detected boxes for all
        images, lod will be set to {1} and Out only contains one value
        which is -1.
        (After version 1.3, when no boxes detected, the lod is changed
        from {0} to {1})

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            boxes = fluid.data(name='bboxes', shape=[None,81, 4],
                      dtype='float32', lod_level=1)
            scores = fluid.data(name='scores', shape=[None,81],
                      dtype='float32', lod_level=1)
            out = fluid.layers.multiclass_nms(bboxes=boxes,
                                              scores=scores,
                                              background_label=0,
                                              score_threshold=0.5,
                                              nms_top_k=400,
                                              nms_threshold=0.3,
                                              keep_top_k=200,
                                              normalized=False)
    """
    # Validate inputs. NOTE: the op-name strings below feed into error
    # messages; previously several were misspelled ('multicalss_nms',
    # 'nums_top_k', 'mutliclass_nms'), producing confusing diagnostics.
    check_variable_and_dtype(
        bboxes, 'BBoxes', ['float32', 'float64'], 'multiclass_nms'
    )
    check_variable_and_dtype(
        scores, 'Scores', ['float32', 'float64'], 'multiclass_nms'
    )
    check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
    check_type(normalized, 'normalized', bool, 'multiclass_nms')
    check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
    check_type(background_label, 'background_label', int, 'multiclass_nms')

    helper = LayerHelper('multiclass_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    helper.append_op(
        type="multiclass_nms",
        inputs={'BBoxes': bboxes, 'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized,
        },
        outputs={'Out': output},
    )
    # NMS output is a post-processing result, never back-propagated.
    output.stop_gradient = True

    return output
def locality_aware_nms(
    bboxes,
    scores,
    score_threshold,
    nms_top_k,
    keep_top_k,
    nms_threshold=0.3,
    normalized=True,
    nms_eta=1.0,
    background_label=-1,
    name=None,
):
    """
    **Local Aware NMS**

    `Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ is to do locality-aware non maximum
    suppression (LANMS) on boxes and scores.

    Firstly, this operator merge box and score according their IOU
    (intersection over union). In the NMS step, this operator greedily selects a
    subset of detection bounding boxes that have high scores larger than score_threshold,
    if providing this threshold, then selects the largest nms_top_k confidences scores
    if nms_top_k is larger than -1. Then this operator pruns away boxes that have high
    IOU overlap with already selected boxes by adaptive threshold NMS based on parameters
    of nms_threshold and nms_eta.

    Aftern NMS step, at most keep_top_k number of total bboxes are to be kept
    per image if keep_top_k is larger than -1.

    Args:
        bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
                           represents the predicted locations of M bounding
                           bboxes, N is the batch size. Each bounding box
                           has four coordinate values and the layout is
                           [xmin, ymin, xmax, ymax], when box size equals to 4.
                           The data type is float32 or float64.
        scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
                           predicted confidence predictions. N is the batch
                           size, C is the class number, M is number of bounding
                           boxes. Now only support 1 class. For each category
                           there are total M scores which corresponding M bounding
                           boxes. Please note, M is equal to the 2nd dimension of
                           BBoxes. The data type is float32 or float64.
        background_label (int): The index of background label, the background
                                label will be ignored. If set to -1, then all
                                categories will be considered. Default: -1
        score_threshold (float): Threshold to filter out bounding boxes with
                                 low confidence score. If not provided,
                                 consider all boxes.
        nms_top_k (int): Maximum number of detections to be kept according to
                         the confidences after the filtering detections based
                         on score_threshold.
        keep_top_k (int): Number of total bboxes to be kept per image after NMS
                          step. -1 means keeping all bboxes after NMS step.
        nms_threshold (float): The threshold to be used in NMS. Default: 0.3
        nms_eta (float): The threshold to be used in NMS. Default: 1.0
        normalized (bool): Whether detections are normalized. Default: True
        name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
                   Default: None.

    Returns:
        Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
        Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
        or A 2-D LoDTensor with shape [No, 10] represents the detections.
        Each row has 10 values:
        [label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
        total number of detections. If there is no detected boxes for all
        images, lod will be set to {1} and Out only contains one value
        which is -1.
        (After version 1.3, when no boxes detected, the lod is changed
        from {0} to {1}). The data type is float32 or float64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
                      dtype='float32')
            scores = fluid.data(name='scores', shape=[None, 1, 81],
                      dtype='float32')
            out = fluid.layers.locality_aware_nms(bboxes=boxes,
                                                  scores=scores,
                                                  score_threshold=0.5,
                                                  nms_top_k=400,
                                                  nms_threshold=0.3,
                                                  keep_top_k=200,
                                                  normalized=False)
    """
    check_variable_and_dtype(
        bboxes, 'bboxes', ['float32', 'float64'], 'locality_aware_nms'
    )
    check_variable_and_dtype(
        scores, 'scores', ['float32', 'float64'], 'locality_aware_nms'
    )
    check_type(background_label, 'background_label', int, 'locality_aware_nms')
    check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
    check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
    check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
    check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
    check_type(normalized, 'normalized', bool, 'locality_aware_nms')

    # The underlying op only supports a single class, so enforce the
    # [N, 1, M] score layout up front.
    shape = scores.shape
    assert len(shape) == 3, "dim size of scores must be 3"
    assert (
        shape[1] == 1
    ), "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"

    helper = LayerHelper('locality_aware_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    # NOTE: the attrs dict previously listed 'nms_eta' twice (a duplicate
    # key is silently collapsed by Python); also dropped an unused local
    # ``out = {'Out': output}``.
    helper.append_op(
        type="locality_aware_nms",
        inputs={'BBoxes': bboxes, 'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'nms_eta': nms_eta,
            'keep_top_k': keep_top_k,
            'normalized': normalized,
        },
        outputs={'Out': output},
    )
    # NMS output is a post-processing result, never back-propagated.
    output.stop_gradient = True
    return output
def matrix_nms(
bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.0,
background_label=0,
normalized=True,
return_index=False,
name=None,
):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
First selects a subset of candidate bounding boxes that have higher scores
than score_threshold (if provided), then the top k candidate is selected if
nms_top_k is larger than -1. Score of the remaining candidate are then
decayed according to the Matrix NMS scheme.
Aftern NMS step, at most keep_top_k number of total bboxes are to be kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
if in_dygraph_mode():
attrs = (
score_threshold,
nms_top_k,
keep_top_k,
post_threshold,
use_gaussian,
gaussian_sigma,
background_label,
normalized,
)
out, index = _C_ops.matrix_nms(bboxes, scores, *attrs)
if return_index:
return out, index
else:
return out
check_variable_and_dtype(
bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
)
check_variable_and_dtype(
scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
)
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes, 'Scores': scores},
attrs={
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'use_gaussian': use_gaussian,
'gaussian_sigma': gaussian_sigma,
'background_label': background_label,
'normalized': normalized,
},
outputs={'Out': output, 'Index': index},
)
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(
    fpn_rois,
    min_level,
    max_level,
    refer_level,
    refer_scale,
    rois_num=None,
    name=None,
):
    r"""
    Distribute proposals into different FPN levels. **LoDTensor input only.**

    In Feature Pyramid Networks (FPN) models every proposal is assigned to a
    pyramid level according to its scale, relative to the referring scale and
    the referring level. An index tensor is also produced so that the original
    order of the RoIs can be restored afterwards. The level of each RoI is
    computed as:

    .. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
        level = floor(&\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea computes the area of a RoI.

    Args:
        fpn_rois(Variable): 2-D Tensor with shape [N, 4], data type float32
            or float64. The input RoIs.
        min_level(int32): The lowest FPN level the proposals come from.
        max_level(int32): The highest FPN level the proposals come from.
        refer_level(int32): The referring level of the FPN layer with the
            specified scale.
        refer_scale(int32): The referring scale of the FPN layer with the
            specified level.
        rois_num(Tensor, optional): 1-D int32 Tensor of shape [B] holding the
            RoI count of each image (B images). When given, a per-level list
            of such tensors is additionally returned. None by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Tuple:
        multi_rois(List): max_level-min_level+1 LoDTensors of shape [M, 4],
            float32 or float64 — the proposals of each FPN level.
        restore_ind(Variable): int32 Tensor of shape [N, 1] used to restore
            the original order of fpn_rois.
        rois_num_per_level(List): only when ``rois_num`` is given — per-level
            1-D int32 Tensors of shape [B] with each image's RoI count.

    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            fpn_rois = fluid.data(
                name='data', shape=[None, 4], dtype='float32', lod_level=1)
            multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
                fpn_rois=fpn_rois,
                min_level=2,
                max_level=5,
                refer_level=4,
                refer_scale=224)
    """
    # Legacy fluid entry point: simply delegate to the maintained
    # implementation in paddle.vision.ops.
    return paddle.vision.ops.distribute_fpn_proposals(
        fpn_rois,
        min_level,
        max_level,
        refer_level,
        refer_scale,
        rois_num=rois_num,
        name=name,
    )
@templatedoc()
def box_decoder_and_assign(
    prior_box, prior_box_var, target_box, box_score, box_clip, name=None
):
    """
    ${comment}
    Args:
        prior_box(${prior_box_type}): ${prior_box_comment}
        prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
        target_box(${target_box_type}): ${target_box_comment}
        box_score(${box_score_type}): ${box_score_comment}
        box_clip(${box_clip_type}): ${box_clip_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Tuple:
        decode_box(${decode_box_type}): ${decode_box_comment}
        output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            pb = fluid.data(
                name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(
                name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4*81], dtype='float32')
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32')
            decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
                pb, pbv, loc, scores, 4.135)
    """
    # Validate the tensor inputs up front (prior_box_var and box_clip are
    # validated by the operator itself).
    check_variable_and_dtype(
        prior_box, 'prior_box', ['float32', 'float64'], 'box_decoder_and_assign'
    )
    check_variable_and_dtype(
        target_box,
        'target_box',
        ['float32', 'float64'],
        'box_decoder_and_assign',
    )
    check_variable_and_dtype(
        box_score, 'box_score', ['float32', 'float64'], 'box_decoder_and_assign'
    )
    # NOTE: LayerHelper consumes **locals(), so no new local names may be
    # introduced above this line.
    helper = LayerHelper("box_decoder_and_assign", **locals())

    # Both outputs inherit the dtype of the prior boxes.
    decode_box_out = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype
    )
    assign_box_out = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype
    )

    op_inputs = {
        "PriorBox": prior_box,
        "PriorBoxVar": prior_box_var,
        "TargetBox": target_box,
        "BoxScore": box_score,
    }
    op_outputs = {
        "DecodeBox": decode_box_out,
        "OutputAssignBox": assign_box_out,
    }
    helper.append_op(
        type="box_decoder_and_assign",
        inputs=op_inputs,
        attrs={"box_clip": box_clip},
        outputs=op_outputs,
    )
    return decode_box_out, assign_box_out
def collect_fpn_proposals(
    multi_rois,
    multi_scores,
    min_level,
    max_level,
    post_nms_top_n,
    rois_num_per_level=None,
    name=None,
):
    """
    **This OP only supports LoDTensor as input**. Concat multi-level RoIs
    (Region of Interest) and select N RoIs with respect to multi_scores.
    This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id
    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D
            LoDTensor with shape [N, 4] and data type is float32 or float64,
            N is the number of RoIs.
        multi_scores(list): List of scores of RoIs to collect. Element in list
            is 2-D LoDTensor with shape [N, 1] and data type is float32 or
            float64, N is the number of RoIs.
        min_level(int): The lowest level of FPN layer to collect
        max_level(int): The highest level of FPN layer to collect
        post_nms_top_n(int): The number of selected RoIs
        rois_num_per_level(list, optional): The List of RoIs' numbers.
            Each element is 1-D Tensor which contains the RoIs' number of each
            image on each level and the shape is [B] and data type is
            int32, B is the number of images. If it is not None then return
            a 1-D Tensor contains the output RoIs' number of each image and
            the shape is [B]. Default: None
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        Variable:
        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
            float32 or float64. Selected RoIs.
        rois_num(Tensor): 1-D Tensor contains the RoIs's number of each
            image. The shape is [B] and data type is int32. B is the number of
            images.
    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            multi_rois = []
            multi_scores = []
            for i in range(4):
                multi_rois.append(fluid.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
                multi_scores.append(fluid.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois,
                multi_scores=multi_scores,
                min_level=2,
                max_level=5,
                post_nms_top_n=2000)
    """
    # Only the levels in [min_level, max_level] participate.
    num_lvl = max_level - min_level + 1
    input_rois = multi_rois[:num_lvl]
    input_scores = multi_scores[:num_lvl]

    if _non_static_mode():
        assert (
            rois_num_per_level is not None
        ), "rois_num_per_level should not be None in dygraph mode."
        attrs = ('post_nms_topN', post_nms_top_n)
        output_rois, rois_num = _legacy_C_ops.collect_fpn_proposals(
            input_rois, input_scores, rois_num_per_level, *attrs
        )
        # BUG FIX: this branch previously fell through into the static-graph
        # code below instead of returning the eager results.
        return output_rois, rois_num

    check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
    check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
    helper = LayerHelper('collect_fpn_proposals', **locals())
    dtype = helper.input_dtype('multi_rois')
    check_dtype(
        dtype, 'multi_rois', ['float32', 'float64'], 'collect_fpn_proposals'
    )
    output_rois = helper.create_variable_for_type_inference(dtype)
    output_rois.stop_gradient = True

    inputs = {
        'MultiLevelRois': input_rois,
        'MultiLevelScores': input_scores,
    }
    outputs = {'FpnRois': output_rois}
    if rois_num_per_level is not None:
        # Optional per-level RoI counts produce a per-image count output.
        inputs['MultiLevelRoIsNum'] = rois_num_per_level
        rois_num = helper.create_variable_for_type_inference(dtype='int32')
        rois_num.stop_gradient = True
        outputs['RoisNum'] = rois_num
    helper.append_op(
        type='collect_fpn_proposals',
        inputs=inputs,
        outputs=outputs,
        attrs={'post_nms_topN': post_nms_top_n},
    )
    if rois_num_per_level is not None:
        return output_rois, rois_num
    return output_rois
......@@ -23,7 +23,6 @@ from .initializer import Constant
from . import unique_name
from .framework import Program, Variable, program_guard
from . import layers
from .layers import detection
__all__ = [
'MetricBase',
......
......@@ -75,230 +75,6 @@ class LayerTest(unittest.TestCase):
yield
class TestDensityPriorBox(unittest.TestCase):
    def test_density_prior_box(self):
        """density_prior_box yields 4-D priors with matching variances."""
        main_prog = Program()
        with program_guard(main_prog):
            pixel_shape = [3, 224, 224]
            pixel = fluid.layers.data(
                name='pixel', shape=pixel_shape, dtype='float32'
            )
            feature = fluid.layers.conv2d(pixel, 3, 3, 2)
            box, var = layers.density_prior_box(
                input=feature,
                image=pixel,
                densities=[3, 4],
                fixed_sizes=[50.0, 60.0],
                fixed_ratios=[1.0],
                clip=True,
            )
            # 4-D output, variances aligned with priors, 4 coords per box.
            assert len(box.shape) == 4
            assert box.shape == var.shape
            assert box.shape[-1] == 4
class TestAnchorGenerator(unittest.TestCase):
    def test_anchor_generator(self):
        """anchor_generator yields 4-D anchors with matching variances."""
        pixel_shape = [3, 224, 224]
        pixel = fluid.layers.data(
            name='pixel', shape=pixel_shape, dtype='float32'
        )
        feature = fluid.layers.conv2d(pixel, 3, 3, 2)
        anchor, var = fluid.layers.anchor_generator(
            input=feature,
            anchor_sizes=[64, 128, 256, 512],
            aspect_ratios=[0.5, 1.0, 2.0],
            variance=[0.1, 0.1, 0.2, 0.2],
            stride=[16.0, 16.0],
            offset=0.5,
        )
        # 4-D output, variances aligned with anchors, 4 coords per anchor.
        assert len(anchor.shape) == 4
        assert anchor.shape == var.shape
        assert anchor.shape[3] == 4
class TestGenerateProposalLabels(unittest.TestCase):
    """Static-graph shape checks for fluid.layers.generate_proposal_labels."""

    def check_out(self, outs):
        """Validate shapes and alignment of the op's output tuple."""
        rois = outs[0]
        labels_int32 = outs[1]
        bbox_targets = outs[2]
        bbox_inside_weights = outs[3]
        bbox_outside_weights = outs[4]
        assert rois.shape[1] == 4
        # Every per-RoI output must align with the sampled RoIs.
        assert rois.shape[0] == labels_int32.shape[0]
        assert rois.shape[0] == bbox_targets.shape[0]
        assert rois.shape[0] == bbox_inside_weights.shape[0]
        assert rois.shape[0] == bbox_outside_weights.shape[0]
        # Regression outputs are laid out as 4 coordinates per class.
        assert bbox_targets.shape[1] == 4 * self.class_nums
        assert bbox_inside_weights.shape[1] == 4 * self.class_nums
        assert bbox_outside_weights.shape[1] == 4 * self.class_nums
        if len(outs) == 6:
            # Present only when return_max_overlap=True.
            max_overlap_with_gt = outs[5]
            assert max_overlap_with_gt.shape[0] == rois.shape[0]

    def test_generate_proposal_labels(self):
        program = Program()
        with program_guard(program):
            rpn_rois = fluid.data(
                name='rpn_rois', shape=[4, 4], dtype='float32', lod_level=1
            )
            gt_classes = fluid.data(
                name='gt_classes', shape=[6], dtype='int32', lod_level=1
            )
            is_crowd = fluid.data(
                name='is_crowd', shape=[6], dtype='int32', lod_level=1
            )
            gt_boxes = fluid.data(
                name='gt_boxes', shape=[6, 4], dtype='float32', lod_level=1
            )
            im_info = fluid.data(name='im_info', shape=[1, 3], dtype='float32')
            max_overlap = fluid.data(
                name='max_overlap', shape=[4], dtype='float32', lod_level=1
            )
            self.class_nums = 5
            # Default (non-cascade) configuration.
            outs = fluid.layers.generate_proposal_labels(
                rpn_rois=rpn_rois,
                gt_classes=gt_classes,
                is_crowd=is_crowd,
                gt_boxes=gt_boxes,
                im_info=im_info,
                batch_size_per_im=2,
                fg_fraction=0.5,
                fg_thresh=0.5,
                bg_thresh_hi=0.5,
                bg_thresh_lo=0.0,
                bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                class_nums=self.class_nums,
            )
            # Cascade R-CNN configuration, also returning max overlap.
            outs_1 = fluid.layers.generate_proposal_labels(
                rpn_rois=rpn_rois,
                gt_classes=gt_classes,
                is_crowd=is_crowd,
                gt_boxes=gt_boxes,
                im_info=im_info,
                batch_size_per_im=2,
                fg_fraction=0.5,
                fg_thresh=0.5,
                bg_thresh_hi=0.5,
                bg_thresh_lo=0.0,
                bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
                class_nums=self.class_nums,
                is_cascade_rcnn=True,
                max_overlap=max_overlap,
                return_max_overlap=True,
            )

            self.check_out(outs)
            self.check_out(outs_1)
            # NOTE: removed a dead trailing `rois = outs[0]` assignment that
            # was never used after the checks.
class TestGenerateMaskLabels(unittest.TestCase):
    def test_generate_mask_labels(self):
        """generate_mask_labels produces per-RoI mask targets of the
        expected width (num_classes * resolution^2)."""
        prog = Program()
        with program_guard(prog):

            def lod_input(name, shape, dtype, lod_level):
                # All inputs share append_batch_size=False.
                return layers.data(
                    name=name,
                    shape=shape,
                    dtype=dtype,
                    lod_level=lod_level,
                    append_batch_size=False,
                )

            im_info = lod_input('im_info', [1, 3], 'float32', 1)
            gt_classes = lod_input('gt_classes', [2, 1], 'int32', 1)
            is_crowd = lod_input('is_crowd', [2, 1], 'int32', 1)
            gt_segms = lod_input('gt_segms', [20, 2], 'float32', 3)
            rois = lod_input('rois', [4, 4], 'float32', 1)
            labels_int32 = lod_input('labels_int32', [4, 1], 'int32', 1)

            num_classes = 5
            resolution = 14
            outs = fluid.layers.generate_mask_labels(
                im_info=im_info,
                gt_classes=gt_classes,
                is_crowd=is_crowd,
                gt_segms=gt_segms,
                rois=rois,
                labels_int32=labels_int32,
                num_classes=num_classes,
                resolution=resolution,
            )
            mask_rois, roi_has_mask_int32, mask_int32 = outs
            assert mask_rois.shape[1] == 4
            assert mask_int32.shape[1] == num_classes * resolution * resolution
class TestMultiBoxHead(unittest.TestCase):
    def test_multi_box_head(self):
        """multi_box_head emits 2-D priors with aligned variances and
        matching loc/conf prediction counts."""
        data_shape = [3, 224, 224]
        mbox_locs, mbox_confs, box, var = self.multi_box_head_output(data_shape)

        assert len(box.shape) == 2
        assert box.shape == var.shape
        assert box.shape[1] == 4
        assert mbox_locs.shape[1] == mbox_confs.shape[1]

    def multi_box_head_output(self, data_shape):
        """Build a 5-conv feature pyramid and attach a multi_box_head."""
        pixel = fluid.layers.data(
            name='pixel', shape=data_shape, dtype='float32'
        )
        # Five stride-2 convs; the head consumes all five feature maps plus
        # the deepest one a second time (six inputs in total).
        features = []
        prev = pixel
        for _ in range(5):
            prev = fluid.layers.conv2d(prev, 3, 3, 2)
            features.append(prev)

        mbox_locs, mbox_confs, box, var = layers.multi_box_head(
            inputs=features + [features[-1]],
            image=pixel,
            num_classes=21,
            min_ratio=20,
            max_ratio=90,
            aspect_ratios=[
                [2.0],
                [2.0, 3.0],
                [2.0, 3.0],
                [2.0, 3.0],
                [2.0],
                [2.0],
            ],
            base_size=300,
            offset=0.5,
            flip=True,
            clip=True,
        )
        return mbox_locs, mbox_confs, box, var
class TestGenerateProposals(LayerTest):
def test_generate_proposals(self):
scores_np = np.random.rand(2, 3, 4, 4).astype('float32')
......@@ -323,10 +99,10 @@ class TestGenerateProposals(LayerTest):
variances = fluid.data(
name='var', shape=[4, 4, 3, 4], dtype='float32'
)
rois, roi_probs, rois_num = fluid.layers.generate_proposals(
rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals(
scores,
bbox_deltas,
im_info,
im_info[:2],
anchors,
variances,
pre_nms_top_n=10,
......@@ -355,10 +131,10 @@ class TestGenerateProposals(LayerTest):
im_info_dy = base.to_variable(im_info_np)
anchors_dy = base.to_variable(anchors_np)
variances_dy = base.to_variable(variances_np)
rois, roi_probs, rois_num = fluid.layers.generate_proposals(
rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals(
scores_dy,
bbox_deltas_dy,
im_info_dy,
im_info_dy[:2],
anchors_dy,
variances_dy,
pre_nms_top_n=10,
......@@ -374,62 +150,6 @@ class TestGenerateProposals(LayerTest):
np.testing.assert_array_equal(np.array(rois_num_stat), rois_num_dy)
class TestBoxClip(unittest.TestCase):
    def test_box_clip(self):
        """box_clip builds and returns a clipped-box variable."""
        prog = Program()
        with program_guard(prog):
            boxes = layers.data(
                name='input_box', shape=[7, 4], dtype='float32', lod_level=1
            )
            im_shape = layers.data(name='im_info', shape=[3], dtype='float32')
            clipped = layers.box_clip(boxes, im_shape)
            self.assertIsNotNone(clipped)
class TestMulticlassNMS(unittest.TestCase):
    def test_multiclass_nms(self):
        """multiclass_nms builds and returns an output variable."""
        prog = Program()
        with program_guard(prog):
            boxes = layers.data(
                name='bboxes', shape=[-1, 10, 4], dtype='float32'
            )
            probs = layers.data(name='scores', shape=[-1, 10], dtype='float32')
            nms_out = layers.multiclass_nms(boxes, probs, 0.3, 400, 200, 0.7)
            self.assertIsNotNone(nms_out)

    def test_multiclass_nms_error(self):
        """int32 bboxes or scores must raise TypeError."""
        prog = Program()
        with program_guard(prog):
            int_boxes = fluid.data(
                name='bboxes1', shape=[10, 10, 4], dtype='int32'
            )
            f32_scores = fluid.data(
                name='scores1', shape=[10, 10], dtype='float32'
            )
            f32_boxes = fluid.data(
                name='bboxes2', shape=[10, 10, 4], dtype='float32'
            )
            int_scores = fluid.data(
                name='scores2', shape=[10, 10], dtype='int32'
            )
            # First a bad bbox dtype, then a bad score dtype.
            for bad_boxes, bad_scores in (
                (int_boxes, f32_scores),
                (f32_boxes, int_scores),
            ):
                self.assertRaises(
                    TypeError,
                    layers.multiclass_nms,
                    bboxes=bad_boxes,
                    scores=bad_scores,
                    score_threshold=0.5,
                    nms_top_k=400,
                    keep_top_k=200,
                )
class TestMulticlassNMS2(unittest.TestCase):
def test_multiclass_nms2(self):
program = Program()
......@@ -449,138 +169,6 @@ class TestMulticlassNMS2(unittest.TestCase):
self.assertIsNotNone(index)
class TestCollectFpnPropsals(LayerTest):
    # NOTE(review): class name carries a historical typo ("Propsals");
    # renaming it would break external test selectors, so it is kept.
    def test_collect_fpn_proposals(self):
        # Static-graph and dygraph execution must produce identical RoIs.
        # Shared random per-level inputs for both execution modes.
        multi_bboxes_np = []
        multi_scores_np = []
        rois_num_per_level_np = []
        for i in range(4):
            bboxes_np = np.random.rand(5, 4).astype('float32')
            scores_np = np.random.rand(5, 1).astype('float32')
            rois_num = np.array([2, 3]).astype('int32')
            multi_bboxes_np.append(bboxes_np)
            multi_scores_np.append(scores_np)
            rois_num_per_level_np.append(rois_num)

        # --- static graph path ---
        with self.static_graph():
            multi_bboxes = []
            multi_scores = []
            rois_num_per_level = []
            for i in range(4):
                bboxes = fluid.data(
                    name='rois' + str(i),
                    shape=[5, 4],
                    dtype='float32',
                    lod_level=1,
                )
                scores = fluid.data(
                    name='scores' + str(i),
                    shape=[5, 1],
                    dtype='float32',
                    lod_level=1,
                )
                rois_num = fluid.data(
                    name='rois_num' + str(i), shape=[None], dtype='int32'
                )
                multi_bboxes.append(bboxes)
                multi_scores.append(scores)
                rois_num_per_level.append(rois_num)

            fpn_rois, rois_num = layers.collect_fpn_proposals(
                multi_bboxes,
                multi_scores,
                2,
                5,
                10,
                rois_num_per_level=rois_num_per_level,
            )
            feed = {}
            for i in range(4):
                feed['rois' + str(i)] = multi_bboxes_np[i]
                feed['scores' + str(i)] = multi_scores_np[i]
                feed['rois_num' + str(i)] = rois_num_per_level_np[i]
            fpn_rois_stat, rois_num_stat = self.get_static_graph_result(
                feed=feed, fetch_list=[fpn_rois, rois_num], with_lod=True
            )
            fpn_rois_stat = np.array(fpn_rois_stat)
            rois_num_stat = np.array(rois_num_stat)

        # --- dygraph path on the same numpy inputs ---
        with self.dynamic_graph():
            multi_bboxes_dy = []
            multi_scores_dy = []
            rois_num_per_level_dy = []
            for i in range(4):
                bboxes_dy = base.to_variable(multi_bboxes_np[i])
                scores_dy = base.to_variable(multi_scores_np[i])
                rois_num_dy = base.to_variable(rois_num_per_level_np[i])
                multi_bboxes_dy.append(bboxes_dy)
                multi_scores_dy.append(scores_dy)
                rois_num_per_level_dy.append(rois_num_dy)
            fpn_rois_dy, rois_num_dy = fluid.layers.collect_fpn_proposals(
                multi_bboxes_dy,
                multi_scores_dy,
                2,
                5,
                10,
                rois_num_per_level=rois_num_per_level_dy,
            )
            fpn_rois_dy = fpn_rois_dy.numpy()
            rois_num_dy = rois_num_dy.numpy()

        # Exact agreement expected between the two execution modes.
        np.testing.assert_array_equal(fpn_rois_stat, fpn_rois_dy)
        np.testing.assert_array_equal(rois_num_stat, rois_num_dy)

    def test_collect_fpn_proposals_error(self):
        # Non-list inputs and int32 bboxes must raise TypeError.
        def generate_input(bbox_type, score_type, name):
            # Build four levels of (bboxes, scores) with the given dtypes.
            multi_bboxes = []
            multi_scores = []
            for i in range(4):
                bboxes = fluid.data(
                    name='rois' + name + str(i),
                    shape=[10, 4],
                    dtype=bbox_type,
                    lod_level=1,
                )
                scores = fluid.data(
                    name='scores' + name + str(i),
                    shape=[10, 1],
                    dtype=score_type,
                    lod_level=1,
                )
                multi_bboxes.append(bboxes)
                multi_scores.append(scores)
            return multi_bboxes, multi_scores

        program = Program()
        with program_guard(program):
            # Case 1: plain Variables instead of lists of Variables.
            bbox1 = fluid.data(
                name='rois', shape=[5, 10, 4], dtype='float32', lod_level=1
            )
            score1 = fluid.data(
                name='scores', shape=[5, 10, 1], dtype='float32', lod_level=1
            )
            # Case 2: proper lists but with an invalid int32 bbox dtype.
            bbox2, score2 = generate_input('int32', 'float32', '2')

            self.assertRaises(
                TypeError,
                layers.collect_fpn_proposals,
                multi_rois=bbox1,
                multi_scores=score1,
                min_level=2,
                max_level=5,
                post_nms_top_n=2000,
            )
            self.assertRaises(
                TypeError,
                layers.collect_fpn_proposals,
                multi_rois=bbox2,
                multi_scores=score2,
                min_level=2,
                max_level=5,
                post_nms_top_n=2000,
            )
class TestDistributeFpnProposals(LayerTest):
def test_distribute_fpn_proposals(self):
rois_np = np.random.rand(10, 4).astype('float32')
......@@ -592,7 +180,7 @@ class TestDistributeFpnProposals(LayerTest):
multi_rois,
restore_ind,
rois_num_per_level,
) = layers.distribute_fpn_proposals(
) = paddle.vision.ops.distribute_fpn_proposals(
fpn_rois=rois,
min_level=2,
max_level=5,
......@@ -619,7 +207,7 @@ class TestDistributeFpnProposals(LayerTest):
multi_rois_dy,
restore_ind_dy,
rois_num_per_level_dy,
) = layers.distribute_fpn_proposals(
) = paddle.vision.ops.distribute_fpn_proposals(
fpn_rois=rois_dy,
min_level=2,
max_level=5,
......@@ -646,7 +234,7 @@ class TestDistributeFpnProposals(LayerTest):
)
self.assertRaises(
TypeError,
layers.distribute_fpn_proposals,
paddle.vision.ops.distribute_fpn_proposals,
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
......@@ -655,81 +243,6 @@ class TestDistributeFpnProposals(LayerTest):
)
class TestBoxDecoderAndAssign(unittest.TestCase):
    def test_box_decoder_and_assign(self):
        """box_decoder_and_assign builds and returns both outputs."""
        prog = Program()
        with program_guard(prog):
            pb = fluid.data(name='prior_box', shape=[None, 4], dtype='float32')
            pbv = fluid.data(name='prior_box_var', shape=[4], dtype='float32')
            loc = fluid.data(
                name='target_box', shape=[None, 4 * 81], dtype='float32'
            )
            scores = fluid.data(
                name='scores', shape=[None, 81], dtype='float32'
            )
            decoded_box, output_assign_box = (
                fluid.layers.box_decoder_and_assign(pb, pbv, loc, scores, 4.135)
            )
            self.assertIsNotNone(decoded_box)
            self.assertIsNotNone(output_assign_box)

    def test_box_decoder_and_assign_error(self):
        """Any int32 tensor input must raise TypeError."""

        def make_inputs(pb_type, pbv_type, loc_type, score_type, tag):
            pb = fluid.data(
                name='prior_box' + tag, shape=[None, 4], dtype=pb_type
            )
            pbv = fluid.data(
                name='prior_box_var' + tag, shape=[4], dtype=pbv_type
            )
            loc = fluid.data(
                name='target_box' + tag, shape=[None, 4 * 81], dtype=loc_type
            )
            scores = fluid.data(
                name='scores' + tag, shape=[None, 81], dtype=score_type
            )
            return pb, pbv, loc, scores

        # (pb, pbv, loc, score) dtypes plus a unique name suffix; each row
        # makes exactly one input int32.
        bad_dtype_cases = [
            ('int32', 'float32', 'float32', 'float32', '1'),
            ('float32', 'float32', 'int32', 'float32', '2'),
            ('float32', 'float32', 'float32', 'int32', '3'),
        ]
        prog = Program()
        with program_guard(prog):
            for case in bad_dtype_cases:
                pb, pbv, loc, scores = make_inputs(*case)
                self.assertRaises(
                    TypeError,
                    layers.box_decoder_and_assign,
                    prior_box=pb,
                    prior_box_var=pbv,
                    target_box=loc,
                    box_score=scores,
                    box_clip=4.0,
                )
# These tests build fluid programs, so run the suite in static-graph mode.
if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()
......@@ -19,6 +19,7 @@ from darknet import ConvBNLayer, DarkNet53_conv_body
import paddle
import paddle.fluid as fluid
from paddle import _legacy_C_ops
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.jit.api import declarative
......@@ -351,7 +352,7 @@ class YOLOv3(fluid.dygraph.Layer):
yolo_boxes = fluid.layers.concat(self.boxes, axis=1)
yolo_scores = fluid.layers.concat(self.scores, axis=2)
pred = fluid.layers.multiclass_nms(
pred = _legacy_C_ops.multiclass_nms(
bboxes=yolo_boxes,
scores=yolo_scores,
score_threshold=cfg.valid_thresh,
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.static.nn as nn
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTAnchorGeneratorBaseTest(InferencePassTest):
    # Verify anchor_generator through the TensorRT subgraph pass against
    # native execution, across precision / serialization / dynamic-shape
    # configurations.
    def setUp(self):
        # Network and engine knobs; individual test_* methods tweak
        # self.precision / self.serialize / dynamic shape before build().
        self.bs = 1
        self.channel = 16
        self.height = 32
        self.width = 32
        self.anchor_sizes = [64.0, 128.0, 256.0, 512.0]
        self.aspect_ratios = [0.5, 1.0, 2.0]
        self.variance = [0.1, 0.1, 0.2, 0.2]
        self.stride = [8.0, 8.0]
        self.precision = AnalysisConfig.Precision.Float32
        self.serialize = False
        self.enable_trt = True
        self.feeds = {
            'data': np.random.random(
                [self.bs, self.channel, self.height, self.width]
            ).astype('float32'),
        }

    def build(self):
        # Dynamic shapes insert an extra transpose op, so the minimum TRT
        # subgraph size differs between the two modes.
        min_graph_size = 3 if self.dynamic_shape_params is not None else 2
        # trt_parameters is (re)built here so that precision/serialize
        # changes made by the test methods after setUp take effect.
        self.trt_parameters = InferencePassTest.TensorRTParam(
            1 << 30,
            self.bs,
            min_graph_size,
            self.precision,
            self.serialize,
            False,
        )
        with fluid.program_guard(self.main_program, self.startup_program):
            data = fluid.data(
                name='data',
                shape=[-1, self.channel, self.height, self.width],
                dtype='float32',
            )
            anchor, var = fluid.layers.detection.anchor_generator(
                data,
                anchor_sizes=self.anchor_sizes,
                aspect_ratios=self.aspect_ratios,
                variance=self.variance,
                stride=self.stride,
            )
            if self.dynamic_shape_params is not None:
                # Rearrange to an NCHW-like layout for batch_norm in the
                # dynamic-shape case.
                anchor = paddle.transpose(anchor, [2, 3, 0, 1])
            out = nn.batch_norm(anchor, is_test=True)

        self.fetch_list = [out, var]

    def run_test(self):
        # Build with the current flags, then compare TRT vs native output.
        self.build()
        self.check_output()

    def set_dynamic(self):
        # min shape halves H/W; max and opt both use the full input shape.
        self.dynamic_shape_params = InferencePassTest.DynamicShapeParam(
            {
                'data': [
                    self.bs,
                    self.channel,
                    self.height // 2,
                    self.width // 2,
                ]
            },
            {'data': [self.bs, self.channel, self.height, self.width]},
            {'data': [self.bs, self.channel, self.height, self.width]},
            False,
        )

    def test_base(self):
        self.run_test()

    def test_fp16(self):
        self.precision = AnalysisConfig.Precision.Half
        self.run_test()

    def test_serialize(self):
        self.serialize = True
        self.run_test()

    def test_dynamic(self):
        self.set_dynamic()
        self.run_test()

    def test_dynamic_fp16(self):
        self.precision = AnalysisConfig.Precision.Half
        self.set_dynamic()
        self.run_test()

    def test_dynamic_serialize(self):
        self.serialize = True
        self.set_dynamic()
        self.run_test()

    def test_dynamic_fp16_serialize(self):
        self.serialize = True
        self.precision = AnalysisConfig.Precision.Half
        self.set_dynamic()
        self.run_test()

    def check_output(self):
        # TRT requires CUDA; the comparison is skipped on CPU-only builds.
        if core.is_compiled_with_cuda():
            use_gpu = True
            atol = 1e-5
            # fp16 results need a looser tolerance.
            if self.trt_parameters.precision == AnalysisConfig.Precision.Half:
                atol = 1e-3
            self.check_output_with_option(use_gpu, atol, flatten=True)
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
            )
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.static.nn as nn
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TensorRTMultiClassNMSTest(InferencePassTest):
def setUp(self):
self.enable_trt = True
self.enable_tensorrt_varseqlen = True
self.precision = AnalysisConfig.Precision.Float32
self.serialize = False
self.bs = 1
self.background_label = -1
self.score_threshold = 0.5
self.nms_top_k = 8
self.nms_threshold = 0.3
self.keep_top_k = 8
self.normalized = False
self.num_classes = 8
self.num_boxes = 8
self.trt_parameters = InferencePassTest.TensorRTParam(
1 << 30, self.bs, 2, self.precision, self.serialize, False
)
def build(self):
with fluid.program_guard(self.main_program, self.startup_program):
boxes = fluid.data(
name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32'
)
scores = fluid.data(
name='scores',
shape=[-1, self.num_classes, self.num_boxes],
dtype='float32',
)
multiclass_nms_out = fluid.layers.multiclass_nms(
bboxes=boxes,
scores=scores,
background_label=self.background_label,
score_threshold=self.score_threshold,
nms_top_k=self.nms_top_k,
nms_threshold=self.nms_threshold,
keep_top_k=self.keep_top_k,
normalized=self.normalized,
)
mutliclass_nms_out = multiclass_nms_out + 1.0
multiclass_nms_out = paddle.reshape(
multiclass_nms_out,
[self.bs, 1, self.keep_top_k, 6],
name='reshape',
)
out = nn.batch_norm(multiclass_nms_out, is_test=True)
boxes_data = (
np.arange(self.num_boxes * 4)
.reshape([self.bs, self.num_boxes, 4])
.astype('float32')
)
scores_data = (
np.arange(1 * self.num_classes * self.num_boxes)
.reshape([self.bs, self.num_classes, self.num_boxes])
.astype('float32')
)
self.feeds = {
'bboxes': boxes_data,
'scores': scores_data,
}
self.fetch_list = [out]
def run_test(self):
self.build()
self.check_output()
def run_test_all(self):
precision_opt = [
AnalysisConfig.Precision.Float32,
AnalysisConfig.Precision.Half,
]
serialize_opt = [False, True]
max_shape = {
'bboxes': [self.bs, self.num_boxes, 4],
'scores': [self.bs, self.num_classes, self.num_boxes],
}
opt_shape = max_shape
dynamic_shape_opt = [
None,
InferencePassTest.DynamicShapeParam(
{'bboxes': [1, 1, 4], 'scores': [1, 1, 1]},
max_shape,
opt_shape,
False,
),
]
for precision, serialize, dynamic_shape in itertools.product(
precision_opt, serialize_opt, dynamic_shape_opt
):
self.precision = precision
self.serialize = serialize
self.dynamic_shape_params = dynamic_shape
self.build()
self.check_output()
def check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
)
def test_base(self):
self.run_test()
def test_fp16(self):
self.precision = AnalysisConfig.Precision.Half
self.run_test()
def test_serialize(self):
self.serialize = True
self.run_test()
def test_dynamic(self):
    """Re-run with explicit dynamic-shape (min/max/opt) ranges."""
    min_shape = {'bboxes': [1, 1, 4], 'scores': [1, 1, 1]}
    max_shape = {
        'bboxes': [self.bs, self.num_boxes, 4],
        'scores': [self.bs, self.num_classes, self.num_boxes],
    }
    # The optimal shape mirrors the maximal shape.
    self.dynamic_shape_params = InferencePassTest.DynamicShapeParam(
        min_shape, max_shape, max_shape, False
    )
    self.run_test()
def test_background(self):
    """Re-run with a non-default background class label.

    Bug fix: the original set ``self.background``, which nothing in the
    visible build path reads — the op is wired from
    ``self.background_label`` (see ``background_label=self.background_label``
    in build()), so the test silently ran with the default label.
    Assign the attribute that is actually consumed.
    NOTE(review): assumes no other code aliases ``self.background`` —
    confirm against the full test class.
    """
    self.background_label = 7
    self.run_test()
def test_disable_varseqlen(self):
    """Re-run with the TensorRT variable-seqlen option turned off."""
    # NOTE(review): 'diable' is the attribute name as spelled by the
    # test framework base class — presumably a framework-wide typo;
    # do not "correct" it here or the flag would be ignored.
    self.diable_tensorrt_varseqlen = False
    self.run_test()
# Run the test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
......@@ -96,7 +96,6 @@ class TestDirectory(unittest.TestCase):
'paddle.static.nn.group_norm',
'paddle.static.nn.instance_norm',
'paddle.static.nn.layer_norm',
'paddle.static.nn.multi_box_head',
'paddle.static.nn.nce',
'paddle.static.nn.prelu',
'paddle.static.nn.row_conv',
......
......@@ -16,7 +16,6 @@ import math
import unittest
import numpy as np
from op_test import OpTest
'''
# Equivalent code
......@@ -314,148 +313,5 @@ def trans_lod(lod):
return new_lod
class TestGenerateMaskLabels(OpTest):
    """Checks the generate_mask_labels op against the python reference
    implementation (``generate_mask_labels`` defined earlier in this file)."""

    def set_data(self):
        # Build synthetic proposals and ground truth, then run the python
        # reference (init_test_output) to obtain the expected outputs.
        self.init_test_case()
        self.make_generate_proposal_labels_out()
        self.generate_gt_polys()
        self.generate_groundtruth()
        self.init_test_output()
        self.inputs = {
            'ImInfo': self.im_info,
            'GtClasses': (self.gt_classes.astype(np.int32), self.gt_lod),
            'IsCrowd': (self.is_crowd.astype(np.int32), self.gt_lod),
            'LabelsInt32': (self.label_int32.astype(np.int32), self.rois_lod),
            'GtSegms': (self.gt_polys.astype(np.float32), self.masks_lod),
            'Rois': (self.rois.astype(np.float32), self.rois_lod),
        }
        self.attrs = {
            'num_classes': self.num_classes,
            'resolution': self.resolution,
        }
        self.outputs = {
            'MaskRois': (self.mask_rois, [self.new_lod]),
            'RoiHasMaskInt32': (self.roi_has_mask_int32, [self.new_lod]),
            'MaskInt32': (self.mask_int32, [self.new_lod]),
        }

    def init_test_case(self):
        # Fixture dimensions; seeding makes every generated fixture
        # deterministic across runs.
        self.num_classes = 81
        self.resolution = 14
        self.batch_size = 2
        self.batch_size_per_im = 64
        self.images_shape = [100, 200]
        np.random.seed(0)

    def make_generate_proposal_labels_out(self):
        """Fabricate RoIs + per-RoI class labels as generate_proposal_labels
        would produce them."""
        rois = []
        self.rois_lod = [[]]
        self.label_int32 = []
        for bno in range(self.batch_size):
            self.rois_lod[0].append(self.batch_size_per_im)
            for i in range(self.batch_size_per_im):
                # Random box fully inside the image: corner + size.
                xywh = np.random.rand(4)
                xy1 = xywh[0:2] * 2
                wh = xywh[2:4] * (self.images_shape[0] - xy1)
                xy2 = xy1 + wh
                roi = [xy1[0], xy1[1], xy2[0], xy2[1]]
                rois.append(roi)
        self.rois = np.array(rois).astype("float32")
        for idx, roi_num in enumerate(self.rois_lod[0]):
            for roi_id in range(roi_num):
                # NOTE: np.random.random_integers is deprecated in modern
                # numpy; kept byte-identical here.
                class_id = np.random.random_integers(self.num_classes - 1)
                if idx == 0:
                    # set an image with no foreground, to test the empty case
                    self.label_int32.append(0)
                else:
                    self.label_int32.append(class_id)
        label_np = np.array(self.label_int32)
        self.label_int32 = label_np[:, np.newaxis]

    def generate_gt_polys(self):
        """Fabricate ground-truth segmentation polygons plus the 3-level
        LoD describing (images -> gt boxes -> polygons -> points)."""
        h, w = self.images_shape[0:2]
        self.gt_polys = []
        self.gt_polys_list = []
        max_gt = 4
        max_poly_num = 5
        min_poly_size = 4
        max_poly_size = 16
        lod0 = []  # gt boxes per image
        lod1 = []  # polygons per gt box
        lod2 = []  # points per polygon
        for i in range(self.batch_size):
            gt_num = np.random.randint(1, high=max_gt, size=1)[0]
            lod0.append(gt_num)
            ptss = []
            for i in range(gt_num):
                poly_num = np.random.randint(1, max_poly_num, size=1)[0]
                lod1.append(poly_num)
                pts = []
                for j in range(poly_num):
                    poly_size = np.random.randint(
                        min_poly_size, max_poly_size, size=1
                    )[0]
                    x = np.random.rand(poly_size, 1) * w
                    y = np.random.rand(poly_size, 1) * h
                    xy = np.concatenate((x, y), axis=1)
                    pts.append(xy.flatten().tolist())
                    self.gt_polys.extend(xy.flatten().tolist())
                    lod2.append(poly_size)
                ptss.append(pts)
            self.gt_polys_list.append(ptss)
        self.masks_lod = [lod0, lod1, lod2]
        self.gt_lod = [lod0]
        self.gt_polys = np.array(self.gt_polys).astype('float32').reshape(-1, 2)

    def generate_groundtruth(self):
        """Fabricate per-image info and per-gt class / crowd flags."""
        self.im_info = []
        self.gt_classes = []
        self.is_crowd = []
        for roi_num in self.gt_lod[0]:
            self.im_info.append(self.images_shape + [1.0])  # [h, w, scale]
            for roi_id in range(roi_num):
                class_id = np.random.random_integers(self.num_classes - 1)
                self.gt_classes.append(class_id)
                self.is_crowd.append(0)
        self.im_info = np.array(self.im_info).astype(np.float32)
        gt_classes_np = np.array(self.gt_classes)
        self.gt_classes = gt_classes_np[:, np.newaxis]
        is_crowd_np = np.array(self.is_crowd)
        self.is_crowd = is_crowd_np[:, np.newaxis]

    def init_test_output(self):
        # Run the python reference implementation to produce expectations.
        roi_lod = trans_lod(self.rois_lod[0])
        gt_lod = trans_lod(self.gt_lod[0])
        outs = generate_mask_labels(
            self.num_classes,
            self.im_info,
            self.gt_classes,
            self.is_crowd,
            self.label_int32,
            self.gt_polys_list,
            self.resolution,
            self.rois,
            roi_lod,
            gt_lod,
        )
        self.mask_rois = outs[0]
        self.roi_has_mask_int32 = outs[1]
        self.mask_int32 = outs[2]
        self.new_lod = outs[3]
        # Flatten the per-image lists into the op's packed output layout.
        self.mask_rois = np.vstack(self.mask_rois)
        self.roi_has_mask_int32 = np.hstack(self.roi_has_mask_int32)[
            :, np.newaxis
        ]
        self.mask_int32 = np.vstack(self.mask_int32)

    def setUp(self):
        self.op_type = "generate_mask_labels"
        self.set_data()

    def test_check_output(self):
        self.check_output()
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
......@@ -2806,16 +2806,6 @@ class TestBook(LayerTest):
x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
def test_roi_perspective_transform(self):
    """Static-graph smoke test for fluid.layers.roi_perspective_transform."""
    # TODO(minqiyang): dygraph do not support lod now
    with self.static_graph():
        x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
        # LoD-level-1 input: 8 coordinates (4 corner points) per RoI.
        rois = layers.data(
            name="rois", shape=[8], dtype="float32", lod_level=1
        )
        output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
        return output
def test_row_conv(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......@@ -2897,47 +2887,6 @@ class TestBook(LayerTest):
out = paddle.addmm(input=input, x=x, y=y)
return out
def test_retinanet_detection_output(self):
    """Static-graph smoke test for fluid.layers.retinanet_detection_output."""
    with program_guard(
        fluid.default_main_program(), fluid.default_startup_program()
    ):
        bboxes = layers.data(
            name='bboxes',
            shape=[1, 21, 4],
            append_batch_size=False,
            dtype='float32',
        )
        scores = layers.data(
            name='scores',
            shape=[1, 21, 10],
            append_batch_size=False,
            dtype='float32',
        )
        anchors = layers.data(
            name='anchors',
            shape=[21, 4],
            append_batch_size=False,
            dtype='float32',
        )
        im_info = layers.data(
            name="im_info",
            shape=[1, 3],
            append_batch_size=False,
            dtype='float32',
        )
        # Two FPN levels are simulated by passing each tensor twice.
        nmsed_outs = layers.retinanet_detection_output(
            bboxes=[bboxes, bboxes],
            scores=[scores, scores],
            anchors=[anchors, anchors],
            im_info=im_info,
            score_threshold=0.05,
            nms_top_k=1000,
            keep_top_k=100,
            nms_threshold=0.3,
            nms_eta=1.0,
        )
        return nmsed_outs
def test_warpctc_with_padding(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......
......@@ -19,8 +19,6 @@ import numpy as np
from op_test import OpTest
from test_multiclass_nms_op import iou
import paddle.fluid as fluid
def weight_merge(box1, box2, score1, score2):
for i in range(len(box1)):
......@@ -409,153 +407,5 @@ class TestLocalAwareNMSOp4Points(OpTest):
self.check_output()
class TestLocalityAwareNMSAPI(unittest.TestCase):
    """Smoke test: the static-graph API accepts well-formed inputs."""

    def test_api(self):
        bboxes_var = fluid.data(
            name='bboxes', shape=[None, 81, 8], dtype='float32'
        )
        scores_var = fluid.data(
            name='scores', shape=[None, 1, 81], dtype='float32'
        )
        # Collect the call configuration once, then invoke the layer.
        nms_kwargs = dict(
            bboxes=bboxes_var,
            scores=scores_var,
            score_threshold=0.5,
            nms_top_k=400,
            nms_threshold=0.3,
            keep_top_k=200,
            normalized=False,
        )
        fluid.layers.locality_aware_nms(**nms_kwargs)
class TestLocalityAwareNMSError(unittest.TestCase):
    """Input validation: each malformed argument must raise TypeError."""

    def test_error(self):
        boxes = fluid.data(name='bboxes', shape=[None, 81, 8], dtype='float32')
        scores = fluid.data(name='scores', shape=[None, 1, 81], dtype='float32')
        boxes_int = fluid.data(
            name='bboxes_int', shape=[None, 81, 8], dtype='int32'
        )
        scores_int = fluid.data(
            name='scores_int', shape=[None, 1, 81], dtype='int32'
        )

        # Table of bad positional-argument sets; each entry feeds
        # (bboxes, scores, score_threshold, nms_top_k, keep_top_k, ...)
        # and must be rejected with a TypeError.
        bad_calls = [
            # bboxes / scores must be Variable, not plain python lists
            ([1, 2], scores, 0.5, 400, 200),
            (boxes, [1, 2], 0.5, 400, 200),
            # dtype of bboxes / scores must be float32 or float64
            (boxes_int, scores, 0.5, 400, 200),
            (boxes, scores_int, 0.5, 400, 200),
            # score_threshold must be float (int rejected)
            (boxes, scores, int(1), 400, 200),
            # nms_top_k must be int (float rejected)
            (boxes, scores, 0.5, 400.5, 200),
            # keep_top_k must be int (float rejected)
            (boxes, scores, 0.5, 400, 200.5),
            # nms_threshold must be float (int rejected)
            (boxes, scores, 0.5, 400, 200, int(0)),
            # nms_eta must be float (int rejected)
            (boxes, scores, 0.5, 400, 200, 0.5, int(1)),
            # background_label must be int (float rejected)
            (boxes, scores, 0.5, 400, 200, 0.5, 1.0, 1.5),
        ]
        for args in bad_calls:
            self.assertRaises(
                TypeError, fluid.layers.locality_aware_nms, *args
            )
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
......@@ -334,14 +334,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_bboxes_Variable():
# the bboxes type must be Variable
fluid.layers.matrix_nms(
bboxes=boxes_np,
scores=scores_data,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
paddle.vision.ops.matrix_nms(
bboxes=boxes_np,
scores=scores_data,
......@@ -353,14 +345,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_scores_Variable():
# the scores type must be Variable
fluid.layers.matrix_nms(
bboxes=boxes_data,
scores=scores_np,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
paddle.vision.ops.matrix_nms(
bboxes=boxes_data,
scores=scores_np,
......@@ -372,17 +356,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_empty():
# when all score are lower than threshold
try:
fluid.layers.matrix_nms(
bboxes=boxes_data,
scores=scores_data,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
except Exception as e:
self.fail(e)
try:
paddle.vision.ops.matrix_nms(
bboxes=boxes_data,
......@@ -397,17 +370,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_coverage():
# cover correct workflow
try:
fluid.layers.matrix_nms(
bboxes=boxes_data,
scores=scores_data,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
except Exception as e:
self.fail(e)
try:
paddle.vision.ops.matrix_nms(
bboxes=boxes_data,
......
......@@ -19,14 +19,8 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import (
Program,
_non_static_mode,
in_dygraph_mode,
program_guard,
)
from paddle.fluid import _non_static_mode, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
......@@ -738,39 +732,6 @@ class TestMulticlassNMS2LoDNoOutput(TestMulticlassNMS2LoDInput):
self.score_threshold = 2.0
class TestMulticlassNMSError(unittest.TestCase):
    """multiclass_nms must reject numpy arrays where Variables are required."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            num_boxes = 1200
            batch = 7
            num_classes = 21
            box_size = 4

            # Raw numpy inputs (invalid: the layer requires Variables).
            boxes_np = np.random.random(
                (num_boxes, num_classes, box_size)
            ).astype('float32')
            raw = np.random.random((batch * num_boxes, num_classes)).astype(
                'float32'
            )
            raw = np.apply_along_axis(softmax, 1, raw)
            raw = np.reshape(raw, (batch, num_boxes, num_classes))
            scores_np = np.transpose(raw, (0, 2, 1))

            # Proper graph Variables of the matching shapes.
            boxes_data = fluid.data(
                name='bboxes',
                shape=[num_boxes, num_classes, box_size],
                dtype='float32',
            )
            scores_data = fluid.data(
                name='scores',
                shape=[batch, num_classes, num_boxes],
                dtype='float32',
            )

            # bboxes must be a Variable
            self.assertRaises(
                TypeError,
                lambda: fluid.layers.multiclass_nms(
                    bboxes=boxes_np, scores=scores_data
                ),
            )
            # scores must be a Variable
            self.assertRaises(
                TypeError,
                lambda: fluid.layers.multiclass_nms(
                    bboxes=boxes_data, scores=scores_np
                ),
            )
class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
def setUp(self):
self.python_api = multiclass_nms3
......
......@@ -21,8 +21,6 @@ from test_anchor_generator_op import anchor_generator_in_python
from test_multiclass_nms_op import nms
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
......@@ -508,132 +506,6 @@ class TestRetinanetDetectionOutOpNo5(TestRetinanetDetectionOutOp1):
self.layer_w.append(2 ** (num_levels - i))
class TestRetinanetDetectionOutOpError(unittest.TestCase):
    """Input validation for retinanet_detection_output: every list argument
    must contain float32/float64 Variables, and im_info must be a Variable."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            # Two pyramid levels of boxes/scores/anchors plus image info.
            bboxes_low1 = fluid.data(
                name='bboxes_low1', shape=[1, 44, 4], dtype='float32'
            )
            bboxes_high1 = fluid.data(
                name='bboxes_high1', shape=[1, 11, 4], dtype='float32'
            )
            scores_low1 = fluid.data(
                name='scores_low1', shape=[1, 44, 10], dtype='float32'
            )
            scores_high1 = fluid.data(
                name='scores_high1', shape=[1, 11, 10], dtype='float32'
            )
            anchors_low1 = fluid.data(
                name='anchors_low1', shape=[44, 4], dtype='float32'
            )
            anchors_high1 = fluid.data(
                name='anchors_high1', shape=[11, 4], dtype='float32'
            )
            im_info1 = fluid.data(
                name="im_info1", shape=[1, 3], dtype='float32'
            )

            # The `bboxes` must be list, each element must be Variable and
            # its Tensor data type must be one of float32 and float64.
            def test_bboxes_type():
                fluid.layers.retinanet_detection_output(
                    bboxes=bboxes_low1,
                    scores=[scores_low1, scores_high1],
                    anchors=[anchors_low1, anchors_high1],
                    im_info=im_info1,
                )

            self.assertRaises(TypeError, test_bboxes_type)

            def test_bboxes_tensor_dtype():
                bboxes_high2 = fluid.data(
                    name='bboxes_high2', shape=[1, 11, 4], dtype='int32'
                )
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_high2, 5],
                    scores=[scores_low1, scores_high1],
                    anchors=[anchors_low1, anchors_high1],
                    im_info=im_info1,
                )

            self.assertRaises(TypeError, test_bboxes_tensor_dtype)

            # The `scores` must be list, each element must be Variable and its
            # Tensor data type must be one of float32 and float64.
            def test_scores_type():
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_low1, bboxes_high1],
                    scores=scores_low1,
                    anchors=[anchors_low1, anchors_high1],
                    im_info=im_info1,
                )

            self.assertRaises(TypeError, test_scores_type)

            def test_scores_tensor_dtype():
                scores_high2 = fluid.data(
                    name='scores_high2', shape=[1, 11, 10], dtype='int32'
                )
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_low1, bboxes_high1],
                    scores=[scores_high2, 5],
                    anchors=[anchors_low1, anchors_high1],
                    im_info=im_info1,
                )

            self.assertRaises(TypeError, test_scores_tensor_dtype)

            # The `anchors` must be list, each element must be Variable and its
            # Tensor data type must be one of float32 and float64.
            def test_anchors_type():
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_low1, bboxes_high1],
                    scores=[scores_low1, scores_high1],
                    anchors=anchors_low1,
                    im_info=im_info1,
                )

            self.assertRaises(TypeError, test_anchors_type)

            def test_anchors_tensor_dtype():
                anchors_high2 = fluid.data(
                    name='anchors_high2', shape=[11, 4], dtype='int32'
                )
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_low1, bboxes_high1],
                    scores=[scores_low1, scores_high1],
                    anchors=[anchors_high2, 5],
                    im_info=im_info1,
                )

            self.assertRaises(TypeError, test_anchors_tensor_dtype)

            # The `im_info` must be Variable and the data type of `im_info`
            # Tensor must be one of float32 and float64.
            def test_iminfo_type():
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_low1, bboxes_high1],
                    scores=[scores_low1, scores_high1],
                    anchors=[anchors_low1, anchors_high1],
                    im_info=[2, 3, 4],
                )

            self.assertRaises(TypeError, test_iminfo_type)

            def test_iminfo_tensor_dtype():
                im_info2 = fluid.data(
                    name='im_info2', shape=[1, 3], dtype='int32'
                )
                fluid.layers.retinanet_detection_output(
                    bboxes=[bboxes_low1, bboxes_high1],
                    scores=[scores_low1, scores_high1],
                    anchors=[anchors_low1, anchors_high1],
                    im_info=im_info2,
                )

            self.assertRaises(TypeError, test_iminfo_tensor_dtype)
if __name__ == '__main__':
    # OpTest-based cases require static-graph mode.
    paddle.enable_static()
    unittest.main()
......@@ -16,9 +16,6 @@ import unittest
from math import floor, sqrt
import numpy as np
from op_test import OpTest
from paddle import fluid
def gt_e(a, b):
......@@ -261,166 +258,5 @@ def roi_transform(
return out.astype("float32"), mask, matrix
class TestROIPoolOp(OpTest):
    """Checks the roi_perspective_transform op against the python reference
    (``roi_transform`` defined earlier in this file) and its input checks."""

    def set_data(self):
        self.init_test_case()
        self.make_rois()
        self.inputs = {'X': self.x, 'ROIs': (self.rois, self.rois_lod)}
        self.attrs = {
            'spatial_scale': self.spatial_scale,
            'transformed_height': self.transformed_height,
            'transformed_width': self.transformed_width,
        }
        # Expected outputs from the python reference implementation.
        out, mask, transform_matrix = roi_transform(
            self.x,
            self.rois,
            self.rois_lod,
            self.transformed_height,
            self.transformed_width,
            self.spatial_scale,
        )
        self.outputs = {
            'Out': out,
            'Mask': mask,
            'TransformMatrix': transform_matrix,
        }

    def init_test_case(self):
        self.batch_size = 2
        self.channels = 2
        self.height = 8
        self.width = 8
        # n, c, h, w
        self.x_dim = (self.batch_size, self.channels, self.height, self.width)
        self.spatial_scale = 1.0 / 2.0
        self.transformed_height = 2
        self.transformed_width = 3
        self.x = np.random.random(self.x_dim).astype('float32')

    def make_rois(self):
        """Generate (bno + 1) random quadrilateral RoIs per image, each given
        as 4 corner points (x1..y4) in the unscaled input space."""
        rois = []
        self.rois_lod = [[]]
        for bno in range(self.batch_size):
            self.rois_lod[0].append(bno + 1)
            for i in range(bno + 1):
                x1 = np.random.randint(
                    0, self.width // self.spatial_scale - self.transformed_width
                )
                y1 = np.random.randint(
                    0,
                    self.height // self.spatial_scale - self.transformed_height,
                )
                x2 = np.random.randint(
                    x1 + self.transformed_width,
                    self.width // self.spatial_scale,
                )
                y2 = np.random.randint(
                    0,
                    self.height // self.spatial_scale - self.transformed_height,
                )
                x3 = np.random.randint(
                    x1 + self.transformed_width,
                    self.width // self.spatial_scale,
                )
                y3 = np.random.randint(
                    y1 + self.transformed_height,
                    self.height // self.spatial_scale,
                )
                x4 = np.random.randint(
                    0, self.width // self.spatial_scale - self.transformed_width
                )
                y4 = np.random.randint(
                    y1 + self.transformed_height,
                    self.height // self.spatial_scale,
                )
                roi = [x1, y1, x2, y2, x3, y3, x4, y4]
                rois.append(roi)
        self.rois_num = len(rois)
        self.rois = np.array(rois).astype("float32")

    def setUp(self):
        self.op_type = "roi_perspective_transform"
        self.set_data()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # The grad kernel reads these index/weight side outputs; zeros of the
        # matching shape are sufficient for the check.
        # NOTE(review): np.product is deprecated in modern numpy (use
        # np.prod); kept byte-identical here.
        self.outputs['Out2InIdx'] = np.zeros(
            [np.product(self.outputs['Out'].shape), 4]
        ).astype("int32")
        self.outputs['Out2InWeights'] = np.zeros(
            [np.product(self.outputs['Out'].shape), 4]
        ).astype("float32")
        self.check_grad(['X'], 'Out')

    def test_errors(self):
        """Each malformed argument must be rejected with a TypeError."""
        x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
        rois = fluid.data(
            name='rois', shape=[None, 8], lod_level=1, dtype='float32'
        )
        x_int = fluid.data(
            name='x_int', shape=[100, 256, 28, 28], dtype='int32'
        )
        rois_int = fluid.data(
            name='rois_int', shape=[None, 8], lod_level=1, dtype='int32'
        )
        x_tmp = [1, 2]
        rois_tmp = [1, 2]

        # type of input and rois must be variable
        self.assertRaises(
            TypeError, fluid.layers.roi_perspective_transform, x_tmp, rois, 7, 7
        )
        self.assertRaises(
            TypeError, fluid.layers.roi_perspective_transform, x, rois_tmp, 7, 7
        )
        # dtype of input and rois must be float32
        self.assertRaises(
            TypeError, fluid.layers.roi_perspective_transform, x_int, rois, 7, 7
        )
        self.assertRaises(
            TypeError, fluid.layers.roi_perspective_transform, x, rois_int, 7, 7
        )
        height = 7.5
        width = 7.5
        # type of transformed_height and transformed_width must be int
        self.assertRaises(
            TypeError,
            fluid.layers.roi_perspective_transform,
            x,
            rois,
            height,
            7,
        )
        self.assertRaises(
            TypeError, fluid.layers.roi_perspective_transform, x, rois, 7, width
        )
        scale = int(2)
        # type of spatial_scale must be float
        self.assertRaises(
            TypeError,
            fluid.layers.roi_perspective_transform,
            x,
            rois,
            7,
            7,
            scale,
        )
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
......@@ -54,7 +54,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
'reduce_max',
'reduce_min',
'reshape2',
'roi_perspective_transform',
'row_conv',
'scatter',
'sequence_conv',
......
......@@ -32,7 +32,6 @@ from .common import py_func # noqa: F401
from ...tensor.creation import create_parameter # noqa: F401
from ...fluid.layers import conv2d # noqa: F401
from ...fluid.layers import layer_norm # noqa: F401
from ...fluid.layers import multi_box_head # noqa: F401
from .loss import nce # noqa: F401
from .common import prelu # noqa: F401
from ...fluid.layers import row_conv # noqa: F401
......@@ -76,8 +75,8 @@ __all__ = [ # noqa
'group_norm',
'instance_norm',
'layer_norm',
'multi_box_head',
'nce',
'prelu',
'py_func',
'row_conv',
'spectral_norm',
......
......@@ -799,7 +799,7 @@ def conv3d(
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d, whose data type is
A Tensor representing the conv3d, whose data type is
the same with input. If act is None, the tensor variable storing the
convolution result, and if act is not None, the tensor variable storing
convolution and non-linearity activation result.
......@@ -1190,7 +1190,7 @@ def conv2d_transpose(
helper = LayerHelper(op_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Variable")
raise TypeError("Input of conv2d_transpose must be Tensor")
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
......@@ -1280,7 +1280,7 @@ def conv2d_transpose(
output_size
):
raise ValueError(
"filter_size should not be None when output_size is Variable or contain Variable in static mode."
"filter_size should not be None when output_size is Tensor or contain Tensor in static mode."
)
else:
output_size = utils.convert_shape_to_list(output_size)
......@@ -1497,7 +1497,7 @@ def conv3d_transpose(
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d_transpose, whose data
A Tensor representing the conv3d_transpose, whose data
type is the same with input and shape is (num_batches, channels, out_d, out_h,
out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
variable storing the transposed convolution result, and if act is not None, the tensor
......@@ -1546,7 +1546,7 @@ def conv3d_transpose(
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Variable")
raise TypeError("Input of conv3d_transpose must be Tensor")
if len(input.shape) != 5:
raise ValueError(
"Input should be 5D tensor, but received input with the shape of {}".format(
......@@ -1785,7 +1785,7 @@ def deformable_conv(
float32, float64.
offset (Tensor): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
Mask (Variable, Optional): The input mask of deformable convolution layer.
Mask (Tensor, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filter. It is as same as the output
......@@ -1876,9 +1876,9 @@ def deformable_conv(
dtype = helper.input_dtype()
if not isinstance(input, paddle.static.Variable):
raise TypeError("Input of deformable_conv must be Variable")
raise TypeError("Input of deformable_conv must be Tensor")
if not isinstance(offset, paddle.static.Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
raise TypeError("Input Offset of deformable_conv must be Tensor")
if groups is None:
num_filter_channels = num_channels
......@@ -2155,9 +2155,9 @@ def bilinear_tensor_product(
- :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`.
Args:
x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
x (Tensor): 2-D input tensor with shape [batch_size, M]. Data type
is float32 or float64.
y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
y (Tensor): 2-D input tensor with shape [batch_size, N]. Data type
should be same as **x**.
size (int): The dimension of this layer.
act (str|None): Activation to be applied to the output of this layer. Default None.
......@@ -2832,7 +2832,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Variable should be passed in the form of tuple(Variale) or list[Variale]
# Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)
exe=paddle.static.Executor(paddle.CPUPlace())
exe.run(start_program)
......@@ -2857,7 +2857,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
elif isinstance(x, tuple):
x = list(x)
elif not isinstance(x, (list, tuple, Variable)):
raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
raise TypeError('Input must be Tensor/list(Tensor)/tuple(Tensor)')
check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
if out is None:
out_list = []
......@@ -2868,9 +2868,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
elif isinstance(out, list):
out_list = out
else:
raise TypeError(
'Output must be Variable/list(Variable)/tuple(Variable)'
)
raise TypeError('Output must be Tensor/list(Tensor)/tuple(Tensor)')
fwd_func_id = PyFuncRegistry(func).id
bwd_func_id = (
......@@ -2895,7 +2893,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
for v in skip_vars_in_backward_input:
if v.name not in fwd_in_out:
raise ValueError(
'Variable {} is not found in forward inputs and outputs'.format(
'Tensor {} is not found in forward inputs and outputs'.format(
v.name
)
)
......
......@@ -702,7 +702,6 @@ SECONDARY_HIGH_PARALLEL_JOB_NEW = [
'test_uniform_random_bf16_op',
'test_custom_concat',
'test_weight_quantization_mobilenetv1',
'test_retinanet_detection_output',
'test_concat_mkldnn_op',
'test_gaussian_random_mkldnn_op',
'test_parallel_executor_seresnext_with_reduce_cpu',
......@@ -786,7 +785,6 @@ FOURTH_HIGH_PARALLEL_JOB_NEW = [
'test_lr_scheduler',
'test_generate_proposals_op',
'test_masked_select_op',
'test_trt_anchor_generator_op',
'test_imperative_ocr_attention_model',
'test_sentiment',
'test_chunk_op',
......@@ -1748,7 +1746,6 @@ CPU_PARALLEL_JOB = [
'test_rpn_target_assign_op',
'test_row_conv',
'test_rnn_memory_helper_op',
'test_retinanet_detection_output',
'test_reshape_transpose_matmul_mkldnn_fuse_pass',
'test_reshape_bf16_op',
'test_require_version',
......@@ -2506,7 +2503,6 @@ TETRAD_PARALLEL_JOB = [
'test_where_index',
'test_variance_layer',
'test_unsqueeze_op',
'test_trt_anchor_generator_op',
'test_translated_layer',
'test_tensor_shape',
'test_slice',
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册