Unverified commit a375f671, authored by wangguanzhong, committed by GitHub

add distribute_fpn_proposals (#1582)

Parent: 1e417485
@@ -19,6 +19,7 @@ from paddle.fluid.dygraph.base import to_variable
from ppdet.core.workspace import register, serializable
from ppdet.py_op.target import generate_rpn_anchor_target, generate_proposal_target, generate_mask_target
from ppdet.py_op.post_process import bbox_post_process
+from . import ops

@register
@@ -308,7 +309,7 @@ class RoIExtractor(object):
        offset = 2
        k_min = self.start_level + offset
        k_max = self.end_level + offset
-        rois_dist, restore_index, rois_num_dist = fluid.layers.distribute_fpn_proposals(
+        rois_dist, restore_index, rois_num_dist = ops.distribute_fpn_proposals(
            roi,
            k_min,
            k_max,
...
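Side note on this hunk: the call now goes through the ppdet ops wrapper instead of fluid.layers. A minimal sketch of the level bookkeeping, assuming start_level=0 and end_level=3 (these values are not shown in the excerpt):

    # Hypothetical values; start_level / end_level are not visible in this excerpt.
    start_level, end_level, offset = 0, 3, 2
    k_min = start_level + offset   # 2
    k_max = end_level + offset     # 5
    # k_min / k_max become the min_level / max_level arguments of
    # ops.distribute_fpn_proposals, i.e. the same 2..5 range exercised with
    # refer_level=4, refer_scale=224 in the tests further down.
    print(k_min, k_max)            # 2 5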
@@ -34,7 +34,7 @@ __all__ = [
    #'box_coder',
    #'yolo_box',
    #'multiclass_nms',
-    #'distribute_fpn_proposals',
+    'distribute_fpn_proposals',
    'collect_fpn_proposals',
    #'matrix_nms',
]
@@ -52,11 +52,13 @@ def collect_fpn_proposals(multi_rois,
    **This OP only supports LoDTensor as input**. Concat multi-level RoIs
    (Region of Interest) and select N RoIs with respect to multi_scores.
    This operation performs the following steps:

    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level
    2. Concat multi-level RoIs and scores
    3. Sort scores and select post_nms_top_n scores
    4. Gather RoIs by selected indices from scores
    5. Re-sort RoIs by corresponding batch_id

    Args:
        multi_rois(list): List of RoIs to collect. Element in list is 2-D
            LoDTensor with shape [N, 4] and data type is float32 or float64,
@@ -76,13 +78,17 @@ def collect_fpn_proposals(multi_rois,
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set it;
            the default is None.

    Returns:
        Variable:

        fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
            float32 or float64. Selected RoIs.

        rois_num(Tensor): 1-D Tensor containing the number of RoIs in each
            image. The shape is [B] and the data type is int32, where B is the
            number of images.

    Examples:
        .. code-block:: python
@@ -92,11 +98,12 @@ def collect_fpn_proposals(multi_rois,
            multi_rois = []
            multi_scores = []
            for i in range(4):
-                multi_rois.append(fluid.data(
+                multi_rois.append(paddle.static.data(
                    name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
            for i in range(4):
-                multi_scores.append(fluid.data(
+                multi_scores.append(paddle.static.data(
                    name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
            fpn_rois = fluid.layers.collect_fpn_proposals(
                multi_rois=multi_rois,
                multi_scores=multi_scores,
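For readers skimming the diff, the five steps listed in the collect_fpn_proposals docstring above can be illustrated with a small NumPy sketch (a single image, made-up data, no LoD bookkeeping; this is not the op's actual kernel):

    import numpy as np

    np.random.seed(0)
    num_level = 4                                   # step 1: max_level - min_level
    multi_rois = [np.random.rand(8, 4).astype('float32') for _ in range(num_level)]
    multi_scores = [np.random.rand(8, 1).astype('float32') for _ in range(num_level)]
    post_nms_top_n = 10

    rois = np.concatenate(multi_rois, axis=0)       # step 2: concat multi-level RoIs
    scores = np.concatenate(multi_scores, axis=0)   # step 2: concat multi-level scores
    order = np.argsort(-scores[:, 0])               # step 3: sort scores descending
    keep = order[:post_nms_top_n]                   # step 3: keep post_nms_top_n
    fpn_rois = rois[keep]                           # step 4: gather RoIs by selected indices
    # step 5 (re-sort by batch_id) is a no-op here because there is only one image.
    print(fpn_rois.shape)                           # (10, 4)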
@@ -141,3 +148,126 @@ def collect_fpn_proposals(multi_rois,
    if rois_num_per_level is not None:
        return output_rois, rois_num
    return output_rois

def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
rois_num=None,
name=None):
"""
    **This op only takes LoDTensor as input.** In Feature Pyramid Networks
    (FPN) models, all proposals need to be distributed to different FPN
    levels according to the scale of each proposal, the referring scale and
    the referring level. In addition, to restore the original order of the
    proposals, this op returns an array that records the original index of
    each RoI in the current proposals. The FPN level of each RoI is computed
    as follows:

    .. math::

        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\
        level &= floor(\log(\frac{roi\_scale}{refer\_scale}) + refer\_level)

    where BBoxArea is a function that computes the area of each RoI.
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
        rois_num(Tensor): 1-D Tensor containing the number of RoIs in each
            image. The shape is [B] and the data type is int32, where B is the
            number of images. If it is not None, the op additionally returns a
            list of 1-D Tensors, each holding the number of output RoIs per
            image on the corresponding level, with shape [B]. None by default.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set it;
            the default is None.
    Returns:
        Tuple:

        multi_rois(List): A list of 2-D LoDTensors with shape [M, 4] and data
            type float32 or float64, containing the proposals of each FPN
            level. The length of the list is max_level - min_level + 1.

        restore_ind(Variable): A 2-D Tensor with shape [N, 1], where N is the
            total number of RoIs. The data type is int32. It is used to
            restore the original order of fpn_rois.

        rois_num_per_level(List): A list of 1-D Tensors, each holding the
            number of RoIs per image on the corresponding level. The shape is
            [B] and the data type is int32, where B is the number of images.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
paddle.enable_static()
fpn_rois = paddle.static.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
num_lvl = max_level - min_level + 1
if in_dygraph_mode():
assert rois_num is not None, "rois_num should not be None in dygraph mode."
attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
refer_level, 'refer_scale', refer_scale)
multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
return multi_rois, restore_ind, rois_num_per_level
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
inputs = {'FpnRois': fpn_rois}
outputs = {
'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind,
}
if rois_num is not None:
inputs['RoisNum'] = rois_num
rois_num_per_level = [
helper.create_variable_for_type_inference(dtype='int32')
for i in range(num_lvl)
]
outputs['MultiLevelRoIsNum'] = rois_num_per_level
helper.append_op(
type='distribute_fpn_proposals',
inputs=inputs,
outputs=outputs,
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
if rois_num is not None:
return multi_rois, restore_ind, rois_num_per_level
return multi_rois, restore_ind
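To make the level-assignment formula in the docstring above concrete, here is a small NumPy sketch. It assumes log base 2, as in the FPN paper; the op's exact rounding and clipping behaviour may differ slightly:

    import numpy as np

    def assign_fpn_level(fpn_rois, min_level=2, max_level=5,
                         refer_level=4, refer_scale=224):
        # roi_scale = sqrt(BBoxArea(fpn_roi))
        w = fpn_rois[:, 2] - fpn_rois[:, 0]
        h = fpn_rois[:, 3] - fpn_rois[:, 1]
        roi_scale = np.sqrt(w * h)
        # level = floor(log2(roi_scale / refer_scale) + refer_level), clipped to range
        level = np.floor(np.log2(roi_scale / refer_scale) + refer_level)
        return np.clip(level, min_level, max_level).astype('int32')

    rois = np.array([[0., 0., 31., 31.],      # small box -> clipped up to level 2
                     [0., 0., 511., 511.]],   # large box -> level 5
                    dtype='float32')
    print(assign_fpn_level(rois))             # [2 5]

After levels are assigned, the op groups the RoIs per level and also emits restore_ind, so downstream code (such as the RoIExtractor hunk above) can put per-level results back into the original order.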
@@ -154,5 +154,68 @@ class TestCollectFpnProposals(LayerTest):
            post_nms_top_n=2000)

class TestDistributeFpnProposals(LayerTest):
def test_distribute_fpn_proposals(self):
rois_np = np.random.rand(10, 4).astype('float32')
rois_num_np = np.array([4, 6]).astype('int32')
with self.static_graph():
rois = paddle.static.data(
name='rois', shape=[10, 4], dtype='float32')
rois_num = paddle.static.data(
name='rois_num', shape=[None], dtype='int32')
multi_rois, restore_ind, rois_num_per_level = ops.distribute_fpn_proposals(
fpn_rois=rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224,
rois_num=rois_num)
fetch_list = multi_rois + [restore_ind] + rois_num_per_level
output_stat = self.get_static_graph_result(
feed={'rois': rois_np,
'rois_num': rois_num_np},
fetch_list=fetch_list,
with_lod=True)
output_stat_np = []
for output in output_stat:
output_np = np.array(output)
if len(output_np) > 0:
output_stat_np.append(output_np)
with self.dynamic_graph():
rois_dy = base.to_variable(rois_np)
rois_num_dy = base.to_variable(rois_num_np)
multi_rois_dy, restore_ind_dy, rois_num_per_level_dy = ops.distribute_fpn_proposals(
fpn_rois=rois_dy,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224,
rois_num=rois_num_dy)
output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
output_dy_np = []
for output in output_dy:
output_np = output.numpy()
if len(output_np) > 0:
output_dy_np.append(output_np)
for res_stat, res_dy in zip(output_stat_np, output_dy_np):
self.assertTrue(np.array_equal(res_stat, res_dy))
def test_distribute_fpn_proposals_error(self):
program = Program()
with program_guard(program):
fpn_rois = paddle.static.data(
name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
self.assertRaises(
TypeError,
ops.distribute_fpn_proposals,
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
if __name__ == '__main__':
    unittest.main()