Unverified commit e577040e, authored by HydrogenSulfate, committed via GitHub

Remove/move 16 fluid APIs (#48377)

* remove density_prior_box

* remove anchor_generator

* remove roi_perspective_transform

* remove generate_proposal_labels

* remove generate_mask_labels

* remove generate_proposals

* remove box_clip

* remove retinanet_detection_output

* remove multiclass_nms

* remove locality_aware_nms

* remove matrix_nms

* remove distribute_fpn_proposals

* remove box_decoder_and_assign

* remove collect_fpn_proposals

* remove 2 trt files

* move prior_box to static/nn/common.py

* move multi_box_head to static/nn/common.py

* fix for CI/CE

* remove retinanet_detection_output

* restore compile_vs_runtime_white_list.py

* restore test_retinanet_detection_output to white list

* replace nn.flatten by paddle.flatten, and fix doc for retinanet_target_assign

* add enable_static in demo and fix bug

* remove roi_perspective_transform in test_layers

* remove multi_box_head

* change self.multiclass_nms to _legacy_C_ops.multiclass_nms

* empty commit

* empty commit

* check code style

* fix prior_box

* fix CI

* remove redundant prior_box in detection.py

* fix docs

* remove detection

* fix prior_box en doc

* delete prior_box in common

* remove prior_box from __init__.py
Parent 04dd2861
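Most of the removed fluid detection ops already have public counterparts under the paddle namespace (the updated tests below, for example, keep their paddle.vision.ops.matrix_nms calls), while a few call sites fall back to the raw _legacy_C_ops bindings. The following is a minimal static-graph migration sketch, not code from this diff: the shapes, thresholds, and variable names are illustrative placeholders.

import paddle
from paddle.vision.ops import matrix_nms

paddle.enable_static()

# NMS-style ops take bboxes of shape [N, M, 4] and scores of shape [N, C, M];
# the concrete sizes here are placeholders.
boxes = paddle.static.data(name='bboxes', shape=[-1, 1200, 4], dtype='float32')
scores = paddle.static.data(name='scores', shape=[-1, 21, 1200], dtype='float32')

# Before this PR: out = fluid.layers.matrix_nms(bboxes=boxes, scores=scores, ...)
# The updated tests exercise the public API instead:
result = matrix_nms(
    bboxes=boxes,
    scores=scores,
    score_threshold=0.5,
    post_threshold=0.1,
    nms_top_k=400,
    keep_top_k=200,
)
# Depending on the return_index / return_rois_num flags, the call may return
# auxiliary tensors alongside the kept detections.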
......@@ -21,7 +21,6 @@ from .framework import Program, Variable, program_guard
from . import unique_name
from .layer_helper import LayerHelper
from .initializer import Constant
from .layers import detection
def _clone_var_(block, var):
......
......@@ -24,8 +24,6 @@ from . import math_op_patch
from .math_op_patch import *
from . import loss
from .loss import *
from . import detection
from .detection import *
from .learning_rate_scheduler import *
from .collective import *
from .sequence_lod import *
......@@ -36,7 +34,6 @@ __all__ += nn.__all__
__all__ += io.__all__
__all__ += tensor.__all__
__all__ += control_flow.__all__
__all__ += detection.__all__
__all__ += learning_rate_scheduler.__all__
__all__ += sequence_lod.__all__
__all__ += loss.__all__
......
This diff is collapsed.
......@@ -23,7 +23,6 @@ from .initializer import Constant
from . import unique_name
from .framework import Program, Variable, program_guard
from . import layers
from .layers import detection
__all__ = [
'MetricBase',
......
......@@ -19,6 +19,7 @@ from darknet import ConvBNLayer, DarkNet53_conv_body
import paddle
import paddle.fluid as fluid
from paddle import _legacy_C_ops
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.jit.api import declarative
......@@ -351,7 +352,7 @@ class YOLOv3(fluid.dygraph.Layer):
yolo_boxes = fluid.layers.concat(self.boxes, axis=1)
yolo_scores = fluid.layers.concat(self.scores, axis=2)
pred = fluid.layers.multiclass_nms(
pred = _legacy_C_ops.multiclass_nms(
bboxes=yolo_boxes,
scores=yolo_scores,
score_threshold=cfg.valid_thresh,
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.static.nn as nn
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TRTAnchorGeneratorBaseTest(InferencePassTest):
def setUp(self):
self.bs = 1
self.channel = 16
self.height = 32
self.width = 32
self.anchor_sizes = [64.0, 128.0, 256.0, 512.0]
self.aspect_ratios = [0.5, 1.0, 2.0]
self.variance = [0.1, 0.1, 0.2, 0.2]
self.stride = [8.0, 8.0]
self.precision = AnalysisConfig.Precision.Float32
self.serialize = False
self.enable_trt = True
self.feeds = {
'data': np.random.random(
[self.bs, self.channel, self.height, self.width]
).astype('float32'),
}
def build(self):
min_graph_size = 3 if self.dynamic_shape_params is not None else 2
self.trt_parameters = InferencePassTest.TensorRTParam(
1 << 30,
self.bs,
min_graph_size,
self.precision,
self.serialize,
False,
)
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name='data',
shape=[-1, self.channel, self.height, self.width],
dtype='float32',
)
anchor, var = fluid.layers.detection.anchor_generator(
data,
anchor_sizes=self.anchor_sizes,
aspect_ratios=self.aspect_ratios,
variance=self.variance,
stride=self.stride,
)
if self.dynamic_shape_params is not None:
anchor = paddle.transpose(anchor, [2, 3, 0, 1])
out = nn.batch_norm(anchor, is_test=True)
self.fetch_list = [out, var]
def run_test(self):
self.build()
self.check_output()
def set_dynamic(self):
self.dynamic_shape_params = InferencePassTest.DynamicShapeParam(
{
'data': [
self.bs,
self.channel,
self.height // 2,
self.width // 2,
]
},
{'data': [self.bs, self.channel, self.height, self.width]},
{'data': [self.bs, self.channel, self.height, self.width]},
False,
)
def test_base(self):
self.run_test()
def test_fp16(self):
self.precision = AnalysisConfig.Precision.Half
self.run_test()
def test_serialize(self):
self.serialize = True
self.run_test()
def test_dynamic(self):
self.set_dynamic()
self.run_test()
def test_dynamic_fp16(self):
self.precision = AnalysisConfig.Precision.Half
self.set_dynamic()
self.run_test()
def test_dynamic_serialize(self):
self.serialize = True
self.set_dynamic()
self.run_test()
def test_dynamic_fp16_serialize(self):
self.serialize = True
self.precision = AnalysisConfig.Precision.Half
self.set_dynamic()
self.run_test()
def check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
atol = 1e-5
if self.trt_parameters.precision == AnalysisConfig.Precision.Half:
atol = 1e-3
self.check_output_with_option(use_gpu, atol, flatten=True)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
)
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.static.nn as nn
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TensorRTMultiClassNMSTest(InferencePassTest):
def setUp(self):
self.enable_trt = True
self.enable_tensorrt_varseqlen = True
self.precision = AnalysisConfig.Precision.Float32
self.serialize = False
self.bs = 1
self.background_label = -1
self.score_threshold = 0.5
self.nms_top_k = 8
self.nms_threshold = 0.3
self.keep_top_k = 8
self.normalized = False
self.num_classes = 8
self.num_boxes = 8
self.trt_parameters = InferencePassTest.TensorRTParam(
1 << 30, self.bs, 2, self.precision, self.serialize, False
)
def build(self):
with fluid.program_guard(self.main_program, self.startup_program):
boxes = fluid.data(
name='bboxes', shape=[-1, self.num_boxes, 4], dtype='float32'
)
scores = fluid.data(
name='scores',
shape=[-1, self.num_classes, self.num_boxes],
dtype='float32',
)
multiclass_nms_out = fluid.layers.multiclass_nms(
bboxes=boxes,
scores=scores,
background_label=self.background_label,
score_threshold=self.score_threshold,
nms_top_k=self.nms_top_k,
nms_threshold=self.nms_threshold,
keep_top_k=self.keep_top_k,
normalized=self.normalized,
)
multiclass_nms_out = multiclass_nms_out + 1.0
multiclass_nms_out = paddle.reshape(
multiclass_nms_out,
[self.bs, 1, self.keep_top_k, 6],
name='reshape',
)
out = nn.batch_norm(multiclass_nms_out, is_test=True)
boxes_data = (
np.arange(self.num_boxes * 4)
.reshape([self.bs, self.num_boxes, 4])
.astype('float32')
)
scores_data = (
np.arange(1 * self.num_classes * self.num_boxes)
.reshape([self.bs, self.num_classes, self.num_boxes])
.astype('float32')
)
self.feeds = {
'bboxes': boxes_data,
'scores': scores_data,
}
self.fetch_list = [out]
def run_test(self):
self.build()
self.check_output()
def run_test_all(self):
precision_opt = [
AnalysisConfig.Precision.Float32,
AnalysisConfig.Precision.Half,
]
serialize_opt = [False, True]
max_shape = {
'bboxes': [self.bs, self.num_boxes, 4],
'scores': [self.bs, self.num_classes, self.num_boxes],
}
opt_shape = max_shape
dynamic_shape_opt = [
None,
InferencePassTest.DynamicShapeParam(
{'bboxes': [1, 1, 4], 'scores': [1, 1, 1]},
max_shape,
opt_shape,
False,
),
]
for precision, serialize, dynamic_shape in itertools.product(
precision_opt, serialize_opt, dynamic_shape_opt
):
self.precision = precision
self.serialize = serialize
self.dynamic_shape_params = dynamic_shape
self.build()
self.check_output()
def check_output(self):
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(use_gpu)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
)
def test_base(self):
self.run_test()
def test_fp16(self):
self.precision = AnalysisConfig.Precision.Half
self.run_test()
def test_serialize(self):
self.serialize = True
self.run_test()
def test_dynamic(self):
max_shape = {
'bboxes': [self.bs, self.num_boxes, 4],
'scores': [self.bs, self.num_classes, self.num_boxes],
}
opt_shape = max_shape
self.dynamic_shape_params = InferencePassTest.DynamicShapeParam(
{'bboxes': [1, 1, 4], 'scores': [1, 1, 1]},
max_shape,
opt_shape,
False,
)
self.run_test()
def test_background(self):
self.background_label = 7
self.run_test()
def test_disable_varseqlen(self):
self.enable_tensorrt_varseqlen = False
self.run_test()
if __name__ == "__main__":
unittest.main()
......@@ -96,7 +96,6 @@ class TestDirectory(unittest.TestCase):
'paddle.static.nn.group_norm',
'paddle.static.nn.instance_norm',
'paddle.static.nn.layer_norm',
'paddle.static.nn.multi_box_head',
'paddle.static.nn.nce',
'paddle.static.nn.prelu',
'paddle.static.nn.row_conv',
......
......@@ -16,7 +16,6 @@ import math
import unittest
import numpy as np
from op_test import OpTest
'''
# Equivalent code
......@@ -314,148 +313,5 @@ def trans_lod(lod):
return new_lod
class TestGenerateMaskLabels(OpTest):
def set_data(self):
self.init_test_case()
self.make_generate_proposal_labels_out()
self.generate_gt_polys()
self.generate_groundtruth()
self.init_test_output()
self.inputs = {
'ImInfo': self.im_info,
'GtClasses': (self.gt_classes.astype(np.int32), self.gt_lod),
'IsCrowd': (self.is_crowd.astype(np.int32), self.gt_lod),
'LabelsInt32': (self.label_int32.astype(np.int32), self.rois_lod),
'GtSegms': (self.gt_polys.astype(np.float32), self.masks_lod),
'Rois': (self.rois.astype(np.float32), self.rois_lod),
}
self.attrs = {
'num_classes': self.num_classes,
'resolution': self.resolution,
}
self.outputs = {
'MaskRois': (self.mask_rois, [self.new_lod]),
'RoiHasMaskInt32': (self.roi_has_mask_int32, [self.new_lod]),
'MaskInt32': (self.mask_int32, [self.new_lod]),
}
def init_test_case(self):
self.num_classes = 81
self.resolution = 14
self.batch_size = 2
self.batch_size_per_im = 64
self.images_shape = [100, 200]
np.random.seed(0)
def make_generate_proposal_labels_out(self):
rois = []
self.rois_lod = [[]]
self.label_int32 = []
for bno in range(self.batch_size):
self.rois_lod[0].append(self.batch_size_per_im)
for i in range(self.batch_size_per_im):
xywh = np.random.rand(4)
xy1 = xywh[0:2] * 2
wh = xywh[2:4] * (self.images_shape[0] - xy1)
xy2 = xy1 + wh
roi = [xy1[0], xy1[1], xy2[0], xy2[1]]
rois.append(roi)
self.rois = np.array(rois).astype("float32")
for idx, roi_num in enumerate(self.rois_lod[0]):
for roi_id in range(roi_num):
class_id = np.random.random_integers(self.num_classes - 1)
if idx == 0:
# set an image with no foreground, to test the empty case
self.label_int32.append(0)
else:
self.label_int32.append(class_id)
label_np = np.array(self.label_int32)
self.label_int32 = label_np[:, np.newaxis]
def generate_gt_polys(self):
h, w = self.images_shape[0:2]
self.gt_polys = []
self.gt_polys_list = []
max_gt = 4
max_poly_num = 5
min_poly_size = 4
max_poly_size = 16
lod0 = []
lod1 = []
lod2 = []
for i in range(self.batch_size):
gt_num = np.random.randint(1, high=max_gt, size=1)[0]
lod0.append(gt_num)
ptss = []
for i in range(gt_num):
poly_num = np.random.randint(1, max_poly_num, size=1)[0]
lod1.append(poly_num)
pts = []
for j in range(poly_num):
poly_size = np.random.randint(
min_poly_size, max_poly_size, size=1
)[0]
x = np.random.rand(poly_size, 1) * w
y = np.random.rand(poly_size, 1) * h
xy = np.concatenate((x, y), axis=1)
pts.append(xy.flatten().tolist())
self.gt_polys.extend(xy.flatten().tolist())
lod2.append(poly_size)
ptss.append(pts)
self.gt_polys_list.append(ptss)
self.masks_lod = [lod0, lod1, lod2]
self.gt_lod = [lod0]
self.gt_polys = np.array(self.gt_polys).astype('float32').reshape(-1, 2)
def generate_groundtruth(self):
self.im_info = []
self.gt_classes = []
self.is_crowd = []
for roi_num in self.gt_lod[0]:
self.im_info.append(self.images_shape + [1.0])
for roi_id in range(roi_num):
class_id = np.random.random_integers(self.num_classes - 1)
self.gt_classes.append(class_id)
self.is_crowd.append(0)
self.im_info = np.array(self.im_info).astype(np.float32)
gt_classes_np = np.array(self.gt_classes)
self.gt_classes = gt_classes_np[:, np.newaxis]
is_crowd_np = np.array(self.is_crowd)
self.is_crowd = is_crowd_np[:, np.newaxis]
def init_test_output(self):
roi_lod = trans_lod(self.rois_lod[0])
gt_lod = trans_lod(self.gt_lod[0])
outs = generate_mask_labels(
self.num_classes,
self.im_info,
self.gt_classes,
self.is_crowd,
self.label_int32,
self.gt_polys_list,
self.resolution,
self.rois,
roi_lod,
gt_lod,
)
self.mask_rois = outs[0]
self.roi_has_mask_int32 = outs[1]
self.mask_int32 = outs[2]
self.new_lod = outs[3]
self.mask_rois = np.vstack(self.mask_rois)
self.roi_has_mask_int32 = np.hstack(self.roi_has_mask_int32)[
:, np.newaxis
]
self.mask_int32 = np.vstack(self.mask_int32)
def setUp(self):
self.op_type = "generate_mask_labels"
self.set_data()
def test_check_output(self):
self.check_output()
if __name__ == '__main__':
unittest.main()
......@@ -2806,16 +2806,6 @@ class TestBook(LayerTest):
x = layers.data(name="input", shape=[1], dtype='int32', lod_level=1)
out = layers.sequence_enumerate(input=x, win_size=2, pad_value=0)
def test_roi_perspective_transform(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
rois = layers.data(
name="rois", shape=[8], dtype="float32", lod_level=1
)
output = layers.roi_perspective_transform(x, rois, 7, 7, 0.6)
return output
def test_row_conv(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......@@ -2897,47 +2887,6 @@ class TestBook(LayerTest):
out = paddle.addmm(input=input, x=x, y=y)
return out
def test_retinanet_detection_output(self):
with program_guard(
fluid.default_main_program(), fluid.default_startup_program()
):
bboxes = layers.data(
name='bboxes',
shape=[1, 21, 4],
append_batch_size=False,
dtype='float32',
)
scores = layers.data(
name='scores',
shape=[1, 21, 10],
append_batch_size=False,
dtype='float32',
)
anchors = layers.data(
name='anchors',
shape=[21, 4],
append_batch_size=False,
dtype='float32',
)
im_info = layers.data(
name="im_info",
shape=[1, 3],
append_batch_size=False,
dtype='float32',
)
nmsed_outs = layers.retinanet_detection_output(
bboxes=[bboxes, bboxes],
scores=[scores, scores],
anchors=[anchors, anchors],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0,
)
return nmsed_outs
def test_warpctc_with_padding(self):
# TODO(minqiyang): dygraph do not support lod now
with self.static_graph():
......
......@@ -19,8 +19,6 @@ import numpy as np
from op_test import OpTest
from test_multiclass_nms_op import iou
import paddle.fluid as fluid
def weight_merge(box1, box2, score1, score2):
for i in range(len(box1)):
......@@ -409,153 +407,5 @@ class TestLocalAwareNMSOp4Points(OpTest):
self.check_output()
class TestLocalityAwareNMSAPI(unittest.TestCase):
def test_api(self):
boxes = fluid.data(name='bboxes', shape=[None, 81, 8], dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81], dtype='float32')
fluid.layers.locality_aware_nms(
bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False,
)
class TestLocalityAwareNMSError(unittest.TestCase):
def test_error(self):
boxes = fluid.data(name='bboxes', shape=[None, 81, 8], dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81], dtype='float32')
boxes_int = fluid.data(
name='bboxes_int', shape=[None, 81, 8], dtype='int32'
)
scores_int = fluid.data(
name='scores_int', shape=[None, 1, 81], dtype='int32'
)
boxes_tmp = [1, 2]
scores_tmp = [1, 2]
# type of boxes and scores must be variable
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes_tmp,
scores,
0.5,
400,
200,
)
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores_tmp,
0.5,
400,
200,
)
# dtype of boxes and scores must in ['float32', 'float64']
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes_int,
scores,
0.5,
400,
200,
)
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores_int,
0.5,
400,
200,
)
score_threshold = int(1)
# type of score_threshold must be float
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores,
score_threshold,
400,
200,
)
nms_top_k = 400.5
# type of num_top_k must be int
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores,
0.5,
nms_top_k,
200,
)
keep_top_k = 200.5
# type of keep_top_k must be int
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores,
0.5,
400,
keep_top_k,
)
nms_threshold = int(0)
# type of nms_threshold must be int
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores,
0.5,
400,
200,
nms_threshold,
)
nms_eta = int(1)
# type of nms_eta must be float
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores,
0.5,
400,
200,
0.5,
nms_eta,
)
bg_label = 1.5
# type of background_label must be int
self.assertRaises(
TypeError,
fluid.layers.locality_aware_nms,
boxes,
scores,
0.5,
400,
200,
0.5,
1.0,
bg_label,
)
if __name__ == '__main__':
unittest.main()
......@@ -334,14 +334,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_bboxes_Variable():
# the bboxes type must be Variable
fluid.layers.matrix_nms(
bboxes=boxes_np,
scores=scores_data,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
paddle.vision.ops.matrix_nms(
bboxes=boxes_np,
scores=scores_data,
......@@ -353,14 +345,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_scores_Variable():
# the scores type must be Variable
fluid.layers.matrix_nms(
bboxes=boxes_data,
scores=scores_np,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
paddle.vision.ops.matrix_nms(
bboxes=boxes_data,
scores=scores_np,
......@@ -372,17 +356,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_empty():
# when all score are lower than threshold
try:
fluid.layers.matrix_nms(
bboxes=boxes_data,
scores=scores_data,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
except Exception as e:
self.fail(e)
try:
paddle.vision.ops.matrix_nms(
bboxes=boxes_data,
......@@ -397,17 +370,6 @@ class TestMatrixNMSError(unittest.TestCase):
def test_coverage():
# cover correct workflow
try:
fluid.layers.matrix_nms(
bboxes=boxes_data,
scores=scores_data,
score_threshold=score_threshold,
post_threshold=post_threshold,
nms_top_k=nms_top_k,
keep_top_k=keep_top_k,
)
except Exception as e:
self.fail(e)
try:
paddle.vision.ops.matrix_nms(
bboxes=boxes_data,
......
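For reference, the same replacement call also runs eagerly. A small dygraph sketch of the paddle.vision.ops.matrix_nms call that the updated test keeps; the random inputs, shapes, and thresholds are illustrative and not taken from the test.

import paddle
from paddle.vision.ops import matrix_nms

paddle.disable_static()

bboxes = paddle.rand([1, 100, 4])    # [batch, num_boxes, 4]
scores = paddle.rand([1, 80, 100])   # [batch, num_classes, num_boxes]

result = matrix_nms(
    bboxes=bboxes,
    scores=scores,
    score_threshold=0.5,
    post_threshold=0.1,
    nms_top_k=400,
    keep_top_k=200,
)
# With the default flags the result bundles the kept detections
# (rows of [label, score, x1, y1, x2, y2]) with the per-image box count.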
......@@ -19,14 +19,8 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid import (
Program,
_non_static_mode,
in_dygraph_mode,
program_guard,
)
from paddle.fluid import _non_static_mode, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
......@@ -738,39 +732,6 @@ class TestMulticlassNMS2LoDNoOutput(TestMulticlassNMS2LoDInput):
self.score_threshold = 2.0
class TestMulticlassNMSError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
M = 1200
N = 7
C = 21
BOX_SIZE = 4
boxes_np = np.random.random((M, C, BOX_SIZE)).astype('float32')
scores = np.random.random((N * M, C)).astype('float32')
scores = np.apply_along_axis(softmax, 1, scores)
scores = np.reshape(scores, (N, M, C))
scores_np = np.transpose(scores, (0, 2, 1))
boxes_data = fluid.data(
name='bboxes', shape=[M, C, BOX_SIZE], dtype='float32'
)
scores_data = fluid.data(
name='scores', shape=[N, C, M], dtype='float32'
)
def test_bboxes_Variable():
# the bboxes type must be Variable
fluid.layers.multiclass_nms(bboxes=boxes_np, scores=scores_data)
def test_scores_Variable():
# the bboxes type must be Variable
fluid.layers.multiclass_nms(bboxes=boxes_data, scores=scores_np)
self.assertRaises(TypeError, test_bboxes_Variable)
self.assertRaises(TypeError, test_scores_Variable)
class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
def setUp(self):
self.python_api = multiclass_nms3
......
......@@ -21,8 +21,6 @@ from test_anchor_generator_op import anchor_generator_in_python
from test_multiclass_nms_op import nms
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
......@@ -508,132 +506,6 @@ class TestRetinanetDetectionOutOpNo5(TestRetinanetDetectionOutOp1):
self.layer_w.append(2 ** (num_levels - i))
class TestRetinanetDetectionOutOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
bboxes_low1 = fluid.data(
name='bboxes_low1', shape=[1, 44, 4], dtype='float32'
)
bboxes_high1 = fluid.data(
name='bboxes_high1', shape=[1, 11, 4], dtype='float32'
)
scores_low1 = fluid.data(
name='scores_low1', shape=[1, 44, 10], dtype='float32'
)
scores_high1 = fluid.data(
name='scores_high1', shape=[1, 11, 10], dtype='float32'
)
anchors_low1 = fluid.data(
name='anchors_low1', shape=[44, 4], dtype='float32'
)
anchors_high1 = fluid.data(
name='anchors_high1', shape=[11, 4], dtype='float32'
)
im_info1 = fluid.data(
name="im_info1", shape=[1, 3], dtype='float32'
)
# The `bboxes` must be list, each element must be Variable and
# its Tensor data type must be one of float32 and float64.
def test_bboxes_type():
fluid.layers.retinanet_detection_output(
bboxes=bboxes_low1,
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=im_info1,
)
self.assertRaises(TypeError, test_bboxes_type)
def test_bboxes_tensor_dtype():
bboxes_high2 = fluid.data(
name='bboxes_high2', shape=[1, 11, 4], dtype='int32'
)
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_high2, 5],
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=im_info1,
)
self.assertRaises(TypeError, test_bboxes_tensor_dtype)
# The `scores` must be list, each element must be Variable and its
# Tensor data type must be one of float32 and float64.
def test_scores_type():
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=scores_low1,
anchors=[anchors_low1, anchors_high1],
im_info=im_info1,
)
self.assertRaises(TypeError, test_scores_type)
def test_scores_tensor_dtype():
scores_high2 = fluid.data(
name='scores_high2', shape=[1, 11, 10], dtype='int32'
)
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_high2, 5],
anchors=[anchors_low1, anchors_high1],
im_info=im_info1,
)
self.assertRaises(TypeError, test_scores_tensor_dtype)
# The `anchors` must be list, each element must be Variable and its
# Tensor data type must be one of float32 and float64.
def test_anchors_type():
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=anchors_low1,
im_info=im_info1,
)
self.assertRaises(TypeError, test_anchors_type)
def test_anchors_tensor_dtype():
anchors_high2 = fluid.data(
name='anchors_high2', shape=[11, 4], dtype='int32'
)
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=[anchors_high2, 5],
im_info=im_info1,
)
self.assertRaises(TypeError, test_anchors_tensor_dtype)
# The `im_info` must be Variable and the data type of `im_info`
# Tensor must be one of float32 and float64.
def test_iminfo_type():
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=[2, 3, 4],
)
self.assertRaises(TypeError, test_iminfo_type)
def test_iminfo_tensor_dtype():
im_info2 = fluid.data(
name='im_info2', shape=[1, 3], dtype='int32'
)
fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low1, bboxes_high1],
scores=[scores_low1, scores_high1],
anchors=[anchors_low1, anchors_high1],
im_info=im_info2,
)
self.assertRaises(TypeError, test_iminfo_tensor_dtype)
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -16,9 +16,6 @@ import unittest
from math import floor, sqrt
import numpy as np
from op_test import OpTest
from paddle import fluid
def gt_e(a, b):
......@@ -261,166 +258,5 @@ def roi_transform(
return out.astype("float32"), mask, matrix
class TestROIPoolOp(OpTest):
def set_data(self):
self.init_test_case()
self.make_rois()
self.inputs = {'X': self.x, 'ROIs': (self.rois, self.rois_lod)}
self.attrs = {
'spatial_scale': self.spatial_scale,
'transformed_height': self.transformed_height,
'transformed_width': self.transformed_width,
}
out, mask, transform_matrix = roi_transform(
self.x,
self.rois,
self.rois_lod,
self.transformed_height,
self.transformed_width,
self.spatial_scale,
)
self.outputs = {
'Out': out,
'Mask': mask,
'TransformMatrix': transform_matrix,
}
def init_test_case(self):
self.batch_size = 2
self.channels = 2
self.height = 8
self.width = 8
# n, c, h, w
self.x_dim = (self.batch_size, self.channels, self.height, self.width)
self.spatial_scale = 1.0 / 2.0
self.transformed_height = 2
self.transformed_width = 3
self.x = np.random.random(self.x_dim).astype('float32')
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.randint(
0, self.width // self.spatial_scale - self.transformed_width
)
y1 = np.random.randint(
0,
self.height // self.spatial_scale - self.transformed_height,
)
x2 = np.random.randint(
x1 + self.transformed_width,
self.width // self.spatial_scale,
)
y2 = np.random.randint(
0,
self.height // self.spatial_scale - self.transformed_height,
)
x3 = np.random.randint(
x1 + self.transformed_width,
self.width // self.spatial_scale,
)
y3 = np.random.randint(
y1 + self.transformed_height,
self.height // self.spatial_scale,
)
x4 = np.random.randint(
0, self.width // self.spatial_scale - self.transformed_width
)
y4 = np.random.randint(
y1 + self.transformed_height,
self.height // self.spatial_scale,
)
roi = [x1, y1, x2, y2, x3, y3, x4, y4]
rois.append(roi)
self.rois_num = len(rois)
self.rois = np.array(rois).astype("float32")
def setUp(self):
self.op_type = "roi_perspective_transform"
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.outputs['Out2InIdx'] = np.zeros(
[np.product(self.outputs['Out'].shape), 4]
).astype("int32")
self.outputs['Out2InWeights'] = np.zeros(
[np.product(self.outputs['Out'].shape), 4]
).astype("float32")
self.check_grad(['X'], 'Out')
def test_errors(self):
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(
name='rois', shape=[None, 8], lod_level=1, dtype='float32'
)
x_int = fluid.data(
name='x_int', shape=[100, 256, 28, 28], dtype='int32'
)
rois_int = fluid.data(
name='rois_int', shape=[None, 8], lod_level=1, dtype='int32'
)
x_tmp = [1, 2]
rois_tmp = [1, 2]
# type of intput and rois must be variable
self.assertRaises(
TypeError, fluid.layers.roi_perspective_transform, x_tmp, rois, 7, 7
)
self.assertRaises(
TypeError, fluid.layers.roi_perspective_transform, x, rois_tmp, 7, 7
)
# dtype of intput and rois must be float32
self.assertRaises(
TypeError, fluid.layers.roi_perspective_transform, x_int, rois, 7, 7
)
self.assertRaises(
TypeError, fluid.layers.roi_perspective_transform, x, rois_int, 7, 7
)
height = 7.5
width = 7.5
# type of transformed_height and transformed_width must be int
self.assertRaises(
TypeError,
fluid.layers.roi_perspective_transform,
x,
rois,
height,
7,
)
self.assertRaises(
TypeError, fluid.layers.roi_perspective_transform, x, rois, 7, width
)
scale = int(2)
# type of spatial_scale must be float
self.assertRaises(
TypeError,
fluid.layers.roi_perspective_transform,
x,
rois,
7,
7,
scale,
)
if __name__ == '__main__':
unittest.main()
......@@ -54,7 +54,6 @@ NO_FP64_CHECK_GRAD_OP_LIST = [
'reduce_max',
'reduce_min',
'reshape2',
'roi_perspective_transform',
'row_conv',
'scatter',
'sequence_conv',
......
......@@ -32,7 +32,6 @@ from .common import py_func # noqa: F401
from ...tensor.creation import create_parameter # noqa: F401
from ...fluid.layers import conv2d # noqa: F401
from ...fluid.layers import layer_norm # noqa: F401
from ...fluid.layers import multi_box_head # noqa: F401
from .loss import nce # noqa: F401
from .common import prelu # noqa: F401
from ...fluid.layers import row_conv # noqa: F401
......@@ -76,8 +75,8 @@ __all__ = [ # noqa
'group_norm',
'instance_norm',
'layer_norm',
'multi_box_head',
'nce',
'prelu',
'py_func',
'row_conv',
'spectral_norm',
......
......@@ -799,7 +799,7 @@ def conv3d(
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d, whose data type is
A Tensor representing the conv3d, whose data type is
the same with input. If act is None, the tensor variable storing the
convolution result, and if act is not None, the tensor variable storing
convolution and non-linearity activation result.
......@@ -1190,7 +1190,7 @@ def conv2d_transpose(
helper = LayerHelper(op_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv2d_transpose must be Variable")
raise TypeError("Input of conv2d_transpose must be Tensor")
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
......@@ -1280,7 +1280,7 @@ def conv2d_transpose(
output_size
):
raise ValueError(
"filter_size should not be None when output_size is Variable or contain Variable in static mode."
"filter_size should not be None when output_size is Tensor or contain Tensor in static mode."
)
else:
output_size = utils.convert_shape_to_list(output_size)
......@@ -1497,7 +1497,7 @@ def conv3d_transpose(
`[batch_size, input_channels, input_height, input_width]`.
Returns:
A Variable holding Tensor representing the conv3d_transpose, whose data
A Tensor representing the conv3d_transpose, whose data
type is the same with input and shape is (num_batches, channels, out_d, out_h,
out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
variable storing the transposed convolution result, and if act is not None, the tensor
......@@ -1546,7 +1546,7 @@ def conv3d_transpose(
l_type = "conv3d_transpose"
helper = LayerHelper(l_type, **locals())
if not isinstance(input, Variable):
raise TypeError("Input of conv3d_transpose must be Variable")
raise TypeError("Input of conv3d_transpose must be Tensor")
if len(input.shape) != 5:
raise ValueError(
"Input should be 5D tensor, but received input with the shape of {}".format(
......@@ -1785,7 +1785,7 @@ def deformable_conv(
float32, float64.
offset (Tensor): The input coordinate offset of deformable convolution layer.
A Tensor with type float32, float64.
Mask (Variable, Optional): The input mask of deformable convolution layer.
Mask (Tensor, Optional): The input mask of deformable convolution layer.
A Tensor with type float32, float64. It should be None when you use
deformable convolution v1.
num_filters(int): The number of filter. It is as same as the output
......@@ -1876,9 +1876,9 @@ def deformable_conv(
dtype = helper.input_dtype()
if not isinstance(input, paddle.static.Variable):
raise TypeError("Input of deformable_conv must be Variable")
raise TypeError("Input of deformable_conv must be Tensor")
if not isinstance(offset, paddle.static.Variable):
raise TypeError("Input Offset of deformable_conv must be Variable")
raise TypeError("Input Offset of deformable_conv must be Tensor")
if groups is None:
num_filter_channels = num_channels
......@@ -2155,9 +2155,9 @@ def bilinear_tensor_product(
- :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`.
Args:
x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
x (Tensor): 2-D input tensor with shape [batch_size, M]. Data type
is float32 or float64.
y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
y (Tensor): 2-D input tensor with shape [batch_size, N]. Data type
should be same as **x**.
size (int): The dimension of this layer.
act (str|None): Activation to be applied to the output of this layer. Default None.
......@@ -2832,7 +2832,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Variable should be passed in the form of tuple(Variale) or list[Variale]
# Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)
exe=paddle.static.Executor(paddle.CPUPlace())
exe.run(start_program)
......@@ -2857,7 +2857,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
elif isinstance(x, tuple):
x = list(x)
elif not isinstance(x, (list, tuple, Variable)):
raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
raise TypeError('Input must be Tensor/list(Tensor)/tuple(Tensor)')
check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
if out is None:
out_list = []
......@@ -2868,9 +2868,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
elif isinstance(out, list):
out_list = out
else:
raise TypeError(
'Output must be Variable/list(Variable)/tuple(Variable)'
)
raise TypeError('Output must be Tensor/list(Tensor)/tuple(Tensor)')
fwd_func_id = PyFuncRegistry(func).id
bwd_func_id = (
......@@ -2895,7 +2893,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
for v in skip_vars_in_backward_input:
if v.name not in fwd_in_out:
raise ValueError(
'Variable {} is not found in forward inputs and outputs'.format(
'Tensor {} is not found in forward inputs and outputs'.format(
v.name
)
)
......
......@@ -702,7 +702,6 @@ SECONDARY_HIGH_PARALLEL_JOB_NEW = [
'test_uniform_random_bf16_op',
'test_custom_concat',
'test_weight_quantization_mobilenetv1',
'test_retinanet_detection_output',
'test_concat_mkldnn_op',
'test_gaussian_random_mkldnn_op',
'test_parallel_executor_seresnext_with_reduce_cpu',
......@@ -786,7 +785,6 @@ FOURTH_HIGH_PARALLEL_JOB_NEW = [
'test_lr_scheduler',
'test_generate_proposals_op',
'test_masked_select_op',
'test_trt_anchor_generator_op',
'test_imperative_ocr_attention_model',
'test_sentiment',
'test_chunk_op',
......@@ -1748,7 +1746,6 @@ CPU_PARALLEL_JOB = [
'test_rpn_target_assign_op',
'test_row_conv',
'test_rnn_memory_helper_op',
'test_retinanet_detection_output',
'test_reshape_transpose_matmul_mkldnn_fuse_pass',
'test_reshape_bf16_op',
'test_require_version',
......@@ -2506,7 +2503,6 @@ TETRAD_PARALLEL_JOB = [
'test_where_index',
'test_variance_layer',
'test_unsqueeze_op',
'test_trt_anchor_generator_op',
'test_translated_layer',
'test_tensor_shape',
'test_slice',
......