From 74d411e751f47655621a4f17bae35f7fe93c6af7 Mon Sep 17 00:00:00 2001
From: 傅剑寒
Date: Mon, 28 Nov 2022 12:53:05 +0800
Subject: [PATCH] (fluid cleanup) remove flatten in nn.py under fluid (#47940)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* remove flatten in nn.py under fluid
* fix test case
* fix test case
* fix codestyle
* fix codestyle
* fix code style
* remove extra test case
* remove test case
---
 .../slim/tests/imperative_test_utils.py       |  4 +-
 .../slim/tests/test_imperative_out_scale.py   |  2 +-
 .../slim/tests/test_imperative_qat_lsq.py     |  2 +-
 python/paddle/fluid/layers/detection.py       | 20 +++-
 python/paddle/fluid/layers/nn.py              | 93 ------------------
 .../unittests/ipu/test_dy2static_fp16_ipu.py  |  2 +-
 .../tests/unittests/ipu/test_dy2static_ipu.py |  2 +-
 .../unittests/ipu/test_flatten_op_ipu.py      |  7 +-
 .../unittests/ipu/test_modelruntime_ipu.py    |  2 +-
 .../tests/unittests/ipu/test_print_op_ipu.py  |  2 +-
 .../ir/inference/test_trt_flatten_op.py       |  5 +-
 ..._trt_transpose_flatten_concat_fuse_pass.py |  6 +-
 .../unittests/mlu/test_flatten2_op_mlu.py     | 36 -------
 .../fluid/tests/unittests/test_flatten2_op.py | 38 --------
 .../unittests/test_imperative_layer_apply.py  |  2 +-
 .../fluid/tests/unittests/test_layers.py      |  2 +-
 python/paddle/tests/test_model.py             |  2 +-
 17 files changed, 39 insertions(+), 188 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py b/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py
index 1a5f52b040..c0637bc7de 100644
--- a/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py
+++ b/python/paddle/fluid/contrib/slim/tests/imperative_test_utils.py
@@ -153,7 +153,7 @@ class ImperativeLenet(fluid.dygraph.Layer):
         x = self.quant_stub(inputs)
         x = self.features(x)
 
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.add(x, paddle.to_tensor(0.0))  # For CI
         x = self.fc(x)
         return x
@@ -238,7 +238,7 @@ class ImperativeLenetWithSkipQuant(fluid.dygraph.Layer):
         x = self.relu6_0(x)
         x = self.pool2d_1(x)
 
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
 
         x = self.linear_0(x)
         x = self.leaky_relu_0(x)
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
index 02b19947ec..e978b63645 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
@@ -121,7 +121,7 @@ class ImperativeLenet(fluid.dygraph.Layer):
 
     def forward(self, inputs):
         x = self.features(inputs)
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.fc(x)
         return x
 
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
index fbf1f4e7f5..1b54a5b55b 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py
@@ -117,7 +117,7 @@ class ImperativeLenet(fluid.dygraph.Layer):
 
     def forward(self, inputs):
         x = self.features(inputs)
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.fc(x)
         return x
 
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index 1519b04243..543f63b639 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -1739,7 +1739,9 @@ def ssd_loss(
     conf_shape = nn.shape(confidence)
 
     def __reshape_to_2d(var):
-        return nn.flatten(x=var, axis=2)
+        out = paddle.flatten(var, 2, -1)
+        out = paddle.flatten(out, 0, 1)
+        return out
 
     # 1. Find matched bounding box by prior box.
     #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
@@ -2335,8 +2337,15 @@ def multi_box_head(
     """
 
    def _reshape_with_axis_(input, axis=1):
-        out = nn.flatten(x=input, axis=axis)
-        return out
+        # Note: axis != 0 for all current callers of this function.
+        # if axis == 0:
+        #     x = paddle.flatten(input, 0, -1)
+        #     x = paddle.unsqueeze(x, 0)
+        #     return x
+        # else:
+        x = paddle.flatten(input, axis, -1)
+        x = paddle.flatten(x, 0, axis - 1)
+        return x
 
     def _is_list_or_tuple_(data):
         return isinstance(data, list) or isinstance(data, tuple)
@@ -2445,7 +2454,7 @@ def multi_box_head(
             )
 
             mbox_loc = paddle.transpose(mbox_loc, perm=[0, 2, 3, 1])
-            mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
+            mbox_loc_flatten = paddle.flatten(mbox_loc, 1, -1)
             mbox_locs.append(mbox_loc_flatten)
 
             # get conf
@@ -2457,8 +2466,9 @@ def multi_box_head(
                 padding=pad,
                 stride=stride,
             )
+
             conf_loc = paddle.transpose(conf_loc, perm=[0, 2, 3, 1])
-            conf_loc_flatten = nn.flatten(conf_loc, axis=1)
+            conf_loc_flatten = paddle.flatten(conf_loc, 1, -1)
             mbox_confs.append(conf_loc_flatten)
 
     if len(box_results) == 1:
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 821a974dd7..3e40a80216 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -109,7 +109,6 @@ __all__ = [
     'log',
     'crop_tensor',
     'prelu',
-    'flatten',
     'unique',
     'unique_with_counts',
     'elementwise_add',
@@ -6842,98 +6841,6 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     return out
 
 
-def flatten(x, axis=1, name=None):
-    r"""
-    **Flatten op**
-
-    Flatten the input tensor into a 2D matrix.
-
-    For Example:
-
-    .. code-block:: text
-
-        Case 1:
-
-          Given
-            X.shape = (3, 100, 100, 4)
-
-          and
-            axis = 2
-
-          We get:
-            Out.shape = (3 * 100, 4 * 100)
-
-        Case 2:
-
-          Given
-            X.shape = (3, 100, 100, 4)
-
-          and
-            axis = 0
-
-          We get:
-            Out.shape = (1, 3 * 100 * 100 * 4)
-
-    Args:
-        x (Variable): A tensor of rank >= axis. A tensor with type float32,
-            float64, int8, int32, int64, uint8.
-        axis (int): Indicate up to which input dimensions (exclusive) should
-            be flattened to the outer dimension of the output.
-            The value for axis must be in the range [0, R], where R
-            is the rank of the input tensor. Default: 1.
-        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
-            Generally, no setting is required. Default: None.
-
-    Returns:
-        Variable: A 2D tensor with the contents of the input tensor, with input \
-            dimensions up to axis flattened to the outer dimension of \
-            the output and remaining input dimensions flattened into the \
-            inner dimension of the output. A Tensor with type same as input x.
-
-    Raises:
-        ValueError: If x is not a variable.
-        ValueError: If axis is not in range [0, rank(x)].
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            paddle.enable_static()
-            x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
-            # x shape is [4, 4, 3]
-            out = fluid.layers.flatten(x=x, axis=2)
-            # out shape is [16, 3]
-    """
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
-        'flatten',
-    )
-    if _non_static_mode():
-        return _legacy_C_ops.flatten2(x, 'axis', axis)[0]
-
-    helper = LayerHelper('flatten', **locals())
-
-    if not (isinstance(x, Variable)):
-        raise ValueError("The input x should be a Variable")
-
-    if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
-        raise ValueError("The axis should be a int, and in range [0, rank(x)]")
-
-    out = helper.create_variable_for_type_inference(x.dtype)
-    x_shape = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='flatten2',
-        inputs={"X": x},
-        outputs={'Out': out, 'XShape': x_shape},
-        attrs={"axis": axis},
-    )
-    return out
-
-
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py
index d5049979c0..8a13e5abb5 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_fp16_ipu.py
@@ -31,7 +31,7 @@ class SimpleLayer(paddle.nn.Layer):
 
     def forward(self, x, target=None):
         x = self.conv(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             x = paddle.fluid.layers.softmax(x)
             loss = paddle.fluid.layers.cross_entropy(x, target)
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py
index 4e16b0efdf..fd1c762f20 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py
@@ -46,7 +46,7 @@ class SimpleLayer(paddle.nn.Layer):
     @to_static()
     def forward(self, x, target=None):
         x = self.conv(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             if self.use_softmax:
                 x = paddle.fluid.layers.softmax(x)
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py
index 58108b262e..fe89e81854 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_flatten_op_ipu.py
@@ -47,7 +47,12 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.flatten(x=x, **self.attrs)
+        if self.attrs['axis'] == 0:
+            x = paddle.flatten(x, 0, -1)
+            out = paddle.unsqueeze(x, 0)
+        else:
+            x = paddle.flatten(x, self.attrs['axis'], -1)
+            out = paddle.flatten(x, 0, self.attrs['axis'] - 1)
         self.fetch_list = [out.name]
 
     def run_model(self, exec_mode):
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_modelruntime_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_modelruntime_ipu.py
index 383b2632aa..9fda7f780e 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_modelruntime_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_modelruntime_ipu.py
@@ -30,7 +30,7 @@ class SimpleLayer(paddle.nn.Layer):
 
     def forward(self, x, target=None):
         x = self.conv(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             x = paddle.fluid.layers.softmax(x)
             loss = paddle.fluid.layers.cross_entropy(x, target)
diff --git a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py
index 6a0d384fa9..ccf0a38bbf 100644
--- a/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py
+++ b/python/paddle/fluid/tests/unittests/ipu/test_print_op_ipu.py
@@ -117,7 +117,7 @@ class SimpleLayer(paddle.nn.Layer):
     def forward(self, x, target=None):
         x = self.conv(x)
         print(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             x = paddle.fluid.layers.softmax(x)
             loss = paddle.fluid.layers.cross_entropy(x, target)
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py
index 8beb2000c6..4ed648ed9c 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_flatten_op.py
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from inference_pass_test import InferencePassTest
 
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import AnalysisConfig, PassVersionChecker
@@ -40,7 +41,7 @@ class TRTFlattenTest(InferencePassTest):
         self.fetch_list = [out]
 
     def append_flatten(self, data):
-        return fluid.layers.flatten(data, axis=1)
+        return paddle.flatten(data, 1, -1)
 
     def test_check_output(self):
         if core.is_compiled_with_cuda():
@@ -75,7 +76,7 @@ class TRTFlattenDynamicTest(InferencePassTest):
         self.fetch_list = [out]
 
     def append_flatten(self, data):
-        return fluid.layers.flatten(data, axis=1)
+        return paddle.flatten(data, 1, -1)
 
     def test_check_output(self):
         if core.is_compiled_with_cuda():
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
index 8e0fa3a0eb..192274ef34 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py
@@ -32,10 +32,12 @@ class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
             data2 = fluid.data(
                 name="data2", shape=[8, 32, 128], dtype="float32"
             )
+
             trans1 = paddle.transpose(data1, perm=[0, 2, 1])
             trans2 = paddle.transpose(data2, perm=[0, 2, 1])
-            flatt1 = fluid.layers.flatten(trans1)
-            flatt2 = fluid.layers.flatten(trans2)
+            flatt1 = paddle.flatten(trans1, 1, -1)
+            flatt2 = paddle.flatten(trans2, 1, -1)
+
             concat_out = fluid.layers.concat([flatt1, flatt2], axis=1)
             # There is no parameters for above structure.
             # Hence, append a batch_norm to avoid failure caused by load_combined.
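
All of the flatten replacements in this patch follow the same recipe: the removed fluid.layers.flatten(x, axis) always produced a 2-D tensor of shape (prod(shape[:axis]), prod(shape[axis:])), while paddle.flatten(x, start_axis, stop_axis) merges only one contiguous run of dimensions. The 2-D result is therefore recovered by composing two paddle.flatten calls, or, for axis == 0, one call plus an unsqueeze (the branch exercised in test_flatten_op_ipu.py above). A minimal dygraph sketch of the equivalence follows; the helper name flatten_to_2d is illustrative only and not part of the patch:

    import paddle

    def flatten_to_2d(x, axis=1):
        # Mirror the removed fluid.layers.flatten(x, axis): dims [axis:] are
        # collapsed into the inner dimension, dims [:axis] into the outer one.
        if axis == 0:
            # Old axis == 0 behaviour: shape (1, prod(all dims)).
            return paddle.unsqueeze(paddle.flatten(x, 0, -1), 0)
        out = paddle.flatten(x, axis, -1)  # merge the trailing dims first
        return paddle.flatten(out, 0, axis - 1)  # then merge the leading dims

    x = paddle.rand([3, 100, 100, 4])
    print(flatten_to_2d(x, 2).shape)  # [300, 400]
    print(flatten_to_2d(x, 0).shape)  # [1, 120000]
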
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py
index ace458ff42..c6ae69b2d5 100644
--- a/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py
+++ b/python/paddle/fluid/tests/unittests/mlu/test_flatten2_op_mlu.py
@@ -75,41 +75,5 @@ class TestFlattenOpSixDims(TestFlattenOp):
         self.new_shape = (36, 16)
 
 
-class TestStaticFlattenInferShapePythonAPI(unittest.TestCase):
-    def execute_api(self, x, axis=1):
-        return fluid.layers.flatten(x, axis=axis)
-
-    def test_static_api(self):
-        paddle.enable_static()
-        main_prog = paddle.static.Program()
-        with paddle.static.program_guard(main_prog, paddle.static.Program()):
-            x = paddle.static.data(
-                name="x", shape=[-1, 3, -1, -1], dtype='float32'
-            )
-            out = self.execute_api(x, axis=2)
-        self.assertTrue((-1, -1) == out.shape)
-
-
-class TestFlatten2OpError(unittest.TestCase):
-    def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_data = np.random.random((3, 2, 4, 5)).astype("float64")
-
-            def test_Variable():
-                # the input type must be Variable
-                fluid.layers.flatten(input_data, axis=1)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_type():
-                # dtype must be float32, float64, int8, int32, int64, uint8.
-                x2 = fluid.layers.data(
-                    name='x2', shape=[3, 2, 4, 5], dtype='float16'
-                )
-                fluid.layers.flatten(x2, axis=1)
-
-            self.assertRaises(TypeError, test_type)
-
-
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_flatten2_op.py b/python/paddle/fluid/tests/unittests/test_flatten2_op.py
index fe5aad118a..d67b873084 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten2_op.py
@@ -14,8 +14,6 @@
 
 import unittest
 
 import numpy as np
-import paddle.fluid as fluid
-import paddle
 from op_test import OpTest
 
@@ -68,41 +66,5 @@ class TestFlattenOpSixDims(TestFlattenOp):
         self.new_shape = (36, 16)
 
 
-class TestStaticFlattenInferShapePythonAPI(unittest.TestCase):
-    def execute_api(self, x, axis=1):
-        return fluid.layers.flatten(x, axis=axis)
-
-    def test_static_api(self):
-        paddle.enable_static()
-        main_prog = paddle.static.Program()
-        with paddle.static.program_guard(main_prog, paddle.static.Program()):
-            x = paddle.static.data(
-                name="x", shape=[-1, 3, -1, -1], dtype='float32'
-            )
-            out = self.execute_api(x, axis=2)
-        self.assertTrue((-1, -1) == out.shape)
-
-
-class TestFlatten2OpError(unittest.TestCase):
-    def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_data = np.random.random((3, 2, 4, 5)).astype("float64")
-
-            def test_Variable():
-                # the input type must be Variable
-                fluid.layers.flatten(input_data, axis=1)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_type():
-                # dtype must be float32, float64, int8, int32, int64, uint8.
-                x2 = fluid.layers.data(
-                    name='x2', shape=[3, 2, 4, 5], dtype='float16'
-                )
-                fluid.layers.flatten(x2, axis=1)
-
-            self.assertRaises(TypeError, test_type)
-
-
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
index 88b91e76b1..156cac0b3a 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py
@@ -47,7 +47,7 @@ class LeNetDygraph(fluid.dygraph.Layer):
         x = self.features(inputs)
 
         if self.num_classes > 0:
-            x = fluid.layers.flatten(x, 1)
+            x = paddle.flatten(x, 1, -1)
             x = self.fc(x)
         return x
 
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 22b4e22061..aff461f878 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -4129,7 +4129,7 @@ class TestBook(LayerTest):
                 shape=[4, 4, 3],
                 dtype="float32",
             )
-            out = layers.flatten(x, axis=1, name="flatten")
+            out = paddle.flatten(x, 1, -1, name="flatten")
             return out
 
     def test_linspace(self):
diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 91ffb3377d..58c88e5648 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -61,7 +61,7 @@ class LeNetDygraph(paddle.nn.Layer):
         x = self.features(inputs)
 
         if self.num_classes > 0:
-            x = fluid.layers.flatten(x, 1)
+            x = paddle.flatten(x, 1, -1)
             x = self.fc(x)
         return x
--
GitLab
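
For reference, the usage example deleted from the fluid.layers.flatten docstring maps onto the surviving API as follows (a static-graph sketch reusing the shapes quoted in the removed docstring):

    import paddle

    paddle.enable_static()
    x = paddle.static.data(name="x", shape=[4, 4, 3], dtype="float32")
    # previously: out = fluid.layers.flatten(x=x, axis=2)  # out shape [16, 3]
    out = paddle.flatten(paddle.flatten(x, 2, -1), 0, 1)
    print(out.shape)  # (16, 3)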