Unverified commit 74d411e7, authored by 傅剑寒, committed by GitHub

(fluid cleanup) remove flatten in nn.py under fluid (#47940)

* remove flatten in nn.py under fluid

* fix test case

* fix test case

* fix codestyle

* fix codestyle

* fix code style

* remove extra test case

* remove test case
Parent c39d1cff
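Background for the changes below: the removed `fluid.layers.flatten(x, axis)` always produced a 2-D tensor (dimensions before `axis` folded into the first output dimension, the rest into the second), while `paddle.flatten(x, start_axis, stop_axis)` collapses one contiguous range of dimensions per call. So the replacements use a single call for the common `axis=1` case and two chained calls otherwise. A minimal sketch of the equivalence (editorial illustration, assuming Paddle 2.x; not part of the commit):

    import paddle

    x = paddle.rand([3, 100, 100, 4])

    # Old: fluid.layers.flatten(x, axis=2) -> shape (3 * 100, 100 * 4) = (300, 400).
    # New: two chained paddle.flatten calls reproduce the 2-D result.
    out = paddle.flatten(x, 2, -1)   # collapse dims 2..3 -> (3, 100, 400)
    out = paddle.flatten(out, 0, 1)  # collapse dims 0..1 -> (300, 400)
    assert list(out.shape) == [300, 400]

    # The common axis=1 case needs only one call:
    assert list(paddle.flatten(x, 1, -1).shape) == [3, 40000]

The `axis == 0` case additionally needs `paddle.unsqueeze` to restore the leading dimension, as the IPU test hunk further down shows.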
@@ -153,7 +153,7 @@ class ImperativeLenet(fluid.dygraph.Layer):
         x = self.quant_stub(inputs)
         x = self.features(x)
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.add(x, paddle.to_tensor(0.0))  # For CI
         x = self.fc(x)
         return x
@@ -238,7 +238,7 @@ class ImperativeLenetWithSkipQuant(fluid.dygraph.Layer):
         x = self.relu6_0(x)
         x = self.pool2d_1(x)
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.linear_0(x)
         x = self.leaky_relu_0(x)
......
@@ -121,7 +121,7 @@ class ImperativeLenet(fluid.dygraph.Layer):
     def forward(self, inputs):
         x = self.features(inputs)
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.fc(x)
         return x
......
@@ -117,7 +117,7 @@ class ImperativeLenet(fluid.dygraph.Layer):
     def forward(self, inputs):
         x = self.features(inputs)
-        x = fluid.layers.flatten(x, 1)
+        x = paddle.flatten(x, 1, -1)
         x = self.fc(x)
         return x
......
@@ -1739,7 +1739,9 @@ def ssd_loss(
     conf_shape = nn.shape(confidence)

     def __reshape_to_2d(var):
-        return nn.flatten(x=var, axis=2)
+        out = paddle.flatten(var, 2, -1)
+        out = paddle.flatten(out, 0, 1)
+        return out

     # 1. Find matched bounding box by prior box.
     #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
@@ -2335,8 +2337,15 @@ def multi_box_head(
     """

     def _reshape_with_axis_(input, axis=1):
-        out = nn.flatten(x=input, axis=axis)
-        return out
+        # Note : axis!=0 in current references to this func
+        # if axis == 0:
+        #     x = paddle.flatten(input, 0, -1)
+        #     x = paddle.unsqueeze(x, 0)
+        #     return x
+        # else:
+        x = paddle.flatten(input, axis, -1)
+        x = paddle.flatten(x, 0, axis - 1)
+        return x

     def _is_list_or_tuple_(data):
         return isinstance(data, list) or isinstance(data, tuple)
@@ -2445,7 +2454,7 @@ def multi_box_head(
         )
         mbox_loc = paddle.transpose(mbox_loc, perm=[0, 2, 3, 1])
-        mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
+        mbox_loc_flatten = paddle.flatten(mbox_loc, 1, -1)
         mbox_locs.append(mbox_loc_flatten)

         # get conf
@@ -2457,8 +2466,9 @@ def multi_box_head(
             padding=pad,
             stride=stride,
         )
         conf_loc = paddle.transpose(conf_loc, perm=[0, 2, 3, 1])
-        conf_loc_flatten = nn.flatten(conf_loc, axis=1)
+        conf_loc_flatten = paddle.flatten(conf_loc, 1, -1)
         mbox_confs.append(conf_loc_flatten)

     if len(box_results) == 1:
......
@@ -109,7 +109,6 @@ __all__ = [
     'log',
     'crop_tensor',
     'prelu',
-    'flatten',
     'unique',
     'unique_with_counts',
     'elementwise_add',
@@ -6842,98 +6841,6 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
     return out


-def flatten(x, axis=1, name=None):
-    r"""
-    **Flatten op**
-
-    Flatten the input tensor into a 2D matrix.
-
-    For Example:
-
-    .. code-block:: text
-
-        Case 1:
-
-          Given
-            X.shape = (3, 100, 100, 4)
-          and
-            axis = 2
-          We get:
-            Out.shape = (3 * 100, 4 * 100)
-
-        Case 2:
-
-          Given
-            X.shape = (3, 100, 100, 4)
-          and
-            axis = 0
-          We get:
-            Out.shape = (1, 3 * 100 * 100 * 4)
-
-    Args:
-        x (Variable): A tensor of rank >= axis. A tensor with type float32,
-                      float64, int8, int32, int64, uint8.
-        axis (int): Indicate up to which input dimensions (exclusive) should
-                    be flattened to the outer dimension of the output.
-                    The value for axis must be in the range [0, R], where R
-                    is the rank of the input tensor. Default: 1.
-        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
-                             Generally, no setting is required. Default: None.
-
-    Returns:
-        Variable: A 2D tensor with the contents of the input tensor, with input \
-                  dimensions up to axis flattened to the outer dimension of \
-                  the output and remaining input dimensions flattened into the \
-                  inner dimension of the output. A Tensor with type same as input x.
-
-    Raises:
-        ValueError: If x is not a variable.
-        ValueError: If axis is not in range [0, rank(x)].
-
-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-            import paddle.fluid as fluid
-            paddle.enable_static()
-
-            x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
-            # x shape is [4, 4, 3]
-            out = fluid.layers.flatten(x=x, axis=2)
-            # out shape is [16, 3]
-    """
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
-        'flatten',
-    )
-    if _non_static_mode():
-        return _legacy_C_ops.flatten2(x, 'axis', axis)[0]
-
-    helper = LayerHelper('flatten', **locals())
-
-    if not (isinstance(x, Variable)):
-        raise ValueError("The input x should be a Variable")
-
-    if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
-        raise ValueError("The axis should be a int, and in range [0, rank(x)]")
-
-    out = helper.create_variable_for_type_inference(x.dtype)
-    x_shape = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='flatten2',
-        inputs={"X": x},
-        outputs={'Out': out, 'XShape': x_shape},
-        attrs={"axis": axis},
-    )
-    return out
-
-
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
......
@@ -31,7 +31,7 @@ class SimpleLayer(paddle.nn.Layer):
     def forward(self, x, target=None):
         x = self.conv(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             x = paddle.fluid.layers.softmax(x)
             loss = paddle.fluid.layers.cross_entropy(x, target)
......
@@ -46,7 +46,7 @@ class SimpleLayer(paddle.nn.Layer):
     @to_static()
     def forward(self, x, target=None):
         x = self.conv(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             if self.use_softmax:
                 x = paddle.fluid.layers.softmax(x)
......
@@ -47,7 +47,12 @@ class TestBase(IPUOpTest):
         x = paddle.static.data(
             name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32'
         )
-        out = paddle.fluid.layers.flatten(x=x, **self.attrs)
+        if self.attrs['axis'] == 0:
+            x = paddle.flatten(x, 0, -1)
+            out = paddle.unsqueeze(x, 0)
+        else:
+            x = paddle.flatten(x, self.attrs['axis'], -1)
+            out = paddle.flatten(x, 0, self.attrs['axis'] - 1)
         self.fetch_list = [out.name]

     def run_model(self, exec_mode):
......
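Why the `axis == 0` branch above needs `unsqueeze` (editorial illustration, not part of the patch): the removed API returned shape `(1, N)` for `axis=0`, whereas a full `paddle.flatten` yields a 1-D tensor:

    import paddle

    x = paddle.rand([3, 2, 4])
    flat = paddle.flatten(x, 0, -1)  # 1-D result, shape (24,)
    out = paddle.unsqueeze(flat, 0)  # restore the leading dim, shape (1, 24)
    assert list(out.shape) == [1, 24]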
@@ -30,7 +30,7 @@ class SimpleLayer(paddle.nn.Layer):
     def forward(self, x, target=None):
         x = self.conv(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
        if target is not None:
             x = paddle.fluid.layers.softmax(x)
             loss = paddle.fluid.layers.cross_entropy(x, target)
......
@@ -117,7 +117,7 @@ class SimpleLayer(paddle.nn.Layer):
     def forward(self, x, target=None):
         x = self.conv(x)
         print(x)
-        x = paddle.fluid.layers.flatten(x, axis=1)
+        x = paddle.flatten(x, 1, -1)
         if target is not None:
             x = paddle.fluid.layers.softmax(x)
             loss = paddle.fluid.layers.cross_entropy(x, target)
......
@@ -17,6 +17,7 @@ import unittest
 import numpy as np
 from inference_pass_test import InferencePassTest

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.core import AnalysisConfig, PassVersionChecker
@@ -40,7 +41,7 @@ class TRTFlattenTest(InferencePassTest):
         self.fetch_list = [out]

     def append_flatten(self, data):
-        return fluid.layers.flatten(data, axis=1)
+        return paddle.flatten(data, 1, -1)

     def test_check_output(self):
         if core.is_compiled_with_cuda():
@@ -75,7 +76,7 @@ class TRTFlattenDynamicTest(InferencePassTest):
         self.fetch_list = [out]

     def append_flatten(self, data):
-        return fluid.layers.flatten(data, axis=1)
+        return paddle.flatten(data, 1, -1)

     def test_check_output(self):
         if core.is_compiled_with_cuda():
......
@@ -32,10 +32,12 @@ class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
             data2 = fluid.data(
                 name="data2", shape=[8, 32, 128], dtype="float32"
             )
             trans1 = paddle.transpose(data1, perm=[0, 2, 1])
             trans2 = paddle.transpose(data2, perm=[0, 2, 1])
-            flatt1 = fluid.layers.flatten(trans1)
-            flatt2 = fluid.layers.flatten(trans2)
+            flatt1 = paddle.flatten(trans1, 1, -1)
+            flatt2 = paddle.flatten(trans2, 1, -1)
             concat_out = fluid.layers.concat([flatt1, flatt2], axis=1)
             # There is no parameters for above structure.
             # Hence, append a batch_norm to avoid failure caused by load_combined.
......
@@ -75,41 +75,5 @@ class TestFlattenOpSixDims(TestFlattenOp):
         self.new_shape = (36, 16)


-class TestStaticFlattenInferShapePythonAPI(unittest.TestCase):
-    def execute_api(self, x, axis=1):
-        return fluid.layers.flatten(x, axis=axis)
-
-    def test_static_api(self):
-        paddle.enable_static()
-        main_prog = paddle.static.Program()
-        with paddle.static.program_guard(main_prog, paddle.static.Program()):
-            x = paddle.static.data(
-                name="x", shape=[-1, 3, -1, -1], dtype='float32'
-            )
-            out = self.execute_api(x, axis=2)
-        self.assertTrue((-1, -1) == out.shape)
-
-
-class TestFlatten2OpError(unittest.TestCase):
-    def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_data = np.random.random((3, 2, 4, 5)).astype("float64")
-
-            def test_Variable():
-                # the input type must be Variable
-                fluid.layers.flatten(input_data, axis=1)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_type():
-                # dtype must be float32, float64, int8, int32, int64, uint8.
-                x2 = fluid.layers.data(
-                    name='x2', shape=[3, 2, 4, 5], dtype='float16'
-                )
-                fluid.layers.flatten(x2, axis=1)
-
-            self.assertRaises(TypeError, test_type)
-
-
 if __name__ == "__main__":
     unittest.main()
@@ -14,8 +14,6 @@
 import unittest

 import numpy as np

-import paddle.fluid as fluid
-import paddle

 from op_test import OpTest
@@ -68,41 +66,5 @@ class TestFlattenOpSixDims(TestFlattenOp):
         self.new_shape = (36, 16)


-class TestStaticFlattenInferShapePythonAPI(unittest.TestCase):
-    def execute_api(self, x, axis=1):
-        return fluid.layers.flatten(x, axis=axis)
-
-    def test_static_api(self):
-        paddle.enable_static()
-        main_prog = paddle.static.Program()
-        with paddle.static.program_guard(main_prog, paddle.static.Program()):
-            x = paddle.static.data(
-                name="x", shape=[-1, 3, -1, -1], dtype='float32'
-            )
-            out = self.execute_api(x, axis=2)
-        self.assertTrue((-1, -1) == out.shape)
-
-
-class TestFlatten2OpError(unittest.TestCase):
-    def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            input_data = np.random.random((3, 2, 4, 5)).astype("float64")
-
-            def test_Variable():
-                # the input type must be Variable
-                fluid.layers.flatten(input_data, axis=1)
-
-            self.assertRaises(TypeError, test_Variable)
-
-            def test_type():
-                # dtype must be float32, float64, int8, int32, int64, uint8.
-                x2 = fluid.layers.data(
-                    name='x2', shape=[3, 2, 4, 5], dtype='float16'
-                )
-                fluid.layers.flatten(x2, axis=1)
-
-            self.assertRaises(TypeError, test_type)
-
-
 if __name__ == "__main__":
     unittest.main()
@@ -47,7 +47,7 @@ class LeNetDygraph(fluid.dygraph.Layer):
         x = self.features(inputs)

         if self.num_classes > 0:
-            x = fluid.layers.flatten(x, 1)
+            x = paddle.flatten(x, 1, -1)
             x = self.fc(x)
         return x
......
@@ -4129,7 +4129,7 @@ class TestBook(LayerTest):
                 shape=[4, 4, 3],
                 dtype="float32",
             )
-            out = layers.flatten(x, axis=1, name="flatten")
+            out = paddle.flatten(x, 1, -1, name="flatten")
             return out

     def test_linspace(self):
@@ -61,7 +61,7 @@ class LeNetDygraph(paddle.nn.Layer):
         x = self.features(inputs)

         if self.num_classes > 0:
-            x = fluid.layers.flatten(x, 1)
+            x = paddle.flatten(x, 1, -1)
             x = self.fc(x)
         return x
......