diff --git a/paddle/fluid/operators/flatten_op.cc b/paddle/fluid/operators/flatten_op.cc
index d23beea7e4e62ee65f31c4dc903d80310ddfccbc..c94ce4174f2be32beae0547f6a8366fd2896e027 100644
--- a/paddle/fluid/operators/flatten_op.cc
+++ b/paddle/fluid/operators/flatten_op.cc
@@ -429,6 +429,7 @@ REGISTER_OPERATOR(flatten_contiguous_range_grad,
 REGISTER_OP_CPU_KERNEL(
     flatten, ops::FlattenKernel<paddle::platform::CPUDeviceContext, float>,
     ops::FlattenKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::FlattenKernel<paddle::platform::CPUDeviceContext, uint8_t>,
     ops::FlattenKernel<paddle::platform::CPUDeviceContext, int>,
     ops::FlattenKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::FlattenKernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -436,12 +437,14 @@ REGISTER_OP_CPU_KERNEL(
     flatten_grad,
     ops::FlattenGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::FlattenGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::FlattenGradKernel<paddle::platform::CPUDeviceContext, uint8_t>,
     ops::FlattenGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::FlattenGradKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::FlattenGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     flatten2, ops::Flatten2Kernel<paddle::platform::CPUDeviceContext, float>,
     ops::Flatten2Kernel<paddle::platform::CPUDeviceContext, double>,
+    ops::Flatten2Kernel<paddle::platform::CPUDeviceContext, uint8_t>,
     ops::Flatten2Kernel<paddle::platform::CPUDeviceContext, int>,
     ops::Flatten2Kernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::Flatten2Kernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -449,6 +452,7 @@ REGISTER_OP_CPU_KERNEL(
     flatten2_grad,
     ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, uint8_t>,
     ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::Flatten2GradKernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -458,6 +462,8 @@ REGISTER_OP_CPU_KERNEL(
                                       float>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
                                       double>,
+    ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
+                                      uint8_t>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
                                       int>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CPUDeviceContext,
@@ -469,6 +475,8 @@ REGISTER_OP_CPU_KERNEL(
                                           float>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                           double>,
+    ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
+                                          uint8_t>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
                                           int>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CPUDeviceContext,
diff --git a/paddle/fluid/operators/flatten_op.cu.cc b/paddle/fluid/operators/flatten_op.cu.cc
--- a/paddle/fluid/operators/flatten_op.cu.cc
+++ b/paddle/fluid/operators/flatten_op.cu.cc
@@ -19,6 +19,7 @@
 REGISTER_OP_CUDA_KERNEL(
     flatten, ops::FlattenKernel<paddle::platform::CUDADeviceContext, float>,
     ops::FlattenKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::FlattenKernel<paddle::platform::CUDADeviceContext, uint8_t>,
     ops::FlattenKernel<paddle::platform::CUDADeviceContext, int>,
     ops::FlattenKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::FlattenKernel<paddle::platform::CUDADeviceContext, int64_t>);
@@ -26,12 +27,14 @@ REGISTER_OP_CUDA_KERNEL(
     flatten_grad,
     ops::FlattenGradKernel<paddle::platform::CUDADeviceContext, float>,
     ops::FlattenGradKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::FlattenGradKernel<paddle::platform::CUDADeviceContext, uint8_t>,
     ops::FlattenGradKernel<paddle::platform::CUDADeviceContext, int>,
     ops::FlattenGradKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::FlattenGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
 REGISTER_OP_CUDA_KERNEL(
     flatten2, ops::Flatten2Kernel<paddle::platform::CUDADeviceContext, float>,
     ops::Flatten2Kernel<paddle::platform::CUDADeviceContext, double>,
+    ops::Flatten2Kernel<paddle::platform::CUDADeviceContext, uint8_t>,
     ops::Flatten2Kernel<paddle::platform::CUDADeviceContext, int>,
     ops::Flatten2Kernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::Flatten2Kernel<paddle::platform::CUDADeviceContext, int64_t>);
@@ -39,6 +42,7 @@ REGISTER_OP_CUDA_KERNEL(
     flatten2_grad,
     ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, float>,
     ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, uint8_t>,
     ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int>,
     ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int8_t>,
     ops::Flatten2GradKernel<paddle::platform::CUDADeviceContext, int64_t>);
@@ -48,6 +52,8 @@ REGISTER_OP_CUDA_KERNEL(
                                       float>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                       double>,
+    ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
+                                      uint8_t>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
                                       int>,
     ops::FlattenContiguousRangeKernel<paddle::platform::CUDADeviceContext,
@@ -59,6 +65,8 @@ REGISTER_OP_CUDA_KERNEL(
                                           float>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                           double>,
+    ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
+                                          uint8_t>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
                                           int>,
     ops::FlattenContiguousRangeGradKernel<paddle::platform::CUDADeviceContext,
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -9926,7 +9926,7 @@ def flatten(x, axis=1, name=None):
 
     Args:
         x (Variable): A tensor of rank >= axis. A tensor with type float32,
-                      float64, int8, int32, int64.
+                      float64, int8, int32, int64, uint8.
         axis (int): Indicate up to which input dimensions (exclusive) should
                     be flattened to the outer dimension of the output.
                     The value for axis must be in the range [0, R], where R
@@ -9962,14 +9962,17 @@ def flatten(x, axis=1, name=None):
 
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
+            paddle.enable_static()
             x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
             # x shape is [4, 4, 3]
             out = fluid.layers.flatten(x=x, axis=2)
             # out shape is [16, 3]
     """
     check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
+        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
+        'flatten')
     helper = LayerHelper('flatten', **locals())
 
     if not (isinstance(x, Variable)):
diff --git a/python/paddle/fluid/tests/unittests/test_flatten2_op.py b/python/paddle/fluid/tests/unittests/test_flatten2_op.py
index 189a63a0868459c839782dac13c9bf462959927b..0d50c65558a91841160d8db155675d167499c38d 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten2_op.py
@@ -81,7 +81,7 @@ class TestFlatten2OpError(unittest.TestCase):
         self.assertRaises(TypeError, test_Variable)
 
         def test_type():
-            # dtype must be float32, float64, int8, int32, int64.
+            # dtype must be float32, float64, int8, int32, int64, uint8.
             x2 = fluid.layers.data(
                 name='x2', shape=[3, 2, 4, 5], dtype='float16')
             fluid.layers.flatten(x2, axis=1)
diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
index 28803f5ac6232058f3fd0f3af5535366bab5c60d..d6cc6ecffc106bcb1b46c21c114968d37822d17e 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
@@ -166,7 +166,7 @@ class TestFlatten2OpError(unittest.TestCase):
         self.assertRaises(ValueError, test_ValueError3)
 
         def test_type():
-            # dtype must be float32, float64, int8, int32, int64.
+            # dtype must be float32, float64, int8, int32, int64, uint8.
             x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                            image_shape[3]).reshape(image_shape) / 100.
             x2 = x2.astype('float16')
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 9bcda74d116892ee309d415ffaf144062e42a20e..377435a50008a6f0e664b5de0ccce79772584d9a 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -212,7 +212,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     Args:
         x (Tensor): A tensor of number of dimentions >= axis. A tensor with data type float32,
-                    float64, int8, int32, int64.
+                    float64, int8, int32, int64, uint8.
         start_axis (int): the start axis to flatten
         stop_axis (int): the stop axis to flatten
         name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
@@ -249,7 +249,8 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
         raise ValueError("The input x should be a Tensor")
 
     check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
+        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
+        'flatten')
     helper = LayerHelper('flatten', **locals())
     x_dim = len(x.shape)
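As a quick smoke test of what this patch enables (not part of the diff; a minimal sketch adapted from the docstring example above, with the dtype swapped to uint8), the snippet below flattens a uint8 tensor in static-graph mode. Before this change, the check_variable_and_dtype call in fluid.layers.flatten rejected 'uint8' with a TypeError; with the uint8_t kernels registered, the program builds and runs on CPU:

```python
import numpy as np

import paddle
import paddle.fluid as fluid

paddle.enable_static()

# Declare a uint8 input; prior to this patch the dtype check in
# fluid.layers.flatten raised a TypeError for 'uint8'.
x = fluid.data(name="x", shape=[4, 4, 3], dtype="uint8")
out = fluid.layers.flatten(x=x, axis=2)  # out shape is [16, 3]

exe = fluid.Executor(fluid.CPUPlace())
res, = exe.run(fluid.default_main_program(),
               feed={"x": np.arange(48, dtype="uint8").reshape(4, 4, 3)},
               fetch_list=[out])
print(res.dtype, res.shape)  # uint8 (16, 3)
```

The same dtype now passes the check in paddle.tensor.manipulation.flatten (paddle.flatten with start_axis/stop_axis), since both entry points share the extended dtype list.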