diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc
index 05bb37ee421ff475432819faf8fcfcbd67bb776e..ce06a1ec3563ddb559ba9c100ea60d9aeaeda830 100644
--- a/paddle/fluid/operators/reshape_op.cc
+++ b/paddle/fluid/operators/reshape_op.cc
@@ -621,15 +621,18 @@ REGISTER_OPERATOR(reshape2_grad_grad, ops::Reshape2DoubleGradOp,
 REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int8_t, ops::ReshapeKernel,
                                uint8_t, ops::ReshapeKernel, int,
-                               ops::ReshapeKernel, int64_t, ops::ReshapeKernel);
+                               ops::ReshapeKernel, int64_t, ops::ReshapeKernel,
+                               bool, ops::ReshapeKernel);
 REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
+                               ops::ReshapeGradKernel, bool,
                                ops::ReshapeGradKernel);
 REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad_grad, float,
                                ops::ReshapeDoubleGradKernel, double,
                                ops::ReshapeDoubleGradKernel, int,
                                ops::ReshapeDoubleGradKernel, int64_t,
+                               ops::ReshapeDoubleGradKernel, bool,
                                ops::ReshapeDoubleGradKernel);
 
 #ifdef PADDLE_WITH_CUDA
@@ -641,15 +644,17 @@ REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel,
                                 double, ops::ReshapeGradKernel, int,
                                 ops::ReshapeGradKernel, int64_t,
                                 ops::ReshapeGradKernel, plat::float16,
+                                ops::ReshapeGradKernel, bool,
                                 ops::ReshapeGradKernel);
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                 ops::ReshapeKernel, int, ops::ReshapeKernel,
                                 int64_t, ops::ReshapeKernel, plat::float16,
-                                ops::ReshapeKernel);
+                                ops::ReshapeKernel, bool, ops::ReshapeKernel);
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                                 double, ops::ReshapeGradKernel, int,
                                 ops::ReshapeGradKernel, int64_t,
                                 ops::ReshapeGradKernel, plat::float16,
+                                ops::ReshapeGradKernel, bool,
                                 ops::ReshapeGradKernel);
 REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad_grad, float,
@@ -657,6 +662,7 @@ REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad_grad, float,
                                 ops::ReshapeDoubleGradKernel, int,
                                 ops::ReshapeDoubleGradKernel, int64_t,
                                 ops::ReshapeDoubleGradKernel, plat::float16,
+                                ops::ReshapeDoubleGradKernel, bool,
                                 ops::ReshapeDoubleGradKernel);
 #endif
 
@@ -664,10 +670,11 @@ REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad_grad, float,
 REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double,
                                ops::ReshapeKernel, int, ops::ReshapeKernel,
                                int64_t, ops::ReshapeKernel, plat::float16,
-                               ops::ReshapeKernel);
+                               ops::ReshapeKernel, bool, ops::ReshapeKernel);
 REGISTER_OP_XPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel,
                                double, ops::ReshapeGradKernel, int,
                                ops::ReshapeGradKernel, int64_t,
                                ops::ReshapeGradKernel, plat::float16,
-                               ops::ReshapeGradKernel);
+                               ops::ReshapeGradKernel, bool,
+                               ops::ReshapeGradKernel);
 #endif
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 7b1628799a61d3ed643b3d592ba8feeb2f817f56..c2bb96ead2bf985efdf6d572bd09ecf3c091353e 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6119,7 +6119,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
         return dygraph_utils._append_activation_in_dygraph(out, act)
 
     check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64',
+                 'bool'], 'reshape')
     check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
     check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
 
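With `bool` registered for the `reshape2` kernels and added to the dtype whitelist above, a boolean tensor can be reshaped without a round-trip cast. A minimal smoke test of the newly allowed path — a sketch assuming the paddle 2.0-style imperative API (`paddle.disable_static`, `paddle.to_tensor`) is available on this branch:

```python
import numpy as np
import paddle

paddle.disable_static()  # imperative (dygraph) mode

# A boolean mask, e.g. the result of a comparison op, can now be reshaped directly.
mask = paddle.to_tensor(np.random.choice([True, False], size=[2, 25]))
out = paddle.reshape(mask, shape=[2, 5, 5])

# reshape only rewrites shape metadata: dtype and contents are preserved.
assert out.numpy().dtype == np.bool_
assert out.numpy().sum() == mask.numpy().sum()
```

Previously the same call either failed the `check_variable_and_dtype` whitelist in static-graph mode or hit the missing kernel registration in imperative mode, so boolean masks had to be cast to an integer dtype, reshaped, and cast back.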
diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py
index 295891605ce52337abaeea482b7f11210e7623d0..f41099bda39f8970e7e921dc265e9e751146ac8a 100644
--- a/python/paddle/fluid/tests/unittests/test_reshape_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py
@@ -226,6 +226,24 @@ class TestReshapeUint8Op(TestReshapeInt8Op):
         self.dtype = np.uint8
 
 
+class TestReshapeOpBool(TestReshapeOp):
+    def setUp(self):
+        self.init_data()
+        self.op_type = "reshape2"
+        self.inputs = {
+            "X": np.random.choice(
+                [True, False], size=self.ori_shape)
+        }
+        self.attrs = {"shape": self.new_shape}
+        self.outputs = {
+            "Out": self.inputs["X"].reshape(self.infered_shape),
+            "XShape": np.random.random(self.ori_shape).astype("float32")
+        }
+
+    def test_check_grad(self):
+        pass
+
+
 # Test python API
 class TestReshapeAPI(unittest.TestCase):
     def _set_paddle_api(self):
@@ -324,7 +342,7 @@ class TestReshapeOpError(unittest.TestCase):
 
-        # The x dtype of reshape_op must be float16, float32, float64, int32 or int64.
+        # The x dtype of reshape_op must be float16, float32, float64, int32, int64 or bool.
         def test_x_dtype():
-            x2 = self.data(name="x2", shape=[2, 25], dtype="bool")
+            x2 = self.data(name="x2", shape=[2, 25], dtype="int8")
             self.reshape(x2, shape=[2, 5, 5])
 
         self.assertRaises(TypeError, test_x_dtype)
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index ba7a1f1ef23f571b87ec76466f0b2ea05a42bd17..19b88e122e4e2371eb435958d937f71dc972bff3 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -1353,7 +1353,7 @@ def reshape(x, shape, name=None):
     the corresponding dimension of x.
 
     Args:
-        x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
+        x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32``, ``int64`` or ``bool``.
         shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
-                              The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
-                              If ``shape`` is an Tensor, it should be an 1-D Tensor .
+                              The data type is ``int32``. If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
+                              If ``shape`` is a Tensor, it should be a 1-D Tensor.
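The new `TestReshapeOpBool` case relies on reshape being a pure metadata operation, so its expectation can be sanity-checked with NumPy alone. A standalone sketch — the `(2, 60)` to `(12, 10)` shapes are an assumption about what `TestReshapeOp.init_data` provides, and the OpTest harness is deliberately left out:

```python
import numpy as np

# Assumed to mirror TestReshapeOp.init_data() on this branch.
ori_shape, new_shape = (2, 60), (12, 10)

x = np.random.choice([True, False], size=ori_shape)
out = x.reshape(new_shape)

assert out.dtype == np.bool_               # dtype is preserved
assert out.size == x.size                  # element count is unchanged
assert (out.ravel() == x.ravel()).all()    # row-major order is preserved
```

`test_check_grad` is overridden to a no-op because a boolean tensor has no meaningful gradient to check; the test only exercises the forward kernel.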