diff --git a/paddle/fluid/operators/controlflow/compare_op.cu b/paddle/fluid/operators/controlflow/compare_op.cu index f03a11d906f4e730f14c1ea8caf960037803704f..4b9452d0f60e0396e4bc50bb5ea56e2f3131098e 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cu +++ b/paddle/fluid/operators/controlflow/compare_op.cu @@ -48,6 +48,7 @@ class CompareOpKernel REGISTER_OP_CUDA_KERNEL( \ op_type, \ ops::CompareOpKernel, void>, \ + ops::CompareOpKernel, void>, \ ops::CompareOpKernel, void>, \ ops::CompareOpKernel, void>, \ ops::CompareOpKernel, void>, \ diff --git a/paddle/fluid/operators/controlflow/compare_op.h b/paddle/fluid/operators/controlflow/compare_op.h index 957efbff1993792c1cc6162296dbdcf00abb61cf..be017a01ef3237fd8572e248d691daa97c999509 100644 --- a/paddle/fluid/operators/controlflow/compare_op.h +++ b/paddle/fluid/operators/controlflow/compare_op.h @@ -95,6 +95,9 @@ class CompareOpKernel ::paddle::operators::CompareOpKernel< \ ::paddle::platform::dev##DeviceContext, \ functor, inverse_functor>, \ + ::paddle::operators::CompareOpKernel< \ + ::paddle::platform::dev##DeviceContext, \ + functor, inverse_functor>, \ ::paddle::operators::CompareOpKernel< \ ::paddle::platform::dev##DeviceContext, \ functor, inverse_functor>, \ diff --git a/paddle/fluid/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc index 424c56119ecd974e9eeb2e8f746d33d7f147b880..f7f9d40991cd1965c4fae39421cc65c384f59c59 100644 --- a/paddle/fluid/operators/cumsum_op.cc +++ b/paddle/fluid/operators/cumsum_op.cc @@ -93,6 +93,7 @@ REGISTER_OPERATOR(cumsum, ops::CumOp, ops::CumsumOpMaker, ops::CumsumGradMaker); REGISTER_OP_CPU_KERNEL(cumsum, ops::CumKernel>, ops::CumKernel>, + ops::CumKernel>, ops::CumKernel>, ops::CumKernel>); diff --git a/paddle/fluid/operators/cumsum_op.cu b/paddle/fluid/operators/cumsum_op.cu index 977e301f13663b82918dfd6814fbd85583644813..3402f42521f54f315390fe2162309fb204fd9b00 100644 --- a/paddle/fluid/operators/cumsum_op.cu +++ b/paddle/fluid/operators/cumsum_op.cu 
@@ -320,5 +320,6 @@ namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( cumsum, ops::CumCUDAKernel, ops::CumCUDAKernel, + ops::CumCUDAKernel, ops::CumCUDAKernel, ops::CumCUDAKernel); diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc index 98b47407b93af34ab69d554a55bca55654940907..8d9a2159069423cb3b51517016570057232d2c90 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -96,6 +96,7 @@ REGISTER_OP_CPU_KERNEL( elementwise_sub, ops::ElementwiseSubKernel, ops::ElementwiseSubKernel, + ops::ElementwiseSubKernel, ops::ElementwiseSubKernel, ops::ElementwiseSubKernel, ops::ElementwiseSubKernel, ops::ElementwiseSubGradKernel, + ops::ElementwiseSubGradKernel, ops::ElementwiseSubGradKernel, ops::ElementwiseSubGradKernel, ops::ElementwiseSubGradKernel, ops::ElementwiseSubDoubleGradKernel, + ops::ElementwiseSubDoubleGradKernel, ops::ElementwiseSubDoubleGradKernel, ops::ElementwiseSubDoubleGradKernel, ops::FillAnyLikeKernel, ops::FillAnyLikeKernel, ops::FillAnyLikeKernel, diff --git a/paddle/fluid/operators/fill_any_like_op.cu b/paddle/fluid/operators/fill_any_like_op.cu index 1d8c8ace60a6b5a5d380a75af5047ac064896c0f..3ebc0ad7c8ec53b5c3de68823d9ba943e49bd364 100644 --- a/paddle/fluid/operators/fill_any_like_op.cu +++ b/paddle/fluid/operators/fill_any_like_op.cu @@ -19,6 +19,7 @@ limitations under the License. 
*/ namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( fill_any_like, + ops::FillAnyLikeKernel, ops::FillAnyLikeKernel, ops::FillAnyLikeKernel, ops::FillAnyLikeKernel, diff --git a/paddle/fluid/operators/gather_nd_op.cc b/paddle/fluid/operators/gather_nd_op.cc index 5d9c6ee963f0df835b011afda77163e0ba01dc7a..8a32e96e60052a2d43c0fad8b9b28226b5648afa 100644 --- a/paddle/fluid/operators/gather_nd_op.cc +++ b/paddle/fluid/operators/gather_nd_op.cc @@ -183,7 +183,9 @@ REGISTER_OPERATOR(gather_nd_grad, ops::GatherNdGradOp, REGISTER_OP_CPU_KERNEL(gather_nd, ops::GatherNdOpKernel, ops::GatherNdOpKernel, ops::GatherNdOpKernel, - ops::GatherNdOpKernel, ops::GatherNdOpKernel, + ops::GatherNdOpKernel, + ops::GatherNdOpKernel, + ops::GatherNdOpKernel, ops::GatherNdOpKernel); REGISTER_OP_CPU_KERNEL(gather_nd_grad, ops::GatherNdGradOpKernel, diff --git a/paddle/fluid/operators/gather_nd_op.cu b/paddle/fluid/operators/gather_nd_op.cu index 3a8099c0672ad8c2e6f0035ce93d09fe2e47cd5e..0de2798bf750915e99c9b60ed8ccb94d7d1201ab 100644 --- a/paddle/fluid/operators/gather_nd_op.cu +++ b/paddle/fluid/operators/gather_nd_op.cu @@ -103,6 +103,7 @@ REGISTER_OP_CUDA_KERNEL(gather_nd, ops::GatherNdOpCUDAKernel, ops::GatherNdOpCUDAKernel, ops::GatherNdOpCUDAKernel, ops::GatherNdOpCUDAKernel, + ops::GatherNdOpCUDAKernel, ops::GatherNdOpCUDAKernel, ops::GatherNdOpCUDAKernel); diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc index 9a715eb98ef994f89e201656e8c371d819b11f19..cfafc11739948ba791654516147f49d6fda78bdf 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc +++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc @@ -116,6 +116,8 @@ REGISTER_OP_CPU_KERNEL( ops::SumFunctor>, ops::ReduceKernel, + ops::ReduceKernel, ops::ReduceKernel, ops::ReduceKernel, diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu index 
ea9a89bea97130b681c6683ae150299467862772..94ccb0965f06e9bd8b8264b7cec91220194487c4 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu +++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cu @@ -20,6 +20,7 @@ REGISTER_OP_CUDA_KERNEL( ops::ReduceCudaKernel, ops::ReduceCudaKernel, + ops::ReduceCudaKernel, ops::ReduceCudaKernel, ops::ReduceCudaKernel, ops::ReduceCudaKernel, kps::AddFunctor, diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 8f0cb4b38f6e8c888e72ca6a51244df41a3f8b16..77a6beacc956af623fc324ef4ebd1b9de6bb42b5 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -639,10 +639,12 @@ REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp, ops::ReshapeGradInplaceInferer); REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, - ops::ReshapeKernel, int, ops::ReshapeKernel, - int64_t, ops::ReshapeKernel); + ops::ReshapeKernel, int16_t, ops::ReshapeKernel, + int, ops::ReshapeKernel, int64_t, + ops::ReshapeKernel); REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, - double, ops::ReshapeGradKernel, int, + double, ops::ReshapeGradKernel, int16_t, + ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel); REGISTER_OPERATOR(reshape2, ops::Reshape2Op, ops::Reshape2OpMaker, @@ -659,15 +661,15 @@ REGISTER_OPERATOR(reshape2_grad_grad, ops::Reshape2DoubleGradOp, #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, - ops::ReshapeKernel, int, ops::ReshapeKernel, - uint8_t, ops::ReshapeKernel, int64_t, - ops::ReshapeKernel, plat::float16, - ops::ReshapeKernel, plat::bfloat16, - ops::ReshapeKernel); + ops::ReshapeKernel, int16_t, ops::ReshapeKernel, + int, ops::ReshapeKernel, uint8_t, + ops::ReshapeKernel, int64_t, ops::ReshapeKernel, + plat::float16, ops::ReshapeKernel, + plat::bfloat16, ops::ReshapeKernel); 
REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, - double, ops::ReshapeGradKernel, int, - ops::ReshapeGradKernel, int64_t, - ops::ReshapeGradKernel, uint8_t, + double, ops::ReshapeGradKernel, int16_t, + ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, + int64_t, ops::ReshapeGradKernel, uint8_t, ops::ReshapeGradKernel, plat::float16, ops::ReshapeGradKernel, plat::bfloat16, ops::ReshapeGradKernel); diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc index 00930bde4c677f421c6c741829efbd94f6876854..ceea8b2343cacf7a158e6caca2a57ec8881d53ec 100644 --- a/paddle/fluid/operators/unsqueeze_op.cc +++ b/paddle/fluid/operators/unsqueeze_op.cc @@ -362,6 +362,7 @@ REGISTER_OP_CPU_KERNEL( ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, + ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, @@ -377,6 +378,7 @@ REGISTER_OP_CPU_KERNEL( ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, + ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, @@ -391,6 +393,7 @@ REGISTER_OP_CPU_KERNEL( ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, + ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, @@ -406,6 +409,7 @@ REGISTER_OP_CPU_KERNEL( ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, + ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, diff --git a/paddle/fluid/operators/unsqueeze_op.cu.cc b/paddle/fluid/operators/unsqueeze_op.cu.cc index 9feb66e2a5f0b67cfc24eba23fe5c847447cb4a4..2dcc4d2152a5c82a8f344b96084e70ba4df25bdd 100644 --- a/paddle/fluid/operators/unsqueeze_op.cu.cc +++ b/paddle/fluid/operators/unsqueeze_op.cu.cc @@ -24,6 +24,7 @@ REGISTER_OP_CUDA_KERNEL( ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, + ops::UnsqueezeKernel,
ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, @@ -41,6 +42,7 @@ REGISTER_OP_CUDA_KERNEL( plat::bfloat16>, ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, + ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, ops::UnsqueezeGradKernel, @@ -56,6 +58,7 @@ REGISTER_OP_CUDA_KERNEL( ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, + ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, ops::UnsqueezeKernel, @@ -73,6 +76,7 @@ REGISTER_OP_CUDA_KERNEL( plat::bfloat16>, ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, + ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, ops::Unsqueeze2GradKernel, diff --git a/paddle/fluid/operators/where_index_op.cc b/paddle/fluid/operators/where_index_op.cc index 1fae6eac90e71f03fcdcc53ee70fd5488a4ca54c..2bffeb500ce50e3bc5a3d72a085da826d06e849d 100644 --- a/paddle/fluid/operators/where_index_op.cc +++ b/paddle/fluid/operators/where_index_op.cc @@ -57,6 +57,7 @@ REGISTER_OP_WITHOUT_GRADIENT(where_index, ops::WhereIndexOp, ops::WhereIndexOpMaker); REGISTER_OP_CPU_KERNEL(where_index, ops::CPUWhereIndexKernel, ops::CPUWhereIndexKernel, + ops::CPUWhereIndexKernel, ops::CPUWhereIndexKernel, ops::CPUWhereIndexKernel, ops::CPUWhereIndexKernel); diff --git a/paddle/fluid/operators/where_index_op.cu b/paddle/fluid/operators/where_index_op.cu index 01890764251ad0503286c362b0d46d89324315c3..785ba68da2ea85eff175d180b48f603e077cc1a0 100644 --- a/paddle/fluid/operators/where_index_op.cu +++ b/paddle/fluid/operators/where_index_op.cu @@ -158,6 +158,7 @@ class CUDAWhereIndexKernel : public framework::OpKernel { namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(where_index, ops::CUDAWhereIndexKernel, ops::CUDAWhereIndexKernel, + ops::CUDAWhereIndexKernel, ops::CUDAWhereIndexKernel, ops::CUDAWhereIndexKernel, ops::CUDAWhereIndexKernel); diff --git a/paddle/pten/kernels/cpu/elementwise_grad_kernel.cc 
b/paddle/pten/kernels/cpu/elementwise_grad_kernel.cc index 002b575341a1601d093680d6854ed2cfc262e788..4d324f097ad2d5b0a1cd3ae71e19de7b15800265 100644 --- a/paddle/pten/kernels/cpu/elementwise_grad_kernel.cc +++ b/paddle/pten/kernels/cpu/elementwise_grad_kernel.cc @@ -132,6 +132,7 @@ PT_REGISTER_KERNEL(add_grad, pten::AddGradKernel, float, double, + int16_t, int, int64_t, pten::dtype::complex, @@ -143,6 +144,7 @@ PT_REGISTER_KERNEL(add_double_grad, pten::AddDoubleGradKernel, float, double, + int16_t, int, int64_t, pten::dtype::complex, @@ -154,6 +156,7 @@ PT_REGISTER_KERNEL(add_triple_grad, pten::AddTripleGradKernel, float, double, + int16_t, int, int64_t, pten::dtype::complex, @@ -165,6 +168,7 @@ PT_REGISTER_KERNEL(subtract_grad, pten::SubtractGradKernel, float, double, + int16_t, int, int64_t, pten::dtype::complex, @@ -176,6 +180,7 @@ PT_REGISTER_KERNEL(subtract_double_grad, pten::SubtractDoubleGradKernel, float, double, + int16_t, int, int64_t, pten::dtype::complex, diff --git a/paddle/pten/kernels/cpu/full_kernel.cc b/paddle/pten/kernels/cpu/full_kernel.cc index 93e3ee27f87bc2e7fedfc198b24241d1498f9cbf..8486aed3b0c4d17225a8a203015ad7ae7242db76 100644 --- a/paddle/pten/kernels/cpu/full_kernel.cc +++ b/paddle/pten/kernels/cpu/full_kernel.cc @@ -95,6 +95,7 @@ PT_REGISTER_KERNEL(full_like, pten::FullLikeKernel, float, double, + int16_t, int, int64_t, bool, diff --git a/paddle/pten/kernels/cpu/math_kernel.cc b/paddle/pten/kernels/cpu/math_kernel.cc index 70e90587123fe882279fd5cec37717132abd6f09..5429da3aa41564ff27af0dc6d1c34a5f48a8975d 100644 --- a/paddle/pten/kernels/cpu/math_kernel.cc +++ b/paddle/pten/kernels/cpu/math_kernel.cc @@ -124,6 +124,7 @@ PT_REGISTER_KERNEL(add_raw, pten::AddRawKernel, float, double, + int16_t, int, int64_t, complex64, @@ -134,6 +135,7 @@ PT_REGISTER_KERNEL(subtract_raw, pten::SubtractRawKernel, float, double, + int16_t, int, int64_t, complex64, @@ -167,6 +169,7 @@ PT_REGISTER_KERNEL(sum_raw, float, double, pten::dtype::float16, + int16_t, 
int, int64_t, complex64, diff --git a/paddle/pten/kernels/flatten_kernel.cc b/paddle/pten/kernels/flatten_kernel.cc index 0ae6cd1b9c35efa57b20e8d4f6a07408544e8166..7ba0553932ad79e872faf441052415c591912e4e 100644 --- a/paddle/pten/kernels/flatten_kernel.cc +++ b/paddle/pten/kernels/flatten_kernel.cc @@ -56,6 +56,7 @@ PT_REGISTER_KERNEL(flatten, double, uint8_t, int8_t, + int16_t, int, int64_t) {} @@ -67,6 +68,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape, double, uint8_t, int8_t, + int16_t, int, int64_t) {} @@ -80,6 +82,7 @@ PT_REGISTER_KERNEL(flatten, double, uint8_t, int8_t, + int16_t, int, int64_t) {} @@ -92,6 +95,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape, double, uint8_t, int8_t, + int16_t, int, int64_t) {} #endif @@ -104,6 +108,7 @@ PT_REGISTER_KERNEL(flatten, float, pten::dtype::float16, int8_t, + int16_t, int, int64_t) {} @@ -114,6 +119,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape, float, pten::dtype::float16, int8_t, + int16_t, int, int64_t) {} #endif diff --git a/paddle/pten/kernels/gpu/full_kernel.cu b/paddle/pten/kernels/gpu/full_kernel.cu index 1a8e40373fcb347e7c38eefe4b00f30832c09578..937d398be769b33215a6bce5c8531d0db17f5cb4 100644 --- a/paddle/pten/kernels/gpu/full_kernel.cu +++ b/paddle/pten/kernels/gpu/full_kernel.cu @@ -119,6 +119,7 @@ PT_REGISTER_KERNEL(full_like, pten::FullLikeKernel, float, double, + int16_t, int, int64_t, bool, diff --git a/paddle/pten/kernels/gpu/math_kernel.cu b/paddle/pten/kernels/gpu/math_kernel.cu index 387defc9f418699238acea74b2a926758d266d46..3b7122ba1b9164c19b98eaf4b9707301234fb54b 100644 --- a/paddle/pten/kernels/gpu/math_kernel.cu +++ b/paddle/pten/kernels/gpu/math_kernel.cu @@ -101,6 +101,7 @@ PT_REGISTER_KERNEL(add_raw, pten::AddRawKernel, float, double, + int16_t, int, int64_t, float16, @@ -112,6 +113,7 @@ PT_REGISTER_KERNEL(subtract_raw, pten::SubtractRawKernel, float, double, + int16_t, int, int64_t, float16, @@ -148,6 +150,7 @@ PT_REGISTER_KERNEL(sum_raw, float, double, float16, + int16_t, int, int64_t, complex64, 
diff --git a/paddle/pten/kernels/math_kernel.cc b/paddle/pten/kernels/math_kernel.cc index 2356fb34bf1b7148d69ce154e4bc38b37aaf3eef..009e911580743cedb897659753b3799cb46e4504 100644 --- a/paddle/pten/kernels/math_kernel.cc +++ b/paddle/pten/kernels/math_kernel.cc @@ -92,6 +92,7 @@ PT_REGISTER_KERNEL(sum, float, double, pten::dtype::float16, + int16_t, int, int64_t, complex64, @@ -105,6 +106,7 @@ PT_REGISTER_KERNEL(add, pten::AddKernel, float, double, + int16_t, int, int64_t, complex64, @@ -115,6 +117,7 @@ PT_REGISTER_KERNEL(subtract, pten::SubtractKernel, float, double, + int16_t, int, int64_t, complex64, @@ -158,6 +161,7 @@ PT_REGISTER_KERNEL(sum, float, double, pten::dtype::float16, + int16_t, int, int64_t, complex64, @@ -170,6 +174,7 @@ PT_REGISTER_KERNEL(add, pten::AddKernel, float, double, + int16_t, int, int64_t, pten::dtype::float16, @@ -181,6 +186,7 @@ PT_REGISTER_KERNEL(subtract, pten::SubtractKernel, float, double, + int16_t, int, int64_t, pten::dtype::float16, diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 1c357c6fa74d5dc80c0005bc82d8ce47b5068ddd..d2653b75eafba2675c9e1b7de86873c72cdd0db2 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -6276,7 +6276,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): return dygraph_utils._append_activation_in_dygraph(out, act) check_variable_and_dtype(x, 'x', [ - 'float16', 'float32', 'float64', 'int32', 'int64', 'bool', 'uint16' + 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', 'bool', + 'uint16' ], 'reshape') check_type(shape, 'shape', (list, tuple, Variable), 'reshape') check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape') @@ -6456,10 +6457,10 @@ def unsqueeze(input, axes, name=None): return out check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') - check_variable_and_dtype( - input, 'input', - ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 
'int64'], - 'unsqueeze') + check_variable_and_dtype(input, 'input', [ + 'float16', 'float32', 'float64', 'bool', 'int8', 'int16', 'int32', + 'int64' + ], 'unsqueeze') helper = LayerHelper("unsqueeze2", **locals()) inputs = {"X": input} attrs = {} @@ -8539,9 +8540,9 @@ def gather_nd(input, index, name=None): """ if in_dygraph_mode(): return _C_ops.gather_nd(input, index) - check_variable_and_dtype(input, 'input', - ['bool', 'float32', 'float64', 'int32', 'int64'], - 'gather_np') + check_variable_and_dtype( + input, 'input', + ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'], 'gather_np') check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np') helper = LayerHelper('gather_nd', **locals()) dtype = helper.input_dtype() diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 4b4b4d0d61c41565bdd87cd34ced3549b1282d74..76414ea942465d1e7a54084a2e4ee31b9ee41a2d 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -250,12 +250,12 @@ def cast(x, dtype): return out check_variable_and_dtype(x, 'x', [ - 'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8', - 'uint16' + 'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64', + 'uint8', 'uint16' ], 'cast') check_dtype(dtype, 'dtype', [ - 'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int64', - 'uint8', 'uint16' + 'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', + 'int64', 'uint8', 'uint16' ], 'cast') helper = LayerHelper('cast', **locals()) diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py index 948e344e4c158abd1e4bdee2a62e83a29aa0d6ea..988e7df41af743e0c10c6bb6bf320d62ee1551bf 100644 --- a/python/paddle/fluid/tests/unittests/test_cast_op.py +++ b/python/paddle/fluid/tests/unittests/test_cast_op.py @@ -109,15 +109,6 @@ class TestCastOpError(unittest.TestCase): x1 = fluid.create_lod_tensor( 
np.array([[-1]]), [[1]], fluid.CPUPlace()) self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32') - # The input dtype of cast_op must be bool, float16, float32, float64, int32, int64, uint8. - x2 = fluid.layers.data(name='x2', shape=[4], dtype='int16') - self.assertRaises(TypeError, fluid.layers.cast, x2, 'int32') - - def test_dtype_type(): - x4 = fluid.layers.data(name='x4', shape=[4], dtype='int32') - output = fluid.layers.cast(x=x4, dtype='int16') - - self.assertRaises(TypeError, test_dtype_type) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py index 3f3b1ee6703643d4d8ae9c683397512462e5d867..be6abb17c3c316d30bdcfa5539ecd1e2549280a5 100644 --- a/python/paddle/fluid/tests/unittests/test_full_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py @@ -81,12 +81,6 @@ class TestFullOpError(unittest.TestCase): x=input_data, fill_value=2, dtype='uint4') - self.assertRaises( - TypeError, - paddle.full_like, - x=input_data, - fill_value=2, - dtype='int16') if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py index 1633d827722897ce3b3c5d1925bc094999c59544..2447408296948276677884b09a801ed4843baf4b 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_cast_op_xpu.py @@ -67,15 +67,6 @@ class TestCastOpError(unittest.TestCase): x1 = fluid.create_lod_tensor( np.array([[-1]]), [[1]], fluid.XPUPlace(0)) self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32') - # The input dtype of cast_op must be float32, int32, int64. 
- x2 = fluid.layers.data(name='x2', shape=[4], dtype='int16') - self.assertRaises(TypeError, fluid.layers.cast, x2, 'int32') - - def test_dtype_type(): - x4 = fluid.layers.data(name='x4', shape=[4], dtype='int32') - output = fluid.layers.cast(x=x4, dtype='int16') - - self.assertRaises(TypeError, test_dtype_type) if __name__ == '__main__': diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index dd56b391d10ff8dc47abaa0dc963b49d4e7961a9..934ccfa72640f0f6f5e6e7fea0206394144e7895 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -219,11 +219,13 @@ def full_like(x, fill_value, dtype=None, name=None): helper = LayerHelper("full_like", **locals()) check_variable_and_dtype( - x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + x, 'x', + ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'], 'full_like') - check_dtype(dtype, 'dtype', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], - 'full_like/zeros_like/ones_like') + check_dtype( + dtype, 'dtype', + ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'], + 'full_like/zeros_like/ones_like') out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 4a1f7f5dc9900ca0fbf2c450e14dc5e839febe39..d8ebae9d6bf3944a39834167ddd15270dd171a89 100755 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -672,7 +672,8 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): if not in_dygraph_mode(): check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'], + x, 'x', + ['float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8'], 'flatten') x_dim = len(x.shape) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index a476a8ccd120a63bbffd15564567f4357d100786..ba8a4d7f11990ccaad2f387bac9c3477cf9ed825 
100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -885,7 +885,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): check_variable_and_dtype( x, 'x', ['bool', 'float16', 'float32', 'float64', - 'int32', 'int64', 'complex64', 'complex128', + 'int16', 'int32', 'int64', 'complex64', 'complex128', u'bool', u'float16', u'float32', u'float64', u'int32', u'int64', u'complex64', u'complex128'], 'sum')