diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc
index 85d73278fd56fa5dd8625a01384fa7b58832a779..ba5884108401c98b6a9388f870925dd979f509f1 100644
--- a/paddle/fluid/pybind/op_function_common.cc
+++ b/paddle/fluid/pybind/op_function_common.cc
@@ -412,7 +412,7 @@ std::vector<int> CastPyArg2Ints(PyObject* obj,
             i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     value.reserve(len);
     PyObject* item = nullptr;
@@ -488,7 +488,7 @@ std::vector<int64_t> CastPyArg2Longs(PyObject* obj,
             i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
@@ -567,7 +567,7 @@ std::vector<float> CastPyArg2Floats(PyObject* obj,
             i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
@@ -642,7 +642,7 @@ std::vector<double> CastPyArg2Float64s(PyObject* obj,
             i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index 0d82eeb4b16a16f3b131fcd2f8976e731fa096d9..80200ad527276c9d13eb5c4420e63e746c5c67fd 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -2663,6 +2663,12 @@ void SearchsortedInferMeta(const MetaTensor& sorted_sequence,
                            MetaTensor* out) {
   auto sequences_dims = sorted_sequence.dims();
   auto values_dims = value.dims();
+  PADDLE_ENFORCE_GE(
+      sequences_dims.size(),
+      1,
+      phi::errors::InvalidArgument(
+          "Input sorted_sequence's dimension (%d) must be greater than or "
+          "equal to 1.",
+          sequences_dims.size()));
 
   bool flag = true;
   if (sequences_dims.size() != values_dims.size()) {
diff --git a/paddle/phi/kernels/cpu/broadcast_kernel.cc b/paddle/phi/kernels/cpu/broadcast_kernel.cc
index a99b0835d35d60b6cb71eef4cd974323f4408533..880361d86511d91dfa9db3961e06be055e732719 100644
--- a/paddle/phi/kernels/cpu/broadcast_kernel.cc
+++ b/paddle/phi/kernels/cpu/broadcast_kernel.cc
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      int root,
                      DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be broadcast must not be empty."));
+
 #if defined(PADDLE_WITH_GLOO)
   dev_ctx.template Alloc<T>(out);
   auto comm_context =
diff --git a/paddle/phi/kernels/cpu/dot_kernel.cc b/paddle/phi/kernels/cpu/dot_kernel.cc
index 5fc3d299a6b41aba86a7b35726ab6005384d2752..18d8d86028da9ce9a849c299ca1aba5531f6a45d 100644
--- a/paddle/phi/kernels/cpu/dot_kernel.cc
+++ b/paddle/phi/kernels/cpu/dot_kernel.cc
@@ -27,6 +27,9 @@ void DotKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const DenseTensor& y,
                DenseTensor* out) {
+  if (out->numel() <= 0) {
+    return;
+  }
   auto const *x_ptr = x.data<T>(), *x_ptr_ = &x_ptr[0];
   auto const *y_ptr = y.data<T>(), *y_ptr_ = &y_ptr[0];
   T* z = dev_ctx.template Alloc<T>(out);
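For context on the new `SearchsortedInferMeta` check above: a 0-D `sorted_sequence` previously slipped into the rank-comparison logic below the check. A minimal repro sketch of the case the check now rejects, assuming eager mode (the exact error text comes from the `InvalidArgument` message in the hunk):

```python
import paddle

sorted_sequence = paddle.to_tensor(1.0)   # 0-D tensor: dims().size() == 0
values = paddle.to_tensor([0.5, 1.5])

# With the new PADDLE_ENFORCE_GE, this raises InvalidArgument instead of
# proceeding with an ill-formed 0-D sequence.
out = paddle.searchsorted(sorted_sequence, values)
```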
diff --git a/paddle/phi/kernels/cpu/eig_kernel.cc b/paddle/phi/kernels/cpu/eig_kernel.cc
index 3c68c303fa67a284646d316ff2fcc36a294f1f4f..0ff953c594fb2e7e2ec0183a4325e77eae853cd8 100644
--- a/paddle/phi/kernels/cpu/eig_kernel.cc
+++ b/paddle/phi/kernels/cpu/eig_kernel.cc
@@ -24,6 +24,10 @@ void EigKernel(const Context& dev_ctx,
                const DenseTensor& x,
                DenseTensor* out_w,
                DenseTensor* out_v) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      errors::InvalidArgument("EigKernel input tensor is empty."));
   if (!IsComplexType(x.dtype())) {
     dev_ctx.template Alloc<phi::dtype::Complex<T>>(out_w);
     dev_ctx.template Alloc<phi::dtype::Complex<T>>(out_v);
diff --git a/paddle/phi/kernels/cpu/reduce_kernel.cc b/paddle/phi/kernels/cpu/reduce_kernel.cc
index a368e85bff967213ed076145c04c40ceb6ebb5a4..d4650733f49830e5372d4da266981cad6293f39f 100644
--- a/paddle/phi/kernels/cpu/reduce_kernel.cc
+++ b/paddle/phi/kernels/cpu/reduce_kernel.cc
@@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx,
                   int root,
                   int reduce_type,
                   DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be reduced must not be empty."));
 #if defined(PADDLE_WITH_GLOO)
   out->Resize(x.dims());
   dev_ctx.template Alloc<T>(out);
diff --git a/paddle/phi/kernels/cpu/top_k_kernel.cc b/paddle/phi/kernels/cpu/top_k_kernel.cc
index 1394cf62d119127d8ff5bd1cf88598ceb9407a95..8ba69f31adbe228ae3a7c2e9e919dde81aefff58 100644
--- a/paddle/phi/kernels/cpu/top_k_kernel.cc
+++ b/paddle/phi/kernels/cpu/top_k_kernel.cc
@@ -153,6 +153,12 @@ void TopkKernel(const Context& dev_ctx,
   }
 
   int k = k_scalar.to<int>();
+  PADDLE_ENFORCE_GE(
+      x.numel(),
+      k,
+      errors::InvalidArgument(
+          "x has only %d elements, cannot find %d top values.", x.numel(), k));
+
   if (k_scalar.FromTensor()) {
     auto out_dims = out->dims();
     // according to axis to set K value in the dim
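The CPU TopK check above (mirrored for GPU further down) rejects a `k` larger than the number of elements in `x`. A minimal sketch of the call that now fails fast, assuming eager mode:

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])

# x.numel() == 3, so k=5 now raises InvalidArgument
# ("x has only 3 elements, cannot find 5 top values.") instead of
# reading past the end of the tensor.
values, indices = paddle.topk(x, k=5)
```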
diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
index e88dbf0f7ccdb39f8396970bf2d1eddff8be269f..842ad48160890ef112568eea3084063b34ea573b 100644
--- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc
+++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -122,7 +122,6 @@ struct cpu_gather_scatter_functor {
 
         self_idx = is_scatter_like ? replace_index : index_idx;
         src_idx = is_scatter_like ? index_idx : replace_index;
-
         reduce_op((tensor_t*)(self_data + self_idx),  // NOLINT
                   (tensor_t*)(src_data + src_idx));   // NOLINT
         index_idx++;
diff --git a/paddle/phi/kernels/funcs/reduce_function.h b/paddle/phi/kernels/funcs/reduce_function.h
index 5e738d431dfa6007320342082b8bf8e90ac7a848..cb51ba9caf11010e5d3cbd49af23b1595937fb8d 100644
--- a/paddle/phi/kernels/funcs/reduce_function.h
+++ b/paddle/phi/kernels/funcs/reduce_function.h
@@ -988,6 +988,10 @@ void ReduceKernel(const KPDevice& dev_ctx,
                   const TransformOp& transform,
                   const std::vector<int>& origin_reduce_dims,
                   bool is_mean = false) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be reduced must not be empty."));
 #ifdef PADDLE_WITH_XPU_KP
   auto stream = dev_ctx.x_context()->xpu_stream;
 #else
@@ -1298,6 +1302,11 @@ void ReduceKernelImpl(const Context& dev_ctx,
                       const std::vector<int64_t>& dims,
                       bool keep_dim,
                       bool reduce_all) {
+  PADDLE_ENFORCE_GT(
+      input.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be reduced must not be empty."));
+
   dev_ctx.template Alloc<OutT>(output);
 
   if (reduce_all) {
diff --git a/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h b/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h
index 27155d8630526635f36807dd0ba3e0bdd8f9a509..b66bf39b99e98cdbcd7a477c180ce9213600ba78 100644
--- a/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h
+++ b/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h
@@ -32,6 +32,11 @@ void RepeatsTensor2IndexTensor(const Context& ctx,
 
   int64_t index_size = 0;
   for (int i = 0; i < repeats.dims()[0]; i++) {
+    PADDLE_ENFORCE_GE(repeats_data[i],
+                      0,
+                      phi::errors::InvalidArgument(
+                          "repeats must be greater than or equal to 0, "
+                          "but got %d",
+                          repeats_data[i]));
     index_size += repeats_data[i];
   }
   std::vector<RepeatsT> index_vec(index_size);
diff --git a/paddle/phi/kernels/gpu/broadcast_kernel.cu b/paddle/phi/kernels/gpu/broadcast_kernel.cu
index 324f8c38e36326277a458fa917b03420c483f5d6..c878b5885262ab7e16ee672c4b2edf427afb17be 100644
--- a/paddle/phi/kernels/gpu/broadcast_kernel.cu
+++ b/paddle/phi/kernels/gpu/broadcast_kernel.cu
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      int root,
                      DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be broadcast must not be empty."));
+
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   dev_ctx.template Alloc<T>(out);
   gpuStream_t stream = dev_ctx.stream();
diff --git a/paddle/phi/kernels/gpu/dot_kernel.cu b/paddle/phi/kernels/gpu/dot_kernel.cu
index 72679b518997f1e977940d8d70aa235e437c4795..224dffd06401c77afd1f98ace543d7323ad8a74d 100644
--- a/paddle/phi/kernels/gpu/dot_kernel.cu
+++ b/paddle/phi/kernels/gpu/dot_kernel.cu
@@ -31,6 +31,9 @@ void DotKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const DenseTensor& y,
                DenseTensor* out) {
+  if (out->numel() <= 0) {
+    return;
+  }
   dev_ctx.template Alloc<T>(out);
   if (out->dims().size() == 0) {
     auto eigen_out = phi::EigenScalar<T>::From(*out);
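The `RepeatsTensor2IndexTensor` guard above matters because a negative entry in a tensor-valued `repeats` silently shrank `index_size` and corrupted the generated index tensor. A minimal sketch of the input it now rejects, assuming eager mode:

```python
import paddle

x = paddle.to_tensor([1, 2, 3])
repeats = paddle.to_tensor([1, -2, 1])   # negative repeat count

# Each entry of a tensor-valued `repeats` must now be >= 0; this call
# raises InvalidArgument instead of computing a bogus index size.
out = paddle.repeat_interleave(x, repeats)
```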
phi::errors::InvalidArgument("LerpKernel's input y must not empyt.")); + int rank = out->dims().size(); PADDLE_ENFORCE_GE( rank, diff --git a/paddle/phi/kernels/gpu/reduce_kernel.cu b/paddle/phi/kernels/gpu/reduce_kernel.cu index 87b5e61bda7c8caf8328379c9c9721ef1a2fd80a..ffe721c06b3bc9051334cb6748d1fde3ad4801cb 100644 --- a/paddle/phi/kernels/gpu/reduce_kernel.cu +++ b/paddle/phi/kernels/gpu/reduce_kernel.cu @@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx, int root, int reduce_type, DenseTensor* out) { + PADDLE_ENFORCE_GT( + x.numel(), + 0, + phi::errors::InvalidArgument("Tensor need be reduced must not empyt.")); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) out->Resize(x.dims()); dev_ctx.template Alloc(out); diff --git a/paddle/phi/kernels/gpu/top_k_kernel.cu b/paddle/phi/kernels/gpu/top_k_kernel.cu index bef328ec21a2030077da469aae02eb01b5d446f2..c5ac9f244d9682b01afe47a690eaaa160c64dcb0 100644 --- a/paddle/phi/kernels/gpu/top_k_kernel.cu +++ b/paddle/phi/kernels/gpu/top_k_kernel.cu @@ -77,6 +77,11 @@ void TopkKernel(const Context& dev_ctx, if (axis < 0) axis += in_dims.size(); int k = k_scalar.to(); + PADDLE_ENFORCE_GE( + x.numel(), + k, + errors::InvalidArgument( + "x has only %d element, can not find %d top values.", x.numel(), k)); if (k_scalar.FromTensor()) { phi::DDim out_dims = out->dims(); out_dims[axis] = k; diff --git a/paddle/phi/kernels/impl/lerp_kernel_impl.h b/paddle/phi/kernels/impl/lerp_kernel_impl.h index ad41b4e26367ad2216e35c1ab8290696b09e039f..64af32173fc457b07eb7b2b15fd03402da875377 100644 --- a/paddle/phi/kernels/impl/lerp_kernel_impl.h +++ b/paddle/phi/kernels/impl/lerp_kernel_impl.h @@ -83,6 +83,16 @@ void LerpKernel(const Context& ctx, const DenseTensor& y, const DenseTensor& weight, DenseTensor* out) { + PADDLE_ENFORCE_GT( + x.numel(), + 0, + phi::errors::InvalidArgument("LerpKernel's input x must not empyt.")); + + PADDLE_ENFORCE_GT( + y.numel(), + 0, + phi::errors::InvalidArgument("LerpKernel's input y must not empyt.")); + int rank = out->dims().size(); PADDLE_ENFORCE_GE( rank, diff --git a/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h b/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h index b6050810640083bcec81c4d894f094c5335f2150..9ac7ac6072db448ceefd5022cfeccf5a914f5c86 100644 --- a/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h +++ b/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h @@ -58,6 +58,11 @@ void RepeatInterleaveKernel(const Context& ctx, int repeats, int dim, DenseTensor* out) { + PADDLE_ENFORCE_GT(repeats, + 0, + phi::errors::InvalidArgument( + "repeats must grater than 0, but got %d", repeats)); + auto place = ctx.GetPlace(); auto cpu_place = phi::CPUPlace(); diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 12b780b8d45c596baddd99d7a79175ad000050b4..785396fd953e282519b10c454956078293fcabb4 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -543,6 +543,8 @@ def unstack(x, axis=0, num=None): raise ValueError( '`axis` must be in the range [-{0}, {0})'.format(x.ndim) ) + if num is not None and (num < 0 or num > x.shape[axis]): + raise ValueError(f'`num` must be in the range [0, {x.shape[axis]})') if in_dynamic_mode(): if num is None: num = x.shape[axis] @@ -4372,7 +4374,6 @@ def repeat_interleave(x, repeats, axis=None, name=None): if axis is None: x = paddle.flatten(x) axis = 0 - if in_dynamic_mode(): if isinstance(repeats, Variable): return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis)