Unverified commit 19da5c0c, authored by wanghuancoder, committed by GitHub

fix security bug (#55782)

* fix security bug
Parent: db700d10
@@ -412,7 +412,7 @@ std::vector<int> CastPyArg2Ints(PyObject* obj,
                 i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     value.reserve(len);
     PyObject* item = nullptr;
@@ -488,7 +488,7 @@ std::vector<int64_t> CastPyArg2Longs(PyObject* obj,
                 i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
@@ -567,7 +567,7 @@ std::vector<float> CastPyArg2Floats(PyObject* obj,
                 i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
@@ -642,7 +642,7 @@ std::vector<double> CastPyArg2Float64s(PyObject* obj,
                 i));
       }
     }
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
     Py_ssize_t len = PySequence_Size(obj);
     PyObject* item = nullptr;
     for (Py_ssize_t i = 0; i < len; i++) {
...
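All four casts above share one fix: a paddle Tensor also satisfies CPython's PySequence_Check, so without the added PyObject_TypeCheck guard a Tensor argument would be routed into the sequence branch and walked element by element. A minimal Python-side sketch of why the guard matters (this demonstrates the sequence protocol, it is not the patched code itself):

# A paddle Tensor behaves like a Python sequence, which is exactly what
# PySequence_Check tests for; the new guard keeps Tensors out of the
# list-of-ints parsing branch in CastPyArg2Ints and friends.
import paddle

t = paddle.to_tensor([1, 2, 3])
print(len(t))   # 3: Tensor supports __len__ ...
print(t[0])     # ... and __getitem__, so it passes PySequence_Check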
@@ -2663,6 +2663,12 @@ void SearchsortedInferMeta(const MetaTensor& sorted_sequence,
                            MetaTensor* out) {
   auto sequences_dims = sorted_sequence.dims();
   auto values_dims = value.dims();
+  PADDLE_ENFORCE_GE(
+      sequences_dims.size(),
+      1,
+      phi::errors::InvalidArgument(
+          "Input sorted_sequence's dimension(%d) must be greater than or equal to 1",
+          sequences_dims.size()));
   bool flag = true;
   if (sequences_dims.size() != values_dims.size()) {
...
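The new PADDLE_ENFORCE_GE rejects a 0-D sorted_sequence before its shape is compared against the values tensor. A quick usage sketch (not from the patch) of the op this infermeta guards:

# paddle.searchsorted with a well-formed 1-D sorted_sequence; a 0-D
# sorted_sequence is now rejected in SearchsortedInferMeta instead of
# being processed with an invalid shape.
import paddle

seq = paddle.to_tensor([1.0, 3.0, 5.0])
vals = paddle.to_tensor([2.0, 4.0])
print(paddle.searchsorted(seq, vals))  # [1, 2]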
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      int root,
                      DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument(
+          "The tensor to be broadcast must not be empty."));
 #if defined(PADDLE_WITH_GLOO)
   dev_ctx.template Alloc<T>(out);
   auto comm_context =
...
@@ -27,6 +27,9 @@ void DotKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const DenseTensor& y,
                DenseTensor* out) {
+  if (out->numel() <= 0) {
+    return;
+  }
   auto const *x_ptr = x.data<T>(), *x_ptr_ = &x_ptr[0];
   auto const *y_ptr = y.data<T>(), *y_ptr_ = &y_ptr[0];
   T* z = dev_ctx.template Alloc<T>(out);
...
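Unlike the collective kernels, DotKernel does not raise on empty data; it simply returns once the output is known to hold no elements. A small usage sketch, with the empty case noted in a comment (the zero-size behavior is inferred from the early return above, not demonstrated):

# Ordinary dot product; nothing about the fix changes this path.
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([4.0, 5.0, 6.0])
print(paddle.dot(x, y))  # 32.0
# If the inputs had a zero-size batch dimension, out->numel() would be 0
# and the kernel would now return before touching x or y's buffers.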
@@ -24,6 +24,10 @@ void EigKernel(const Context& dev_ctx,
                const DenseTensor& x,
                DenseTensor* out_w,
                DenseTensor* out_v) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      errors::InvalidArgument("EigKernel input tensor is empty."));
   if (!IsComplexType(x.dtype())) {
     dev_ctx.template Alloc<phi::dtype::Complex<T>>(out_w);
     dev_ctx.template Alloc<phi::dtype::Complex<T>>(out_v);
...
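EigKernel now fails fast on an empty input rather than handing a zero-element tensor to the decomposition routines. A usage sketch (not from the patch):

# Eigendecomposition of a non-empty square matrix works as before; an
# empty input (numel() == 0) now raises InvalidArgument up front.
import paddle

x = paddle.to_tensor([[2.0, 0.0], [0.0, 3.0]])
w, v = paddle.linalg.eig(x)
print(w)  # eigenvalues of the diagonal matrix: 2+0j, 3+0j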
@@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx,
                   int root,
                   int reduce_type,
                   DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
 #if defined(PADDLE_WITH_GLOO)
   out->Resize(x.dims());
   dev_ctx.template Alloc<T>(out);
...
@@ -153,6 +153,12 @@ void TopkKernel(const Context& dev_ctx,
   }
   int k = k_scalar.to<int>();
+  PADDLE_ENFORCE_GE(
+      x.numel(),
+      k,
+      errors::InvalidArgument(
+          "x has only %d elements, cannot find the top %d values.",
+          x.numel(), k));
   if (k_scalar.FromTensor()) {
     auto out_dims = out->dims();
     // according to axis, set K value in the dim
...
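The check ties k to x.numel(), so an oversized k is rejected before the kernel starts partitioning data. A usage sketch (not from the patch):

# topk with k <= numel works as before; a k larger than the element count
# is now rejected with InvalidArgument instead of reading past the buffer.
import paddle

x = paddle.to_tensor([3.0, 1.0, 2.0])
values, indices = paddle.topk(x, k=2)
print(values)   # [3., 2.]
print(indices)  # [0, 2]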
@@ -122,7 +122,6 @@ struct cpu_gather_scatter_functor {
         self_idx = is_scatter_like ? replace_index : index_idx;
         src_idx = is_scatter_like ? index_idx : replace_index;
         reduce_op((tensor_t*)(self_data + self_idx),  // NOLINT
                   (tensor_t*)(src_data + src_idx));   // NOLINT
         index_idx++;
...
@@ -988,6 +988,10 @@ void ReduceKernel(const KPDevice& dev_ctx,
                   const TransformOp& transform,
                   const std::vector<int>& origin_reduce_dims,
                   bool is_mean = false) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
 #ifdef PADDLE_WITH_XPU_KP
   auto stream = dev_ctx.x_context()->xpu_stream;
 #else
...
@@ -1298,6 +1302,11 @@ void ReduceKernelImpl(const Context& dev_ctx,
                       const std::vector<int64_t>& dims,
                       bool keep_dim,
                       bool reduce_all) {
+  PADDLE_ENFORCE_GT(
+      input.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
   dev_ctx.template Alloc<OutT>(output);
   if (reduce_all) {
...
@@ -32,6 +32,11 @@ void RepeatsTensor2IndexTensor(const Context& ctx,
   int64_t index_size = 0;
   for (int i = 0; i < repeats.dims()[0]; i++) {
+    PADDLE_ENFORCE_GE(repeats_data[i],
+                      0,
+                      phi::errors::InvalidArgument(
+                          "repeats must be greater than or equal to 0, "
+                          "but got %d", repeats_data[i]));
     index_size += repeats_data[i];
   }
   std::vector<RepeatsT> index_vec(index_size);
...
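Because index_size is the running sum of repeats_data, a negative entry would corrupt the size of index_vec; the loop now validates each entry. A usage sketch (not from the patch) of the tensor-repeats path this helper serves:

# Tensor-valued repeats: each entry must now be >= 0; a negative entry
# raises InvalidArgument while the index tensor is being built.
import paddle

x = paddle.to_tensor([1, 2, 3])
r = paddle.to_tensor([1, 0, 2])
print(paddle.repeat_interleave(x, r))  # [1, 3, 3]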
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      int root,
                      DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument(
+          "The tensor to be broadcast must not be empty."));
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   dev_ctx.template Alloc<T>(out);
   gpuStream_t stream = dev_ctx.stream();
...
@@ -31,6 +31,9 @@ void DotKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const DenseTensor& y,
                DenseTensor* out) {
+  if (out->numel() <= 0) {
+    return;
+  }
   dev_ctx.template Alloc<T>(out);
   if (out->dims().size() == 0) {
     auto eigen_out = phi::EigenScalar<T>::From(*out);
...
@@ -51,6 +51,16 @@ void LerpKernel(const Context &ctx,
                 const DenseTensor &y,
                 const DenseTensor &weight,
                 DenseTensor *out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input x must not be empty."));
+  PADDLE_ENFORCE_GT(
+      y.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input y must not be empty."));
   int rank = out->dims().size();
   PADDLE_ENFORCE_GE(
       rank,
...
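Both lerp inputs are now checked before the broadcast machinery runs. A usage sketch (not from the patch):

# lerp(x, y, weight) = x + weight * (y - x); an empty x or y now raises
# InvalidArgument instead of entering the broadcast path.
import paddle

x = paddle.zeros([3])
y = paddle.ones([3])
print(paddle.lerp(x, y, 0.5))  # [0.5, 0.5, 0.5]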
@@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx,
                   int root,
                   int reduce_type,
                   DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   out->Resize(x.dims());
   dev_ctx.template Alloc<T>(out);
...
@@ -77,6 +77,11 @@ void TopkKernel(const Context& dev_ctx,
   if (axis < 0) axis += in_dims.size();
   int k = k_scalar.to<int>();
+  PADDLE_ENFORCE_GE(
+      x.numel(),
+      k,
+      errors::InvalidArgument(
+          "x has only %d elements, cannot find the top %d values.", x.numel(), k));
   if (k_scalar.FromTensor()) {
     phi::DDim out_dims = out->dims();
     out_dims[axis] = k;
...
@@ -83,6 +83,16 @@ void LerpKernel(const Context& ctx,
                 const DenseTensor& y,
                 const DenseTensor& weight,
                 DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input x must not be empty."));
+  PADDLE_ENFORCE_GT(
+      y.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input y must not be empty."));
   int rank = out->dims().size();
   PADDLE_ENFORCE_GE(
       rank,
...
@@ -58,6 +58,11 @@ void RepeatInterleaveKernel(const Context& ctx,
                             int repeats,
                             int dim,
                             DenseTensor* out) {
+  PADDLE_ENFORCE_GT(repeats,
+                    0,
+                    phi::errors::InvalidArgument(
+                        "repeats must be greater than 0, but got %d", repeats));
   auto place = ctx.GetPlace();
   auto cpu_place = phi::CPUPlace();
...
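For the scalar-repeats path the requirement is strict: repeats must be positive. A usage sketch (not from the patch):

# Scalar repeats must now be > 0; repeats=0 or a negative value raises
# InvalidArgument instead of producing an ill-sized output.
import paddle

x = paddle.to_tensor([1, 2, 3])
print(paddle.repeat_interleave(x, 2))  # [1, 1, 2, 2, 3, 3]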
@@ -543,6 +543,8 @@ def unstack(x, axis=0, num=None):
         raise ValueError(
             '`axis` must be in the range [-{0}, {0})'.format(x.ndim)
         )
+    if num is not None and (num < 0 or num > x.shape[axis]):
+        raise ValueError(f'`num` must be in the range [0, {x.shape[axis]}]')
     if in_dynamic_mode():
         if num is None:
             num = x.shape[axis]
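On the Python side, unstack now validates num against the size of the unstacked axis before dispatching to the op. A usage sketch (not from the patch):

# num must now lie within the size of the chosen axis; an out-of-range num
# raises ValueError in Python instead of reaching the C++ op with a bad value.
import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a, b = paddle.unstack(x, axis=0, num=2)
print(a)  # [1., 2., 3.]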
@@ -4372,7 +4374,6 @@ def repeat_interleave(x, repeats, axis=None, name=None):
     if axis is None:
         x = paddle.flatten(x)
         axis = 0
-
     if in_dynamic_mode():
         if isinstance(repeats, Variable):
             return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis)
...