
fix security bug (#55782)
* fix security bug
wanghuancoder authored Aug 2, 2023
1 parent db700d1 commit 19da5c0
Showing 18 changed files with 90 additions and 6 deletions.
8 changes: 4 additions & 4 deletions paddle/fluid/pybind/op_function_common.cc
@@ -412,7 +412,7 @@ std::vector<int> CastPyArg2Ints(PyObject* obj,
i));
}
}
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
Py_ssize_t len = PySequence_Size(obj);
value.reserve(len);
PyObject* item = nullptr;
@@ -488,7 +488,7 @@ std::vector<int64_t> CastPyArg2Longs(PyObject* obj,
i));
}
}
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
Py_ssize_t len = PySequence_Size(obj);
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
@@ -567,7 +567,7 @@ std::vector<float> CastPyArg2Floats(PyObject* obj,
i));
}
}
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
Py_ssize_t len = PySequence_Size(obj);
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
@@ -642,7 +642,7 @@ std::vector<double> CastPyArg2Float64s(PyObject* obj,
i));
}
}
-  } else if (PySequence_Check(obj)) {
+  } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) {
Py_ssize_t len = PySequence_Size(obj);
PyObject* item = nullptr;
for (Py_ssize_t i = 0; i < len; i++) {
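Why the added type check matters: a paddle `Tensor` implements the Python sequence protocol, so `PySequence_Check` alone cannot tell a `Tensor` apart from a plain list, and these casting helpers could walk a `Tensor` through the generic sequence path. A minimal Python-side sketch of the ambiguity (illustrative only, not part of this commit):

```python
import paddle

t = paddle.to_tensor([2, 3], dtype='int64')

# A Tensor satisfies the sequence protocol, so CPython's PySequence_Check
# reports true for it, exactly as it does for the plain list [2, 3]:
print(len(t), int(t[0]))  # -> 2 2

# The added `!PyObject_TypeCheck(obj, p_tensor_type)` guard keeps Tensor
# arguments out of the generic PySequence branch in CastPyArg2Ints/Longs/
# Floats/Float64s, so they are handled by the dedicated tensor path instead.
```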
6 changes: 6 additions & 0 deletions paddle/phi/infermeta/binary.cc
@@ -2663,6 +2663,12 @@ void SearchsortedInferMeta(const MetaTensor& sorted_sequence,
MetaTensor* out) {
auto sequences_dims = sorted_sequence.dims();
auto values_dims = value.dims();
+  PADDLE_ENFORCE_GE(
+      sequences_dims.size(),
+      1,
+      phi::errors::InvalidArgument(
+          "Input sorted_sequence's dimension (%d) must be greater than or equal to 1",
+          sequences_dims.size()));

bool flag = true;
if (sequences_dims.size() != values_dims.size()) {
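User-visible effect of the new rank check, sketched from the Python API (assumes `PADDLE_ENFORCE_GE` failures surface as `ValueError`, as phi's `InvalidArgument` typically does):

```python
import paddle

seq = paddle.to_tensor(1.0)          # 0-D tensor: sequences_dims.size() == 0
vals = paddle.to_tensor([0.5, 1.5])

try:
    paddle.searchsorted(seq, vals)   # previously reached the kernel with rank 0
except ValueError as e:
    print(e)                         # InvalidArgument: rank must be >= 1
```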
5 changes: 5 additions & 0 deletions paddle/phi/kernels/cpu/broadcast_kernel.cc
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
const DenseTensor& x,
int root,
DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be broadcast must not be empty."));
+
#if defined(PADDLE_WITH_GLOO)
dev_ctx.template Alloc<T>(out);
auto comm_context =
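The same guard is added to the CPU and GPU variants of both the `BroadcastKernel` and `ReduceKernel` collectives. A sketch of the user-visible effect, assuming an initialized process group (e.g. run under `paddle.distributed.launch`) and that the enforce surfaces as `ValueError`:

```python
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
x = paddle.to_tensor([], dtype='float32')  # numel() == 0

try:
    dist.broadcast(x, src=0)               # empty tensors are now rejected up front
except ValueError as e:
    print(e)
```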
3 changes: 3 additions & 0 deletions paddle/phi/kernels/cpu/dot_kernel.cc
@@ -27,6 +27,9 @@ void DotKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out) {
+  if (out->numel() <= 0) {
+    return;
+  }
auto const *x_ptr = x.data<T>(), *x_ptr_ = &x_ptr[0];
auto const *y_ptr = y.data<T>(), *y_ptr_ = &y_ptr[0];
T* z = dev_ctx.template Alloc<T>(out);
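The dot kernels now return early on zero-sized outputs instead of touching unallocated data. A minimal sketch, assuming a batched dot over zero rows:

```python
import paddle

x = paddle.zeros([0, 3])   # batched dot with zero rows
y = paddle.zeros([0, 3])

out = paddle.dot(x, y)     # kernel returns early; no data is dereferenced
print(out.shape)           # expected: [0]
```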
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/eig_kernel.cc
@@ -24,6 +24,10 @@ void EigKernel(const Context& dev_ctx,
const DenseTensor& x,
DenseTensor* out_w,
DenseTensor* out_v) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      errors::InvalidArgument("EigKernel input tensor is empty."));
if (!IsComplexType(x.dtype())) {
dev_ctx.template Alloc<phi::dtype::Complex<T>>(out_w);
dev_ctx.template Alloc<phi::dtype::Complex<T>>(out_v);
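Sketch of the guarded call (assumes the enforce surfaces as `ValueError`):

```python
import paddle

x = paddle.zeros([0, 0], dtype='float32')  # empty square matrix
try:
    w, v = paddle.linalg.eig(x)            # previously ran the decomposition on an empty input
except ValueError as e:
    print(e)                               # InvalidArgument: EigKernel input tensor is empty.
```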
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/reduce_kernel.cc
@@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx,
int root,
int reduce_type,
DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
#if defined(PADDLE_WITH_GLOO)
out->Resize(x.dims());
dev_ctx.template Alloc<T>(out);
6 changes: 6 additions & 0 deletions paddle/phi/kernels/cpu/top_k_kernel.cc
@@ -153,6 +153,12 @@ void TopkKernel(const Context& dev_ctx,
}

int k = k_scalar.to<int>();
+  PADDLE_ENFORCE_GE(
+      x.numel(),
+      k,
+      errors::InvalidArgument(
+          "x has only %d elements, cannot find the top %d values.", x.numel(), k));
+
if (k_scalar.FromTensor()) {
auto out_dims = out->dims();
// according to axis, set the K value in the dim
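Sketch of the new bound on `k` (assumes `ValueError`):

```python
import paddle

x = paddle.to_tensor([3.0, 1.0, 2.0])
try:
    paddle.topk(x, k=5)   # k exceeds x.numel(): now rejected
except ValueError as e:
    print(e)              # x has only 3 elements, cannot find the top 5 values.
```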
1 change: 0 additions & 1 deletion paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -122,7 +122,6 @@ struct cpu_gather_scatter_functor {

self_idx = is_scatter_like ? replace_index : index_idx;
src_idx = is_scatter_like ? index_idx : replace_index;
-
reduce_op((tensor_t*)(self_data + self_idx), // NOLINT
(tensor_t*)(src_data + src_idx)); // NOLINT
index_idx++;
9 changes: 9 additions & 0 deletions paddle/phi/kernels/funcs/reduce_function.h
@@ -988,6 +988,10 @@ void ReduceKernel(const KPDevice& dev_ctx,
const TransformOp& transform,
const std::vector<int>& origin_reduce_dims,
bool is_mean = false) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
#ifdef PADDLE_WITH_XPU_KP
auto stream = dev_ctx.x_context()->xpu_stream;
#else
@@ -1298,6 +1302,11 @@ void ReduceKernelImpl(const Context& dev_ctx,
const std::vector<int64_t>& dims,
bool keep_dim,
bool reduce_all) {
+  PADDLE_ENFORCE_GT(
+      input.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
+
dev_ctx.template Alloc<OutT>(output);

if (reduce_all) {
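`ReduceKernel`/`ReduceKernelImpl` in reduce_function.h back the generic reduction ops, so with this commit reducing an empty tensor raises instead of reading uninitialized memory. Sketch (assumes the enforce surfaces as `ValueError`; `paddle.max` is just one op routed through this path):

```python
import paddle

x = paddle.to_tensor([], dtype='float32')
try:
    paddle.max(x)   # reduction over zero elements is now rejected
except ValueError as e:
    print(e)
```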
5 changes: 5 additions & 0 deletions paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h
@@ -32,6 +32,11 @@ void RepeatsTensor2IndexTensor(const Context& ctx,

int64_t index_size = 0;
for (int i = 0; i < repeats.dims()[0]; i++) {
+    PADDLE_ENFORCE_GE(repeats_data[i],
+                      0,
+                      phi::errors::InvalidArgument(
+                          "repeats must be greater than or equal to 0, but got %d",
+                          repeats_data[i]));
index_size += repeats_data[i];
}
std::vector<RepeatsT> index_vec(index_size);
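Sketch of the tensor-`repeats` guard (assumes `ValueError`):

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
repeats = paddle.to_tensor([1, -2, 1])    # a negative entry used to corrupt index_size

try:
    paddle.repeat_interleave(x, repeats)  # now rejected with InvalidArgument
except ValueError as e:
    print(e)
```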
5 changes: 5 additions & 0 deletions paddle/phi/kernels/gpu/broadcast_kernel.cu
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
const DenseTensor& x,
int root,
DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be broadcast must not be empty."));
+
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
dev_ctx.template Alloc<T>(out);
gpuStream_t stream = dev_ctx.stream();
3 changes: 3 additions & 0 deletions paddle/phi/kernels/gpu/dot_kernel.cu
@@ -31,6 +31,9 @@ void DotKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out) {
+  if (out->numel() <= 0) {
+    return;
+  }
dev_ctx.template Alloc<T>(out);
if (out->dims().size() == 0) {
auto eigen_out = phi::EigenScalar<T>::From(*out);
10 changes: 10 additions & 0 deletions paddle/phi/kernels/gpu/lerp_kernel.cu
@@ -51,6 +51,16 @@ void LerpKernel(const Context &ctx,
const DenseTensor &y,
const DenseTensor &weight,
DenseTensor *out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input x must not be empty."));
+
+  PADDLE_ENFORCE_GT(
+      y.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input y must not be empty."));
+
int rank = out->dims().size();
PADDLE_ENFORCE_GE(
rank,
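Both this CUDA kernel and the shared CPU implementation further below gain the same pair of checks. Sketch of the guarded call (assumes `ValueError`):

```python
import paddle

x = paddle.to_tensor([], dtype='float32')  # empty input
y = paddle.to_tensor([1.0])
w = paddle.to_tensor(0.5)

try:
    paddle.lerp(x, y, w)                   # empty x (or y) is now rejected
except ValueError as e:
    print(e)                               # LerpKernel's input x must not be empty.
```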
4 changes: 4 additions & 0 deletions paddle/phi/kernels/gpu/reduce_kernel.cu
@@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx,
int root,
int reduce_type,
DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("The tensor to be reduced must not be empty."));
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
out->Resize(x.dims());
dev_ctx.template Alloc<T>(out);
5 changes: 5 additions & 0 deletions paddle/phi/kernels/gpu/top_k_kernel.cu
@@ -77,6 +77,11 @@ void TopkKernel(const Context& dev_ctx,
if (axis < 0) axis += in_dims.size();

int k = k_scalar.to<int>();
+  PADDLE_ENFORCE_GE(
+      x.numel(),
+      k,
+      errors::InvalidArgument(
+          "x has only %d elements, cannot find the top %d values.", x.numel(), k));
if (k_scalar.FromTensor()) {
phi::DDim out_dims = out->dims();
out_dims[axis] = k;
10 changes: 10 additions & 0 deletions paddle/phi/kernels/impl/lerp_kernel_impl.h
@@ -83,6 +83,16 @@ void LerpKernel(const Context& ctx,
const DenseTensor& y,
const DenseTensor& weight,
DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input x must not be empty."));
+
+  PADDLE_ENFORCE_GT(
+      y.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input y must not be empty."));
+
int rank = out->dims().size();
PADDLE_ENFORCE_GE(
rank,
5 changes: 5 additions & 0 deletions paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h
@@ -58,6 +58,11 @@ void RepeatInterleaveKernel(const Context& ctx,
int repeats,
int dim,
DenseTensor* out) {
+  PADDLE_ENFORCE_GT(repeats,
+                    0,
+                    phi::errors::InvalidArgument(
+                        "repeats must be greater than 0, but got %d", repeats));
+
auto place = ctx.GetPlace();
auto cpu_place = phi::CPUPlace();

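Sketch of the scalar-`repeats` guard (assumes `ValueError`):

```python
import paddle

x = paddle.arange(3, dtype='float32')
try:
    paddle.repeat_interleave(x, repeats=-1)  # non-positive repeats now rejected
except ValueError as e:
    print(e)                                 # repeats must be greater than 0, but got -1
```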
3 changes: 2 additions & 1 deletion python/paddle/tensor/manipulation.py
@@ -543,6 +543,8 @@ def unstack(x, axis=0, num=None):
raise ValueError(
'`axis` must be in the range [-{0}, {0})'.format(x.ndim)
)
+    if num is not None and (num < 0 or num > x.shape[axis]):
+        raise ValueError(f'`num` must be in the range [0, {x.shape[axis]}]')
if in_dynamic_mode():
if num is None:
num = x.shape[axis]
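The `num` validation above is directly user-visible; a quick sketch (valid range per the check: `0 <= num <= x.shape[axis]`):

```python
import paddle

x = paddle.rand([3, 4])
try:
    paddle.unstack(x, axis=0, num=5)  # num > x.shape[0]: now rejected
except ValueError as e:
    print(e)                          # `num` must be in the range [0, 3]
```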
@@ -4372,7 +4374,6 @@ def repeat_interleave(x, repeats, axis=None, name=None):
if axis is None:
x = paddle.flatten(x)
axis = 0
-
if in_dynamic_mode():
if isinstance(repeats, Variable):
return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis)
