From 0339b6b89f0ba5026f3492aa815fcefc40221869 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Fri, 28 Jul 2023 07:28:17 +0000 Subject: [PATCH 1/8] fix security bug --- paddle/fluid/pybind/op_function_common.cc | 8 ++++---- paddle/phi/infermeta/binary.cc | 14 ++++++++++++++ paddle/phi/infermeta/unary.cc | 12 +++++++++++- paddle/phi/kernels/cpu/dot_kernel.cc | 3 +++ paddle/phi/kernels/cpu/eig_kernel.cc | 4 ++++ paddle/phi/kernels/cpu/top_k_kernel.cc | 6 ++++++ paddle/phi/kernels/funcs/gather_scatter_functor.cc | 7 ++++++- paddle/phi/kernels/funcs/reduce_function.h | 5 +++++ paddle/phi/kernels/gpu/dot_kernel.cu | 3 +++ paddle/phi/kernels/gpu/top_k_kernel.cu | 5 +++++ paddle/phi/kernels/impl/nextafter_kernel_impl.h | 8 ++++++++ 11 files changed, 69 insertions(+), 6 deletions(-) diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc index b4b96c02bb2271..cfbf5cfac6b403 100644 --- a/paddle/fluid/pybind/op_function_common.cc +++ b/paddle/fluid/pybind/op_function_common.cc @@ -416,7 +416,7 @@ std::vector CastPyArg2Ints(PyObject* obj, i)); } } - } else if (PySequence_Check(obj)) { + } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) { Py_ssize_t len = PySequence_Size(obj); value.reserve(len); PyObject* item = nullptr; @@ -492,7 +492,7 @@ std::vector CastPyArg2Longs(PyObject* obj, i)); } } - } else if (PySequence_Check(obj)) { + } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; for (Py_ssize_t i = 0; i < len; i++) { @@ -571,7 +571,7 @@ std::vector CastPyArg2Floats(PyObject* obj, i)); } } - } else if (PySequence_Check(obj)) { + } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; for (Py_ssize_t i = 0; i < len; i++) { @@ -646,7 +646,7 @@ std::vector CastPyArg2Float64s(PyObject* obj, i)); } } - } else if (PySequence_Check(obj)) { + } else if (PySequence_Check(obj) && !PyObject_TypeCheck(obj, p_tensor_type)) { Py_ssize_t len = PySequence_Size(obj); PyObject* item = nullptr; for (Py_ssize_t i = 0; i < len; i++) { diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index 719a5f2f130af7..a7452b5566a35c 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -2636,6 +2636,20 @@ void SearchsortedInferMeta(const MetaTensor& sorted_sequence, MetaTensor* out) { auto sequences_dims = sorted_sequence.dims(); auto values_dims = value.dims(); + PADDLE_ENFORCE_GE( + sequences_dims.size(), + 1, + phi::errors::InvalidArgument( + "Input sequences's dimension(%d) must be greater or equal than 1", + sequences_dims.size())); + + PADDLE_ENFORCE_GE(values_dims.size(), + sequences_dims.size(), + phi::errors::InvalidArgument( + "Input values's dimension(%d) must be greater or equal " + "than sequences's dimension(%s)", + values_dims.size(), + sequences_dims.size())); bool flag = true; if (sequences_dims.size() != values_dims.size()) { diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 0d2a7ca8d26c0e..94aee15b964cc3 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -2271,7 +2271,17 @@ void ModeInferMeta(const MetaTensor& x, input_dims.size(), 0, errors::InvalidArgument("input of ModeOp must have >= 0d shape")); - if (axis < 0) axis += dim_size; + if (axis < 0) { + axis += dim_size; + PADDLE_ENFORCE_GE(axis, + 0, + phi::errors::InvalidArgument( + "the axis must be 
[-%d, %d), but received %d .", + dim_size, + dim_size, + axis - dim_size)); + } + std::vector dimvec; for (int64_t i = 0; i < axis; i++) { dimvec.emplace_back(input_dims[i]); diff --git a/paddle/phi/kernels/cpu/dot_kernel.cc b/paddle/phi/kernels/cpu/dot_kernel.cc index 5fc3d299a6b41a..18d8d86028da9c 100644 --- a/paddle/phi/kernels/cpu/dot_kernel.cc +++ b/paddle/phi/kernels/cpu/dot_kernel.cc @@ -27,6 +27,9 @@ void DotKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, DenseTensor* out) { + if (out->numel() <= 0) { + return; + } auto const *x_ptr = x.data(), *x_ptr_ = &x_ptr[0]; auto const *y_ptr = y.data(), *y_ptr_ = &y_ptr[0]; T* z = dev_ctx.template Alloc(out); diff --git a/paddle/phi/kernels/cpu/eig_kernel.cc b/paddle/phi/kernels/cpu/eig_kernel.cc index 3c68c303fa67a2..0ff953c594fb2e 100644 --- a/paddle/phi/kernels/cpu/eig_kernel.cc +++ b/paddle/phi/kernels/cpu/eig_kernel.cc @@ -24,6 +24,10 @@ void EigKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out_w, DenseTensor* out_v) { + PADDLE_ENFORCE_GT( + x.numel(), + 0, + errors::InvalidArgument("EigKernel input tensor is empty.")); if (!IsComplexType(x.dtype())) { dev_ctx.template Alloc>(out_w); dev_ctx.template Alloc>(out_v); diff --git a/paddle/phi/kernels/cpu/top_k_kernel.cc b/paddle/phi/kernels/cpu/top_k_kernel.cc index 1394cf62d11912..8ba69f31adbe22 100644 --- a/paddle/phi/kernels/cpu/top_k_kernel.cc +++ b/paddle/phi/kernels/cpu/top_k_kernel.cc @@ -153,6 +153,12 @@ void TopkKernel(const Context& dev_ctx, } int k = k_scalar.to(); + PADDLE_ENFORCE_GE( + x.numel(), + k, + errors::InvalidArgument( + "x has only %d element, can not find %d top values.", x.numel(), k)); + if (k_scalar.FromTensor()) { auto out_dims = out->dims(); // accroding to axis to set K value in the dim diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc index e88dbf0f7ccdb3..f8d371912ae77d 100644 --- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc +++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc @@ -122,7 +122,12 @@ struct cpu_gather_scatter_functor { self_idx = is_scatter_like ? replace_index : index_idx; src_idx = is_scatter_like ? 
index_idx : replace_index; - + PADDLE_ENFORCE( + (self_idx > 0 && self_idx < self_size), + errors::InvalidArgument("Wrong gather index for output.")); + PADDLE_ENFORCE( + (src_idx > 0 && src_idx < src_size), + errors::InvalidArgument("Wrong gather index for input.")); reduce_op((tensor_t*)(self_data + self_idx), // NOLINT (tensor_t*)(src_data + src_idx)); // NOLINT index_idx++; diff --git a/paddle/phi/kernels/funcs/reduce_function.h b/paddle/phi/kernels/funcs/reduce_function.h index 5e738d431dfa60..ac7145108a8388 100644 --- a/paddle/phi/kernels/funcs/reduce_function.h +++ b/paddle/phi/kernels/funcs/reduce_function.h @@ -1298,6 +1298,11 @@ void ReduceKernelImpl(const Context& dev_ctx, const std::vector& dims, bool keep_dim, bool reduce_all) { + PADDLE_ENFORCE_GT( + input.numel(), + 0, + phi::errors::InvalidArgument("Tensor need be reduced must not empyt.")); + dev_ctx.template Alloc(output); if (reduce_all) { diff --git a/paddle/phi/kernels/gpu/dot_kernel.cu b/paddle/phi/kernels/gpu/dot_kernel.cu index 72679b518997f1..224dffd06401c7 100644 --- a/paddle/phi/kernels/gpu/dot_kernel.cu +++ b/paddle/phi/kernels/gpu/dot_kernel.cu @@ -31,6 +31,9 @@ void DotKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, DenseTensor* out) { + if (out->numel() <= 0) { + return; + } dev_ctx.template Alloc(out); if (out->dims().size() == 0) { auto eigen_out = phi::EigenScalar::From(*out); diff --git a/paddle/phi/kernels/gpu/top_k_kernel.cu b/paddle/phi/kernels/gpu/top_k_kernel.cu index bef328ec21a203..c5ac9f244d9682 100644 --- a/paddle/phi/kernels/gpu/top_k_kernel.cu +++ b/paddle/phi/kernels/gpu/top_k_kernel.cu @@ -77,6 +77,11 @@ void TopkKernel(const Context& dev_ctx, if (axis < 0) axis += in_dims.size(); int k = k_scalar.to(); + PADDLE_ENFORCE_GE( + x.numel(), + k, + errors::InvalidArgument( + "x has only %d element, can not find %d top values.", x.numel(), k)); if (k_scalar.FromTensor()) { phi::DDim out_dims = out->dims(); out_dims[axis] = k; diff --git a/paddle/phi/kernels/impl/nextafter_kernel_impl.h b/paddle/phi/kernels/impl/nextafter_kernel_impl.h index 6d54009282528e..01f92457e1a546 100644 --- a/paddle/phi/kernels/impl/nextafter_kernel_impl.h +++ b/paddle/phi/kernels/impl/nextafter_kernel_impl.h @@ -76,6 +76,14 @@ void NextafterKernel(const Context& ctx, auto y_data = y.data(); auto x_numel = x.numel(); + PADDLE_ENFORCE_EQ( + x.dims(), + y.dims(), + errors::InvalidArgument( + "x and y must have same shape, but x.shape = %s, y.shape = %s.", + x.dims(), + y.dims())); + phi::funcs::ForRange for_range(ctx, x_numel); phi::NextafterFunctor functor(x_data, y_data, out_data, x_numel); for_range(functor); From 013e5963d4712a99b58c0a496f8c25806e8e0f80 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Fri, 28 Jul 2023 08:52:31 +0000 Subject: [PATCH 2/8] refine --- paddle/phi/kernels/cpu/reduce_kernel.cc | 4 ++++ paddle/phi/kernels/funcs/reduce_function.h | 4 ++++ paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h | 5 +++++ paddle/phi/kernels/gpu/reduce_kernel.cu | 4 ++++ paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h | 5 +++++ python/paddle/tensor/manipulation.py | 3 ++- 6 files changed, 24 insertions(+), 1 deletion(-) diff --git a/paddle/phi/kernels/cpu/reduce_kernel.cc b/paddle/phi/kernels/cpu/reduce_kernel.cc index a368e85bff9672..d4650733f49830 100644 --- a/paddle/phi/kernels/cpu/reduce_kernel.cc +++ b/paddle/phi/kernels/cpu/reduce_kernel.cc @@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx, int root, int reduce_type, DenseTensor* out) { + PADDLE_ENFORCE_GT( + x.numel(), 
+ 0, + phi::errors::InvalidArgument("Tensor need be reduced must not empyt.")); #if defined(PADDLE_WITH_GLOO) out->Resize(x.dims()); dev_ctx.template Alloc(out); diff --git a/paddle/phi/kernels/funcs/reduce_function.h b/paddle/phi/kernels/funcs/reduce_function.h index ac7145108a8388..cb51ba9caf1101 100644 --- a/paddle/phi/kernels/funcs/reduce_function.h +++ b/paddle/phi/kernels/funcs/reduce_function.h @@ -988,6 +988,10 @@ void ReduceKernel(const KPDevice& dev_ctx, const TransformOp& transform, const std::vector& origin_reduce_dims, bool is_mean = false) { + PADDLE_ENFORCE_GT( + x.numel(), + 0, + phi::errors::InvalidArgument("Tensor need be reduced must not empyt.")); #ifdef PADDLE_WITH_XPU_KP auto stream = dev_ctx.x_context()->xpu_stream; #else diff --git a/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h b/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h index 27155d86305266..16635fa05d7aa0 100644 --- a/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h +++ b/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h @@ -32,6 +32,11 @@ void RepeatsTensor2IndexTensor(const Context& ctx, int64_t index_size = 0; for (int i = 0; i < repeats.dims()[0]; i++) { + PADDLE_ENFORCE_GT( + repeats_data[i], + 0, + phi::errors::InvalidArgument("repeats must grater than 0, but got %d", + repeats_data[i])); index_size += repeats_data[i]; } std::vector index_vec(index_size); diff --git a/paddle/phi/kernels/gpu/reduce_kernel.cu b/paddle/phi/kernels/gpu/reduce_kernel.cu index 87b5e61bda7c8c..ffe721c06b3bc9 100644 --- a/paddle/phi/kernels/gpu/reduce_kernel.cu +++ b/paddle/phi/kernels/gpu/reduce_kernel.cu @@ -29,6 +29,10 @@ void ReduceKernel(const Context& dev_ctx, int root, int reduce_type, DenseTensor* out) { + PADDLE_ENFORCE_GT( + x.numel(), + 0, + phi::errors::InvalidArgument("Tensor need be reduced must not empyt.")); #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) out->Resize(x.dims()); dev_ctx.template Alloc(out); diff --git a/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h b/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h index b6050810640083..9ac7ac6072db44 100644 --- a/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h +++ b/paddle/phi/kernels/impl/repeat_interleave_kernel_impl.h @@ -58,6 +58,11 @@ void RepeatInterleaveKernel(const Context& ctx, int repeats, int dim, DenseTensor* out) { + PADDLE_ENFORCE_GT(repeats, + 0, + phi::errors::InvalidArgument( + "repeats must grater than 0, but got %d", repeats)); + auto place = ctx.GetPlace(); auto cpu_place = phi::CPUPlace(); diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index d71f1b09a2f9da..1610d199da0be1 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -545,6 +545,8 @@ def unstack(x, axis=0, num=None): raise ValueError( '`axis` must be in the range [-{0}, {0})'.format(x.ndim) ) + if num < 0 or num > x.shape[axis]: + raise ValueError(f'`num` must be in the range [0, {x.shape[axis]})') if in_dynamic_mode(): if num is None: num = x.shape[axis] @@ -4374,7 +4376,6 @@ def repeat_interleave(x, repeats, axis=None, name=None): if axis is None: x = paddle.flatten(x) axis = 0 - if in_dynamic_mode(): if isinstance(repeats, Variable): return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis) From 2d55c4254a9ae98ca4a5409791b447f1420b638b Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Fri, 28 Jul 2023 09:23:30 +0000 Subject: [PATCH 3/8] refine --- paddle/phi/kernels/cpu/broadcast_kernel.cc | 5 +++++ 
 paddle/phi/kernels/gpu/broadcast_kernel.cu | 5 +++++
 paddle/phi/kernels/gpu/lerp_kernel.cu | 10 ++++++++++
 paddle/phi/kernels/impl/lerp_kernel_impl.h | 10 ++++++++++
 4 files changed, 30 insertions(+)

diff --git a/paddle/phi/kernels/cpu/broadcast_kernel.cc b/paddle/phi/kernels/cpu/broadcast_kernel.cc
index a99b0835d35d60..880361d86511d9 100644
--- a/paddle/phi/kernels/cpu/broadcast_kernel.cc
+++ b/paddle/phi/kernels/cpu/broadcast_kernel.cc
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      int root,
                      DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be broadcast must not be empty."));
+
 #if defined(PADDLE_WITH_GLOO)
   dev_ctx.template Alloc(out);
   auto comm_context =
diff --git a/paddle/phi/kernels/gpu/broadcast_kernel.cu b/paddle/phi/kernels/gpu/broadcast_kernel.cu
index 324f8c38e36326..c878b5885262ab 100644
--- a/paddle/phi/kernels/gpu/broadcast_kernel.cu
+++ b/paddle/phi/kernels/gpu/broadcast_kernel.cu
@@ -28,6 +28,11 @@ void BroadcastKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      int root,
                      DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("Tensor to be broadcast must not be empty."));
+
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   dev_ctx.template Alloc(out);
   gpuStream_t stream = dev_ctx.stream();
diff --git a/paddle/phi/kernels/gpu/lerp_kernel.cu b/paddle/phi/kernels/gpu/lerp_kernel.cu
index 17964760990cc3..75f321c8c96d08 100644
--- a/paddle/phi/kernels/gpu/lerp_kernel.cu
+++ b/paddle/phi/kernels/gpu/lerp_kernel.cu
@@ -51,6 +51,16 @@ void LerpKernel(const Context &ctx,
                 const DenseTensor &y,
                 const DenseTensor &weight,
                 DenseTensor *out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input x must not be empty."));
+
+  PADDLE_ENFORCE_GT(
+      y.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input y must not be empty."));
+
   int rank = out->dims().size();
   PADDLE_ENFORCE_GE(
       rank,
diff --git a/paddle/phi/kernels/impl/lerp_kernel_impl.h b/paddle/phi/kernels/impl/lerp_kernel_impl.h
index ad41b4e26367ad..64af32173fc457 100644
--- a/paddle/phi/kernels/impl/lerp_kernel_impl.h
+++ b/paddle/phi/kernels/impl/lerp_kernel_impl.h
@@ -83,6 +83,16 @@ void LerpKernel(const Context& ctx,
                 const DenseTensor& y,
                 const DenseTensor& weight,
                 DenseTensor* out) {
+  PADDLE_ENFORCE_GT(
+      x.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input x must not be empty."));
+
+  PADDLE_ENFORCE_GT(
+      y.numel(),
+      0,
+      phi::errors::InvalidArgument("LerpKernel's input y must not be empty."));
+
   int rank = out->dims().size();
   PADDLE_ENFORCE_GE(
       rank,

From c2740b35f3671554707c1e3af3ffe9de38264449 Mon Sep 17 00:00:00 2001
From: Wang Huan
Date: Mon, 31 Jul 2023 03:05:59 +0000
Subject: [PATCH 4/8] refine

---
 paddle/phi/kernels/funcs/gather_scatter_functor.cc | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
index f8d371912ae77d..842ad48160890e 100644
--- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc
+++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -122,12 +122,6 @@ struct cpu_gather_scatter_functor {
         self_idx = is_scatter_like ? replace_index : index_idx;
         src_idx = is_scatter_like ?
index_idx : replace_index; - PADDLE_ENFORCE( - (self_idx > 0 && self_idx < self_size), - errors::InvalidArgument("Wrong gather index for output.")); - PADDLE_ENFORCE( - (src_idx > 0 && src_idx < src_size), - errors::InvalidArgument("Wrong gather index for input.")); reduce_op((tensor_t*)(self_data + self_idx), // NOLINT (tensor_t*)(src_data + src_idx)); // NOLINT index_idx++; From 5da013dfb08c32e5ee2b2444d74c2906db2433bf Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Mon, 31 Jul 2023 06:25:39 +0000 Subject: [PATCH 5/8] refine --- paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h b/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h index 16635fa05d7aa0..b66bf39b99e98c 100644 --- a/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h +++ b/paddle/phi/kernels/funcs/repeat_tensor2index_tensor.h @@ -32,11 +32,11 @@ void RepeatsTensor2IndexTensor(const Context& ctx, int64_t index_size = 0; for (int i = 0; i < repeats.dims()[0]; i++) { - PADDLE_ENFORCE_GT( - repeats_data[i], - 0, - phi::errors::InvalidArgument("repeats must grater than 0, but got %d", - repeats_data[i])); + PADDLE_ENFORCE_GE(repeats_data[i], + 0, + phi::errors::InvalidArgument( + "repeats must grater or equal than 0, but got %d", + repeats_data[i])); index_size += repeats_data[i]; } std::vector index_vec(index_size); From 2f0379472bdb7fa89c526fe2cd52c211f485b79a Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Mon, 31 Jul 2023 07:47:36 +0000 Subject: [PATCH 6/8] refine --- paddle/phi/infermeta/binary.cc | 8 -------- paddle/phi/kernels/impl/nextafter_kernel_impl.h | 8 -------- python/paddle/tensor/manipulation.py | 2 +- 3 files changed, 1 insertion(+), 17 deletions(-) diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index a7452b5566a35c..17ba912564254b 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -2643,14 +2643,6 @@ void SearchsortedInferMeta(const MetaTensor& sorted_sequence, "Input sequences's dimension(%d) must be greater or equal than 1", sequences_dims.size())); - PADDLE_ENFORCE_GE(values_dims.size(), - sequences_dims.size(), - phi::errors::InvalidArgument( - "Input values's dimension(%d) must be greater or equal " - "than sequences's dimension(%s)", - values_dims.size(), - sequences_dims.size())); - bool flag = true; if (sequences_dims.size() != values_dims.size()) { flag = false; diff --git a/paddle/phi/kernels/impl/nextafter_kernel_impl.h b/paddle/phi/kernels/impl/nextafter_kernel_impl.h index 01f92457e1a546..6d54009282528e 100644 --- a/paddle/phi/kernels/impl/nextafter_kernel_impl.h +++ b/paddle/phi/kernels/impl/nextafter_kernel_impl.h @@ -76,14 +76,6 @@ void NextafterKernel(const Context& ctx, auto y_data = y.data(); auto x_numel = x.numel(); - PADDLE_ENFORCE_EQ( - x.dims(), - y.dims(), - errors::InvalidArgument( - "x and y must have same shape, but x.shape = %s, y.shape = %s.", - x.dims(), - y.dims())); - phi::funcs::ForRange for_range(ctx, x_numel); phi::NextafterFunctor functor(x_data, y_data, out_data, x_numel); for_range(functor); diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 1610d199da0be1..73bad53c06e5db 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -545,7 +545,7 @@ def unstack(x, axis=0, num=None): raise ValueError( '`axis` must be in the range [-{0}, {0})'.format(x.ndim) ) - if num < 0 or num > x.shape[axis]: + if 
num is not None and (num < 0 or num > x.shape[axis]): raise ValueError(f'`num` must be in the range [0, {x.shape[axis]})') if in_dynamic_mode(): if num is None: From 6cb29418f592eb01265e672c3a8fb297b95d214c Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Mon, 31 Jul 2023 08:57:02 +0000 Subject: [PATCH 7/8] refine --- paddle/phi/infermeta/unary.cc | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 94aee15b964cc3..3d847609fac382 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -2271,16 +2271,6 @@ void ModeInferMeta(const MetaTensor& x, input_dims.size(), 0, errors::InvalidArgument("input of ModeOp must have >= 0d shape")); - if (axis < 0) { - axis += dim_size; - PADDLE_ENFORCE_GE(axis, - 0, - phi::errors::InvalidArgument( - "the axis must be [-%d, %d), but received %d .", - dim_size, - dim_size, - axis - dim_size)); - } std::vector dimvec; for (int64_t i = 0; i < axis; i++) { From ea96186a33792857a82d62d0efb72e323cb42da1 Mon Sep 17 00:00:00 2001 From: Wang Huan Date: Tue, 1 Aug 2023 07:18:13 +0000 Subject: [PATCH 8/8] security_fix --- paddle/phi/infermeta/unary.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 6a44c591e5622f..417b6f358fcaab 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -2272,7 +2272,7 @@ void ModeInferMeta(const MetaTensor& x, input_dims.size(), 0, errors::InvalidArgument("input of ModeOp must have >= 0d shape")); - + if (axis < 0) axis += dim_size; std::vector dimvec; for (int64_t i = 0; i < axis; i++) { dimvec.emplace_back(input_dims[i]);
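The checks added across this series follow one pattern: validate sizes and indices at the kernel or API boundary so that malformed inputs are rejected with a clean InvalidArgument error before they reach code that assumes well-formed shapes. The snippet below is an illustrative sketch of that behaviour, not part of the patch series; it assumes a Paddle build that already contains these commits, and the exact exception types and messages raised for the kernel-level checks are an assumption that may differ between builds.

```python
# Illustrative sketch only -- not part of the patches above.
# Assumes a Paddle build containing these commits; exception types for the
# C++ PADDLE_ENFORCE checks are an assumption (commonly ValueError).
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])

# top_k_kernel now enforces x.numel() >= k, so an oversized k is rejected
# up front instead of reaching the kernel with a size it does not handle.
try:
    paddle.topk(x, k=10)
except Exception as e:
    print("topk rejected oversized k:", type(e).__name__)

# repeat_tensor2index_tensor.h now requires every entry of `repeats` to be
# >= 0, so a negative repeat count can no longer yield a bogus index size.
try:
    paddle.repeat_interleave(x, paddle.to_tensor([1, -2, 1]), axis=0)
except Exception as e:
    print("repeat_interleave rejected negative repeats:", type(e).__name__)

# unstack's Python front end now validates `num` against x.shape[axis]
# before dispatching to the C++ op, raising a plain ValueError.
try:
    paddle.unstack(x, axis=0, num=7)
except ValueError as e:
    print("unstack rejected out-of-range num:", e)
```

The first two calls exercise the kernel-level PADDLE_ENFORCE checks added in patches 1, 2 and 5; the last exercises the pure-Python guard added to `unstack` in patch 2 and refined in patch 6.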