From af23092298c6ac7104b2a6c44c376eeb32cbaae8 Mon Sep 17 00:00:00 2001
From: hess <111584409+shuaihehe@users.noreply.github.com>
Date: Wed, 3 Apr 2024 11:22:48 +0800
Subject: [PATCH] fix (#63193)

Replace CHECK_EQ with PADDLE_ENFORCE_EQ in the CINN pybind bindings so
that failed checks raise phi::errors::InvalidArgument with an
informative message instead of aborting the process.
---
 paddle/cinn/pybind/framework.cc | 28 +++++++++++++++++++++++-----
 paddle/cinn/pybind/frontend.cc  | 27 ++++++++++++++++++---------
 paddle/cinn/pybind/ir/ir.cc     |  9 ++++++++-
 3 files changed, 49 insertions(+), 15 deletions(-)

diff --git a/paddle/cinn/pybind/framework.cc b/paddle/cinn/pybind/framework.cc
index 5122a61d9fc7b..50d1dc23221f7 100644
--- a/paddle/cinn/pybind/framework.cc
+++ b/paddle/cinn/pybind/framework.cc
@@ -78,7 +78,12 @@ void BindFramework(pybind11::module *m) {
                      input_output_names,
                      key,
                      target);
-          CHECK_EQ(funcs.size(), 1U);
+          PADDLE_ENFORCE_EQ(funcs.size(),
+                            1U,
+                            phi::errors::InvalidArgument(
+                                "The size of funcs is incorrect. "
+                                "Expected size is 1, but received %d.",
+                                funcs.size()));
           func = funcs[0];
           return func;
         });
@@ -103,8 +108,11 @@
       })
       .def("get_attr",
            [](NodeAttr &self, const std::string &key) {
-             CHECK_EQ(self.attr_store.count(key), 1)
-                 << "Didn't find value with key [" << key << "].";
+             PADDLE_ENFORCE_EQ(self.attr_store.count(key),
+                               1,
+                               phi::errors::InvalidArgument(
+                                   "Didn't find value with key [%s].",
+                                   key));
              return self.attr_store[key];
            })
       .def("__str__", [](NodeAttr &self) { return utils::GetStreamCnt(self); });
@@ -194,12 +202,22 @@
                 << "currently only support float32 data type as input";
             hlir::framework::shape_t shape;
             std::copy_n(array.shape(), array.ndim(), std::back_inserter(shape));
-            CHECK_EQ(
+            PADDLE_ENFORCE_EQ(
                 std::accumulate(shape.begin(),
                                 shape.end(),
                                 1,
                                 [](int32_t a, int32_t b) { return a * b; }),
-                self->shape().numel());
+                self->shape().numel(),
+                phi::errors::InvalidArgument(
+                    "The product of all elements in the shape container and "
+                    "shape numel is not equal, "
+                    "where the product of all elements in the shape "
+                    "container: %d but shape numel: %d.",
+                    std::accumulate(shape.begin(),
+                                    shape.end(),
+                                    1,
+                                    [](int32_t a, int32_t b) { return a * b; }),
+                    self->shape().numel()));
             auto *data = self->mutable_data(target, self->type());
             if (target.arch == Target::Arch::X86) {
               std::memcpy(data,
diff --git a/paddle/cinn/pybind/frontend.cc b/paddle/cinn/pybind/frontend.cc
index f7eaf01a59f07..b6ba2590f3dad 100644
--- a/paddle/cinn/pybind/frontend.cc
+++ b/paddle/cinn/pybind/frontend.cc
@@ -219,9 +219,12 @@ void BindFrontend(pybind11::module *m) {
             auto in_tensor = scope->GetTensor(tensor_inputs[i]->id);
             auto dtype = tensor_inputs[i]->type;
             auto *data = in_tensor->mutable_data(target, dtype);
-            CHECK_EQ(input_data[i].size(), in_tensor->shape().numel())
-                << "The size of tensor [" << tensor_inputs[i]->id
-                << "] is different with the input data's size! Please check.";
+            PADDLE_ENFORCE_EQ(input_data[i].size(),
+                              in_tensor->shape().numel(),
+                              phi::errors::InvalidArgument(
+                                  "The size of tensor [%s] is different from "
+                                  "the input data's size! Please check.",
+                                  tensor_inputs[i]->id));
             if (target.arch == Target::Arch::NVGPU) {
 #ifdef CINN_WITH_CUDA
               CUDA_CALL(cudaMemcpy(data,
@@ -314,9 +317,12 @@
           for (size_t i = 0; i < tensor_inputs.size(); i++) {
             auto in_tensor = scope->GetTensor(tensor_inputs[i]->id);
             auto *data = in_tensor->mutable_data<float>(target);
-            CHECK_EQ(input_data[i].size(), in_tensor->shape().numel())
-                << "The size of tensor [" << tensor_inputs[i]->id
-                << "] is different with the input data's size! Please check.";
+            PADDLE_ENFORCE_EQ(input_data[i].size(),
+                              in_tensor->shape().numel(),
+                              phi::errors::InvalidArgument(
+                                  "The size of tensor [%s] is different from "
+                                  "the input data's size! Please check.",
+                                  tensor_inputs[i]->id));
             if (target.arch == Target::Arch::NVGPU) {
 #ifdef CINN_WITH_CUDA
               CUDA_CALL(cudaMemcpy(reinterpret_cast<void *>(data),
@@ -365,9 +371,12 @@
           for (size_t i = 0; i < tensor_inputs.size(); i++) {
             auto in_tensor = scope->GetTensor(tensor_inputs[i]->id);
             auto *data = in_tensor->mutable_data<float>(target);
-            CHECK_EQ(input_data[i].size(), in_tensor->shape().numel())
-                << "The size of tensor [" << tensor_inputs[i]->id
-                << "] is different with the input data's size! Please check.";
+            PADDLE_ENFORCE_EQ(input_data[i].size(),
+                              in_tensor->shape().numel(),
+                              phi::errors::InvalidArgument(
+                                  "The size of tensor [%s] is different from "
+                                  "the input data's size! Please check.",
+                                  tensor_inputs[i]->id));
             if (target.arch == Target::Arch::NVGPU) {
 #ifdef CINN_WITH_CUDA
               CUDA_CALL(cudaMemcpy(reinterpret_cast<void *>(data),
diff --git a/paddle/cinn/pybind/ir/ir.cc b/paddle/cinn/pybind/ir/ir.cc
index d9f9bd5fcdf7f..42e8e157998cd 100644
--- a/paddle/cinn/pybind/ir/ir.cc
+++ b/paddle/cinn/pybind/ir/ir.cc
@@ -33,7 +33,14 @@ void TensorStore(Expr tensor, Expr value, const std::vector<Expr>& indices) {
 std::vector<Expr> AxisMap(const std::string& kinds,
                           const std::vector<Expr>& iter_expression) {
   std::vector<Expr> rets;
-  CHECK_EQ(kinds.size(), iter_expression.size());
+  PADDLE_ENFORCE_EQ(
+      kinds.size(),
+      iter_expression.size(),
+      phi::errors::InvalidArgument(
+          "The size of kinds and iter expression in AxisMap is not equal, "
+          "where kinds size: %d but iter expression size: %d.",
+          kinds.size(),
+          iter_expression.size()));
   int n = iter_expression.size();
   rets.reserve(n);
   for (int i = 0; i < n; i++) {
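
A note on the pattern applied throughout this patch: glog-style
CHECK_EQ(a, b) << msg aborts the whole process when the check fails,
which a Python caller cannot catch, while PADDLE_ENFORCE_EQ(a, b,
phi::errors::InvalidArgument(...)) throws, and the pybind layer
surfaces it as a regular Python exception. The sketch below shows the
conversion in isolation. It is illustrative only: ValidateNumel is a
hypothetical helper that does not appear in this patch, and the two
include paths are assumptions (the enforce and error headers have moved
between Paddle versions).

    #include <cstddef>
    #include <vector>

    #include "paddle/common/enforce.h"  // assumed home of PADDLE_ENFORCE_EQ
    #include "paddle/common/errors.h"   // assumed home of phi::errors

    // Hypothetical helper: verify that a flat host buffer matches the
    // element count a tensor expects before copying into it.
    void ValidateNumel(const std::vector<float> &input,
                       size_t expected_numel) {
      // Before: CHECK_EQ(input.size(), expected_numel) << "size mismatch";
      // On failure, the form below throws instead of calling abort(), and
      // the message is built from the printf-style format plus arguments.
      PADDLE_ENFORCE_EQ(input.size(),
                        expected_numel,
                        phi::errors::InvalidArgument(
                            "The size of input is incorrect. "
                            "Expected %d, but received %d.",
                            expected_numel,
                            input.size()));
    }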