From eeac70a6c59b895f728834dca8399428166d7362 Mon Sep 17 00:00:00 2001 From: JeffWangYH <78253082+Jeff114514@users.noreply.github.com> Date: Thu, 1 Aug 2024 09:52:52 +0800 Subject: [PATCH] =?UTF-8?q?=E3=80=90Error=20Message=20BUAA=E3=80=91add=20e?= =?UTF-8?q?rror=20description=20for=20few=20files=20(#66774)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add msg * fix msg * second * third * fix ci approval * Update schedule_desc.cc * Update test_phi_tensor.cc * codestyle * codestyle --- paddle/cinn/ir/schedule/schedule_desc.cc | 39 +++- test/cpp/phi/api/test_phi_tensor.cc | 229 +++++++++++++++++++---- test/cpp/phi/core/test_dense_tensor.cc | 212 ++++++++++++++++++--- 3 files changed, 407 insertions(+), 73 deletions(-) diff --git a/paddle/cinn/ir/schedule/schedule_desc.cc b/paddle/cinn/ir/schedule/schedule_desc.cc index 61d78052a8f040..64174f6610046a 100644 --- a/paddle/cinn/ir/schedule/schedule_desc.cc +++ b/paddle/cinn/ir/schedule/schedule_desc.cc @@ -98,7 +98,11 @@ class PackedStepContext { input_range_.size(), ::common::errors::InvalidArgument("idx overranges")); const auto& range = input_range_.at(idx); - CHECK(range.second - range.first == 1) << "not single param"; + + PADDLE_ENFORCE_EQ(range.second - range.first, + 1, + ::common::errors::InvalidArgument( + "Input is not single param, idx: %d.", idx)); return inputs_[range.first]; } @@ -136,7 +140,11 @@ class PackedStepContext { size_t input_idx = 0; for (auto&& param_name : step_kind->inputs_) { auto arg_it = desc.inputs.find(param_name); - CHECK(arg_it != desc.inputs.end()) << "Can't find param:" << param_name; + PADDLE_ENFORCE_NE( + arg_it, + desc.inputs.end(), + ::common::errors::InvalidArgument( + "Can't find param: %s while building inputs", param_name)); auto&& args = arg_it->second; inputs_.insert(inputs_.end(), std::make_move_iterator(args.begin()), @@ -149,8 +157,10 @@ class PackedStepContext { size_t attr_idx = 0; for (auto&& attr_name : 
step_kind->attrs_) { auto attr_it = desc.attrs.find(attr_name); - CHECK(attr_it != desc.attrs.end()) - << "Can't find attribute:" << attr_name; + PADDLE_ENFORCE_NE(attr_it, + desc.attrs.end(), + ::common::errors::InvalidArgument( + "Can't find attribute: %s", attr_name)); attrs_.emplace_back(attr_it->second); ++attr_idx; } @@ -694,8 +704,10 @@ proto::ScheduleDesc ScheduleDesc::ToProto() const { expr_desc->set_parameter(param_name); for (auto&& expr : param2exprs.second) { auto expr_it = expr2name.find(expr); - CHECK(expr_it != expr2name.end()) - << "Can't find expr of param_name: " << param_name; + PADDLE_ENFORCE_NE(expr_it, + expr2name.end(), + ::common::errors::InvalidArgument( + "Can't find expr of param_name: %s", param_name)); expr_desc->add_arguments(expr_it->second); } } @@ -738,17 +750,26 @@ std::vector ScheduleDesc::ReplayWithProto( VLOG(4) << "Replay step:\n" << step_proto.DebugString(); ScheduleDesc::Step step; step.type = step_proto.type(); - CHECK(!step.type.empty()) << "Name of StepKind is empty"; + PADDLE_ENFORCE_NE( + step.type.empty(), + true, + ::common::errors::InvalidArgument("Name of StepKind is empty")); if (without_post_schedule && step.type == "TagPostSchedule") { break; } const StepKindInfo* step_kind = StepKindRegistry::Global()->Find(step.type); - CHECK(step_kind) << "Can't find StepKind:" << step.type; + PADDLE_ENFORCE_NE(step_kind, + nullptr, + ::common::errors::InvalidArgument( + "Can't find StepKind: %s", step.type)); for (auto&& param2args : step_proto.inputs()) { for (auto&& arg : param2args.arguments()) { auto arg_it = name2expr.find(arg); - CHECK(arg_it != name2expr.end()) << "Cant't find argument:" << arg; + PADDLE_ENFORCE_NE( + arg_it, + name2expr.end(), + ::common::errors::InvalidArgument("Can't find argument: %s", arg)); step.inputs[param2args.parameter()].emplace_back(arg_it->second); } } diff --git a/test/cpp/phi/api/test_phi_tensor.cc b/test/cpp/phi/api/test_phi_tensor.cc index cbf029088a9d59..8d023c74f06220 100644 ---
a/test/cpp/phi/api/test_phi_tensor.cc +++ b/test/cpp/phi/api/test_phi_tensor.cc @@ -48,7 +48,11 @@ template void TestCopyTensor() { auto t1 = InitCPUTensorForTest(); auto t1_cpu_cp = t1.copy_to(phi::CPUPlace(), /*blocking=*/false); - CHECK((phi::CPUPlace() == t1_cpu_cp.place())); + PADDLE_ENFORCE_EQ(t1_cpu_cp.place(), + phi::CPUPlace(), + phi::errors::InvalidArgument("t1_cpu_cp should copy to " + "CPUPlace, but got %s", + t1_cpu_cp.place())); for (int64_t i = 0; i < t1.size(); i++) { PADDLE_ENFORCE_EQ( t1_cpu_cp.template data()[i], @@ -59,12 +63,25 @@ void TestCopyTensor() { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) VLOG(2) << "Do GPU copy test"; auto t1_gpu_cp = t1_cpu_cp.copy_to(phi::GPUPlace(), /*blocking=*/false); - CHECK((phi::GPUPlace() == t1_gpu_cp.place())); + PADDLE_ENFORCE_EQ(t1_gpu_cp.place(), + phi::GPUPlace(), + phi::errors::InvalidArgument("t1_gpu_cp should copy to " + "GPUPlace, but got %s", + t1_gpu_cp.place())); auto t1_gpu_cp_cp = t1_gpu_cp.copy_to(phi::GPUPlace(), /*blocking=*/false); - CHECK((phi::GPUPlace() == t1_gpu_cp_cp.place())); + PADDLE_ENFORCE_EQ(t1_gpu_cp_cp.place(), + phi::GPUPlace(), + phi::errors::InvalidArgument("t1_gpu_cp_cp should copy to " + "GPUPlace, but got %s", + t1_gpu_cp_cp.place())); auto t1_gpu_cp_cp_cpu = t1_gpu_cp_cp.copy_to(phi::CPUPlace(), /*blocking=*/false); - CHECK((phi::CPUPlace() == t1_gpu_cp_cp_cpu.place())); + PADDLE_ENFORCE_EQ( + t1_gpu_cp_cp_cpu.place(), + phi::CPUPlace(), + phi::errors::InvalidArgument("t1_gpu_cp_cp_cpu should copy to " + "CPUPlace, but got %s", + t1_gpu_cp_cp_cpu.place())); for (int64_t i = 0; i < t1.size(); i++) { PADDLE_ENFORCE_EQ( t1_gpu_cp_cp_cpu.template data()[i], @@ -81,11 +98,17 @@ void TestAPIPlace() { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) auto t1 = paddle::experimental::empty( tensor_shape, DataType::FLOAT32, phi::GPUPlace()); - CHECK((phi::GPUPlace() == t1.place())); + PADDLE_ENFORCE_EQ(t1.place(), + phi::GPUPlace(), + 
phi::errors::InvalidArgument( + "t1 should copy to GPUPlace, but got %s", t1.place())); #endif auto t2 = paddle::experimental::empty( tensor_shape, DataType::FLOAT32, phi::CPUPlace()); - CHECK((phi::CPUPlace() == t2.place())); + PADDLE_ENFORCE_EQ(t2.place(), + phi::CPUPlace(), + phi::errors::InvalidArgument( + "t2 should copy to CPUPlace, but got %s", t2.place())); } void TestAPISizeAndShape() { @@ -94,8 +117,13 @@ void TestAPISizeAndShape() { PADDLE_ENFORCE_EQ( t1.size(), 25, - phi::errors::InvalidArgument("t1.size should be equal to 25 ")); - CHECK(t1.shape() == tensor_shape); + phi::errors::InvalidArgument("t1.size should be equal to 25, " + "but got %d", + t1.size())); + PADDLE_ENFORCE_EQ(t1.shape(), + tensor_shape, + phi::errors::InvalidArgument( + "t1.shape should be equal to tensor_shape, ")); } void TestAPISlice() { @@ -106,19 +134,43 @@ void TestAPISlice() { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) auto t1 = paddle::experimental::empty( tensor_shape_origin1, DataType::FLOAT32, phi::GPUPlace()); - CHECK(t1.slice(0, 5).shape() == tensor_shape_origin1); - CHECK(t1.slice(0, 3).shape() == tensor_shape_sub1); + PADDLE_ENFORCE_EQ( + t1.slice(0, 5).shape(), + tensor_shape_origin1, + phi::errors::InvalidArgument("t1.slice(0, 5).shape should be equal to " + "{5, 5}")); + PADDLE_ENFORCE_EQ( + t1.slice(0, 3).shape(), + tensor_shape_sub1, + phi::errors::InvalidArgument("t1.slice(0, 3).shape should be equal to " + "{3, 5}")); auto t2 = paddle::experimental::empty( tensor_shape_origin2, DataType::FLOAT32, phi::GPUPlace()); - CHECK(t2.slice(4, 5).shape() == tensor_shape_sub2); + PADDLE_ENFORCE_EQ( + t2.slice(4, 5).shape(), + tensor_shape_sub2, + phi::errors::InvalidArgument("t2.slice(4, 5).shape should be equal to " + "{1, 5, 5}")); #endif auto t3 = paddle::experimental::empty( tensor_shape_origin1, DataType::FLOAT32, phi::CPUPlace()); - CHECK(t3.slice(0, 5).shape() == tensor_shape_origin1); - CHECK(t3.slice(0, 3).shape() == tensor_shape_sub1); + 
PADDLE_ENFORCE_EQ( + t3.slice(0, 5).shape(), + tensor_shape_origin1, + phi::errors::InvalidArgument("t3.slice(0, 5).shape should be equal to " + "{5, 5}")); + PADDLE_ENFORCE_EQ( + t3.slice(0, 3).shape(), + tensor_shape_sub1, + phi::errors::InvalidArgument("t3.slice(0, 3).shape should be equal to " + "{3, 5}")); auto t4 = paddle::experimental::empty( tensor_shape_origin2, DataType::FLOAT32, phi::CPUPlace()); - CHECK(t4.slice(4, 5).shape() == tensor_shape_sub2); + PADDLE_ENFORCE_EQ( + t4.slice(4, 5).shape(), + tensor_shape_sub2, + phi::errors::InvalidArgument("t4.slice(4, 5).shape should be equal to " + "{1, 5, 5}")); // Test writing function for sliced tensor auto t = InitCPUTensorForTest(); @@ -152,11 +204,21 @@ void TestCast(paddle::DataType data_type) { DataType dtype = phi::CppTypeToDataType::Type(); auto t1 = paddle::experimental::empty(tensor_shape, dtype, phi::CPUPlace()); auto t2 = t1.cast(data_type); - CHECK(t2.type() == data_type); + PADDLE_ENFORCE_EQ( + t2.type(), + data_type, + phi::errors::InvalidArgument("t2.type() should be equal to data_type, " + "but got %s", + t2.type())); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) auto tg1 = paddle::experimental::empty(tensor_shape, dtype, phi::GPUPlace()); auto tg2 = tg1.cast(data_type); - CHECK(tg2.type() == data_type); + PADDLE_ENFORCE_EQ( + tg2.type(), + data_type, + phi::errors::InvalidArgument("tg2.type() should be equal to data_type, " + "but got %s", + tg2.type())); #endif } @@ -207,39 +269,117 @@ void GroupTestCast() { } void GroupTestDtype() { - CHECK(TestDtype() == paddle::DataType::BOOL); - CHECK(TestDtype() == paddle::DataType::INT8); - CHECK(TestDtype() == paddle::DataType::UINT8); - CHECK(TestDtype() == paddle::DataType::INT16); - CHECK(TestDtype() == paddle::DataType::INT32); - CHECK(TestDtype() == paddle::DataType::INT64); - CHECK(TestDtype() == paddle::DataType::FLOAT16); - CHECK(TestDtype() == paddle::DataType::FLOAT32); - CHECK(TestDtype() == paddle::DataType::FLOAT64); - 
CHECK(TestDtype() == paddle::DataType::COMPLEX64); - CHECK(TestDtype() == paddle::DataType::COMPLEX128); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::BOOL, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::BOOL, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::INT8, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::INT8, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::UINT8, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::UINT8, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::INT16, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::INT16, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::INT32, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::INT32, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::INT64, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::INT64, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ(TestDtype(), + paddle::DataType::FLOAT16, + phi::errors::InvalidArgument( + "TestDtype() should be equal to " + "paddle::DataType::FLOAT16, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::FLOAT32, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::FLOAT32, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ( + TestDtype(), + paddle::DataType::FLOAT64, + phi::errors::InvalidArgument("TestDtype() should be equal to " + "paddle::DataType::FLOAT64, but got %s", + TestDtype())); + PADDLE_ENFORCE_EQ(TestDtype(), + paddle::DataType::COMPLEX64, + phi::errors::InvalidArgument( + "TestDtype() should be equal to " + "paddle::DataType::COMPLEX64, but got %s", + TestDtype())); + 
PADDLE_ENFORCE_EQ(TestDtype(), + paddle::DataType::COMPLEX128, + phi::errors::InvalidArgument( + "TestDtype() should be equal to " + "paddle::DataType::COMPLEX128, but got %s", + TestDtype())); } void TestInitialized() { auto test_tensor = paddle::experimental::empty({1, 1}); - CHECK(test_tensor.is_initialized() == true); + PADDLE_ENFORCE_EQ(test_tensor.is_initialized(), + true, + phi::errors::InvalidArgument( + "test_tensor should be initialized, but got %s", + test_tensor.is_initialized())); float* tensor_data = test_tensor.data(); for (int i = 0; i < test_tensor.size(); i++) { tensor_data[i] = 0.5; } for (int i = 0; i < test_tensor.size(); i++) { - CHECK(tensor_data[i] == 0.5); + PADDLE_ENFORCE_EQ( + tensor_data[i], + 0.5, + phi::errors::InvalidArgument("tensor_data[%d] should be equal to 0.5, " + "but got %f", + i, + tensor_data[i])); } } void TestDataInterface() { // Test DenseTensor auto test_tensor = paddle::experimental::empty({1, 1}); - CHECK(test_tensor.is_initialized() == true); + PADDLE_ENFORCE_EQ(test_tensor.is_initialized(), + true, + phi::errors::InvalidArgument( + "test_tensor should be initialized, but got %s", + test_tensor.is_initialized())); void* tensor_ptr = test_tensor.data(); - CHECK(tensor_ptr != nullptr); + PADDLE_ENFORCE_NE( + tensor_ptr, + nullptr, + phi::errors::InvalidArgument("test_tensor should not be NULL, but got %p", + tensor_ptr)); const void* const_tensor_ptr = test_tensor.data(); - CHECK(const_tensor_ptr != nullptr); + PADDLE_ENFORCE_NE( + const_tensor_ptr, + nullptr, + phi::errors::InvalidArgument("const_tensor should not be NULL, " + "but got %p", + const_tensor_ptr)); // Test SelectedRows std::vector rows = {0}; std::shared_ptr selected_rows = @@ -248,16 +388,33 @@ void TestDataInterface() { selected_rows->mutable_value()->mutable_data(phi::CPUPlace())[0] = static_cast(10.0f); paddle::Tensor sr_tensor = paddle::Tensor(selected_rows); - CHECK(sr_tensor.is_initialized() == true); + PADDLE_ENFORCE_EQ(sr_tensor.is_initialized(), 
+ true, + phi::errors::InvalidArgument( + "sr_tensor should be initialized, but got %s", + sr_tensor.is_initialized())); tensor_ptr = sr_tensor.data(); - CHECK(tensor_ptr != nullptr); + PADDLE_ENFORCE_NE(tensor_ptr, + nullptr, + phi::errors::InvalidArgument( + "tensor should not be NULL, but got %p", tensor_ptr)); const_tensor_ptr = sr_tensor.data(); - CHECK(const_tensor_ptr != nullptr); + PADDLE_ENFORCE_NE( + const_tensor_ptr, + nullptr, + phi::errors::InvalidArgument("const_tensor should not be NULL, " + "but got %p", + const_tensor_ptr)); } void TestJudgeTensorType() { Tensor test_tensor(phi::CPUPlace(), {1, 1}); - CHECK(test_tensor.is_dense_tensor() == true); + PADDLE_ENFORCE_EQ( + test_tensor.is_dense_tensor(), + true, + phi::errors::InvalidArgument("test_tensor should be a dense tensor, " + "but got %s", + test_tensor.is_dense_tensor())); } TEST(PhiTensor, All) { diff --git a/test/cpp/phi/core/test_dense_tensor.cc b/test/cpp/phi/core/test_dense_tensor.cc index a244b155d54cb0..f426004bf56d60 100644 --- a/test/cpp/phi/core/test_dense_tensor.cc +++ b/test/cpp/phi/core/test_dense_tensor.cc @@ -28,44 +28,181 @@ TEST(dense_tensor, meta) { const LoD lod{}; DenseTensorMeta meta_0; - CHECK(!meta_0.valid()); + PADDLE_ENFORCE_EQ( + meta_0.valid(), + false, + phi::errors::InvalidArgument("Fail in default DenseTensorMeta. Expected " + "meta_0 to be invalid, but got: %s", + meta_0.valid())); DenseTensorMeta meta_1(dtype, dims); - CHECK(meta_1.dtype == dtype); - CHECK(meta_1.dims == dims); - CHECK(meta_1.valid()); + PADDLE_ENFORCE_EQ( + meta_1.dtype, + dtype, + phi::errors::InvalidArgument("Fail in DenseTensorMeta with dtype and " + "dims. Expected dtype: %s, but got: %s", + dtype, + meta_1.dtype)); + PADDLE_ENFORCE_EQ( + meta_1.dims, + dims, + phi::errors::InvalidArgument("Fail in DenseTensorMeta with dtype and " + "dims. 
Expected dims: %s, but got: %s", + dims, + meta_1.dims)); + PADDLE_ENFORCE_EQ(meta_1.valid(), + true, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype and dims. Expected " + "meta_1 to be valid, but got: %s", + meta_1.valid())); DenseTensorMeta meta_2(dtype, dims, layout); - CHECK(meta_2.dtype == dtype); - CHECK(meta_2.dims == dims); - CHECK(meta_2.layout == layout); - CHECK(meta_2.valid()); + PADDLE_ENFORCE_EQ(meta_2.dtype, + dtype, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims and layout. " + "Expected dtype: %s, but got: %s", + dtype, + meta_2.dtype)); + PADDLE_ENFORCE_EQ( + meta_2.dims, + dims, + phi::errors::InvalidArgument("Fail in DenseTensorMeta with dtype, dims " + "and layout. Expected dims: %s, but got: %s", + dims, + meta_2.dims)); + PADDLE_ENFORCE_EQ(meta_2.layout, + layout, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims and layout. " + "Expected layout: %s, but got: %s", + layout, + meta_2.layout)); + PADDLE_ENFORCE_EQ(meta_2.valid(), + true, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims and layout. " + "Expected meta_2 to be valid, but got: %s", + meta_2.valid())); DenseTensorMeta meta_3(dtype, dims, layout, lod); - CHECK(meta_3.dtype == dtype); - CHECK(meta_3.dims == dims); - CHECK(meta_3.layout == layout); - CHECK(meta_3.lod == lod); - CHECK(meta_3.valid()); + PADDLE_ENFORCE_EQ(meta_3.dtype, + dtype, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims, layout and " + "lod. Expected dtype: %s, but got: %s", + dtype, + meta_3.dtype)); + PADDLE_ENFORCE_EQ(meta_3.dims, + dims, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims, layout and " + "lod. Expected dims: %s, but got: %s", + dims, + meta_3.dims)); + PADDLE_ENFORCE_EQ(meta_3.layout, + layout, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims, layout and " + "lod. 
Expected layout: %s, but got: %s", + layout, + meta_3.layout)); + PADDLE_ENFORCE_EQ(meta_3.lod, + lod, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims, layout and " + "lod. Expected lod: %s, but got: %s", + lod, + meta_3.lod)); + PADDLE_ENFORCE_EQ(meta_3.valid(), + true, + phi::errors::InvalidArgument( + "Fail in DenseTensorMeta with dtype, dims, layout and " + "lod. Expected meta_3 to be valid, but got: %s", + meta_3.valid())); DenseTensorMeta meta_4(meta_3); - CHECK(meta_4.dtype == dtype); - CHECK(meta_4.dims == dims); - CHECK(meta_4.layout == layout); - CHECK(meta_4.lod == lod); - CHECK(meta_4.valid()); + PADDLE_ENFORCE_EQ( + meta_4.dtype, + dtype, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected dtype: %s, but got: %s", + dtype, + meta_4.dtype)); + PADDLE_ENFORCE_EQ( + meta_4.dims, + dims, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected dims: %s, but got: %s", + dims, + meta_4.dims)); + PADDLE_ENFORCE_EQ( + meta_4.layout, + layout, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected layout: %s, but got: %s", + layout, + meta_4.layout)); + PADDLE_ENFORCE_EQ( + meta_4.lod, + lod, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected lod: %s, but got: %s", + lod, + meta_4.lod)); + PADDLE_ENFORCE_EQ( + meta_4.valid(), + true, + phi::errors::InvalidArgument("Fail in copy DenseTensorMeta. Expected " + "meta_4 to be valid, but got: %s", + meta_4.valid())); DenseTensorMeta meta_5(meta_4); - CHECK(meta_5.dtype == dtype); - CHECK(meta_5.dims == dims); - CHECK(meta_5.layout == layout); - CHECK(meta_5.lod == lod); - CHECK(meta_5.valid()); + PADDLE_ENFORCE_EQ( + meta_5.dtype, + dtype, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected dtype: %s, but got: %s", + dtype, + meta_5.dtype)); + PADDLE_ENFORCE_EQ( + meta_5.dims, + dims, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. 
Expected dims: %s, but got: %s", + dims, + meta_5.dims)); + PADDLE_ENFORCE_EQ( + meta_5.layout, + layout, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected layout: %s, but got: %s", + layout, + meta_5.layout)); + PADDLE_ENFORCE_EQ( + meta_5.lod, + lod, + phi::errors::InvalidArgument( + "Fail in copy DenseTensorMeta. Expected lod: %s, but got: %s", + lod, + meta_5.lod)); + PADDLE_ENFORCE_EQ( + meta_5.valid(), + true, + phi::errors::InvalidArgument("Fail in copy DenseTensorMeta. Expected " + "meta_5 to be valid, but got: %s", + meta_5.valid())); } TEST(dense_tensor, def_ctor) { DenseTensor tensor_0; - CHECK(tensor_0.valid()); + PADDLE_ENFORCE_EQ( + tensor_0.valid(), + true, + phi::errors::InvalidArgument("Fail in default DenseTensor. Expected " + "tensor_0 to be valid, but got: %s", + tensor_0.valid())); } TEST(dense_tensor, ctor) { @@ -109,9 +246,19 @@ TEST(dense_tensor, resize) { auto* alloc = fancy_allocator.get(); DenseTensor tensor_0(alloc, meta); - CHECK_EQ(tensor_0.capacity(), 2u); + PADDLE_ENFORCE_EQ( + tensor_0.capacity(), + 2u, + phi::errors::InvalidArgument( + "Fail to initialize DenseTensor. Expected capacity: 2, but got: %s", + tensor_0.capacity())); tensor_0.ResizeAndAllocate({1, 2, 3}); - CHECK_EQ(tensor_0.capacity(), 6u); + PADDLE_ENFORCE_EQ( + tensor_0.capacity(), + 6u, + phi::errors::InvalidArgument( + "Fail to resize DenseTensor. Expected capacity: 6, but got: %s", + tensor_0.capacity())); } TEST(dense_tensor, shallow_copy) { @@ -126,7 +273,11 @@ TEST(dense_tensor, shallow_copy) { DenseTensor tensor_0(alloc, meta); DenseTensor tensor_1(tensor_0); - CHECK(tensor_0.meta() == tensor_1.meta()); + PADDLE_ENFORCE_EQ(tensor_0.meta(), + tensor_1.meta(), + phi::errors::InvalidArgument( + "Fail to copy DenseTensor. 
Expected tensor_0 and " + "tensor_1 to have the same meta")); } TEST(dense_tensor, storage_properties) { @@ -145,8 +296,13 @@ TEST(dense_tensor, storage_properties) { } catch (common::enforce::EnforceNotMet& error) { caught_exception = true; } - EXPECT_TRUE(caught_exception); + PADDLE_ENFORCE_EQ(caught_exception, + true, + phi::errors::InvalidArgument( + "Fail to get storage properties. Expected an exception " + "to be thrown for OneDNNStorageProperties")); #endif } + } // namespace tests } // namespace phi