From a6303a87bfdbd99d0a27d575e97e211fb8937685 Mon Sep 17 00:00:00 2001
From: ooooo <3164076421@qq.com>
Date: Sun, 3 Aug 2025 09:30:48 +0800
Subject: [PATCH] refine error message

---
 .../eager/accumulation/accumulation_node.cc   |  4 ++--
 .../eager_manual/nodes/multiply_node.cc       |  8 ++++----
 .../nodes/sync_batch_norm_node.cc             |  8 ++++----
 paddle/fluid/eager/general_grad.h             |  2 +-
 .../fluid/eager/to_static/run_program_impl.cc |  2 +-
 .../ir/elementwiseadd_transpose_pass.cc       |  2 +-
 .../ir/preln_skip_layernorm_fuse_pass.cc      |  4 ++--
 .../framework/ir/transfer_layout_elim_pass.cc |  4 ++--
 .../interpreter/interpreter_util.cc           |  2 +-
 paddle/fluid/framework/op_registry.cc         |  2 +-
 paddle/fluid/imperative/reducer.cc            | 14 +++++++-------
 .../inference/api/details/zero_copy_tensor.cc |  2 +-
 paddle/fluid/inference/tensorrt/op_teller.cc  |  2 +-
 .../operators/hierarchical_sigmoid_op.cc      | 19 +++++++++----------
 paddle/fluid/operators/lookup_table_op.cc     |  2 +-
 15 files changed, 38 insertions(+), 39 deletions(-)

diff --git a/paddle/fluid/eager/accumulation/accumulation_node.cc b/paddle/fluid/eager/accumulation/accumulation_node.cc
index e1f10500047199..47744c75651501 100644
--- a/paddle/fluid/eager/accumulation/accumulation_node.cc
+++ b/paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -161,12 +161,12 @@ GradNodeAccumulation::operator()(
   VLOG(3) << "Running AD API Grad: GradNodeAccumulation";
   PADDLE_ENFORCE(grads.size() == 1,
                  common::errors::Fatal(
-                     "GradNodeAccumulation should take exactly 1 grad tensor"
+                     "GradNodeAccumulation should take exactly 1 grad tensor. "
                      "However received: %d slot.",
                      grads.size()));
   PADDLE_ENFORCE(grads[0].size() == 1,
                  common::errors::Fatal(
-                     "GradNodeAccumulation should take exactly 1 grad tensor"
+                     "GradNodeAccumulation should take exactly 1 grad tensor. "
                      "However received: %d in slot %d .",
                      grads[0].size(),
                      0));
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
index 8e22386ac2e8c3..048a6a85808ed6 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/multiply_node.cc
@@ -484,8 +484,8 @@ MultiplyDoubleGradNode::operator()(
   if (need_skip) {
     if (trace_backward) {
       PADDLE_THROW(common::errors::Unavailable(
-          "The Op multiply_double_grad doesn't have any grad"
-          "op. If you don't intend calculating higher order"
+          "The Op multiply_double_grad doesn't have any grad "
+          "op. If you don't intend calculating higher order "
           "derivatives, please set `create_graph`to False."));
     }
   }
@@ -669,8 +669,8 @@ MultiplyGradNode::operator()(
   // Create Grad Node
   if (trace_backward) {
     PADDLE_THROW(common::errors::Unavailable(
-        "The Op multiply_grad doesn't have any grad"
-        "op. If you don't intend calculating higher order"
+        "The Op multiply_grad doesn't have any grad "
+        "op. If you don't intend calculating higher order "
         "derivatives, please set `create_graph`to False."));
   }
   VLOG(4) << "Finish AD API GRAD: multiply_grad";
diff --git a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc
index c3c742828077b3..80ed28d3113a21 100644
--- a/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc
+++ b/paddle/fluid/eager/api/manual/eager_manual/nodes/sync_batch_norm_node.cc
@@ -201,8 +201,8 @@ SyncBatchNormGradNode::operator()(
   // Create Grad Node
   if (trace_backward) {
     PADDLE_THROW(common::errors::Unavailable(
-        "The Op sync_batch_norm_grad doesn't have any grad"
-        "op. If you don't intend calculating higher order"
+        "The Op sync_batch_norm_grad doesn't have any grad "
+        "op. If you don't intend calculating higher order "
         "derivatives, please set `create_graph`to False."));
   }
   VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad";
@@ -442,8 +442,8 @@ SyncBatchNormGradNode::operator()(
   // Create Grad Node
   if (trace_backward) {
     PADDLE_THROW(common::errors::Unavailable(
-        "The Op sync_batch_norm_grad doesn't have any grad"
-        "op. If you don't intend calculating higher order"
+        "The Op sync_batch_norm_grad doesn't have any grad "
+        "op. If you don't intend calculating higher order "
         "derivatives, please set `create_graph`to False."));
   }
   VLOG(4) << "Finish AD API GRAD: sync_batch_norm_grad";
diff --git a/paddle/fluid/eager/general_grad.h b/paddle/fluid/eager/general_grad.h
index eaaa46c2f58528..913a521cc901b2 100644
--- a/paddle/fluid/eager/general_grad.h
+++ b/paddle/fluid/eager/general_grad.h
@@ -304,7 +304,7 @@ class GeneralGrad {
 
       PADDLE_ENFORCE_NOT_NULL(
           target_node,
-          common::errors::Fatal("There is no grad op for inputs:[%d] or it's"
+          common::errors::Fatal("There is no grad op for inputs:[%d] or it's "
                                 "stop_gradient=True.",
                                 i));
 
diff --git a/paddle/fluid/eager/to_static/run_program_impl.cc b/paddle/fluid/eager/to_static/run_program_impl.cc
index 57a6930d808a99..30581b16ddc89b 100644
--- a/paddle/fluid/eager/to_static/run_program_impl.cc
+++ b/paddle/fluid/eager/to_static/run_program_impl.cc
@@ -1070,7 +1070,7 @@ void LegacyRunProgramGradImpl(
   if (!cache.Has(cache_key)) {
     phi::RecordEvent record_event(
         "create_new_interpretercore", phi::TracerEventType::UserDefined, 1);
-    VLOG(2) << "No interpretercore cache, so create a new interpretercore"
+    VLOG(2) << "No interpretercore cache, so create a new interpretercore "
                "for program: "
            << program_id;
     details::ShareTensorsIntoScope(out_grad, global_inner_scope);
diff --git a/paddle/fluid/framework/ir/elementwiseadd_transpose_pass.cc b/paddle/fluid/framework/ir/elementwiseadd_transpose_pass.cc
index 543de342fa0e25..f76227cebbc025 100644
--- a/paddle/fluid/framework/ir/elementwiseadd_transpose_pass.cc
+++ b/paddle/fluid/framework/ir/elementwiseadd_transpose_pass.cc
@@ -153,7 +153,7 @@ int ElementwiseAddTransposeFusePass::ApplyEleTransPattern(
           << "found that shape_attr[3](channel size) mod 8 !=0 for reshape op "
              "in elementwise_add_transpose, "
             "currently, the elementwiseadd transpose pass only support "
-             "channel size mod 8 == 0 for khwc8 trt format"
+             "channel size mod 8 == 0 for khwc8 trt format. "
" "Therefore, the fusion will be stopped."; return; } diff --git a/paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc index ba7d33405fcfdc..c1f434255b0414 100644 --- a/paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc @@ -113,9 +113,9 @@ void PrelnSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const { graph->Has(framework::ir::kMultiheadMatmulPass) && !pos_id.empty() && !mask_id.empty() && with_dynamic_shape)) { VLOG(3) << "preln_skip_layernorm_fuse_pass need: use_trt, enable_int8, " - "with_interleaved" + "with_interleaved, " "use_varseqlen, preln_embedding_eltwise_layernorm_fuse_pass, " - "trt_multihead_matmul_fuse_pass" + "trt_multihead_matmul_fuse_pass, " "set pos_id, set mask_id, with_dynamic_shape. Stop this pass, " "please " "reconfig."; diff --git a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc index 69c0b9ea231b38..202da4382f1b42 100644 --- a/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc +++ b/paddle/fluid/framework/ir/transfer_layout_elim_pass.cc @@ -92,8 +92,8 @@ void TransferLayoutElimPass::PutTransferlayoutAfterOp( PADDLE_ENFORCE_EQ( var2_shape.size() >= 4L, true, - common::errors::InvalidArgument("var2_shape.size is too small" - "expected no small than 4L" + common::errors::InvalidArgument("var2_shape.size is too small, " + "expected no small than 4L, " "received %d", var2_shape.size())); auto new_var2_shape = var2_shape; diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc index a7829325109635..c25e0d7c70d2a2 100644 --- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc +++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc @@ -632,7 +632,7 @@ void BuildOpFuncList(const phi::Place& place, "conditional_block", "conditional_block_grad", "pylayer", - "pylayer_grad" + "pylayer_grad", "recurrent_grad", "while", "while_grad"}; diff --git a/paddle/fluid/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc index ca1e1b5296d7bc..f448f26df80e42 100644 --- a/paddle/fluid/framework/op_registry.cc +++ b/paddle/fluid/framework/op_registry.cc @@ -109,7 +109,7 @@ static VariableNameMap ConvertOpDescVarsToVarNameMap( std::unique_ptr OpRegistry::CreateOp( const proto::OpDesc& op_desc) { - VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be" + VLOG(1) << "CreateOp directly from OpDesc is deprecated. It should only be " "used in unit tests. Use CreateOp(const OpDesc& op_desc) " "instead."; VariableNameMap inputs = ConvertOpDescVarsToVarNameMap(op_desc.inputs()); diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc index 259307c5d63102..8ba590e525cfb5 100644 --- a/paddle/fluid/imperative/reducer.cc +++ b/paddle/fluid/imperative/reducer.cc @@ -652,13 +652,13 @@ void Reducer::PrepareForBackward( // concat + allreduce + split is emitted in turn according to next_group_. // 3, FinalizeBackward: after the end, synchronize each stream. void Reducer::AddDistHook(size_t var_index) { - PADDLE_ENFORCE_LT( - var_index, - variable_locators_.size(), - common::errors::OutOfRange("Out of bounds variable index. 
-                                 "than %d, but it is %d",
-                                 variable_locators_.size(),
-                                 var_index));
+  PADDLE_ENFORCE_LT(var_index,
+                    variable_locators_.size(),
+                    common::errors::OutOfRange(
+                        "Out of bounds variable index. It must be less "
+                        "than %d, but it is %d",
+                        variable_locators_.size(),
+                        var_index));
 
   // gradient synchronization is not required when grad_need_hooks_ is false.
   if (!grad_need_hooks_) {
diff --git a/paddle/fluid/inference/api/details/zero_copy_tensor.cc b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
index 8a3ae4ab94131d..bb639eeff1eec2 100644
--- a/paddle/fluid/inference/api/details/zero_copy_tensor.cc
+++ b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -372,7 +372,7 @@ void Tensor::CopyStringsFromCpu(const paddle_infer::Strings *data) {
       0,
       common::errors::PreconditionNotMet(
           "You should call Tensor::Reshape(const "
-          "std::size_t &shape)function before copying"
+          "std::size_t &shape) function before copying "
          "the string data from cpu."));
   *tensor = *data;
 }
diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc
index 4acc898470f48b..5d504c71ff1033 100644
--- a/paddle/fluid/inference/tensorrt/op_teller.cc
+++ b/paddle/fluid/inference/tensorrt/op_teller.cc
@@ -1430,7 +1430,7 @@ struct SimpleOpTypeSetTeller : public Teller {
         input_type != framework::proto::VarType::FP32 &&
         input_type != framework::proto::VarType::FP64) {
       VLOG(3) << "the fill_any_like only supports "
-                 "int32/int64/float32/float64 by"
+                 "int32/int64/float32/float64 by "
                  "trt8.4 below";
       return false;
     }
diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.cc b/paddle/fluid/operators/hierarchical_sigmoid_op.cc
index 6c15538cb72b91..99b89d91e77508 100644
--- a/paddle/fluid/operators/hierarchical_sigmoid_op.cc
+++ b/paddle/fluid/operators/hierarchical_sigmoid_op.cc
@@ -88,18 +88,18 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
              "(phi::DenseTensor, required), The parameters of hierarchical "
              "sigmoid operator, each of them is a 2-D tensor, the shape is"
              "[K, D]. Which K is the num of non-leaf node in Path Tree");
-    AddInput("Label",
-             "(phi::DenseTensor, required), The labels of training data. It's a"
-             "tensor with shape [N, 1].");
     AddInput(
-        "PathTable",
-        "(phi::DenseTensor, optional), The Path Table from root to current word"
-        "it should have shape like [N, L], L is the length of the Path")
+        "Label",
+        "(phi::DenseTensor, required), The labels of training data. It's a "
+        "tensor with shape [N, 1].");
+    AddInput("PathTable",
+             "(phi::DenseTensor, optional), The Path Table from root to "
+             "current word, it should have shape like [N, L], L is the length "
+             "of the Path")
         .AsDispensable();
     AddInput("PathCode",
              "(phi::DenseTensor, optional), The Code on each Node of the Path "
-             "from root "
-             "to current word"
+             "from root to current word, "
              "it should have shape like [N, L], L is the length of the Path")
         .AsDispensable();
     AddInput("Bias",
@@ -118,8 +118,7 @@ class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
         .AsIntermediate();
     AddOutput("W_Out",
               "(phi::DenseTensor, optional) using input 'W' as Output to make "
-              "it mutable"
-              "When we are using prefetch")
+              "it mutable when we are using prefetch")
         .AsIntermediate();
     AddAttr<int>("num_classes", "(int, optional), The number of classes")
         .SetDefault(2);
diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc
index a69c75ecffcf99..a0a89c7f6bf7bb 100644
--- a/paddle/fluid/operators/lookup_table_op.cc
+++ b/paddle/fluid/operators/lookup_table_op.cc
@@ -109,7 +109,7 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<std::string>("entry_config",
                          "embedding sparse feature entry config, "
                          " probability entry / counting "
-                         " this can only be used in distributed training"
+                         " this can only be used in distributed training "
                          "entry")
         .SetDefault("");
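
Note for reviewers: the bug class this patch targets is C++ adjacent string
literal concatenation. A message wrapped across source lines without a
trailing space at the wrap point is silently glued into one word at compile
time. A minimal standalone sketch of the pitfall and the fix (plain C++, not
Paddle code; the message text is illustrative only):

    #include <cstdio>

    int main() {
      // Adjacent string literals are merged by the compiler, so a missing
      // trailing space at the wrap point corrupts the message:
      const char* broken =
          "GradNodeAccumulation should take exactly 1 grad tensor"
          "However received: %d slot.";  // "...grad tensorHowever received..."
      // With the trailing space and period this patch adds, it reads correctly:
      const char* fixed =
          "GradNodeAccumulation should take exactly 1 grad tensor. "
          "However received: %d slot.";  // "...grad tensor. However received..."
      std::printf("%s\n%s\n", broken, fixed);
      return 0;
    }

The same concatenation rule explains the interpreter_util.cc hunk: a missing
comma between "pylayer_grad" and "recurrent_grad" fused the two entries into
the single string "pylayer_gradrecurrent_grad", so the op-name set silently
lost both entries.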