diff --git a/_typos.toml b/_typos.toml
index 1f913a25de8ad6..388fc9be999c40 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -4,6 +4,9 @@ extend-exclude = [
     "third_party",
     "patches",
     "build",
+    # Skip `intermidiate` check in these files
+    "test/cpp/eager/task_tests/CMakeLists.txt",
+    "test/cpp/eager/task_tests/hook_test_intermidiate.cc",
 ]
 
 [default]
@@ -122,23 +125,6 @@ insid = 'insid'
 insepection = 'insepection'
 intall = 'intall'
 instanciate = 'instanciate'
-Instrution = 'Instrution'
-INSTUCTION = 'INSTUCTION'
-instuction = 'instuction'
-interger = 'interger'
-intermidiate = 'intermidiate'
-itermediate = 'itermediate'
-interal = 'interal'
-internel = 'internel'
-instrinsics = 'instrinsics'
-intristic = 'intristic'
-invalied = 'invalied'
-Invaid = 'Invaid'
-invaild = 'invaild'
-invalide = 'invalide'
-iteratable = 'iteratable'
-interated = 'interated'
-Iteraion = 'Iteraion'
 occured = 'occured'
 Ocurred = 'Ocurred'
 occures = 'occures'
diff --git a/paddle/cinn/hlir/framework/graph_compiler_util.h b/paddle/cinn/hlir/framework/graph_compiler_util.h
index 57ac78ed9943ad..c8f82b829153fa 100644
--- a/paddle/cinn/hlir/framework/graph_compiler_util.h
+++ b/paddle/cinn/hlir/framework/graph_compiler_util.h
@@ -49,7 +49,7 @@ enum class CompilationStatus {
   // An error occurred during codegen and jit.
   CODEGEN_JIT_FAIL = 2,
   // An error occurred during build instruction.
-  INSTUCTION_FAIL = 3,
+  INSTRUCTION_FAIL = 3,
   // An error occurred during build runtime program.
   PROGRAM_FAIL = 4,
   // Compile successfully.
diff --git a/paddle/cinn/ir/lowered_func.h b/paddle/cinn/ir/lowered_func.h
index 0c2856451b3973..0c83f48db88322 100644
--- a/paddle/cinn/ir/lowered_func.h
+++ b/paddle/cinn/ir/lowered_func.h
@@ -173,7 +173,7 @@ struct _LoweredFunc_ : public IrNode {
    * The output buffer will be resized to the size required, we leave all the
    * expression here. The allocation and deallocation expressions will insert
    * into the head and tail of the function's body. It supports lazy
-   * allocation/deallocation if the corresponding intristic methods support.
+   * allocation/deallocation if the corresponding intrinsic methods support.
    *
    * Currently, we assume that all the input and output buffers should locate in
    * heap, no other memory type is allowed.
diff --git a/paddle/cinn/operator_fusion/fusion_tracker/interpreter.cc b/paddle/cinn/operator_fusion/fusion_tracker/interpreter.cc
index 4823577cc3553d..c8463c5220711b 100644
--- a/paddle/cinn/operator_fusion/fusion_tracker/interpreter.cc
+++ b/paddle/cinn/operator_fusion/fusion_tracker/interpreter.cc
@@ -249,7 +249,7 @@ std::vector FusionInterpreter::Run() {
       break;
     default:
       PADDLE_THROW(
-          ::common::errors::Unavailable("Unsupported Fusion Instrution"));
+          ::common::errors::Unavailable("Unsupported Fusion Instruction"));
   }
 }
 
diff --git a/paddle/cinn/runtime/cuda/CMakeLists.txt b/paddle/cinn/runtime/cuda/CMakeLists.txt
index f2935085fc3198..aaf942e1d742f8 100755
--- a/paddle/cinn/runtime/cuda/CMakeLists.txt
+++ b/paddle/cinn/runtime/cuda/CMakeLists.txt
@@ -11,8 +11,8 @@ gather_srcs(
   cuda_util.cc
   cuda_intrinsics.cc
   cuda_intrinsics_reduce.cc
-  cuda_instrinsics_float16.cc
-  cuda_instrinsics_bfloat16.cc)
+  cuda_intrinsics_float16.cc
+  cuda_intrinsics_bfloat16.cc)
 
 cinn_nv_test(test_cuda_module SRCS cuda_module_test.cc DEPS cinncore)
 cinn_nv_library(cuda_runtime SRCS cinn_cuda_runtime_source.cuh)
diff --git a/paddle/cinn/runtime/cuda/cuda_instrinsics_bfloat16.cc b/paddle/cinn/runtime/cuda/cuda_intrinsics_bfloat16.cc
similarity index 100%
rename from paddle/cinn/runtime/cuda/cuda_instrinsics_bfloat16.cc
rename to paddle/cinn/runtime/cuda/cuda_intrinsics_bfloat16.cc
diff --git a/paddle/cinn/runtime/cuda/cuda_instrinsics_float16.cc b/paddle/cinn/runtime/cuda/cuda_intrinsics_float16.cc
similarity index 100%
rename from paddle/cinn/runtime/cuda/cuda_instrinsics_float16.cc
rename to paddle/cinn/runtime/cuda/cuda_intrinsics_float16.cc
diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index cfca693121c1a2..3274fb1d37038c 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -2576,8 +2576,8 @@ def GenerateNodeDefinition(
                     optional_inplace_var_name.append(
                         transformed_tensor_name
                     )
-                tensor_wrapper_intermidiate_tensor_str = (
-                    f"(&this->{tensor_wrapper_name})->get_intermidiate_tensor()"
+                tensor_wrapper_intermediate_tensor_str = (
+                    f"(&this->{tensor_wrapper_name})->get_intermediate_tensor()"
                 )
                 inplace_check_str += CHECK_BACKWARD_INPLACE_TEMPLATE.format(
                     transformed_tensor_name,
@@ -2587,7 +2587,7 @@ def GenerateNodeDefinition(
                     transformed_tensor_name,
                     transformed_tensor_name,
                     transformed_tensor_name,
-                    tensor_wrapper_intermidiate_tensor_str,
+                    tensor_wrapper_intermediate_tensor_str,
                     transformed_tensor_name,
                     transformed_tensor_name,
                     transformed_tensor_name,
diff --git a/paddle/fluid/eager/tensor_wrapper.h b/paddle/fluid/eager/tensor_wrapper.h
index 1272489fe62531..b74dbdccbb655c 100644
--- a/paddle/fluid/eager/tensor_wrapper.h
+++ b/paddle/fluid/eager/tensor_wrapper.h
@@ -71,7 +71,7 @@ class TensorWrapper {
           static_cast<phi::DenseTensor*>(tensor.impl().get());
       // TODO(jiabin): It's not a good idea to set memory size to zero, find
       // another way and change this.
-      intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
+      intermediate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
           std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
           dense_tensor->meta()));
     } else if (phi::distributed::DistTensor::classof(tensor.impl().get())) {
@@ -84,7 +84,7 @@ class TensorWrapper {
       *no_buffer_dist_tensor->unsafe_mutable_value() = phi::DenseTensor(
           std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
           dist_tensor->value().meta());
-      intermidiate_tensor_.set_impl(no_buffer_dist_tensor);
+      intermediate_tensor_.set_impl(no_buffer_dist_tensor);
     } else {
       PADDLE_THROW(common::errors::Fatal(
           "Unrecognized tensor type for no_need_buffer feature"));
@@ -95,7 +95,7 @@ class TensorWrapper {
                tensor.is_dense_tensor() && tensor.initialized()) {
       phi::DenseTensor* dense_tensor =
           static_cast<phi::DenseTensor*>(tensor.impl().get());
-      intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
+      intermediate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
          std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
           dense_tensor->meta()));
       auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
@@ -103,7 +103,7 @@ class TensorWrapper {
       packed_value_ = (*pack_hook)(tensor);
     } else if (egr::SavedTensorsHooks::GetInstance().IsEnable() &&
                tensor.is_dist_tensor() && tensor.initialized()) {
-      intermidiate_tensor_.set_impl(
+      intermediate_tensor_.set_impl(
           std::make_shared<phi::distributed::DistTensor>(
               tensor.dims(),
               static_cast<phi::distributed::DistTensor*>(tensor.impl().get())
@@ -115,14 +115,14 @@ class TensorWrapper {
           std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
           dense_tensor.meta());
       *(static_cast<phi::distributed::DistTensor*>(
-            intermidiate_tensor_.impl().get())
+            intermediate_tensor_.impl().get())
             ->unsafe_mutable_value()) = tmp;
       auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
       unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
       packed_value_ = (*pack_hook)(tensor);
     } else {
 #endif
-      intermidiate_tensor_.set_impl(tensor.impl());
+      intermediate_tensor_.set_impl(tensor.impl());
 #ifndef PADDLE_NO_PYTHON
     }
 #endif
@@ -130,22 +130,22 @@ class TensorWrapper {
 
     if (VLOG_IS_ON(7)) {
       // TODO(jiabin): This may has server performance issue
-      intermidiate_tensor_.set_name(tensor.name() + "@Saved");
+      intermediate_tensor_.set_name(tensor.name() + "@Saved");
     }
 
     if (tensor_autograd_meta) {
       auto autograd_meta =
           std::make_shared<AutogradMeta>(*tensor_autograd_meta);
       autograd_meta->ResetGradNode();
-      intermidiate_tensor_.set_autograd_meta(autograd_meta);
+      intermediate_tensor_.set_autograd_meta(autograd_meta);
       weak_grad_node_ = tensor_autograd_meta->GetMutableGradNode();
     }
   }
 
   paddle::Tensor recover() {
-    VLOG(6) << "Recover tensor: " << intermidiate_tensor_.name()
+    VLOG(6) << "Recover tensor: " << intermediate_tensor_.name()
             << " for wrapper";
-    if (!intermidiate_tensor_.defined()) {
+    if (!intermediate_tensor_.defined()) {
       VLOG(6) << "Return NULL tensor Here. ";
       return paddle::Tensor();
     }
@@ -168,19 +168,19 @@ class TensorWrapper {
               "for egr::TensorWrapper::recover"));
       }
 
-      if (intermidiate_tensor_.is_dense_tensor()) {
-        VLOG(6) << "intermidiate_tensor_ is DenseTensor";
-        static_cast<phi::DenseTensor*>(intermidiate_tensor_.impl().get())
+      if (intermediate_tensor_.is_dense_tensor()) {
+        VLOG(6) << "intermediate_tensor_ is DenseTensor";
+        static_cast<phi::DenseTensor*>(intermediate_tensor_.impl().get())
             ->ResetHolder(src_dense_tensor->Holder());
-      } else if (intermidiate_tensor_.is_dist_tensor()) {
-        VLOG(6) << "intermidiate_tensor_ is DistTensor";
+      } else if (intermediate_tensor_.is_dist_tensor()) {
+        VLOG(6) << "intermediate_tensor_ is DistTensor";
         static_cast<phi::distributed::DistTensor*>(
-            intermidiate_tensor_.impl().get())
+            intermediate_tensor_.impl().get())
             ->unsafe_mutable_value()
             ->ResetHolder(src_dense_tensor->Holder());
       } else {
         PADDLE_THROW(
-            common::errors::Fatal("Unrecognized intermidiate_tensor_ type for "
+            common::errors::Fatal("Unrecognized intermediate_tensor_ type for "
                                   "egr::TensorWrapper::recover"));
       }
     } else {
@@ -190,7 +190,7 @@ class TensorWrapper {
     }
 #endif
 
-    paddle::Tensor recovered_tensor = intermidiate_tensor_;
+    paddle::Tensor recovered_tensor = intermediate_tensor_;
     std::shared_ptr<GradNodeBase> new_grad_node = weak_grad_node_.lock();
     if (new_grad_node) {
@@ -200,7 +200,7 @@ class TensorWrapper {
       VLOG(7) << "Recovered TensorWrapper with Empty GradNode";
     }
     auto* intermediate_autograd_meta =
-        EagerUtils::nullable_autograd_meta(intermidiate_tensor_);
+        EagerUtils::nullable_autograd_meta(intermediate_tensor_);
 
     if (intermediate_autograd_meta) {
       auto p_ab_autograd_meta =
@@ -214,9 +214,9 @@ class TensorWrapper {
     return recovered_tensor;
   }
 
-  paddle::Tensor get_intermidiate_tensor() { return intermidiate_tensor_; }
+  paddle::Tensor get_intermediate_tensor() { return intermediate_tensor_; }
 
-  void clear() { intermidiate_tensor_.reset(); }
+  void clear() { intermediate_tensor_.reset(); }
 
  private:
   void check_inplace_version() {
@@ -225,15 +225,15 @@ class TensorWrapper {
                  "no_need_buffer_ is true.";
       return;
     }
-    if (intermidiate_tensor_.impl()) {
+    if (intermediate_tensor_.impl()) {
       phi::DenseTensor* dense_tensor = nullptr;
-      if (phi::DenseTensor::classof(intermidiate_tensor_.impl().get())) {
+      if (phi::DenseTensor::classof(intermediate_tensor_.impl().get())) {
         dense_tensor =
-            static_cast<phi::DenseTensor*>(intermidiate_tensor_.impl().get());
+            static_cast<phi::DenseTensor*>(intermediate_tensor_.impl().get());
       } else if (phi::distributed::DistTensor::classof(
-                     intermidiate_tensor_.impl().get())) {
+                     intermediate_tensor_.impl().get())) {
         dense_tensor = static_cast<phi::distributed::DistTensor*>(
-                           intermidiate_tensor_.impl().get())
+                           intermediate_tensor_.impl().get())
                            ->unsafe_mutable_value();
       } else {
         return;
       }
@@ -253,21 +253,21 @@ class TensorWrapper {
           "Please fix your code to void calling an inplace operator "
           "after using the Tensor which will used in gradient "
           "computation.",
-          intermidiate_tensor_.name(),
+          intermediate_tensor_.name(),
           tensor_version,
           wrapper_version_snapshot));
       VLOG(7) << " The wrapper_version_snapshot of Tensor '"
-              << intermidiate_tensor_.name() << "' is [ "
+              << intermediate_tensor_.name() << "' is [ "
               << wrapper_version_snapshot << " ]";
       VLOG(7) << " The tensor_version of Tensor '"
-              << intermidiate_tensor_.name() << "' is [ " << tensor_version
+              << intermediate_tensor_.name() << "' is [ " << tensor_version
               << " ]";
     }
   }
 
 private:
  bool no_need_buffer_ = false;
-  paddle::Tensor intermidiate_tensor_;
+  paddle::Tensor intermediate_tensor_;
  std::weak_ptr<GradNodeBase> weak_grad_node_;
  uint32_t inplace_version_snapshot_ = 0;
 #ifndef PADDLE_NO_PYTHON
diff --git a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
index 2a7f93fbf21fbb..21287e2dab608b 100644
--- a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
@@ -209,7 +209,7 @@ ir::Graph *FuseBatchNormActPass::FuseBatchNormActGrad(
       batch_norm_grad, batch_norm_grad, bn_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(act_out, act_out, bn_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(
-      d_itermediate_out, d_itermediate_out, bn_act_grad_pattern);
+      d_intermediate_out, d_intermediate_out, bn_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(bn_x, bn_x, bn_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(bn_scale, bn_scale, bn_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(bn_bias, bn_bias, bn_act_grad_pattern);
@@ -225,7 +225,7 @@ ir::Graph *FuseBatchNormActPass::FuseBatchNormActGrad(
 
     std::string d_act_out_n = subgraph.at(d_act_out)->Name();  // Y@GRAD
     std::string act_out_n = act_out->Name();                   // Y
-    std::string d_itermediate_out_n = d_itermediate_out->Name();
+    std::string d_intermediate_out_n = d_intermediate_out->Name();
     std::string bn_x_n = bn_x->Name();
     std::string bn_scale_n = bn_scale->Name();
     std::string bn_bias_n = bn_bias->Name();
@@ -266,14 +266,14 @@ ir::Graph *FuseBatchNormActPass::FuseBatchNormActGrad(
     auto fused_node = g->CreateOpNode(&desc);
 
     VLOG(4) << "\n\t " << d_act_out_n << " and " << act_out_n << " -> "
-            << act_grad->Name() << " -> " << d_itermediate_out_n << "\n\t "
-            << bn_x_n << ", " << d_itermediate_out_n << ", " << bn_scale_n
+            << act_grad->Name() << " -> " << d_intermediate_out_n << "\n\t "
+            << bn_x_n << ", " << d_intermediate_out_n << ", " << bn_scale_n
             << ", " << bn_bias_n << ", " << bn_saved_mean_n << ", "
             << bn_saved_variance_n << " and " << bn_reserve_space_n << " -> "
             << batch_norm_grad->Name() << " -> " << d_bn_x_n << ", "
             << d_bn_scale_n << " and " << d_bn_bias_n;
 
-    ReLinkNodes(g, d_itermediate_out, act_grad, batch_norm_grad, fused_node);
+    ReLinkNodes(g, d_intermediate_out, act_grad, batch_norm_grad, fused_node);
     found_bn_act_count++;
   };
diff --git a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
index 01844d6e0473b5..8a65f40a9227b9 100644
--- a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
@@ -165,7 +165,7 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
   GET_IR_NODE_FROM_SUBGRAPH(act_out, act_out, elewise_add_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(act_grad, act_grad, elewise_add_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(
-      d_itermediate_out, d_itermediate_out, elewise_add_act_grad_pattern);
+      d_intermediate_out, d_intermediate_out, elewise_add_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(ele_y, ele_y, elewise_add_act_grad_pattern);
   GET_IR_NODE_FROM_SUBGRAPH(
       ele_add_grad, ele_add_grad, elewise_add_act_grad_pattern);
@@ -174,7 +174,7 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
 
     std::string d_act_out_n = subgraph.at(d_act_out)->Name();
     std::string act_out_n = act_out->Name();
-    std::string d_itermediate_out_n = d_itermediate_out->Name();
+    std::string d_intermediate_out_n = d_intermediate_out->Name();
     std::string ele_y_n = ele_y->Name();
     std::string d_ele_x_n = d_ele_x->Name();
     std::string d_ele_y_n = d_ele_y->Name();
@@ -189,7 +189,7 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
     desc.SetOutput(GradVarName("X"), std::vector<std::string>({d_ele_x_n}));
     desc.SetOutput(GradVarName("Y"),
                   std::vector<std::string>({d_ele_y_n}));
     desc.SetOutput(GradVarName("IntermediateOut"),
-                   std::vector<std::string>({d_itermediate_out_n}));
+                   std::vector<std::string>({d_intermediate_out_n}));
     desc.SetAttr("axis", -1);
     desc.SetAttr("scale", 0.0f);
@@ -207,11 +207,11 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
     auto fused_node = g->CreateOpNode(&desc);
 
     VLOG(4) << "\n\t " << d_act_out_n << " and " << act_out_n << " -> "
-            << act_grad->Name() << " -> " << d_itermediate_out_n << "\n\t "
-            << d_itermediate_out_n << " and " << act_out_n << " -> "
-            << ele_add_grad->Name() << " -> " << d_itermediate_out_n;
+            << act_grad->Name() << " -> " << d_intermediate_out_n << "\n\t "
+            << d_intermediate_out_n << " and " << act_out_n << " -> "
+            << ele_add_grad->Name() << " -> " << d_intermediate_out_n;
 
-    ReLinkNodes(g, d_itermediate_out, act_grad, ele_add_grad, fused_node);
+    ReLinkNodes(g, d_intermediate_out, act_grad, ele_add_grad, fused_node);
     found_elewise_add_act_count++;
   };
diff --git a/paddle/fluid/framework/ir/fusion_group/subgraph.h b/paddle/fluid/framework/ir/fusion_group/subgraph.h
index 8522275c143370..864ed87de011bb 100644
--- a/paddle/fluid/framework/ir/fusion_group/subgraph.h
+++ b/paddle/fluid/framework/ir/fusion_group/subgraph.h
@@ -148,7 +148,7 @@ class SubGraph {
     for (auto* n : SortedNodes()) {
       if (IsOutputOfInternalOp(n) && IsInputOfInternalOp(n) &&
           !IsInputOfExternalOp(n)) {
-        // When the outputs size is 0, it is also considered a intermidiate
+        // When the outputs size is 0, it is also considered an intermediate
         // output. It maybe an unused output or the fetching vars, so that we
         // cannot eliminate it directly here.
         intermediate_out_vars.push_back(n);
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc
index 5bd167e003f7df..89f26450c736fe 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.cc
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -1365,7 +1365,7 @@ PDNode *patterns::BatchNormActGrad::operator()(
   auto *act_out_var = pattern->NewNode(act_out_repr())
                           ->assert_is_ops_input(act_grad_types, "Out");
   auto *d_intermediate_var =
-      pattern->NewNode(d_itermediate_out_repr())
+      pattern->NewNode(d_intermediate_out_repr())
           ->assert_is_ops_output(act_grad_types, GradVarName("X"))
           ->assert_has_n_outputs(1);
   auto *bn_x_var = pattern->NewNode(bn_x_repr())
@@ -1600,7 +1600,7 @@ PDNode *patterns::ElewiseAddActInplaceGrad::operator()(
       pattern->NewNode(act_out_repr())->assert_is_ops_input(act_types, "Out");
 
   auto *d_intermediate_var =
-      pattern->NewNode(d_itermediate_out_repr())
+      pattern->NewNode(d_intermediate_out_repr())
           ->assert_is_ops_output(act_types, GradVarName("X"));
 
   act_grad->LinksFrom({d_act_out_var, act_out_var})
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h
index 0fd7dd842776a9..ac3fa65ed7bf0c 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.h
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -776,7 +776,7 @@ struct BatchNormActGrad : public PatternBase {
   PATTERN_DECL_NODE(batch_norm_grad);
   // declare variable node's name
   PATTERN_DECL_NODE(act_out);
-  PATTERN_DECL_NODE(d_itermediate_out);
+  PATTERN_DECL_NODE(d_intermediate_out);
   PATTERN_DECL_NODE(bn_x);
   PATTERN_DECL_NODE(bn_scale);
   PATTERN_DECL_NODE(bn_bias);
@@ -918,7 +918,7 @@ struct ActElewiseAdd : public PatternBase {
 // the act is inplace.
 // op: elementwise_add_grad + act_grad
 // named nodes: elementwise_add_grad, act_grad
-// act_out, act_out_g, ele_y, d_itermediate_out, d_ele_x, d_ele_y
+// act_out, act_out_g, ele_y, d_intermediate_out, d_ele_x, d_ele_y
 struct ElewiseAddActInplaceGrad : public PatternBase {
   ElewiseAddActInplaceGrad(PDPattern* pattern, const std::string& name_scope)
       : PatternBase(pattern, name_scope, "elewise_add_act_grad1") {}
@@ -932,7 +932,7 @@ struct ElewiseAddActInplaceGrad : public PatternBase {
   PATTERN_DECL_NODE(ele_add_grad);
   // declare variable node's name
   PATTERN_DECL_NODE(act_out);
-  PATTERN_DECL_NODE(d_itermediate_out);
+  PATTERN_DECL_NODE(d_intermediate_out);
   PATTERN_DECL_NODE(d_ele_x);
   PATTERN_DECL_NODE(d_ele_y);
   PATTERN_DECL_NODE(ele_y);
diff --git a/paddle/fluid/framework/ir/transfer_layout_pass.cc b/paddle/fluid/framework/ir/transfer_layout_pass.cc
index 868635c06ee500..1ca0f37ddb196a 100644
--- a/paddle/fluid/framework/ir/transfer_layout_pass.cc
+++ b/paddle/fluid/framework/ir/transfer_layout_pass.cc
@@ -304,7 +304,7 @@ void TransferLayoutPass::ApplyImpl(ir::Graph *graph) const {
       vars_shape_nhwc.insert(out_var_node);
     }
 
-    // Insert transfer_layout for intermidiate var.
+    // Insert transfer_layout for intermediate var.
     auto op_inputs = op_node->inputs;
     for (auto *in_var_node : op_inputs) {
       PADDLE_ENFORCE_EQ(in_var_node->IsVar(),
diff --git a/paddle/fluid/pybind/eager_generator.cc b/paddle/fluid/pybind/eager_generator.cc
index aaff35058ad8e1..77597031fdb0ee 100644
--- a/paddle/fluid/pybind/eager_generator.cc
+++ b/paddle/fluid/pybind/eager_generator.cc
@@ -2234,8 +2234,8 @@ static std::string GenerateSingleOpBase(
               bwd_inplace_input_name,
               struct_fwd_input_name);
           const char* GRAD_INS_FWD_TENSOR_TEMPLATE =
-              "(&this->%s)->get_intermidiate_tensor()";
-          std::string tensor_wrapper_intermidiate_tensor_str =
+              "(&this->%s)->get_intermediate_tensor()";
+          std::string tensor_wrapper_intermediate_tensor_str =
              paddle::string::Sprintf(GRAD_INS_FWD_TENSOR_TEMPLATE,
                                       struct_fwd_input_name);
           generated_grad_function_body +=
@@ -2249,7 +2249,7 @@ static std::string GenerateSingleOpBase(
                   bwd_inplace_input_name,
                   bwd_inplace_input_name,
                   bwd_inplace_input_name,
-                  tensor_wrapper_intermidiate_tensor_str,
+                  tensor_wrapper_intermediate_tensor_str,
                   can_be_inplaced_name);
         }
       } else if (grad_ins_grad_slotname_map.count(grad_input_name)) {
diff --git a/paddle/phi/infermeta/spmd_rules/squared_l2_norm.cc b/paddle/phi/infermeta/spmd_rules/squared_l2_norm.cc
index 690e8790580ed4..8dea5add592bc1 100644
--- a/paddle/phi/infermeta/spmd_rules/squared_l2_norm.cc
+++ b/paddle/phi/infermeta/spmd_rules/squared_l2_norm.cc
@@ -29,7 +29,7 @@ using phi::distributed::auto_parallel::str_join;
 
 SpmdInfo SquaredL2NormInferSpmd(const DistMetaTensor& x) {
   VLOG(4) << "SquaredL2NormInferSpmd:";
-  VLOG(4) << "Using ReductionInferSpmd Rule as interal implement.";
+  VLOG(4) << "Using ReductionInferSpmd Rule as internal implementation.";
   SpmdInfo info = ReductionInferSpmdBase(
       x, {}, false, static_cast<int>(ReduceType::kRedSum));
   // NOTE: reduce output is 0D tensor which has a dims_mapping as {}, while
diff --git a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/epilogue/epilogue_rescale_output.h b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/epilogue/epilogue_rescale_output.h
index 0f2cc92a23b1e4..7ec84d2089b721 100644
--- a/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/epilogue/epilogue_rescale_output.h
+++ b/paddle/phi/kernels/fusion/cutlass/memory_efficient_attention/epilogue/epilogue_rescale_output.h
@@ -134,7 +134,7 @@ class MemoryEfficientAttentionNormalize {
                   FragmentSource const& source) const {
     assert(!isFirst);
 
-    // Convert source to interal compute numeric type
+    // Convert source to internal compute numeric type
     NumericArrayConverter
         source_converter;
     NumericArrayConverter
@@ -170,7 +170,7 @@ class MemoryEfficientAttentionNormalize {
                   FragmentAccumulator const& accumulator) const {
     assert(isFirst);
 
-    // Convert source to interal compute numeric type
+    // Convert source to internal compute numeric type
     NumericArrayConverter
         accumulator_converter;
 
diff --git a/paddle/phi/kernels/fusion/xpu/fused_linear_param_grad_add_kernel.cc b/paddle/phi/kernels/fusion/xpu/fused_linear_param_grad_add_kernel.cc
index be733265f49a9a..af752ef0383313 100644
--- a/paddle/phi/kernels/fusion/xpu/fused_linear_param_grad_add_kernel.cc
+++ b/paddle/phi/kernels/fusion/xpu/fused_linear_param_grad_add_kernel.cc
@@ -177,12 +177,12 @@ void FusedLinearParamGradAdd(const Context &ctx,
       PADDLE_ENFORCE_EQ(
           dweight_out->dtype(),
           phi::CppTypeToDataType<MT>::Type(),
-          common::errors::InvalidArgument("Invaid data type error."));
+          common::errors::InvalidArgument("Invalid data type error."));
     } else {
       PADDLE_ENFORCE_EQ(
          dweight_out->dtype(),
           phi::CppTypeToDataType<T>::Type(),
-          common::errors::InvalidArgument("Invaid data type error."));
+          common::errors::InvalidArgument("Invalid data type error."));
     }
   } else {
     if (multi_precision) {
@@ -200,12 +200,12 @@ void FusedLinearParamGradAdd(const Context &ctx,
       PADDLE_ENFORCE_EQ(
           dbias_out->dtype(),
           phi::CppTypeToDataType<MT>::Type(),
-          common::errors::InvalidArgument("Invaid data type error."));
+          common::errors::InvalidArgument("Invalid data type error."));
     } else {
       PADDLE_ENFORCE_EQ(
           dbias_out->dtype(),
           phi::CppTypeToDataType<T>::Type(),
-          common::errors::InvalidArgument("Invaid data type error."));
+          common::errors::InvalidArgument("Invalid data type error."));
     }
   } else {
     if (multi_precision) {
diff --git a/python/paddle/distributed/fleet/fleet_executor_utils.py b/python/paddle/distributed/fleet/fleet_executor_utils.py
index 2c1b288f9c1805..05e50a076376c9 100755
--- a/python/paddle/distributed/fleet/fleet_executor_utils.py
+++ b/python/paddle/distributed/fleet/fleet_executor_utils.py
@@ -171,7 +171,7 @@ def __init__(self, dist_opt):
         self.sharding_degree = dist_opt.get('sharding_degree', 1)
         self.mp_degree = dist_opt.get('mp_degree', 1)
 
-    def _invalide_coord(self, coord):
+    def _invalid_coord(self, coord):
         """
         Test the input coord is valid or not.
         :param coord: The coord to be tested
@@ -194,7 +194,7 @@ def coord_to_rank(self, coord):
         :param coord: The coord to be converted
         :return: The rank corresponding with the coord
         """
-        if self._invalide_coord(coord):
+        if self._invalid_coord(coord):
             return -1
         return int(
             coord['dp_idx']
diff --git a/python/paddle/incubate/jit/inference_decorator.py b/python/paddle/incubate/jit/inference_decorator.py
index db628bd6001480..a162489a971b81 100644
--- a/python/paddle/incubate/jit/inference_decorator.py
+++ b/python/paddle/incubate/jit/inference_decorator.py
@@ -640,7 +640,7 @@ def decorator(func=None):
         )
 
         # This is the innermost_decorator, ie. when user invoke the function decorated by @paddle.incubate.jit.inference()
-        # he is actually invoke this internel function.
+        # he is actually invoking this internal function.
         def innermost_decorator(*args, **kwargs):
             input_tensor_lists = infer_engine.get_input_tensor_lists(
                 *args, **kwargs
diff --git a/python/paddle/io/dataloader/dataloader_iter.py b/python/paddle/io/dataloader/dataloader_iter.py
index 63559eb6467a97..836c0b40224c6f 100644
--- a/python/paddle/io/dataloader/dataloader_iter.py
+++ b/python/paddle/io/dataloader/dataloader_iter.py
@@ -739,7 +739,7 @@ def _get_data(self):
             if self._dataset_kind == _DatasetKind.ITER and isinstance(
                 data, _IterableDatasetStopIteration
             ):
-                # if a worker get StopIteraion, we shutdown this worker,
+                # if a worker gets StopIteration, we shut down this worker,
                 # note that this batch indices to trigger StopIteration
                 # is discard, outstanding batch number should be decrease
                 # and another indices should be put for other workers
diff --git a/python/paddle/jit/sot/opcode_translator/executor/opcode_executor.py b/python/paddle/jit/sot/opcode_translator/executor/opcode_executor.py
index d3f94b0107533b..ce46af1953a013 100644
--- a/python/paddle/jit/sot/opcode_translator/executor/opcode_executor.py
+++ b/python/paddle/jit/sot/opcode_translator/executor/opcode_executor.py
@@ -1352,7 +1352,7 @@ def TO_BOOL(self, instr: Instruction):
         next_instr = self._instructions[self._lasti]
         assert (
             next_instr.opname in NEED_TO_BOOL
-        ), f"The bytecode is illegal! The opcode following TO_BOOL must be in ['POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE', 'UNARY_NOT'], the next instuction now is {next_instr.opname}"
+        ), f"The bytecode is illegal! The opcode following TO_BOOL must be in ['POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE', 'UNARY_NOT'], the next instruction now is {next_instr.opname}"
 
     @call_break_graph_decorator(push_n=1)
     def IS_OP(self, instr: Instruction):
diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 575c461f54d914..cdd734c42a07e0 100644
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -1917,7 +1917,7 @@ def ctc_loss(
    An operator integrating the open source Warp-CTC library (https://github.com/baidu-research/warp-ctc)
    to compute Connectionist Temporal Classification (CTC) loss.
    It can be aliased as softmax with CTC, since a native softmax activation
-    is interated to the Warp-CTC library to normalize values for each row of the input tensor.
+    is integrated into the Warp-CTC library to normalize values for each row of the input tensor.
 
     Parameters:
         log_probs (Tensor): The unscaled probability sequence with padding, which is a 3-D Tensor. The tensor shape is [max_logit_length, batch_size, num_classes + 1], where max_logit_length is the longest length of input logit sequence. The data type should be float32 or float64.
diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py
index 94e647293ddce2..e61a81bcc650ae 100644
--- a/python/paddle/nn/layer/common.py
+++ b/python/paddle/nn/layer/common.py
@@ -1948,11 +1948,11 @@ class Fold(Layer):
 
     Parameters:
         output_sizes(list): The size of output size, should be [output_size_h, output_size_w]
-                                  or an interger o treated as [o, o].
+                                  or an integer o treated as [o, o].
         kernel_sizes(int|list|tuple): The size of convolution kernel, should be [k_h, k_w]
                                   or an integer k treated as [k, k].
         strides(int|list|tuple, optional): The strides, should be [stride_h, stride_w]
-                                  or an integer stride treated as [sride, stride].
+                                  or an integer stride treated as [stride, stride].
                                   For default, strides will be [1, 1].
         paddings(int|list|tuple, optional): The paddings of each dimension, should be
                                   [padding_top, padding_left, padding_bottom, padding_right]
diff --git a/python/paddle/nn/utils/clip_grad_norm_.py b/python/paddle/nn/utils/clip_grad_norm_.py
index b14192ac496f44..3026aa946912bf 100644
--- a/python/paddle/nn/utils/clip_grad_norm_.py
+++ b/python/paddle/nn/utils/clip_grad_norm_.py
@@ -33,7 +33,7 @@ def clip_grad_norm_(
     norm_type: float = 2.0,
     error_if_nonfinite: bool = False,
 ) -> Tensor:
-    r"""Clips gradient norm of the iteratable parameters.
+    r"""Clips gradient norm of the iterable parameters.
 
     Norms are calculated together on all gradients, just as they are
     connected into one vector. The gradient will be modified in place.
diff --git a/test/cpp/inference/api/analyzer_lac_tester.cc b/test/cpp/inference/api/analyzer_lac_tester.cc
index 1871a41bfbf514..f5c0868a934295 100644
--- a/test/cpp/inference/api/analyzer_lac_tester.cc
+++ b/test/cpp/inference/api/analyzer_lac_tester.cc
@@ -103,7 +103,7 @@ void GetOneBatch(std::vector *input_slots,
   PADDLE_ENFORCE_EQ(
       batch_size,
       static_cast(one_batch.lod.size() - 1),
-      ::common::errors::Fatal("The lod size of one batch is invaild."));
+      ::common::errors::Fatal("The lod size of one batch is invalid."));
   input_slots->assign({input_tensor});
 }
 
@@ -157,7 +157,7 @@ TEST(Analyzer_LAC, profile) {
     size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t);
     PADDLE_ENFORCE_GE(size,
                       batch1_size,
-                      ::common::errors::Fatal("The size of batch is invaild."));
+                      ::common::errors::Fatal("The size of batch is invalid."));
     int64_t *pdata = static_cast<int64_t *>(output[0].data.data());
     for (size_t i = 0; i < batch1_size; ++i) {
       EXPECT_EQ(pdata[i], lac_ref_data[i]);
diff --git a/test/cpp/inference/api/analyzer_seq_conv1_tester.cc b/test/cpp/inference/api/analyzer_seq_conv1_tester.cc
index 2c7fe3395f74af..268d17b89677db 100644
--- a/test/cpp/inference/api/analyzer_seq_conv1_tester.cc
+++ b/test/cpp/inference/api/analyzer_seq_conv1_tester.cc
@@ -68,7 +68,7 @@ struct DataRecord {
       split(line, '\t', &data);
       PADDLE_ENFORCE_GT(data.size(),
                         4,
-                        common::errors::Fatal("The size of data is invaild."));
+                        common::errors::Fatal("The size of data is invalid."));
       // load title1 data
       std::vector<int64_t> title1_data;
       split_to_int64(data[0], ' ', &title1_data);
diff --git a/tools/enforce/grep_invalid_enforce.sh b/tools/enforce/grep_invalid_enforce.sh
index 9e653c6b90fcc4..57deb7338f7084 100644
--- a/tools/enforce/grep_invalid_enforce.sh
+++ b/tools/enforce/grep_invalid_enforce.sh
@@ -43,7 +43,7 @@
 #    PADDLE_ENFORCE_EQ(addr.size(), 2UL,
 #                      "The endpoint should contain host and port: %s", ep);
 #    PADDLE_THROW("create socket failed");
-#    PADDLE_THROW("invalied address: %s", ep);
+#    PADDLE_THROW("invalid address: %s", ep);
 #
 # - paddle/fluid/imperative/jit/program_desc_tracer.cc
 #    PADDLE_ENFORCE_NOT_NULL(new_var);