[CodeStyle][Typos][I-[32-42]] Fix typos(INSTUCTION,Instrution,interger,itermediate,intermidiate,interal,internel,instrinsics,intristic,invalied,invalide,Invaid,invaild,iteratable,interated,Iteraion) #70572

Merged · 6 commits · Jan 6, 2025
20 changes: 3 additions & 17 deletions _typos.toml
@@ -4,6 +4,9 @@ extend-exclude = [
"third_party",
"patches",
"build",
# Skip `intermidiate` check in these files
"test/cpp/eager/task_tests/CMakeLists.txt",
"test/cpp/eager/task_tests/hook_test_intermidiate.cc",
]

[default]
@@ -122,23 +125,6 @@ insid = 'insid'
insepection = 'insepection'
intall = 'intall'
instanciate = 'instanciate'
Instrution = 'Instrution'
INSTUCTION = 'INSTUCTION'
instuction = 'instuction'
interger = 'interger'
intermidiate = 'intermidiate'
itermediate = 'itermediate'
interal = 'interal'
internel = 'internel'
instrinsics = 'instrinsics'
intristic = 'intristic'
invalied = 'invalied'
Invaid = 'Invaid'
invaild = 'invaild'
invalide = 'invalide'
iteratable = 'iteratable'
interated = 'interated'
Iteraion = 'Iteraion'
occured = 'occured'
Ocurred = 'Ocurred'
occures = 'occures'
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/graph_compiler_util.h
@@ -49,7 +49,7 @@ enum class CompilationStatus {
// An error occurred during codegen and jit.
CODEGEN_JIT_FAIL = 2,
// An error occurred during build instruction.
INSTUCTION_FAIL = 3,
INSTRUCTION_FAIL = 3,
// An error occurred during build runtime program.
PROGRAM_FAIL = 4,
// Compile successfully.
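A note on the hunk above: the corrected enumerator belongs to a status enum that later compilation stages branch on. Below is a minimal, self-contained sketch of that idea; the enumerator values are copied from the diff, but the name and value of the success entry are assumptions, since they are not visible here.

```cpp
#include <string>

// Simplified stand-in for the CompilationStatus enum shown above (sketch only).
enum class CompilationStatus {
  CODEGEN_JIT_FAIL = 2,  // error during codegen and jit
  INSTRUCTION_FAIL = 3,  // error during build instruction (spelling fixed by this PR)
  PROGRAM_FAIL = 4,      // error during build runtime program
  SUCCESS = 5,           // assumed name/value for the "Compile successfully." entry
};

// Example consumer: map a status to a human-readable message.
std::string Describe(CompilationStatus s) {
  switch (s) {
    case CompilationStatus::CODEGEN_JIT_FAIL: return "codegen/jit failed";
    case CompilationStatus::INSTRUCTION_FAIL: return "building instructions failed";
    case CompilationStatus::PROGRAM_FAIL:     return "building runtime program failed";
    case CompilationStatus::SUCCESS:          return "compiled successfully";
  }
  return "unknown status";
}
```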
2 changes: 1 addition & 1 deletion paddle/cinn/ir/lowered_func.h
@@ -173,7 +173,7 @@ struct _LoweredFunc_ : public IrNode {
* The output buffer will be resized to the size required, we leave all the
* expression here. The allocation and deallocation expressions will insert
* into the head and tail of the function's body. It supports lazy
* allocation/deallocation if the corresponding intristic methods support.
* allocation/deallocation if the corresponding intrinsic methods support.
*
* Currently, we assume that all the input and output buffers should locate in
* heap, no other memory type is allowed.
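To make the fixed comment concrete: the lowering step prepends buffer-allocation expressions to the function body and appends the matching deallocations at its tail. A toy sketch of that head/tail insertion follows; strings stand in for the real ir::Expr nodes, so this is an illustration of the idea rather than CINN's actual API.

```cpp
#include <string>
#include <vector>

// Toy illustration of the head/tail insertion described in the comment above.
std::vector<std::string> WrapWithBufferLifetime(const std::vector<std::string>& body,
                                                const std::string& output_buffer) {
  std::vector<std::string> result;
  result.push_back("alloc(" + output_buffer + ")");       // inserted at the head
  result.insert(result.end(), body.begin(), body.end());  // original function body
  result.push_back("dealloc(" + output_buffer + ")");     // inserted at the tail
  return result;
}
```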
2 changes: 1 addition & 1 deletion paddle/cinn/operator_fusion/fusion_tracker/interpreter.cc
@@ -249,7 +249,7 @@ std::vector<ir::Expr> FusionInterpreter::Run() {
break;
default:
PADDLE_THROW(
::common::errors::Unavailable("Unsupported Fusion Instrution"));
::common::errors::Unavailable("Unsupported Fusion Instruction"));
}
}

4 changes: 2 additions & 2 deletions paddle/cinn/runtime/cuda/CMakeLists.txt
@@ -11,8 +11,8 @@ gather_srcs(
cuda_util.cc
cuda_intrinsics.cc
cuda_intrinsics_reduce.cc
cuda_instrinsics_float16.cc
cuda_instrinsics_bfloat16.cc)
cuda_intrinsics_float16.cc
cuda_intrinsics_bfloat16.cc)

cinn_nv_test(test_cuda_module SRCS cuda_module_test.cc DEPS cinncore)
cinn_nv_library(cuda_runtime SRCS cinn_cuda_runtime_source.cuh)
6 changes: 3 additions & 3 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -2576,8 +2576,8 @@ def GenerateNodeDefinition(
optional_inplace_var_name.append(
transformed_tensor_name
)
tensor_wrapper_intermidiate_tensor_str = (
f"(&this->{tensor_wrapper_name})->get_intermidiate_tensor()"
tensor_wrapper_intermediate_tensor_str = (
f"(&this->{tensor_wrapper_name})->get_intermediate_tensor()"
)
inplace_check_str += CHECK_BACKWARD_INPLACE_TEMPLATE.format(
transformed_tensor_name,
@@ -2587,7 +2587,7 @@ def GenerateNodeDefinition(
transformed_tensor_name,
transformed_tensor_name,
transformed_tensor_name,
tensor_wrapper_intermidiate_tensor_str,
tensor_wrapper_intermediate_tensor_str,
transformed_tensor_name,
transformed_tensor_name,
transformed_tensor_name,
60 changes: 30 additions & 30 deletions paddle/fluid/eager/tensor_wrapper.h
@@ -71,7 +71,7 @@ class TensorWrapper {
static_cast<phi::DenseTensor*>(tensor.impl().get());
// TODO(jiabin): It's not a good idea to set memory size to zero, find
// another way and change this.
intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
intermediate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
dense_tensor->meta()));
} else if (phi::distributed::DistTensor::classof(tensor.impl().get())) {
@@ -84,7 +84,7 @@
*no_buffer_dist_tensor->unsafe_mutable_value() = phi::DenseTensor(
std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
dist_tensor->value().meta());
intermidiate_tensor_.set_impl(no_buffer_dist_tensor);
intermediate_tensor_.set_impl(no_buffer_dist_tensor);
} else {
PADDLE_THROW(common::errors::Fatal(
"Unrecognized tensor type for no_need_buffer feature"));
@@ -95,15 +95,15 @@
tensor.is_dense_tensor() && tensor.initialized()) {
phi::DenseTensor* dense_tensor =
static_cast<phi::DenseTensor*>(tensor.impl().get());
intermidiate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
intermediate_tensor_.set_impl(std::make_shared<phi::DenseTensor>(
std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
dense_tensor->meta()));
auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
packed_value_ = (*pack_hook)(tensor);
} else if (egr::SavedTensorsHooks::GetInstance().IsEnable() &&
tensor.is_dist_tensor() && tensor.initialized()) {
intermidiate_tensor_.set_impl(
intermediate_tensor_.set_impl(
std::make_shared<phi::distributed::DistTensor>(
tensor.dims(),
static_cast<phi::distributed::DistTensor*>(tensor.impl().get())
@@ -115,37 +115,37 @@
std::make_shared<phi::Allocation>(nullptr, 0, tensor.place()),
dense_tensor.meta());
*(static_cast<phi::distributed::DistTensor*>(
intermidiate_tensor_.impl().get())
intermediate_tensor_.impl().get())
->unsafe_mutable_value()) = tmp;
auto pack_hook = egr::SavedTensorsHooks::GetInstance().GetPackHook();
unpack_hook_ = egr::SavedTensorsHooks::GetInstance().GetUnPackHook();
packed_value_ = (*pack_hook)(tensor);
} else {
#endif
intermidiate_tensor_.set_impl(tensor.impl());
intermediate_tensor_.set_impl(tensor.impl());
#ifndef PADDLE_NO_PYTHON
}
#endif
}

if (VLOG_IS_ON(7)) {
// TODO(jiabin): This may has server performance issue
intermidiate_tensor_.set_name(tensor.name() + "@Saved");
intermediate_tensor_.set_name(tensor.name() + "@Saved");
}

if (tensor_autograd_meta) {
auto autograd_meta =
std::make_shared<AutogradMeta>(*tensor_autograd_meta);
autograd_meta->ResetGradNode();
intermidiate_tensor_.set_autograd_meta(autograd_meta);
intermediate_tensor_.set_autograd_meta(autograd_meta);
weak_grad_node_ = tensor_autograd_meta->GetMutableGradNode();
}
}

paddle::Tensor recover() {
VLOG(6) << "Recover tensor: " << intermidiate_tensor_.name()
VLOG(6) << "Recover tensor: " << intermediate_tensor_.name()
<< " for wrapper";
if (!intermidiate_tensor_.defined()) {
if (!intermediate_tensor_.defined()) {
VLOG(6) << "Return NULL tensor Here. ";
return paddle::Tensor();
}
@@ -168,19 +168,19 @@
"for egr::TensorWrapper::recover"));
}

if (intermidiate_tensor_.is_dense_tensor()) {
VLOG(6) << "intermidiate_tensor_ is DenseTensor";
static_cast<phi::DenseTensor*>(intermidiate_tensor_.impl().get())
if (intermediate_tensor_.is_dense_tensor()) {
VLOG(6) << "intermediate_tensor_ is DenseTensor";
static_cast<phi::DenseTensor*>(intermediate_tensor_.impl().get())
->ResetHolder(src_dense_tensor->Holder());
} else if (intermidiate_tensor_.is_dist_tensor()) {
VLOG(6) << "intermidiate_tensor_ is DistTensor";
} else if (intermediate_tensor_.is_dist_tensor()) {
VLOG(6) << "intermediate_tensor_ is DistTensor";
static_cast<phi::distributed::DistTensor*>(
intermidiate_tensor_.impl().get())
intermediate_tensor_.impl().get())
->unsafe_mutable_value()
->ResetHolder(src_dense_tensor->Holder());
} else {
PADDLE_THROW(
common::errors::Fatal("Unrecognized intermidiate_tensor_ type for "
common::errors::Fatal("Unrecognized intermediate_tensor_ type for "
"egr::TensorWrapper::recover"));
}
} else {
@@ -190,7 +190,7 @@
}
#endif

paddle::Tensor recovered_tensor = intermidiate_tensor_;
paddle::Tensor recovered_tensor = intermediate_tensor_;

std::shared_ptr<GradNodeBase> new_grad_node = weak_grad_node_.lock();
if (new_grad_node) {
@@ -200,7 +200,7 @@
VLOG(7) << "Recovered TensorWrapper with Empty GradNode";
}
auto* intermediate_autograd_meta =
EagerUtils::nullable_autograd_meta(intermidiate_tensor_);
EagerUtils::nullable_autograd_meta(intermediate_tensor_);

if (intermediate_autograd_meta) {
auto p_ab_autograd_meta =
@@ -214,9 +214,9 @@
return recovered_tensor;
}

paddle::Tensor get_intermidiate_tensor() { return intermidiate_tensor_; }
paddle::Tensor get_intermediate_tensor() { return intermediate_tensor_; }

void clear() { intermidiate_tensor_.reset(); }
void clear() { intermediate_tensor_.reset(); }

private:
void check_inplace_version() {
@@ -225,15 +225,15 @@
"no_need_buffer_ is true.";
return;
}
if (intermidiate_tensor_.impl()) {
if (intermediate_tensor_.impl()) {
phi::DenseTensor* dense_tensor = nullptr;
if (phi::DenseTensor::classof(intermidiate_tensor_.impl().get())) {
if (phi::DenseTensor::classof(intermediate_tensor_.impl().get())) {
dense_tensor =
static_cast<phi::DenseTensor*>(intermidiate_tensor_.impl().get());
static_cast<phi::DenseTensor*>(intermediate_tensor_.impl().get());
} else if (phi::distributed::DistTensor::classof(
intermidiate_tensor_.impl().get())) {
intermediate_tensor_.impl().get())) {
dense_tensor = static_cast<phi::distributed::DistTensor*>(
intermidiate_tensor_.impl().get())
intermediate_tensor_.impl().get())
->unsafe_mutable_value();
} else {
return;
@@ -253,21 +253,21 @@
"Please fix your code to void calling an inplace operator "
"after using the Tensor which will used in gradient "
"computation.",
intermidiate_tensor_.name(),
intermediate_tensor_.name(),
tensor_version,
wrapper_version_snapshot));
VLOG(7) << " The wrapper_version_snapshot of Tensor '"
<< intermidiate_tensor_.name() << "' is [ "
<< intermediate_tensor_.name() << "' is [ "
<< wrapper_version_snapshot << " ]";
VLOG(7) << " The tensor_version of Tensor '"
<< intermidiate_tensor_.name() << "' is [ " << tensor_version
<< intermediate_tensor_.name() << "' is [ " << tensor_version
<< " ]";
}
}

private:
bool no_need_buffer_ = false;
paddle::Tensor intermidiate_tensor_;
paddle::Tensor intermediate_tensor_;
std::weak_ptr<egr::GradNodeBase> weak_grad_node_;
uint32_t inplace_version_snapshot_ = 0;
#ifndef PADDLE_NO_PYTHON
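Most of this file is the rename of `intermidiate_tensor_` / `get_intermidiate_tensor()` to the correct spelling. For readers unfamiliar with the class, the underlying idea is: when the buffer is not needed for backward, only the tensor's metadata is saved and the live storage is re-attached later in `recover()`. The standalone sketch below mirrors that pattern with made-up types; it is a conceptual analogue, not the real `egr::TensorWrapper` API.

```cpp
#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

// Stand-in tensor: metadata (dims) is always kept, the holder may be dropped.
struct FakeTensor {
  std::vector<int64_t> dims;
  std::shared_ptr<std::vector<float>> holder;
};

class NoBufferWrapper {
 public:
  explicit NoBufferWrapper(const FakeTensor& t) : saved_{t.dims, nullptr} {}  // keep meta, drop buffer

  FakeTensor recover(std::shared_ptr<std::vector<float>> live_holder) const {
    FakeTensor out = saved_;
    out.holder = std::move(live_holder);  // mirrors the ResetHolder(...) calls above
    return out;
  }

 private:
  FakeTensor saved_;
};

int main() {
  FakeTensor x{{2, 3}, std::make_shared<std::vector<float>>(6, 1.0f)};
  NoBufferWrapper w(x);                // forward pass: save metadata only
  FakeTensor y = w.recover(x.holder);  // backward pass: reattach the storage
  assert(y.holder && y.dims == x.dims);
}
```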
10 changes: 5 additions & 5 deletions paddle/fluid/framework/ir/fuse_bn_act_pass.cc
@@ -209,7 +209,7 @@ ir::Graph *FuseBatchNormActPass::FuseBatchNormActGrad(
batch_norm_grad, batch_norm_grad, bn_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(act_out, act_out, bn_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(
d_itermediate_out, d_itermediate_out, bn_act_grad_pattern);
d_intermediate_out, d_intermediate_out, bn_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(bn_x, bn_x, bn_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(bn_scale, bn_scale, bn_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(bn_bias, bn_bias, bn_act_grad_pattern);
@@ -225,7 +225,7 @@ ir::Graph *FuseBatchNormActPass::FuseBatchNormActGrad(

std::string d_act_out_n = subgraph.at(d_act_out)->Name(); // Y@GRAD
std::string act_out_n = act_out->Name(); // Y
std::string d_itermediate_out_n = d_itermediate_out->Name();
std::string d_intermediate_out_n = d_intermediate_out->Name();
std::string bn_x_n = bn_x->Name();
std::string bn_scale_n = bn_scale->Name();
std::string bn_bias_n = bn_bias->Name();
@@ -266,14 +266,14 @@ ir::Graph *FuseBatchNormActPass::FuseBatchNormActGrad(
auto fused_node = g->CreateOpNode(&desc);

VLOG(4) << "\n\t " << d_act_out_n << " and " << act_out_n << " -> "
<< act_grad->Name() << " -> " << d_itermediate_out_n << "\n\t "
<< bn_x_n << ", " << d_itermediate_out_n << ", " << bn_scale_n
<< act_grad->Name() << " -> " << d_intermediate_out_n << "\n\t "
<< bn_x_n << ", " << d_intermediate_out_n << ", " << bn_scale_n
<< ", " << bn_bias_n << ", " << bn_saved_mean_n << ", "
<< bn_saved_variance_n << " and " << bn_reserve_space_n << " -> "
<< batch_norm_grad->Name() << " -> " << d_bn_x_n << ", "
<< d_bn_scale_n << " and " << d_bn_bias_n;

ReLinkNodes(g, d_itermediate_out, act_grad, batch_norm_grad, fused_node);
ReLinkNodes(g, d_intermediate_out, act_grad, batch_norm_grad, fused_node);
found_bn_act_count++;
};

14 changes: 7 additions & 7 deletions paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
@@ -165,7 +165,7 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
GET_IR_NODE_FROM_SUBGRAPH(act_out, act_out, elewise_add_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(act_grad, act_grad, elewise_add_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(
d_itermediate_out, d_itermediate_out, elewise_add_act_grad_pattern);
d_intermediate_out, d_intermediate_out, elewise_add_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(ele_y, ele_y, elewise_add_act_grad_pattern);
GET_IR_NODE_FROM_SUBGRAPH(
ele_add_grad, ele_add_grad, elewise_add_act_grad_pattern);
@@ -174,7 +174,7 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(

std::string d_act_out_n = subgraph.at(d_act_out)->Name();
std::string act_out_n = act_out->Name();
std::string d_itermediate_out_n = d_itermediate_out->Name();
std::string d_intermediate_out_n = d_intermediate_out->Name();
std::string ele_y_n = ele_y->Name();
std::string d_ele_x_n = d_ele_x->Name();
std::string d_ele_y_n = d_ele_y->Name();
@@ -189,7 +189,7 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
desc.SetOutput(GradVarName("X"), std::vector<std::string>({d_ele_x_n}));
desc.SetOutput(GradVarName("Y"), std::vector<std::string>({d_ele_y_n}));
desc.SetOutput(GradVarName("IntermediateOut"),
std::vector<std::string>({d_itermediate_out_n}));
std::vector<std::string>({d_intermediate_out_n}));

desc.SetAttr("axis", -1);
desc.SetAttr("scale", 0.0f);
@@ -207,11 +207,11 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
auto fused_node = g->CreateOpNode(&desc);

VLOG(4) << "\n\t " << d_act_out_n << " and " << act_out_n << " -> "
<< act_grad->Name() << " -> " << d_itermediate_out_n << "\n\t "
<< d_itermediate_out_n << " and " << act_out_n << " -> "
<< ele_add_grad->Name() << " -> " << d_itermediate_out_n;
<< act_grad->Name() << " -> " << d_intermediate_out_n << "\n\t "
<< d_intermediate_out_n << " and " << act_out_n << " -> "
<< ele_add_grad->Name() << " -> " << d_intermediate_out_n;

ReLinkNodes(g, d_itermediate_out, act_grad, ele_add_grad, fused_node);
ReLinkNodes(g, d_intermediate_out, act_grad, ele_add_grad, fused_node);
found_elewise_add_act_count++;
};

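Both fuse passes above end by calling `ReLinkNodes` with the renamed `d_intermediate_out` node. Conceptually, the two grad ops and the intermediate variable between them are replaced by a single fused op. The toy sketch below shows that rewiring on plain structs; it is not the real ir::Graph/ir::Node API.

```cpp
#include <string>
#include <vector>

struct ToyOp {
  std::string name;
  std::vector<std::string> inputs;
  std::vector<std::string> outputs;
};

ToyOp FuseTwoOps(const ToyOp& first, const ToyOp& second,
                 const std::string& intermediate) {
  // The fused op starts from the first op's inputs and takes over the second op's outputs.
  ToyOp fused{first.name + "_" + second.name, first.inputs, second.outputs};
  // External inputs of the second op are kept; the intermediate variable that
  // connected the two ops disappears from the graph.
  for (const auto& in : second.inputs) {
    if (in != intermediate) fused.inputs.push_back(in);
  }
  return fused;  // callers would then unlink `first`, `second`, and `intermediate`
}
```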
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/fusion_group/subgraph.h
@@ -148,7 +148,7 @@ class SubGraph {
for (auto* n : SortedNodes()) {
if (IsOutputOfInternalOp(n) && IsInputOfInternalOp(n) &&
!IsInputOfExternalOp(n)) {
// When the outputs size is 0, it is also considered a intermidiate
// When the outputs size is 0, it is also considered a intermediate
// output. It maybe an unused output or the fetching vars, so that we
// cannot eliminate it directly here.
intermediate_out_vars.push_back(n);
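A compact way to read the fixed comment: a variable counts as an intermediate output when it is produced inside the subgraph and is not consumed by anything outside it, and having no consumers at all still qualifies, which is why it cannot simply be eliminated. The sketch below is a simplification of the check in the snippet (the real code also requires the node to feed an internal op and operates on ir::Node objects).

```cpp
#include <set>
#include <string>
#include <vector>

struct Var {
  std::string producer;                // op that writes this variable
  std::vector<std::string> consumers;  // ops that read this variable
};

// Produced by an internal op and not read by any external op; an empty
// consumer list also passes, matching the comment above.
bool IsIntermediateOut(const Var& v, const std::set<std::string>& internal_ops) {
  if (internal_ops.count(v.producer) == 0) return false;
  for (const auto& c : v.consumers) {
    if (internal_ops.count(c) == 0) return false;
  }
  return true;
}
```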
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -1365,7 +1365,7 @@ PDNode *patterns::BatchNormActGrad::operator()(
auto *act_out_var = pattern->NewNode(act_out_repr())
->assert_is_ops_input(act_grad_types, "Out");
auto *d_intermediate_var =
pattern->NewNode(d_itermediate_out_repr())
pattern->NewNode(d_intermediate_out_repr())
->assert_is_ops_output(act_grad_types, GradVarName("X"))
->assert_has_n_outputs(1);
auto *bn_x_var = pattern->NewNode(bn_x_repr())
@@ -1600,7 +1600,7 @@ PDNode *patterns::ElewiseAddActInplaceGrad::operator()(
pattern->NewNode(act_out_repr())->assert_is_ops_input(act_types, "Out");

auto *d_intermediate_var =
pattern->NewNode(d_itermediate_out_repr())
pattern->NewNode(d_intermediate_out_repr())
->assert_is_ops_output(act_types, GradVarName("X"));

act_grad->LinksFrom({d_act_out_var, act_out_var})
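The `d_itermediate_out_repr()` → `d_intermediate_out_repr()` rename here has to line up with the retrieval sites in the two fuse passes above, since, roughly speaking, the subgraph node is looked up under the name the pattern declares. The toy illustration below shows that name-keyed lookup; it is not the actual PDPattern machinery.

```cpp
#include <cassert>
#include <map>
#include <string>

// The definition site registers the node under a name and the retrieval site
// looks it up under the same name, so a rename has to touch both files.
int main() {
  std::map<std::string, int> pattern_nodes;
  pattern_nodes["d_intermediate_out"] = 1;                 // pattern definition
  assert(pattern_nodes.count("d_intermediate_out") == 1);  // lookup in the pass
  assert(pattern_nodes.count("d_itermediate_out") == 0);   // the old typo no longer resolves
}
```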