Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PIR]add build cinn pass constrain #59759

Merged
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 48 additions & 3 deletions paddle/fluid/pir/transforms/build_cinn_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,8 @@ std::string GetDebugInfo(const std::unordered_set<std::string>& names) {
return debug_info;
}

bool IsSupportCinn(pir::Operation* op);

// In case an op has some attributes generated by FullOp, it needs to
// implement OpPattern in pd_to_cinn_pass. Otherwise, we mark them
// as unimplemented ops.
Expand All @@ -139,17 +141,57 @@ bool UnimplementOps(pir::Operation* op) {
// CINN
if (op->isa<paddle::dialect::FullOp>()) {
auto out = op->result(0);
if (out.use_count() > 0 &&
out.first_use().owner()->isa<paddle::dialect::UniformOp>()) {
return true;
if (out.use_count() > 0) {
return !IsSupportCinn(out.first_use().owner());
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个意味着 Full 的输出如果传递给了一个CINN不支持的算子,则full本身单算子不会交给CINN来生成kernel?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

是的, 这个逻辑后面会跟build cinn pass 统一优化下

}

return false;
} else if (op->isa<paddle::dialect::DropoutOp>()) {
return true;
}
return false;
}

// Returns true if any operand of `op` is a zero-dimensional dense
// tensor (i.e. its DenseTensorType reports an empty dims()).
// Non-tensor and null operands are ignored.
bool HaveZeroDimInput(pir::Operation* op) {
  for (size_t idx = 0; idx < op->num_operands(); ++idx) {
    auto value = op->operand_source(idx);
    if (!value) {
      continue;
    }
    auto dense_type =
        value.type().dyn_cast<paddle::dialect::DenseTensorType>();
    if (dense_type && dense_type.dims().size() == 0) {
      return true;
    }
  }
  return false;
}

// Returns true only when every non-null operand of `op` has
// DenseTensorType. Fixes the misspelled local (`all_denese_tensor`)
// and exits as soon as a non-dense operand is found.
bool AllInputDenseTensor(pir::Operation* op) {
  for (size_t i = 0; i < op->num_operands(); ++i) {
    auto in = op->operand_source(i);
    // Null operands (optional inputs) do not disqualify the op.
    if (in && !in.type().isa<paddle::dialect::DenseTensorType>()) {
      return false;
    }
  }
  return true;
}

bool IsSupportCinn(pir::Operation* op) {
if (!AllInputDenseTensor(op)) {
return false;
}

if (HaveZeroDimInput(op)) {
return false;
}

auto allow_ops = StringSplit(FLAGS_allow_cinn_ops, kDelim);
auto deny_ops = StringSplit(FLAGS_deny_cinn_ops, kDelim);
VLOG(4) << "The allowed Cinn Ops: " << GetDebugInfo(allow_ops);
Expand All @@ -162,6 +204,9 @@ bool IsSupportCinn(pir::Operation* op) {

// Strip the dialect, like pd_op.abs -> abs
const auto op_name = CompatibleInfo::OpName(*op);
if (op_name == "matmul") {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

类似这样的名单更推荐添加到 line 110行中的 OpTransInfo.default_deny_ops_里,统一管理

return false;
}
OpTransInfo trans_info;
bool is_support = CompatibleInfo::IsSupportCinn(*op) &&
!trans_info.default_deny_ops().count(op_name);
Expand Down