Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix-anchor-fusion
huangjiyi committed Aug 4, 2024
2 parents 930f061 + 037a293 commit 49bc25a
Showing 870 changed files with 14,894 additions and 7,888 deletions.
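
Nearly all of the C++ hunks that follow apply the same mechanical migration: bare CHECK(cond) assertions are replaced by PADDLE_ENFORCE_EQ(cond, true, ...) so that a failed check raises a structured Paddle error with a readable message instead of aborting outright. A minimal before/after sketch of the pattern (the cache/key names are hypothetical; the macro and the phi::errors builders are the ones used throughout the diffs below):

// Before: aborts with only the stringified condition as context.
CHECK(cache.emplace(key, value).second);

// After: fails with a structured, descriptive error.
PADDLE_ENFORCE_EQ(
    cache.emplace(key, value).second,
    true,
    phi::errors::InvalidArgument("The key should be unique in the cache."));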
2 changes: 1 addition & 1 deletion .github/PULL_REQUEST_TEMPLATE.md
@@ -2,7 +2,7 @@
<!-- Demo: https://github.com/PaddlePaddle/Paddle/pull/24810 -->

### PR Category
<!-- One of [ User Experience | Execute Infrastructure | Operator Mechanism | CINN | Custom Device | Performance Optimization | Distributed Strategy | Parameter Server | Communication Library | Auto Parallel | Inference | Environment Adaptation | Others ] -->
<!-- One of [ User Experience | Execute Infrastructure | Operator Mechanism | CINN | Custom Device | Performance Optimization | Distributed Strategy | Parameter Server | Communication Library | Auto Parallel | Inference | Environment Adaptation ] -->


### PR Types
2 changes: 1 addition & 1 deletion cmake/external/xpu.cmake
@@ -30,7 +30,7 @@ if(NOT DEFINED XPU_XRE_BASE_VERSION)
set(XPU_XRE_BASE_VERSION "4.32.0.1")
endif()
if(NOT DEFINED XPU_XHPC_BASE_DATE)
set(XPU_XHPC_BASE_DATE "20240712")
set(XPU_XHPC_BASE_DATE "20240730")
endif()
set(XPU_XCCL_BASE_VERSION "1.2.5")
if(NOT DEFINED XPU_XFT_BASE_VERSION)
35 changes: 27 additions & 8 deletions paddle/cinn/adt/generate_map_expr.cc
@@ -170,7 +170,11 @@ hlir::framework::OpPatternKind GetOpPatternKind(const ::pir::Operation* node) {
bool CollectRewrittenReductionOpStmts(const OpStmt& op_stmt,
List<OpStmt>* ret) {
const auto& [op, inputs, outputs] = op_stmt.tuple();
CHECK(op.Has<const ::pir::Operation*>());
PADDLE_ENFORCE_EQ(
op.Has<const ::pir::Operation*>(),
true,
phi::errors::InvalidArgument(
"The op should have a value of type ::pir::Operation*"));
if (GetOpPatternKind(op.Get<const ::pir::Operation*>()) ==
hlir::framework::OpPatternKind::kReduction) {
tReduceInit<const ::pir::Operation*> init_op{
@@ -234,7 +238,10 @@ std::vector<std::shared_ptr<IGroup>> GenerateIGroups(
std::vector<std::shared_ptr<IGroup>> ret{};

List<OpStmt> op_stmts = MakeOpStmts(group);
CHECK(!op_stmts->empty());
PADDLE_ENFORCE_EQ(
!op_stmts->empty(),
true,
phi::errors::InvalidArgument("The op_stmts should not be empty"));

PartitionIGroupOpStmts(op_stmts, [&](const auto& igroup_spec) {
ret.push_back(MakeIGroup(igroup_spec));
@@ -271,9 +278,12 @@ std::unordered_map<Variable, const Value> MakeSdIterator2Iterator(
std::unordered_map<Variable, const Value> ret{};

for (std::size_t i = 0; i < igroup.loop_iterators()->size(); ++i) {
CHECK(ret.emplace(igroup.loop_iterators()->at(i),
igroup.loop_iterators()->at(i))
.second);
PADDLE_ENFORCE_EQ(
ret.emplace(igroup.loop_iterators()->at(i),
igroup.loop_iterators()->at(i))
.second,
true,
phi::errors::InvalidArgument("The loop iterator should be unique"));
}

return ret;
@@ -334,7 +344,10 @@ LoopDescriptor4IterVarT MakeGetterLoopDescriptor4IterVar(
using Cache = std::unordered_map<Iterator, LoopDescriptor>;
const auto& sd_iter2sd = std::make_shared<Cache>();
for (std::size_t i = 0; i < loop_iters->size(); ++i) {
CHECK(sd_iter2sd->emplace(loop_iters->at(i), sd->at(i)).second);
PADDLE_ENFORCE_EQ(
sd_iter2sd->emplace(loop_iters->at(i), sd->at(i)).second,
true,
phi::errors::InvalidArgument("The loop iterator should be unique"));
}
return [sd_iter2sd](const auto& sd_iter) { return sd_iter2sd->at(sd_iter); };
}
@@ -343,7 +356,10 @@ TreeMerger<Stmt> MakeTreeMerger(const MapIr& map_ir) {
using Cache = std::unordered_map<OpStmt, LoopIterators>;
auto cache = std::make_shared<Cache>();
for (const auto& op_stmt : *(map_ir.op_stmts())) {
CHECK(cache->emplace(op_stmt, map_ir.loop_iterators()).second);
PADDLE_ENFORCE_EQ(
cache->emplace(op_stmt, map_ir.loop_iterators()).second,
true,
phi::errors::InvalidArgument("The op_stmt should be unique"));
}

TreeMerger<Stmt> tree_merger{};
@@ -365,7 +381,10 @@ MapStmt<Stmt> MakeMapStmt(const MapIrList& map_irs) {
1UL,
::common::errors::InvalidArgument(
"The size of stmts should be 1, but got %d.", stmts->size()));
CHECK(stmts->at(0).Has<MapStmt<Stmt>>());
PADDLE_ENFORCE_EQ(stmts->at(0).Has<MapStmt<Stmt>>(),
true,
phi::errors::InvalidArgument(
"The stmts should have a value of type MapStmt<Stmt>"));
return stmts->at(0).Get<MapStmt<Stmt>>();
}

20 changes: 16 additions & 4 deletions paddle/cinn/adt/get_sub_reshape_dim_ranges.cc
@@ -37,8 +37,13 @@ GetSubReshapeDimRanges(const List<DimExpr>& lhs_dims,
if (GetNumel(lhs_dims) != GetNumel(rhs_dims)) {
return std::nullopt;
}
CHECK(!lhs_dims->empty());
CHECK(!rhs_dims->empty());
PADDLE_ENFORCE_EQ(
!lhs_dims->empty(),
true,
phi::errors::InvalidArgument("Sorry,but lhs_dims is empty"));
PADDLE_ENFORCE_EQ(!rhs_dims->empty(),
true,
phi::errors::InvalidArgument("Sory,but rhs_dims is empty"));
std::vector<std::pair<int, int>> lhs_ranges{};
std::vector<std::pair<int, int>> rhs_ranges{};
int lhs_start = 0;
@@ -51,7 +56,10 @@ GetSubReshapeDimRanges(const List<DimExpr>& lhs_dims,
end = (end > dims->size() ? dims->size() : end);
std::int64_t ret = 1;
for (std::size_t i = 0; i < end; ++i) {
CHECK(dims->at(i).Has<std::int64_t>());
PADDLE_ENFORCE_EQ(
dims->at(i).Has<std::int64_t>(),
true,
phi::errors::InvalidArgument("dims->at(i) is not int64_t"));
ret *= dims->at(i).Get<std::int64_t>();
}
return ret;
@@ -85,7 +93,11 @@ GetSubReshapeDimRanges(const List<DimExpr>& lhs_dims,
PADDLE_THROW(::common::errors::Fatal("Dead code"));
}
}
CHECK(lhs_end == lhs_dims->size() && rhs_end == rhs_dims->size());
PADDLE_ENFORCE_EQ(lhs_end == lhs_dims->size() && rhs_end == rhs_dims->size(),
true,
phi::errors::InvalidArgument(
"lhs_end is not equal to lhs_dims->size() and rhs_end "
"is not equal to rhs_dims->size()"));
if (lhs_start < lhs_end && rhs_start < rhs_end) {
lhs_ranges.emplace_back(std::make_pair(lhs_start, lhs_end));
rhs_ranges.emplace_back(std::make_pair(rhs_start, rhs_end));
17 changes: 14 additions & 3 deletions paddle/cinn/adt/igroup.h
@@ -92,7 +92,10 @@ class IGroup final {
}

const List<Iterator>& loop_iterators() const {
CHECK(anchor_sd_equation_ctx_.has_value());
PADDLE_ENFORCE_EQ(anchor_sd_equation_ctx_.has_value(),
true,
phi::errors::InvalidArgument(
"The anchor_sd_equation_ctx_ has no value."));
return anchor_sd_equation_ctx_.value().sd_iterators();
}

Expand Down Expand Up @@ -122,13 +125,21 @@ class IGroup final {
for (std::size_t idx = 0; idx < op_inputs.value()->size(); ++idx) {
const auto& index = ctx->GetInIndex(idx);
const auto& tensor = op_inputs.value()->at(idx);
CHECK(index2tensor->emplace(index, tensor).second);
PADDLE_ENFORCE_EQ(
index2tensor->emplace(index, tensor).second,
true,
phi::errors::InvalidArgument(
"The index2tensor map has already contained the index."));
(*tensor2indexes)[tensor].emplace_back(index);
}
for (std::size_t idx = 0; idx < op_outputs.value()->size(); ++idx) {
const auto& index = ctx->GetOutIndex(idx);
const auto& tensor = op_outputs.value()->at(idx);
CHECK(index2tensor->emplace(index, tensor).second);
PADDLE_ENFORCE_EQ(
index2tensor->emplace(index, tensor).second,
true,
phi::errors::InvalidArgument(
"The index2tensor map has already contained the index."));
(*tensor2indexes)[tensor].emplace_back(index);
}
}
35 changes: 28 additions & 7 deletions paddle/cinn/adt/inline_translator.h
@@ -33,7 +33,10 @@ struct InlineTranslator final {
using DstTree = Tree<MapT, DstLeaf>;

static DstTree Call(const SrcTree& src_tree) {
CHECK((src_tree.template Has<MapT<SrcTree>>()));
PADDLE_ENFORCE_EQ((src_tree.template Has<MapT<SrcTree>>()),
true,
phi::errors::InvalidArgument(
"src_tree.template should have <MapT<SrcTree>>()"));
const MapT<DstTree> dst_tree =
CallMap(src_tree.template Get<MapT<SrcTree>>());

@@ -97,7 +100,10 @@ struct InlineTranslator final {
const auto& arg = op_call_children->at(arg_index);
const auto& arg_leaf = arg.template Get<Load<TensorT>>();
const auto& [arg_tensor] = arg_leaf.tuple();
CHECK(producer_tensor == arg_tensor);
PADDLE_ENFORCE_EQ(producer_tensor == arg_tensor,
true,
phi::errors::InvalidArgument(
"producer_tensor should be equal to arg_tensor"));
List<OpExpr> ret{};
ret->assign(op_call_children->begin(), op_call_children->end());
ret->at(arg_index) = producer_tree;
@@ -108,12 +114,20 @@
static void CheckConsumerPosIsLoadTensor(const DstLeaf& consumer,
int arg_index) {
const auto& [tensor, consumer_tree] = consumer.tuple();
CHECK((consumer_tree.template Has<OpCallT<OpExpr>>()));
PADDLE_ENFORCE_EQ(
(consumer_tree.template Has<OpCallT<OpExpr>>()),
true,
phi::errors::InvalidArgument(
"consumer_tree.template should have <OpCallT<OpExpr>>()"));
const auto& op_call = consumer_tree.template Get<OpCallT<OpExpr>>();
const auto& op_call_children =
InlineTranslatorTrait<OpCallT>::GetTreeInnerNodeChildren(op_call);
const auto& op_call_child = op_call_children->at(arg_index);
CHECK((op_call_child.template Has<Load<TensorT>>()));
PADDLE_ENFORCE_EQ(
(op_call_child.template Has<Load<TensorT>>()),
true,
phi::errors::InvalidArgument(
"op_call_child.template should have <Load<TensorT>>()"));
}

template <typename DoEachT>
@@ -163,8 +177,12 @@ struct InlineTranslator final {
std::unordered_map<int, DstLeaf> index2dst_leaf{};
// Init dst leaves
for (int i = 0; i < size; ++i) {
CHECK(index2dst_leaf.emplace(i, NaiveTranslateLeaf(*std::next(begin, i)))
.second);
PADDLE_ENFORCE_EQ(
index2dst_leaf.emplace(i, NaiveTranslateLeaf(*std::next(begin, i)))
.second,
true,
phi::errors::InvalidArgument(
"index2dst_leaf.emplace should return true"));
}
// Inline dst leaves
for (int producer_i = 0; producer_i < size; ++producer_i) {
@@ -195,7 +213,10 @@ struct InlineTranslator final {
// using SrcLeaf = Store<TensorT, OpCallT<Load<TensorT>>>;
// using DstLeaf = Store<TensorT, OpExpr>;
static DstLeaf NaiveTranslateLeaf(const SrcTree& src_tree) {
CHECK(src_tree.template Has<SrcLeaf>());
PADDLE_ENFORCE_EQ(src_tree.template Has<SrcLeaf>(),
true,
phi::errors::InvalidArgument(
"src_tree.template should have <SrcLeaf>()"));
const auto& [tensor, op_call] = src_tree.template Get<SrcLeaf>().tuple();
const List<Load<TensorT>>& src_loads =
InlineTranslatorTrait<OpCallT>::GetTreeInnerNodeChildren(op_call);
24 changes: 18 additions & 6 deletions paddle/cinn/adt/naive_bidirection_equation_generator.cc
@@ -51,7 +51,10 @@ OpArgIndexes<std::optional<Index>> MakeOutMsgOpArgIndexes(
const List<std::optional<Index>>& opt_out_msg_out_indexes) {
List<Index> out_msg_in_indexes{};
for (const auto& out_msg_in_index : *opt_out_msg_in_indexes) {
CHECK(out_msg_in_index.has_value());
PADDLE_ENFORCE_EQ(out_msg_in_index.has_value(),
true,
phi::errors::InvalidArgument(
"The out_msg_in_index should have value."));
out_msg_in_indexes->emplace_back(out_msg_in_index.value());
}
return OpArgIndexes<std::optional<Index>>{out_msg_in_indexes,
@@ -111,9 +114,13 @@ void NaiveBidirectionEquationGenerator::InitInMsgIndex2OutMsgIndex() {
in_msg_indexes,
out_msg_indexes,
[&](const Index& in_index, const Index& out_index) {
CHECK(
PADDLE_ENFORCE_EQ(
this->in_msg_index2out_msg_index_.emplace(in_index, out_index)
.second);
.second,
true,
phi::errors::InvalidArgument(
"The out_msg_index2in_msg_index_ map has already "
"contained the out_index."));
});
};

@@ -160,9 +167,14 @@ NaiveBidirectionEquationGenerator::MakeGetterOpStmt4OpPlaceHolder() const {
std::make_shared<FakeOpPlaceHolder2OpStmt>();

for (std::size_t i = 0; i < fake_op_placeholders_->size(); ++i) {
CHECK(fake_op_placeholder2op_stmt
->emplace(fake_op_placeholders_->at(i), op_stmts_->at(i))
.second);
PADDLE_ENFORCE_EQ(
fake_op_placeholder2op_stmt
->emplace(fake_op_placeholders_->at(i), op_stmts_->at(i))
.second,
true,
phi::errors::InvalidArgument(
"The fake_op_placeholder2op_stmt map has already contained the "
"fake_op_placeholder."));
}

return [fake_op_placeholder2op_stmt](
19 changes: 15 additions & 4 deletions paddle/cinn/adt/naive_op_equation_context.cc
@@ -70,7 +70,9 @@ void GenerateOpEquationsImpl(const ::pir::Operation* op_node,
"generate_equations");
const hlir::framework::Operator* cinn_op = hlir::framework::Operator::Get(
hlir::framework::pir::CompatibleInfo::OpName(*op_node));
CHECK(generate_equations.Find(cinn_op));
PADDLE_ENFORCE_EQ(generate_equations.Find(cinn_op),
true,
phi::errors::NotFound("generate_equations not found"));
generate_equations[cinn_op](ctx);
}

@@ -125,8 +127,14 @@ GetArgStaticDimT MakeGetterArgStaticDim(const List<Tensor>& tensors) {
return [=](std::size_t tensor_idx,
std::size_t dim_idx) -> std::optional<std::int64_t> {
const auto& opt_expr = GetArgDim(tensors, tensor_idx, dim_idx);
CHECK(opt_expr.has_value());
CHECK(opt_expr.value().Has<std::int64_t>());
PADDLE_ENFORCE_EQ(
opt_expr.has_value(),
true,
phi::errors::InvalidArgument("Sorry,but opt_expr don't has value"));
PADDLE_ENFORCE_EQ(opt_expr.value().Has<std::int64_t>(),
true,
phi::errors::InvalidArgument(
"Sorry,but opt_expr should has value int64_t"));
return opt_expr.value().Get<std::int64_t>();
};
}
@@ -212,7 +220,10 @@ GenerateContext4LocalOpStmt(const List<OpStmt>& op_stmts) {

for (const auto& op_stmt : *op_stmts) {
const auto& ctx = MakeContextAndGenerateEquations(op_stmt);
CHECK(op_stmt2equation_ctx->emplace(op_stmt, ctx).second);
PADDLE_ENFORCE_EQ(
op_stmt2equation_ctx->emplace(op_stmt, ctx).second,
true,
phi::errors::InvalidArgument("op_stmt2equation_ctx insert failed"));
}

return [op_stmt2equation_ctx](const auto& op_stmt) {
6 changes: 5 additions & 1 deletion paddle/cinn/adt/schedule_dim.cc
@@ -52,7 +52,11 @@ std::shared_ptr<IndexExprInferContext> InitIndexExprInferContext(
const List<Iterator>& input_iterators) {
std::unordered_map<Variable, const Value> init_var2value;
for (const auto& iterator : *input_iterators) {
CHECK(init_var2value.emplace(iterator, iterator).second);
PADDLE_ENFORCE_EQ(
init_var2value.emplace(iterator, iterator).second,
true,
::common::errors::InvalidArgument(
"Insertion failed in init_var2value map. The key already exists."));
}

return std::make_shared<IndexExprInferContext>(init_var2value);
2 changes: 1 addition & 1 deletion paddle/cinn/adt/schedule_dim.h
@@ -20,7 +20,7 @@
#include "paddle/cinn/adt/equation_value.h"
#include "paddle/cinn/adt/equation_variable.h"
#include "paddle/cinn/adt/schedule_descriptor.h"

#include "paddle/common/enforce.h"
namespace cinn::adt {

DEFINE_ADT_TAG(tReduced);
8 changes: 7 additions & 1 deletion paddle/cinn/backends/codegen_c_x86.cc
@@ -33,7 +33,13 @@ void CodeGenCX86::Visit(const ir::Div *op) {
void CodeGenCX86::Visit(const ir::Load *op) {
Expr dense_strided_ramp = detail::StridedRampBase(op->index(), 1);
if (dense_strided_ramp.defined()) { // Loading a continuous Ramp address.
CHECK(op->type().is_vector());
PADDLE_ENFORCE_EQ(
op->type().is_vector(),
true,
::common::errors::InvalidArgument(
"The operation type is expected to be a vector, but it is not. "
"Please check the operation type and ensure it is correctly set to "
"a vector."));

int bits = op->type().bits() * op->type().lanes();
if (SupportsAVX512() && bits == 512) {
9 changes: 7 additions & 2 deletions paddle/cinn/backends/codegen_c_x86.h
@@ -97,8 +97,13 @@ class CodeGenCX86 : public CodeGenC {
auto index = op->index();
auto *ramp_n = index.template As<ir::Ramp>();
if (ramp_n) {
CHECK(!ramp_n->base.template As<ir::Ramp>())
<< "base of a Ramp node should not be Ramp type";
PADDLE_ENFORCE_EQ(
!ramp_n->base.template As<ir::Ramp>(),
true,
::common::errors::InvalidArgument(
"The base of a Ramp node should not be of Ramp type. "
"Please ensure that the base is correctly set to a non-Ramp "
"type."));
IrPrinter::Visit(ramp_n->base);
} else {
IrPrinter::Visit(op->index());