diff --git a/clang/include/clang/CIR/CIRGenerator.h b/clang/include/clang/CIR/CIRGenerator.h
index 78e6055ca993..f4c30a5e892b 100644
--- a/clang/include/clang/CIR/CIRGenerator.h
+++ b/clang/include/clang/CIR/CIRGenerator.h
@@ -66,7 +66,7 @@ class CIRGenerator : public clang::ASTConsumer {
     ~HandlingTopLevelDeclRAII() {
       unsigned Level = --Self.HandlingTopLevelDecls;
       if (Level == 0 && EmitDeferred)
-        Self.buildDeferredDecls();
+        Self.emitDeferredDecls();
     }
   };
 
@@ -101,8 +101,8 @@ class CIRGenerator : public clang::ASTConsumer {
 
   bool verifyModule();
 
-  void buildDeferredDecls();
-  void buildDefaultMethods();
+  void emitDeferredDecls();
+  void emitDefaultMethods();
 };
 
 } // namespace cir
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 412ac4385f4b..afc1e6b4f148 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -54,9 +54,9 @@ constexpr bool cirCConvAssertionMode =
 namespace cir {
 
 struct MissingFeatures {
-  // TODO(CIR): Implement the CIRGenFunction::buildTypeCheck method that handles
+  // TODO(CIR): Implement the CIRGenFunction::emitTypeCheck method that handles
   // sanitizer related type check features
-  static bool buildTypeCheck() { return false; }
+  static bool emitTypeCheck() { return false; }
   static bool tbaa() { return false; }
   static bool cleanups() { return false; }
   static bool emitNullabilityCheck() { return false; }
@@ -128,8 +128,8 @@ struct MissingFeatures {
   // Missing Emissions
   static bool variablyModifiedTypeEmission() { return false; }
-  static bool buildLValueAlignmentAssumption() { return false; }
-  static bool buildDerivedToBaseCastForDevirt() { return false; }
+  static bool emitLValueAlignmentAssumption() { return false; }
+  static bool emitDerivedToBaseCastForDevirt() { return false; }
   static bool emitFunctionEpilog() { return false; }
 
   // References related stuff
@@ -226,7 +226,7 @@ struct MissingFeatures {
   static bool deferredReplacements() { return false; }
   static bool shouldInstrumentFunction() { return false; }
   static bool xray() { return false; }
-  static bool buildConstrainedFPCall() { return false; }
+  static bool emitConstrainedFPCall() { return false; }
   static bool emitEmptyRecordCheck() { return false; }
 
   // Inline assembly
diff --git a/clang/lib/CIR/CodeGen/CIRAsm.cpp b/clang/lib/CIR/CodeGen/CIRAsm.cpp
index 39a2ee8192d7..a20e75e07423 100644
--- a/clang/lib/CIR/CodeGen/CIRAsm.cpp
+++ b/clang/lib/CIR/CodeGen/CIRAsm.cpp
@@ -200,13 +200,13 @@ static void collectInOutConstrainsInfos(const CIRGenFunction &cgf,
   }
 }
 
-std::pair<mlir::Value, mlir::Type> CIRGenFunction::buildAsmInputLValue(
+std::pair<mlir::Value, mlir::Type> CIRGenFunction::emitAsmInputLValue(
     const TargetInfo::ConstraintInfo &Info, LValue InputValue,
     QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
 
   if (Info.allowsRegister() || !Info.allowsMemory()) {
     if (hasScalarEvaluationKind(InputType))
-      return {buildLoadOfLValue(InputValue, Loc).getScalarVal(), mlir::Type()};
+      return {emitLoadOfLValue(InputValue, Loc).getScalarVal(), mlir::Type()};
 
     mlir::Type Ty = convertType(InputType);
     uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
@@ -226,9 +226,9 @@ std::pair<mlir::Value, mlir::Type> CIRGenFunction::buildAsmInputLValue(
 }
 
 std::pair<mlir::Value, mlir::Type>
-CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info,
-                              const Expr *InputExpr,
-                              std::string &ConstraintStr) {
+CIRGenFunction::emitAsmInput(const TargetInfo::ConstraintInfo &Info,
+                             const Expr *InputExpr,
+                             std::string &ConstraintStr) {
   auto loc = getLoc(InputExpr->getExprLoc());
 
   // If this can't be a register or memory, i.e., has to be a constant
@@ -251,23 +251,23 @@ CIRGenFunction::buildAsmInput(const TargetInfo::ConstraintInfo &Info,
   if (Info.allowsRegister() || !Info.allowsMemory())
     if (CIRGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
-      return {buildScalarExpr(InputExpr), mlir::Type()};
+      return {emitScalarExpr(InputExpr), mlir::Type()};
 
   if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
-    return {buildScalarExpr(InputExpr), mlir::Type()};
+    return {emitScalarExpr(InputExpr), mlir::Type()};
 
   InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
-  LValue Dest = buildLValue(InputExpr);
-  return buildAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
-                             InputExpr->getExprLoc());
+  LValue Dest = emitLValue(InputExpr);
+  return emitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
+                            InputExpr->getExprLoc());
 }
 
-static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S,
-                           const llvm::ArrayRef<mlir::Value> RegResults,
-                           const llvm::ArrayRef<mlir::Type> ResultRegTypes,
-                           const llvm::ArrayRef<mlir::Type> ResultTruncRegTypes,
-                           const llvm::ArrayRef<LValue> ResultRegDests,
-                           const llvm::ArrayRef<QualType> ResultRegQualTys,
-                           const llvm::BitVector &ResultTypeRequiresCast,
-                           const llvm::BitVector &ResultRegIsFlagReg) {
+static void emitAsmStores(CIRGenFunction &CGF, const AsmStmt &S,
+                          const llvm::ArrayRef<mlir::Value> RegResults,
+                          const llvm::ArrayRef<mlir::Type> ResultRegTypes,
+                          const llvm::ArrayRef<mlir::Type> ResultTruncRegTypes,
+                          const llvm::ArrayRef<LValue> ResultRegDests,
+                          const llvm::ArrayRef<QualType> ResultRegQualTys,
+                          const llvm::BitVector &ResultTypeRequiresCast,
+                          const llvm::BitVector &ResultRegIsFlagReg) {
   CIRGenBuilderTy &Builder = CGF.getBuilder();
   CIRGenModule &CGM = CGF.CGM;
   auto CTX = Builder.getContext();
@@ -337,11 +337,11 @@ static void buildAsmStores(CIRGenFunction &CGF, const AsmStmt &S,
       Dest = CGF.makeAddrLValue(A, Ty);
     }
 
-    CGF.buildStoreThroughLValue(RValue::get(Tmp), Dest);
+    CGF.emitStoreThroughLValue(RValue::get(Tmp), Dest);
   }
 }
 
-mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
+mlir::LogicalResult CIRGenFunction::emitAsmStmt(const AsmStmt &S) {
   // Assemble the final asm string.
   std::string AsmString = S.generateAsmString(getContext());
 
@@ -405,7 +405,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
 
     OutputConstraints.push_back(OutputConstraint);
-    LValue Dest = buildLValue(OutExpr);
+    LValue Dest = emitLValue(OutExpr);
 
     if (!Constraints.empty())
      Constraints += ',';
@@ -496,8 +496,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
         mlir::Value Arg;
         mlir::Type ArgElemType;
         std::tie(Arg, ArgElemType) =
-            buildAsmInputLValue(Info, Dest, InputExpr->getType(),
-                                InOutConstraints, InputExpr->getExprLoc());
+            emitAsmInputLValue(Info, Dest, InputExpr->getType(), InOutConstraints,
+                               InputExpr->getExprLoc());
 
         if (mlir::Type AdjTy = getTargetHooks().adjustInlineAsmType(
                 *this, OutputConstraint, Arg.getType()))
@@ -555,7 +555,7 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
     std::string ReplaceConstraint(InputConstraint);
     mlir::Value Arg;
     mlir::Type ArgElemType;
-    std::tie(Arg, ArgElemType) = buildAsmInput(Info, InputExpr, Constraints);
+    std::tie(Arg, ArgElemType) = emitAsmInput(Info, InputExpr, Constraints);
 
     // If this input argument is tied to a larger output result, extend the
     // input to be the same size as the output.  The LLVM backend wants to see
@@ -676,8 +676,8 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
   } else if (ResultRegTypes.size() > 1) {
     auto alignment = CharUnits::One();
     auto sname = cast<cir::StructType>(ResultType).getName();
-    auto dest = buildAlloca(sname, ResultType, getLoc(S.getAsmLoc()),
-                            alignment, false);
+    auto dest = emitAlloca(sname, ResultType, getLoc(S.getAsmLoc()), alignment,
+                           false);
     auto addr = Address(dest, alignment);
     builder.createStore(getLoc(S.getAsmLoc()), result, addr);
 
@@ -692,9 +692,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
     }
   }
 
-  buildAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
-                 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
-                 ResultRegIsFlagReg);
+  emitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
+                ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+                ResultRegIsFlagReg);
 
   return mlir::success();
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index e2958d9450fe..41fcd60179d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -261,11 +261,11 @@ class AtomicInfo {
 
 // This function emits any expression (scalar, complex, or aggregate)
 // into a temporary alloca.
-static Address buildValToTemp(CIRGenFunction &CGF, Expr *E) {
+static Address emitValToTemp(CIRGenFunction &CGF, Expr *E) {
   Address DeclPtr = CGF.CreateMemTemp(
       E->getType(), CGF.getLoc(E->getSourceRange()), ".atomictmp");
-  CGF.buildAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
-                        /*Init*/ true);
+  CGF.emitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+                       /*Init*/ true);
   return DeclPtr;
 }
 
@@ -372,7 +372,7 @@ static bool isCstWeak(mlir::Value weakVal, bool &val) {
 
 // Create a "default:" label and add it to the given collection of case labels.
 // Create the region that will hold the body of the "default:" block.
-static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) {
+static void emitDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) {
   auto EmptyArrayAttr = builder.getArrayAttr({});
   mlir::OpBuilder::InsertPoint insertPoint;
   builder.create<cir::CaseOp>(loc, EmptyArrayAttr, cir::CaseOpKind::Default,
@@ -383,9 +383,8 @@ static void buildDefaultCase(CIRGenBuilderTy &builder, mlir::Location loc) {
 // Create a single "case" label with the given MemOrder as its value. Add the
 // "case" label to the given collection of case labels. Create the region that
 // will hold the body of the "case" block.
-static void buildSingleMemOrderCase(CIRGenBuilderTy &builder,
-                                    mlir::Location loc, mlir::Type Type,
-                                    cir::MemOrder Order) {
+static void emitSingleMemOrderCase(CIRGenBuilderTy &builder, mlir::Location loc,
+                                   mlir::Type Type, cir::MemOrder Order) {
   SmallVector<mlir::Attribute, 1> OneOrder{
       cir::IntAttr::get(Type, static_cast<int64_t>(Order))};
   auto OneAttribute = builder.getArrayAttr(OneOrder);
@@ -398,10 +397,9 @@ static void buildSingleMemOrderCase(CIRGenBuilderTy &builder,
 // Create a pair of "case" labels with the given MemOrders as their values.
 // Add the combined "case" attribute to the given collection of case labels.
 // Create the region that will hold the body of the "case" block.
-static void buildDoubleMemOrderCase(CIRGenBuilderTy &builder,
-                                    mlir::Location loc, mlir::Type Type,
-                                    cir::MemOrder Order1,
-                                    cir::MemOrder Order2) {
+static void emitDoubleMemOrderCase(CIRGenBuilderTy &builder, mlir::Location loc,
+                                   mlir::Type Type, cir::MemOrder Order1,
+                                   cir::MemOrder Order2) {
   SmallVector<mlir::Attribute, 2> TwoOrders{
       cir::IntAttr::get(Type, static_cast<int64_t>(Order1)),
       cir::IntAttr::get(Type, static_cast<int64_t>(Order2))};
@@ -412,12 +410,12 @@ static void buildDoubleMemOrderCase(CIRGenBuilderTy &builder,
   builder.restoreInsertionPoint(insertPoint);
 }
 
-static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
-                               Address Dest, Address Ptr, Address Val1,
-                               Address Val2, uint64_t Size,
-                               cir::MemOrder SuccessOrder,
-                               cir::MemOrder FailureOrder,
-                               llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
+                              Address Dest, Address Ptr, Address Val1,
+                              Address Val2, uint64_t Size,
+                              cir::MemOrder SuccessOrder,
+                              cir::MemOrder FailureOrder,
+                              llvm::SyncScope::ID Scope) {
   auto &builder = CGF.getBuilder();
   auto loc = CGF.getLoc(E->getSourceRange());
   auto Expected = builder.createLoad(loc, Val1);
@@ -442,14 +440,14 @@ static void buildAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
   });
 
   // Update the memory at Dest with Cmp's value.
-  CGF.buildStoreOfScalar(cmpxchg.getCmp(),
-                         CGF.makeAddrLValue(Dest, E->getType()));
+  CGF.emitStoreOfScalar(cmpxchg.getCmp(),
+                        CGF.makeAddrLValue(Dest, E->getType()));
 }
 
 /// Given an ordering required on success, emit all possible cmpxchg
 /// instructions to cope with the provided (but possibly only dynamically known)
 /// FailureOrder.
-static void buildAtomicCmpXchgFailureSet(
+static void emitAtomicCmpXchgFailureSet(
     CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
     Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size,
     cir::MemOrder SuccessOrder, llvm::SyncScope::ID Scope) {
@@ -483,8 +481,8 @@ static void buildAtomicCmpXchgFailureSet(
     // success argument". This condition has been lifted and the only
     // precondition is 31.7.2.18. Effectively treat this as a DR and skip
     // language version checks.
-    buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
-                       SuccessOrder, FailureOrder, Scope);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
+                      FailureOrder, Scope);
     return;
   }
 
@@ -502,9 +500,9 @@ static void buildAtomicCmpXchgFailureSet(
         // default:
         // Unsupported memory orders get generated as memory_order_relaxed,
        // because there is no practical way to report an error at runtime.
-        buildDefaultCase(builder, loc);
-        buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
-                           SuccessOrder, cir::MemOrder::Relaxed, Scope);
+        emitDefaultCase(builder, loc);
+        emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
+                          SuccessOrder, cir::MemOrder::Relaxed, Scope);
         builder.createBreak(loc);
 
         builder.setInsertionPointToEnd(switchBlock);
 
@@ -513,10 +511,10 @@ static void buildAtomicCmpXchgFailureSet(
         // case acquire:
         // memory_order_consume is not implemented and always falls back to
         // memory_order_acquire
-        buildDoubleMemOrderCase(builder, loc, FailureOrderVal.getType(),
-                                cir::MemOrder::Consume, cir::MemOrder::Acquire);
-        buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
-                           SuccessOrder, cir::MemOrder::Acquire, Scope);
+        emitDoubleMemOrderCase(builder, loc, FailureOrderVal.getType(),
+                               cir::MemOrder::Consume, cir::MemOrder::Acquire);
+        emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
+                          SuccessOrder, cir::MemOrder::Acquire, Scope);
         builder.createBreak(loc);
 
         builder.setInsertionPointToEnd(switchBlock);
@@ -526,11 +524,11 @@ static void buildAtomicCmpXchgFailureSet(
         // the failure memory order. They fall back to memory_order_relaxed.
 
         // case seq_cst:
-        buildSingleMemOrderCase(builder, loc, FailureOrderVal.getType(),
-                                cir::MemOrder::SequentiallyConsistent);
-        buildAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
-                           SuccessOrder, cir::MemOrder::SequentiallyConsistent,
-                           Scope);
+        emitSingleMemOrderCase(builder, loc, FailureOrderVal.getType(),
+                               cir::MemOrder::SequentiallyConsistent);
+        emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
+                          SuccessOrder, cir::MemOrder::SequentiallyConsistent,
+                          Scope);
         builder.createBreak(loc);
 
         builder.setInsertionPointToEnd(switchBlock);
@@ -538,10 +536,10 @@ static void buildAtomicCmpXchgFailureSet(
       });
 }
 
-static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
-                          Address Ptr, Address Val1, Address Val2,
-                          mlir::Value IsWeak, mlir::Value FailureOrder,
-                          uint64_t Size, cir::MemOrder Order, uint8_t Scope) {
+static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
+                         Address Ptr, Address Val1, Address Val2,
+                         mlir::Value IsWeak, mlir::Value FailureOrder,
+                         uint64_t Size, cir::MemOrder Order, uint8_t Scope) {
   assert(!cir::MissingFeatures::syncScopeID());
   StringRef Op;
 
@@ -559,8 +557,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
-    buildAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                 FailureOrder, Size, Order, Scope);
+    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
+                                FailureOrder, Size, Order, Scope);
     return;
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
@@ -573,8 +571,8 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
     bool weakVal;
     if (isCstWeak(IsWeak, weakVal)) {
-      buildAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2,
-                                   FailureOrder, Size, Order, Scope);
+      emitAtomicCmpXchgFailureSet(CGF, E, weakVal, Dest, Ptr, Val1, Val2,
+                                  FailureOrder, Size, Order, Scope);
     } else {
       llvm_unreachable("NYI");
     }
@@ -769,27 +767,27 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
   builder.createStore(loc, Result, Dest);
 }
 
-static RValue buildAtomicLibcall(CIRGenFunction &CGF, StringRef fnName,
-                                 QualType resultType, CallArgList &args) {
+static RValue emitAtomicLibcall(CIRGenFunction &CGF, StringRef fnName,
+                                QualType resultType, CallArgList &args) {
   [[maybe_unused]] const CIRGenFunctionInfo &fnInfo =
       CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
   [[maybe_unused]] auto fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
   llvm_unreachable("NYI");
 }
 
-static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
-                          Address Ptr, Address Val1, Address Val2,
-                          mlir::Value IsWeak, mlir::Value FailureOrder,
-                          uint64_t Size, cir::MemOrder Order,
-                          mlir::Value Scope) {
+static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
+                         Address Ptr, Address Val1, Address Val2,
+                         mlir::Value IsWeak, mlir::Value FailureOrder,
+                         uint64_t Size, cir::MemOrder Order,
+                         mlir::Value Scope) {
   auto ScopeModel = Expr->getScopeModel();
 
   // LLVM atomic instructions always have synch scope. If clang atomic
   // expression has no scope operand, use default LLVM synch scope.
   if (!ScopeModel) {
     assert(!cir::MissingFeatures::syncScopeID());
-    buildAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                  Order, /*FIXME(cir): LLVM default scope*/ 1);
+    emitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
+                 Order, /*FIXME(cir): LLVM default scope*/ 1);
     return;
   }
 
@@ -804,7 +802,7 @@ static void buildAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
     llvm_unreachable("NYI");
 }
 
-RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
+RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *E) {
   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
   QualType MemTy = AtomicTy;
   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
@@ -814,12 +812,12 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
   Address Val1 = Address::invalid();
   Address Val2 = Address::invalid();
   Address Dest = Address::invalid();
-  Address Ptr = buildPointerWithAlignment(E->getPtr());
+  Address Ptr = emitPointerWithAlignment(E->getPtr());
 
   if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
       E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
     LValue lvalue = makeAddrLValue(Ptr, AtomicTy);
-    buildAtomicInit(E->getVal1(), lvalue);
+    emitAtomicInit(E->getVal1(), lvalue);
     return RValue::get(nullptr);
   }
 
@@ -842,8 +840,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
         << (int)TInfo.Width.getQuantity()
         << (int)MaxInlineWidth.getQuantity();
   }
-  auto Order = buildScalarExpr(E->getOrder());
-  auto Scope = E->getScopeModel() ? buildScalarExpr(E->getScope()) : nullptr;
+  auto Order = emitScalarExpr(E->getOrder());
+  auto Scope = E->getScopeModel() ? emitScalarExpr(E->getScope()) : nullptr;
   bool ShouldCastToIntPtrTy = true;
 
   switch (E->getOp()) {
@@ -860,18 +858,18 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
 
   case AtomicExpr::AO__atomic_load:
   case AtomicExpr::AO__scoped_atomic_load:
-    Dest = buildPointerWithAlignment(E->getVal1());
+    Dest = emitPointerWithAlignment(E->getVal1());
     break;
 
   case AtomicExpr::AO__atomic_store:
   case AtomicExpr::AO__scoped_atomic_store:
-    Val1 = buildPointerWithAlignment(E->getVal1());
+    Val1 = emitPointerWithAlignment(E->getVal1());
     break;
 
   case AtomicExpr::AO__atomic_exchange:
   case AtomicExpr::AO__scoped_atomic_exchange:
-    Val1 = buildPointerWithAlignment(E->getVal1());
-    Dest = buildPointerWithAlignment(E->getVal2());
+    Val1 = emitPointerWithAlignment(E->getVal1());
+    Dest = emitPointerWithAlignment(E->getVal2());
     break;
 
   case AtomicExpr::AO__atomic_compare_exchange:
@@ -884,18 +882,18 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
   case AtomicExpr::AO__scoped_atomic_compare_exchange:
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
-    Val1 = buildPointerWithAlignment(E->getVal1());
+    Val1 = emitPointerWithAlignment(E->getVal1());
     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
         E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
-      Val2 = buildPointerWithAlignment(E->getVal2());
+      Val2 = emitPointerWithAlignment(E->getVal2());
     else
-      Val2 = buildValToTemp(*this, E->getVal2());
-    OrderFail = buildScalarExpr(E->getOrderFail());
+      Val2 = emitValToTemp(*this, E->getVal2());
+    OrderFail = emitScalarExpr(E->getOrderFail());
     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
         E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
         E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
         E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange) {
-      IsWeak = buildScalarExpr(E->getWeak());
+      IsWeak = emitScalarExpr(E->getWeak());
     }
     break;
 
@@ -970,7 +968,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__scoped_atomic_xor_fetch:
   case AtomicExpr::AO__scoped_atomic_store_n:
   case AtomicExpr::AO__scoped_atomic_exchange_n:
-    Val1 = buildValToTemp(*this, E->getVal1());
+    Val1 = emitValToTemp(*this, E->getVal1());
     break;
   }
 
@@ -1164,7 +1162,7 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
   }
 
   [[maybe_unused]] RValue Res =
-      buildAtomicLibcall(*this, LibCallName, RetTy, Args);
+      emitAtomicLibcall(*this, LibCallName, RetTy, Args);
 
   // The value is returned directly from the libcall.
   if (E->isCmpXChg()) {
     llvm_unreachable("NYI");
   }
 
@@ -1201,31 +1199,31 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
   if (cir::isValidCIRAtomicOrderingCABI(ord)) {
     switch ((cir::MemOrder)ord) {
     case cir::MemOrder::Relaxed:
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::Relaxed, Scope);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::Relaxed, Scope);
      break;
     case cir::MemOrder::Consume:
     case cir::MemOrder::Acquire:
       if (IsStore)
         break; // Avoid crashing on code with undefined behavior
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::Acquire, Scope);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::Acquire, Scope);
       break;
     case cir::MemOrder::Release:
       if (IsLoad)
         break; // Avoid crashing on code with undefined behavior
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::Release, Scope);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::Release, Scope);
       break;
     case cir::MemOrder::AcquireRelease:
       if (IsLoad || IsStore)
         break; // Avoid crashing on code with undefined behavior
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::AcquireRelease, Scope);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::AcquireRelease, Scope);
       break;
     case cir::MemOrder::SequentiallyConsistent:
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::SequentiallyConsistent, Scope);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::SequentiallyConsistent, Scope);
       break;
     }
   }
@@ -1250,9 +1248,9 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
       // order value that is not supported. There is no good way to report
      // an unsupported memory order at runtime, hence the fallback to
      // memory_order_relaxed.
-      buildDefaultCase(builder, loc);
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::Relaxed, Scope);
+      emitDefaultCase(builder, loc);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::Relaxed, Scope);
       builder.createBreak(loc);
 
       builder.setInsertionPointToEnd(switchBlock);
@@ -1263,11 +1261,11 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
         // memory_order_consume is not implemented; it is always treated like
         // memory_order_acquire. These memory orders are not valid for
         // write-only operations.
-        buildDoubleMemOrderCase(builder, loc, Order.getType(),
-                                cir::MemOrder::Consume,
-                                cir::MemOrder::Acquire);
-        buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                      Size, cir::MemOrder::Acquire, Scope);
+        emitDoubleMemOrderCase(builder, loc, Order.getType(),
+                               cir::MemOrder::Consume,
+                               cir::MemOrder::Acquire);
+        emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                     cir::MemOrder::Acquire, Scope);
         builder.createBreak(loc);
       }
 
@@ -1276,10 +1274,10 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
       if (!IsLoad) {
         // case release:
        // memory_order_release is not valid for read-only operations.
-        buildSingleMemOrderCase(builder, loc, Order.getType(),
-                                cir::MemOrder::Release);
-        buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                      Size, cir::MemOrder::Release, Scope);
+        emitSingleMemOrderCase(builder, loc, Order.getType(),
+                               cir::MemOrder::Release);
+        emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                     cir::MemOrder::Release, Scope);
         builder.createBreak(loc);
       }
 
@@ -1288,20 +1286,20 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
       if (!IsLoad && !IsStore) {
         // case acq_rel:
         // memory_order_acq_rel is only valid for read-write operations.
-        buildSingleMemOrderCase(builder, loc, Order.getType(),
-                                cir::MemOrder::AcquireRelease);
-        buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
-                      Size, cir::MemOrder::AcquireRelease, Scope);
+        emitSingleMemOrderCase(builder, loc, Order.getType(),
+                               cir::MemOrder::AcquireRelease);
+        emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                     cir::MemOrder::AcquireRelease, Scope);
         builder.createBreak(loc);
       }
 
       builder.setInsertionPointToEnd(switchBlock);
 
       // case seq_cst:
-      buildSingleMemOrderCase(builder, loc, Order.getType(),
-                              cir::MemOrder::SequentiallyConsistent);
-      buildAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                    cir::MemOrder::SequentiallyConsistent, Scope);
+      emitSingleMemOrderCase(builder, loc, Order.getType(),
+                             cir::MemOrder::SequentiallyConsistent);
+      emitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+                   cir::MemOrder::SequentiallyConsistent, Scope);
       builder.createBreak(loc);
 
       builder.setInsertionPointToEnd(switchBlock);
@@ -1314,8 +1312,8 @@ RValue CIRGenFunction::buildAtomicExpr(AtomicExpr *E) {
                                 RValTy, E->getExprLoc());
 }
 
-void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue,
-                                      bool isInit) {
+void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue lvalue,
+                                     bool isInit) {
   bool IsVolatile = lvalue.isVolatileQualified();
   cir::MemOrder MO;
   if (lvalue.getType()->isAtomicType()) {
@@ -1324,7 +1322,7 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue lvalue,
     MO = cir::MemOrder::Release;
     IsVolatile = true;
   }
-  return buildAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit);
+  return emitAtomicStore(rvalue, lvalue, MO, IsVolatile, isInit);
 }
 
 /// Return true if \param ValTy is a type that should be casted to integer
@@ -1390,7 +1388,7 @@ void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
 
   // Okay, store the rvalue in.
   if (rvalue.isScalar()) {
-    CGF.buildStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
+    CGF.emitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
   } else {
     llvm_unreachable("NYI");
   }
@@ -1401,7 +1399,7 @@ mlir::Value AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
   // through memory. Floats get casted if needed by AtomicExpandPass.
   if (auto Value = getScalarRValValueOrNull(RVal)) {
     if (!shouldCastToInt(Value.getType(), CmpXchg)) {
-      return CGF.buildToMemory(Value, ValueTy);
+      return CGF.emitToMemory(Value, ValueTy);
     } else {
       llvm_unreachable("NYI");
     }
@@ -1415,9 +1413,9 @@ mlir::Value AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
 /// Note that the r-value is expected to be an r-value *of the atomic
 /// type*; this means that for aggregate r-values, it should include
 /// storage for any padding that was necessary.
-void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest,
-                                      cir::MemOrder MO, bool IsVolatile,
-                                      bool isInit) {
+void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
+                                     cir::MemOrder MO, bool IsVolatile,
+                                     bool isInit) {
   // If this is an aggregate r-value, it should agree in type except
   // maybe for address-space qualification.
   auto loc = dest.getPointer().getLoc();
@@ -1472,12 +1470,12 @@ void CIRGenFunction::buildAtomicStore(RValue rvalue, LValue dest,
     llvm_unreachable("NYI");
 }
 
-void CIRGenFunction::buildAtomicInit(Expr *init, LValue dest) {
+void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
   AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
 
   switch (atomics.getEvaluationKind()) {
   case cir::TEK_Scalar: {
-    mlir::Value value = buildScalarExpr(init);
+    mlir::Value value = emitScalarExpr(init);
     atomics.emitCopyIntoMemory(RValue::get(value));
     return;
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 57813b6df0c1..6809c3ad5ce3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -39,11 +39,10 @@ using namespace clang::CIRGen;
 using namespace cir;
 using namespace llvm;
 
-static RValue buildLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD,
-                               const CallExpr *E,
-                               mlir::Operation *calleeValue) {
+static RValue emitLibraryCall(CIRGenFunction &CGF, const FunctionDecl *FD,
+                              const CallExpr *E, mlir::Operation *calleeValue) {
   auto callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(FD));
-  return CGF.buildCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
+  return CGF.emitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
 }
 
 static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID,
@@ -58,8 +57,8 @@ static mlir::Value tryUseTestFPKind(CIRGenFunction &CGF, unsigned BuiltinID,
 }
 
 template <typename Op>
-static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) {
-  auto Arg = CGF.buildScalarExpr(E.getArg(0));
+static RValue emitUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) {
+  auto Arg = CGF.emitScalarExpr(E.getArg(0));
 
   CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, &E);
   if (CGF.getBuilder().getIsFPConstrained())
@@ -71,10 +70,10 @@ static RValue buildUnaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) {
 }
 
 template <typename Op>
-static RValue buildUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF,
-                                                       const CallExpr &E) {
+static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF,
+                                                      const CallExpr &E) {
   auto ResultType = CGF.ConvertType(E.getType());
-  auto Src = CGF.buildScalarExpr(E.getArg(0));
+  auto Src = CGF.emitScalarExpr(E.getArg(0));
 
   if (CGF.getBuilder().getIsFPConstrained())
     llvm_unreachable("constraint FP operations are NYI");
@@ -84,9 +83,9 @@ static RValue buildUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &CGF,
 }
 
 template <typename Op>
-static RValue buildBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) {
-  auto Arg0 = CGF.buildScalarExpr(E.getArg(0));
-  auto Arg1 = CGF.buildScalarExpr(E.getArg(1));
+static RValue emitBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) {
+  auto Arg0 = CGF.emitScalarExpr(E.getArg(0));
+  auto Arg1 = CGF.emitScalarExpr(E.getArg(1));
 
   auto Loc = CGF.getLoc(E.getExprLoc());
   auto Ty = CGF.ConvertType(E.getType());
@@ -96,10 +95,10 @@ static RValue buildBinaryFPBuiltin(CIRGenFunction &CGF, const CallExpr &E) {
 }
 
 template <typename Op>
-static mlir::Value buildBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF,
-                                                        const CallExpr &E) {
-  auto Arg0 = CGF.buildScalarExpr(E.getArg(0));
-  auto Arg1 = CGF.buildScalarExpr(E.getArg(1));
+static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF,
+                                                       const CallExpr &E) {
+  auto Arg0 = CGF.emitScalarExpr(E.getArg(0));
+  auto Arg1 = CGF.emitScalarExpr(E.getArg(1));
 
   auto Loc = CGF.getLoc(E.getExprLoc());
   auto Ty = CGF.ConvertType(E.getType());
@@ -115,13 +114,13 @@ static mlir::Value buildBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &CGF,
 
 template <typename Op>
 static RValue
-buildBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E,
-                  std::optional<CIRGenFunction::BuiltinCheckKind> CK) {
+emitBuiltinBitOp(CIRGenFunction &CGF, const CallExpr *E,
+                 std::optional<CIRGenFunction::BuiltinCheckKind> CK) {
   mlir::Value arg;
   if (CK.has_value())
-    arg = CGF.buildCheckedArgForBuiltin(E->getArg(0), *CK);
+    arg = CGF.emitCheckedArgForBuiltin(E->getArg(0), *CK);
   else
-    arg = CGF.buildScalarExpr(E->getArg(0));
+    arg = CGF.emitScalarExpr(E->getArg(0));
 
   auto resultTy = CGF.ConvertType(E->getType());
   auto op =
@@ -195,9 +194,9 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
 
 /// Emit the conversions required to turn the given value into an
 /// integer of the given size.
-static mlir::Value buildToInt(CIRGenFunction &CGF, mlir::Value v, QualType t,
-                              cir::IntType intType) {
-  v = CGF.buildToMemory(v, t);
+static mlir::Value emitToInt(CIRGenFunction &CGF, mlir::Value v, QualType t,
+                             cir::IntType intType) {
+  v = CGF.emitToMemory(v, t);
 
   if (isa<cir::PointerType>(v.getType()))
     return CGF.getBuilder().createPtrToInt(v, intType);
@@ -206,9 +205,9 @@ static mlir::Value buildToInt(CIRGenFunction &CGF, mlir::Value v, QualType t,
   return v;
 }
 
-static mlir::Value buildFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t,
-                                mlir::Type resultType) {
-  v = CGF.buildFromMemory(v, t);
+static mlir::Value emitFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t,
+                               mlir::Type resultType) {
+  v = CGF.emitFromMemory(v, t);
 
   if (isa<cir::PointerType>(resultType))
     return CGF.getBuilder().createIntToPtr(v, resultType);
@@ -219,7 +218,7 @@ static mlir::Value buildFromInt(CIRGenFunction &CGF, mlir::Value v, QualType t,
 
 static Address checkAtomicAlignment(CIRGenFunction &CGF, const CallExpr *E) {
   ASTContext &ctx = CGF.getContext();
-  Address ptr = CGF.buildPointerWithAlignment(E->getArg(0));
+  Address ptr = CGF.emitPointerWithAlignment(E->getArg(0));
   unsigned bytes =
       isa<cir::PointerType>(ptr.getElementType())
           ? ctx.getTypeSizeInChars(ctx.VoidPtrTy).getQuantity()
@@ -254,19 +253,19 @@ static mlir::Value makeBinaryAtomicValue(
           expr->getArg(0)->getType()->getPointeeType()->isUnsignedIntegerType()
              ? builder.getUIntNTy(cgf.getContext().getTypeSize(typ))
              : builder.getSIntNTy(cgf.getContext().getTypeSize(typ));
-  mlir::Value val = cgf.buildScalarExpr(expr->getArg(1));
+  mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
   mlir::Type valueType = val.getType();
-  val = buildToInt(cgf, val, typ, intType);
+  val = emitToInt(cgf, val, typ, intType);
 
   auto rmwi = builder.create<cir::AtomicFetch>(
       cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(), val, kind,
      ordering, false, /* is volatile */
      true);           /* fetch first */
 
-  return buildFromInt(cgf, rmwi->getResult(0), typ, valueType);
+  return emitFromInt(cgf, rmwi->getResult(0), typ, valueType);
 }
 
-static RValue buildBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind,
-                                const CallExpr *E) {
+static RValue emitBinaryAtomic(CIRGenFunction &CGF, cir::AtomicFetchKind kind,
+                               const CallExpr *E) {
   return RValue::get(makeBinaryAtomicValue(CGF, kind, E));
 }
 
@@ -281,10 +280,10 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf,
           expr->getArg(0)->getType()->getPointeeType()->isUnsignedIntegerType()
              ? builder.getUIntNTy(cgf.getContext().getTypeSize(typ))
              : builder.getSIntNTy(cgf.getContext().getTypeSize(typ));
-  auto cmpVal = cgf.buildScalarExpr(expr->getArg(1));
-  cmpVal = buildToInt(cgf, cmpVal, typ, intType);
+  auto cmpVal = cgf.emitScalarExpr(expr->getArg(1));
+  cmpVal = emitToInt(cgf, cmpVal, typ, intType);
   auto newVal =
-      buildToInt(cgf, cgf.buildScalarExpr(expr->getArg(2)), typ, intType);
+      emitToInt(cgf, cgf.emitScalarExpr(expr->getArg(2)), typ, intType);
 
   auto op = builder.create<cir::AtomicCmpXchg>(
       cgf.getLoc(expr->getSourceRange()), cmpVal.getType(), builder.getBoolTy(),
@@ -295,9 +294,9 @@ static mlir::Value MakeAtomicCmpXchgValue(CIRGenFunction &cgf,
   return returnBool ? op.getResult(1) : op.getResult(0);
 }
 
-RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) {
-  auto src = buildScalarExpr(E->getArg(0));
-  auto shiftAmt = buildScalarExpr(E->getArg(1));
+RValue CIRGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
+  auto src = emitScalarExpr(E->getArg(0));
+  auto shiftAmt = emitScalarExpr(E->getArg(1));
 
   // The builtin's shift arg may have a different type than the source arg and
   // result, but the CIR ops uses the same type for all values.
@@ -310,9 +309,9 @@ RValue CIRGenFunction::buildRotate(const CallExpr *E, bool IsRotateRight) {
   return RValue::get(r);
 }
 
-RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
-                                        const CallExpr *E,
-                                        ReturnValueSlot ReturnValue) {
+RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
+                                       const CallExpr *E,
+                                       ReturnValueSlot ReturnValue) {
   const FunctionDecl *FD = GD.getDecl()->getAsFunction();
 
   // See if we can constant fold this builtin. If so, don't emit it at all.
@@ -484,7 +483,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_ceilf16:
   case Builtin::BI__builtin_ceill:
   case Builtin::BI__builtin_ceilf128:
-    return buildUnaryFPBuiltin<cir::CeilOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::CeilOp>(*this, *E);
 
   case Builtin::BIcopysign:
   case Builtin::BIcopysignf:
   case Builtin::BIcopysignl:
   case Builtin::BI__builtin_copysign:
   case Builtin::BI__builtin_copysignf:
   case Builtin::BI__builtin_copysignl:
-    return buildBinaryFPBuiltin<cir::CopysignOp>(*this, *E);
+    return emitBinaryFPBuiltin<cir::CopysignOp>(*this, *E);
 
   case Builtin::BI__builtin_copysignf16:
   case Builtin::BI__builtin_copysignf128:
@@ -507,7 +506,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_cosl:
   case Builtin::BI__builtin_cosf128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::CosOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::CosOp>(*this, *E);
 
   case Builtin::BIcosh:
   case Builtin::BIcoshf:
@@ -528,7 +527,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_expl:
   case Builtin::BI__builtin_expf128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::ExpOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::ExpOp>(*this, *E);
 
   case Builtin::BIexp2:
   case Builtin::BIexp2f:
@@ -539,7 +538,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_exp2l:
   case Builtin::BI__builtin_exp2f128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::Exp2Op>(*this, *E);
+    return emitUnaryFPBuiltin<cir::Exp2Op>(*this, *E);
 
   case Builtin::BI__builtin_exp10:
   case Builtin::BI__builtin_exp10f:
@@ -556,7 +555,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fabsf16:
   case Builtin::BI__builtin_fabsl:
   case Builtin::BI__builtin_fabsf128:
-    return buildUnaryFPBuiltin<cir::FAbsOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::FAbsOp>(*this, *E);
 
   case Builtin::BIfloor:
   case Builtin::BIfloorf:
@@ -566,7 +565,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_floorf16:
   case Builtin::BI__builtin_floorl:
   case Builtin::BI__builtin_floorf128:
-    return buildUnaryFPBuiltin<cir::FloorOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::FloorOp>(*this, *E);
 
   case Builtin::BIfma:
   case Builtin::BIfmaf:
@@ -585,7 +584,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fmaxf:
   case Builtin::BI__builtin_fmaxl:
     return RValue::get(
-        buildBinaryMaybeConstrainedFPBuiltin<cir::FMaxOp>(*this, *E));
+        emitBinaryMaybeConstrainedFPBuiltin<cir::FMaxOp>(*this, *E));
 
   case Builtin::BI__builtin_fmaxf16:
   case Builtin::BI__builtin_fmaxf128:
@@ -598,7 +597,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fminf:
   case Builtin::BI__builtin_fminl:
     return RValue::get(
-        buildBinaryMaybeConstrainedFPBuiltin<cir::FMinOp>(*this, *E));
+        emitBinaryMaybeConstrainedFPBuiltin<cir::FMinOp>(*this, *E));
 
   case Builtin::BI__builtin_fminf16:
   case Builtin::BI__builtin_fminf128:
@@ -613,7 +612,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_fmodf:
   case Builtin::BI__builtin_fmodl:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildBinaryFPBuiltin<cir::FModOp>(*this, *E);
+    return emitBinaryFPBuiltin<cir::FModOp>(*this, *E);
 
   case Builtin::BI__builtin_fmodf16:
   case Builtin::BI__builtin_fmodf128:
@@ -629,7 +628,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_logl:
   case Builtin::BI__builtin_logf128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::LogOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::LogOp>(*this, *E);
 
   case Builtin::BIlog10:
   case Builtin::BIlog10f:
@@ -640,7 +639,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_log10l:
   case Builtin::BI__builtin_log10f128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::Log10Op>(*this, *E);
+    return emitUnaryFPBuiltin<cir::Log10Op>(*this, *E);
 
   case Builtin::BIlog2:
   case Builtin::BIlog2f:
@@ -651,7 +650,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_log2l:
   case Builtin::BI__builtin_log2f128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::Log2Op>(*this, *E);
+    return emitUnaryFPBuiltin<cir::Log2Op>(*this, *E);
 
   case Builtin::BInearbyint:
   case Builtin::BInearbyintf:
@@ -660,7 +659,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_nearbyintf:
   case Builtin::BI__builtin_nearbyintl:
   case Builtin::BI__builtin_nearbyintf128:
-    return buildUnaryFPBuiltin<cir::NearbyintOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::NearbyintOp>(*this, *E);
 
   case Builtin::BIpow:
   case Builtin::BIpowf:
@@ -670,7 +669,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_powl:
     assert(!cir::MissingFeatures::fastMathFlags());
     return RValue::get(
-        buildBinaryMaybeConstrainedFPBuiltin<cir::PowOp>(*this, *E));
+        emitBinaryMaybeConstrainedFPBuiltin<cir::PowOp>(*this, *E));
 
   case Builtin::BI__builtin_powf16:
   case Builtin::BI__builtin_powf128:
@@ -684,7 +683,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_rintf16:
   case Builtin::BI__builtin_rintl:
   case Builtin::BI__builtin_rintf128:
-    return buildUnaryFPBuiltin<cir::RintOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::RintOp>(*this, *E);
 
   case Builtin::BIround:
   case Builtin::BIroundf:
@@ -694,7 +693,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_roundf16:
   case Builtin::BI__builtin_roundl:
   case Builtin::BI__builtin_roundf128:
-    return buildUnaryFPBuiltin<cir::RoundOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::RoundOp>(*this, *E);
 
   case Builtin::BIroundeven:
   case Builtin::BIroundevenf:
@@ -715,7 +714,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_sinl:
   case Builtin::BI__builtin_sinf128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::SinOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::SinOp>(*this, *E);
 
   case Builtin::BIsqrt:
   case Builtin::BIsqrtf:
@@ -726,7 +725,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_sqrtl:
   case Builtin::BI__builtin_sqrtf128:
     assert(!cir::MissingFeatures::fastMathFlags());
-    return buildUnaryFPBuiltin<cir::SqrtOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::SqrtOp>(*this, *E);
 
   case Builtin::BI__builtin_elementwise_sqrt:
     llvm_unreachable("BI__builtin_elementwise_sqrt NYI");
@@ -759,7 +758,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_truncf16:
   case Builtin::BI__builtin_truncl:
   case Builtin::BI__builtin_truncf128:
-    return buildUnaryFPBuiltin<cir::TruncOp>(*this, *E);
+    return emitUnaryFPBuiltin<cir::TruncOp>(*this, *E);
 
   case Builtin::BIlround:
   case Builtin::BIlroundf:
   case Builtin::BIlroundl:
@@ -767,7 +766,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_lround:
   case Builtin::BI__builtin_lroundf:
   case Builtin::BI__builtin_lroundl:
-    return buildUnaryMaybeConstrainedFPToIntBuiltin<cir::LroundOp>(*this, *E);
+    return emitUnaryMaybeConstrainedFPToIntBuiltin<cir::LroundOp>(*this, *E);
 
   case Builtin::BI__builtin_lroundf128:
     llvm_unreachable("BI__builtin_lroundf128 NYI");
@@ -778,8 +777,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_llround:
   case Builtin::BI__builtin_llroundf:
   case Builtin::BI__builtin_llroundl:
-    return buildUnaryMaybeConstrainedFPToIntBuiltin<cir::LLroundOp>(*this,
-                                                                    *E);
+    return emitUnaryMaybeConstrainedFPToIntBuiltin<cir::LLroundOp>(*this, *E);
 
   case Builtin::BI__builtin_llroundf128:
     llvm_unreachable("BI__builtin_llroundf128 NYI");
@@ -790,7 +788,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_lrint:
   case Builtin::BI__builtin_lrintf:
   case Builtin::BI__builtin_lrintl:
-    return buildUnaryMaybeConstrainedFPToIntBuiltin<cir::LrintOp>(*this, *E);
+    return emitUnaryMaybeConstrainedFPToIntBuiltin<cir::LrintOp>(*this, *E);
 
   case Builtin::BI__builtin_lrintf128:
     llvm_unreachable("BI__builtin_lrintf128 NYI");
@@ -801,7 +799,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_llrint:
   case Builtin::BI__builtin_llrintf:
   case Builtin::BI__builtin_llrintl:
-    return buildUnaryMaybeConstrainedFPToIntBuiltin<cir::LLrintOp>(*this, *E);
+    return emitUnaryMaybeConstrainedFPToIntBuiltin<cir::LLrintOp>(*this, *E);
 
   case Builtin::BI__builtin_llrintf128:
     llvm_unreachable("BI__builtin_llrintf128 NYI");
@@ -831,15 +829,15 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_va_start:
   case Builtin::BI__va_start:
   case Builtin::BI__builtin_va_end: {
-    buildVAStartEnd(BuiltinID == Builtin::BI__va_start
-                        ? buildScalarExpr(E->getArg(0))
-                        : buildVAListRef(E->getArg(0)).getPointer(),
-                    BuiltinID != Builtin::BI__builtin_va_end);
+    emitVAStartEnd(BuiltinID == Builtin::BI__va_start
+                       ? emitScalarExpr(E->getArg(0))
+                       : emitVAListRef(E->getArg(0)).getPointer(),
+                   BuiltinID != Builtin::BI__builtin_va_end);
     return {};
   }
   case Builtin::BI__builtin_va_copy: {
-    auto dstPtr = buildVAListRef(E->getArg(0)).getPointer();
-    auto srcPtr = buildVAListRef(E->getArg(1)).getPointer();
+    auto dstPtr = emitVAListRef(E->getArg(0)).getPointer();
+    auto srcPtr = emitVAListRef(E->getArg(1)).getPointer();
     builder.create<cir::VACopyOp>(dstPtr.getLoc(), dstPtr, srcPtr);
     return {};
   }
@@ -851,7 +849,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_labs:
   case Builtin::BI__builtin_llabs: {
     bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
-    auto Arg = buildScalarExpr(E->getArg(0));
+    auto Arg = emitScalarExpr(E->getArg(0));
     mlir::Value Result;
     switch (getLangOpts().getSignedOverflowBehavior()) {
     case LangOptions::SOB_Defined: {
@@ -877,8 +875,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_complex: {
-    mlir::Value Real = buildScalarExpr(E->getArg(0));
-    mlir::Value Imag = buildScalarExpr(E->getArg(1));
+    mlir::Value Real = emitScalarExpr(E->getArg(0));
+    mlir::Value Imag = emitScalarExpr(E->getArg(1));
     mlir::Value Complex =
         builder.createComplexCreate(getLoc(E->getExprLoc()), Real, Imag);
     return RValue::getComplex(Complex);
@@ -890,7 +888,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BIconj:
   case Builtin::BIconjf:
   case Builtin::BIconjl: {
-    mlir::Value ComplexVal = buildComplexExpr(E->getArg(0));
+    mlir::Value ComplexVal = emitComplexExpr(E->getArg(0));
     mlir::Value Conj = builder.createUnaryOp(getLoc(E->getExprLoc()),
                                              cir::UnaryOpKind::Not, ComplexVal);
     return RValue::getComplex(Conj);
@@ -902,7 +900,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BIcreal:
   case Builtin::BIcrealf:
   case Builtin::BIcreall: {
-    mlir::Value ComplexVal = buildComplexExpr(E->getArg(0));
+    mlir::Value ComplexVal = emitComplexExpr(E->getArg(0));
     mlir::Value Real =
         builder.createComplexReal(getLoc(E->getExprLoc()), ComplexVal);
     return RValue::get(Real);
@@ -917,7 +915,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BIcimag:
   case Builtin::BIcimagf:
   case Builtin::BIcimagl: {
-    mlir::Value ComplexVal = buildComplexExpr(E->getArg(0));
+    mlir::Value ComplexVal = emitComplexExpr(E->getArg(0));
     mlir::Value Real =
         builder.createComplexImag(getLoc(E->getExprLoc()), ComplexVal);
     return RValue::get(Real);
@@ -926,31 +924,31 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_clrsb:
   case Builtin::BI__builtin_clrsbl:
   case Builtin::BI__builtin_clrsbll:
-    return buildBuiltinBitOp<cir::BitClrsbOp>(*this, E, std::nullopt);
+    return emitBuiltinBitOp<cir::BitClrsbOp>(*this, E, std::nullopt);
 
   case Builtin::BI__builtin_ctzs:
   case Builtin::BI__builtin_ctz:
   case Builtin::BI__builtin_ctzl:
   case Builtin::BI__builtin_ctzll:
   case Builtin::BI__builtin_ctzg:
-    return buildBuiltinBitOp<cir::BitCtzOp>(*this, E, BCK_CTZPassedZero);
+    return emitBuiltinBitOp<cir::BitCtzOp>(*this, E, BCK_CTZPassedZero);
 
   case Builtin::BI__builtin_clzs:
   case Builtin::BI__builtin_clz:
   case Builtin::BI__builtin_clzl:
   case Builtin::BI__builtin_clzll:
   case Builtin::BI__builtin_clzg:
-    return buildBuiltinBitOp<cir::BitClzOp>(*this, E, BCK_CLZPassedZero);
+    return emitBuiltinBitOp<cir::BitClzOp>(*this, E, BCK_CLZPassedZero);
 
   case Builtin::BI__builtin_ffs:
   case Builtin::BI__builtin_ffsl:
   case Builtin::BI__builtin_ffsll:
-    return buildBuiltinBitOp<cir::BitFfsOp>(*this, E, std::nullopt);
+    return emitBuiltinBitOp<cir::BitFfsOp>(*this, E, std::nullopt);
 
   case Builtin::BI__builtin_parity:
   case Builtin::BI__builtin_parityl:
   case Builtin::BI__builtin_parityll:
-    return buildBuiltinBitOp<cir::BitParityOp>(*this, E, std::nullopt);
+    return emitBuiltinBitOp<cir::BitParityOp>(*this, E, std::nullopt);
 
   case Builtin::BI__lzcnt16:
   case Builtin::BI__lzcnt:
@@ -964,18 +962,18 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_popcountl:
   case Builtin::BI__builtin_popcountll:
   case Builtin::BI__builtin_popcountg:
-    return buildBuiltinBitOp<cir::BitPopcountOp>(*this, E, std::nullopt);
+    return emitBuiltinBitOp<cir::BitPopcountOp>(*this, E, std::nullopt);
 
   case Builtin::BI__builtin_unpredictable: {
     if (CGM.getCodeGenOpts().OptimizationLevel != 0)
       assert(!cir::MissingFeatures::insertBuiltinUnpredictable());
-    return RValue::get(buildScalarExpr(E->getArg(0)));
+    return RValue::get(emitScalarExpr(E->getArg(0)));
   }
 
   case Builtin::BI__builtin_expect:
   case Builtin::BI__builtin_expect_with_probability: {
-    auto ArgValue = buildScalarExpr(E->getArg(0));
-    auto ExpectedValue = buildScalarExpr(E->getArg(1));
+    auto ArgValue = emitScalarExpr(E->getArg(0));
+    auto ExpectedValue = emitScalarExpr(E->getArg(1));
 
     // Don't generate cir.expect on -O0 as the backend won't use it for
     // anything. Note, we still IRGen ExpectedValue because it could have
@@ -1007,17 +1005,17 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
 
   case Builtin::BI__builtin_assume_aligned: {
     const Expr *ptr = E->getArg(0);
-    mlir::Value ptrValue = buildScalarExpr(ptr);
+    mlir::Value ptrValue = emitScalarExpr(ptr);
     mlir::Value offsetValue =
-        (E->getNumArgs() > 2) ? buildScalarExpr(E->getArg(2)) : nullptr;
+        (E->getNumArgs() > 2) ? emitScalarExpr(E->getArg(2)) : nullptr;
 
     mlir::Attribute alignmentAttr = ConstantEmitter(*this).emitAbstract(
         E->getArg(1), E->getArg(1)->getType());
     std::int64_t alignment = cast<cir::IntAttr>(alignmentAttr).getSInt();
 
-    ptrValue = buildAlignmentAssumption(ptrValue, ptr, ptr->getExprLoc(),
-                                        builder.getI64IntegerAttr(alignment),
-                                        offsetValue);
+    ptrValue = emitAlignmentAssumption(ptrValue, ptr, ptr->getExprLoc(),
+                                       builder.getI64IntegerAttr(alignment),
+                                       offsetValue);
     return RValue::get(ptrValue);
   }
 
@@ -1026,7 +1024,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     if (E->getArg(0)->HasSideEffects(getContext()))
       return RValue::get(nullptr);
 
-    mlir::Value argValue = buildScalarExpr(E->getArg(0));
+    mlir::Value argValue = emitScalarExpr(E->getArg(0));
     builder.create<cir::AssumeOp>(getLoc(E->getExprLoc()), argValue);
     return RValue::get(nullptr);
   }
@@ -1035,8 +1033,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     const Expr *arg0 = E->getArg(0);
     const Expr *arg1 = E->getArg(1);
 
-    mlir::Value value0 = buildScalarExpr(arg0);
-    mlir::Value value1 = buildScalarExpr(arg1);
+    mlir::Value value0 = emitScalarExpr(arg0);
+    mlir::Value value1 = emitScalarExpr(arg1);
 
     builder.create<cir::AssumeSepStorageOp>(getLoc(E->getExprLoc()), value0,
                                             value1);
@@ -1055,7 +1053,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI_byteswap_ushort:
   case Builtin::BI_byteswap_ulong:
   case Builtin::BI_byteswap_uint64: {
-    auto arg = buildScalarExpr(E->getArg(0));
+    auto arg = emitScalarExpr(E->getArg(0));
     return RValue::get(
         builder.create<cir::ByteswapOp>(getLoc(E->getSourceRange()), arg));
   }
@@ -1075,7 +1073,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI_rotl:
   case Builtin::BI_lrotl:
   case Builtin::BI_rotl64:
-    return buildRotate(E, false);
+    return emitRotate(E, false);
 
   case Builtin::BI__builtin_rotateright8:
   case Builtin::BI__builtin_rotateright16:
@@ -1086,7 +1084,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI_rotr:
   case Builtin::BI_lrotr:
   case Builtin::BI_rotr64:
-    return buildRotate(E, true);
+    return emitRotate(E, true);
 
   case Builtin::BI__builtin_constant_p: {
     mlir::Type ResultType = ConvertType(E->getType());
@@ -1110,7 +1108,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
           builder.getConstInt(getLoc(E->getSourceRange()),
                               mlir::cast<cir::IntType>(ResultType), 0));
 
-    mlir::Value ArgValue = buildScalarExpr(Arg);
+    mlir::Value ArgValue = emitScalarExpr(Arg);
     if (ArgType->isObjCObjectPointerType())
       // Convert Objective-C objects to id because we cannot distinguish between
      // LLVM types for Obj-C classes as they are opaque.
@@ -1158,7 +1156,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
     if (E->getNumArgs() > 2)
       Locality = evaluateOperandAsInt(E->getArg(2));
 
-    mlir::Value Address = buildScalarExpr(E->getArg(0));
+    mlir::Value Address = emitScalarExpr(E->getArg(0));
     builder.create<cir::PrefetchOp>(getLoc(E->getSourceRange()), Address,
                                     Locality, IsWrite);
     return RValue::get(nullptr);
@@ -1171,9 +1169,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin___clear_cache: {
     mlir::Type voidTy = cir::VoidType::get(&getMLIRContext());
     mlir::Value begin =
-        builder.createPtrBitcast(buildScalarExpr(E->getArg(0)), voidTy);
+        builder.createPtrBitcast(emitScalarExpr(E->getArg(0)), voidTy);
     mlir::Value end =
-        builder.createPtrBitcast(buildScalarExpr(E->getArg(1)), voidTy);
+        builder.createPtrBitcast(emitScalarExpr(E->getArg(1)), voidTy);
     builder.create<cir::ClearCacheOp>(getLoc(E->getSourceRange()), begin, end);
     return RValue::get(nullptr);
   }
@@ -1191,7 +1189,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__debugbreak:
     llvm_unreachable("BI__debugbreak NYI");
   case Builtin::BI__builtin_unreachable: {
-    buildUnreachable(E->getExprLoc());
+    emitUnreachable(E->getExprLoc());
 
     // We do need to preserve an insertion point.
     builder.createBlock(builder.getBlock()->getParent());
@@ -1339,7 +1337,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_alloca_uninitialized:
   case Builtin::BI__builtin_alloca: {
     // Get alloca size input
-    mlir::Value Size = buildScalarExpr(E->getArg(0));
+    mlir::Value Size = emitScalarExpr(E->getArg(0));
 
     // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
     const TargetInfo &TI = getContext().getTargetInfo();
@@ -1394,14 +1392,13 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_memcpy:
   case Builtin::BImempcpy:
   case Builtin::BI__builtin_mempcpy: {
-    Address Dest = buildPointerWithAlignment(E->getArg(0));
-    Address Src = buildPointerWithAlignment(E->getArg(1));
-    mlir::Value SizeVal = buildScalarExpr(E->getArg(2));
-    buildNonNullArgCheck(RValue::get(Dest.getPointer()),
-                         E->getArg(0)->getType(), E->getArg(0)->getExprLoc(),
-                         FD, 0);
-    buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
-                         E->getArg(1)->getExprLoc(), FD, 1);
+    Address Dest = emitPointerWithAlignment(E->getArg(0));
+    Address Src = emitPointerWithAlignment(E->getArg(1));
+    mlir::Value SizeVal = emitScalarExpr(E->getArg(2));
+    emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+                        E->getArg(0)->getExprLoc(), FD, 0);
+    emitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
+                        E->getArg(1)->getExprLoc(), FD, 1);
     builder.createMemCpy(getLoc(E->getSourceRange()), Dest.getPointer(),
                          Src.getPointer(), SizeVal);
     if (BuiltinID == Builtin::BImempcpy ||
@@ -1416,11 +1413,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
 
   case Builtin::BI__builtin_char_memchr:
   case Builtin::BI__builtin_memchr: {
-    Address srcPtr = buildPointerWithAlignment(E->getArg(0));
+    Address srcPtr = emitPointerWithAlignment(E->getArg(0));
     mlir::Value src =
         builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy());
-    mlir::Value pattern = buildScalarExpr(E->getArg(1));
-    mlir::Value len = buildScalarExpr(E->getArg(2));
+    mlir::Value pattern = emitScalarExpr(E->getArg(1));
+    mlir::Value len = emitScalarExpr(E->getArg(2));
mlir::Value res = builder.create(getLoc(E->getExprLoc()), src, pattern, len); return RValue::get(res); @@ -1436,8 +1433,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::APSInt dstSize = dstSizeResult.Val.getInt(); if (size.ugt(dstSize)) break; - Address dest = buildPointerWithAlignment(E->getArg(0)); - Address src = buildPointerWithAlignment(E->getArg(1)); + Address dest = emitPointerWithAlignment(E->getArg(0)); + Address src = emitPointerWithAlignment(E->getArg(1)); auto loc = getLoc(E->getSourceRange()); ConstantOp sizeOp = builder.getConstInt(loc, size); builder.createMemCpy(loc, dest.getPointer(), src.getPointer(), sizeOp); @@ -1452,26 +1449,24 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BImemmove: case Builtin::BI__builtin_memmove: { - Address Dest = buildPointerWithAlignment(E->getArg(0)); - Address Src = buildPointerWithAlignment(E->getArg(1)); - mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); - buildNonNullArgCheck(RValue::get(Dest.getPointer()), - E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), - FD, 0); - buildNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), - E->getArg(1)->getExprLoc(), FD, 1); + Address Dest = emitPointerWithAlignment(E->getArg(0)); + Address Src = emitPointerWithAlignment(E->getArg(1)); + mlir::Value SizeVal = emitScalarExpr(E->getArg(2)); + emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); + emitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(), + E->getArg(1)->getExprLoc(), FD, 1); builder.createMemMove(getLoc(E->getSourceRange()), Dest.getPointer(), Src.getPointer(), SizeVal); return RValue::get(Dest.getPointer()); } case Builtin::BImemset: case Builtin::BI__builtin_memset: { - Address Dest = buildPointerWithAlignment(E->getArg(0)); - mlir::Value ByteVal = buildScalarExpr(E->getArg(1)); - mlir::Value SizeVal = buildScalarExpr(E->getArg(2)); - buildNonNullArgCheck(RValue::get(Dest.getPointer()), - E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), - FD, 0); + Address Dest = emitPointerWithAlignment(E->getArg(0)); + mlir::Value ByteVal = emitScalarExpr(E->getArg(1)); + mlir::Value SizeVal = emitScalarExpr(E->getArg(2)); + emitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), + E->getArg(0)->getExprLoc(), FD, 0); builder.createMemSet(getLoc(E->getSourceRange()), Dest.getPointer(), ByteVal, SizeVal); return RValue::get(Dest.getPointer()); @@ -1489,8 +1484,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm::APSInt dstSize = dstSizeResult.Val.getInt(); if (size.ugt(dstSize)) break; - Address dest = buildPointerWithAlignment(E->getArg(0)); - mlir::Value byteVal = buildScalarExpr(E->getArg(1)); + Address dest = emitPointerWithAlignment(E->getArg(0)); + mlir::Value byteVal = emitScalarExpr(E->getArg(1)); auto loc = getLoc(E->getSourceRange()); ConstantOp sizeOp = builder.getConstInt(loc, size); builder.createMemSet(loc, dest.getPointer(), byteVal, sizeOp); @@ -1559,7 +1554,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_fetch_and_add_4: case Builtin::BI__sync_fetch_and_add_8: case Builtin::BI__sync_fetch_and_add_16: { - return buildBinaryAtomic(*this, cir::AtomicFetchKind::Add, E); + return emitBinaryAtomic(*this, cir::AtomicFetchKind::Add, E); } case Builtin::BI__sync_fetch_and_sub_1: @@ -1567,7 +1562,7 @@ RValue 
CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__sync_fetch_and_sub_4: case Builtin::BI__sync_fetch_and_sub_8: case Builtin::BI__sync_fetch_and_sub_16: { - return buildBinaryAtomic(*this, cir::AtomicFetchKind::Sub, E); + return emitBinaryAtomic(*this, cir::AtomicFetchKind::Sub, E); } case Builtin::BI__sync_fetch_and_or_1: @@ -1754,9 +1749,9 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, auto ResultCIRTy = mlir::cast(CGM.getTypes().ConvertType(ResultQTy)); - mlir::Value Left = buildScalarExpr(LeftArg); - mlir::Value Right = buildScalarExpr(RightArg); - Address ResultPtr = buildPointerWithAlignment(ResultArg); + mlir::Value Left = emitScalarExpr(LeftArg); + mlir::Value Right = emitScalarExpr(RightArg); + Address ResultPtr = emitPointerWithAlignment(ResultArg); // Extend each operand to the encompassing type, if necessary. if (Left.getType() != EncompassingCIRTy) @@ -1798,7 +1793,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // Finally, store the result using the pointer. bool isVolatile = ResultArg->getType()->getPointeeType().isVolatileQualified(); - builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy), ResultPtr, isVolatile); return RValue::get(ArithResult.overflow); @@ -1823,11 +1818,11 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_smull_overflow: case Builtin::BI__builtin_smulll_overflow: { // Scalarize our inputs. - mlir::Value X = buildScalarExpr(E->getArg(0)); - mlir::Value Y = buildScalarExpr(E->getArg(1)); + mlir::Value X = emitScalarExpr(E->getArg(0)); + mlir::Value Y = emitScalarExpr(E->getArg(1)); const clang::Expr *ResultArg = E->getArg(2); - Address ResultPtr = buildPointerWithAlignment(ResultArg); + Address ResultPtr = emitPointerWithAlignment(ResultArg); // Decide which arithmetic operation we are lowering to: cir::BinOpOverflowKind ArithKind; @@ -1871,7 +1866,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, bool isVolatile = ResultArg->getType()->getPointeeType().isVolatileQualified(); - builder.createStore(Loc, buildToMemory(ArithResult.result, ResultQTy), + builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy), ResultPtr, isVolatile); return RValue::get(ArithResult.overflow); @@ -1880,14 +1875,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BIaddressof: case Builtin::BI__addressof: case Builtin::BI__builtin_addressof: - return RValue::get(buildLValue(E->getArg(0)).getPointer()); + return RValue::get(emitLValue(E->getArg(0)).getPointer()); case Builtin::BI__builtin_function_start: llvm_unreachable("BI__builtin_function_start NYI"); case Builtin::BI__builtin_operator_new: - return buildBuiltinNewDeleteCall( + return emitBuiltinNewDeleteCall( E->getCallee()->getType()->castAs(), E, false); case Builtin::BI__builtin_operator_delete: - buildBuiltinNewDeleteCall( + emitBuiltinNewDeleteCall( E->getCallee()->getType()->castAs(), E, true); return RValue::get(nullptr); case Builtin::BI__builtin_is_aligned: @@ -2007,7 +2002,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BImove_if_noexcept: case Builtin::BIforward: case Builtin::BIas_const: - return RValue::get(buildLValue(E->getArg(0)).getPointer()); + return
RValue::get(emitLValue(E->getArg(0)).getPointer()); case Builtin::BIforward_like: llvm_unreachable("BIforward_like NYI"); case Builtin::BI__GetExceptionInfo: @@ -2030,7 +2025,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, llvm_unreachable("BI__builtin_coro_id like NYI"); case Builtin::BI__builtin_coro_frame: { - return buildCoroutineFrame(); + return emitCoroutineFrame(); } case Builtin::BI__builtin_coro_free: case Builtin::BI__builtin_coro_size: { @@ -2042,8 +2037,8 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, CGM.GetOrCreateCIRFunction(ND->getName(), ty, gd, /*ForVTable=*/false, /*DontDefer=*/false); fnOp.setBuiltinAttr(mlir::UnitAttr::get(&getMLIRContext())); - return buildCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), - E, ReturnValue); + return emitCall(E->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), E, + ReturnValue); } case Builtin::BIread_pipe: @@ -2151,7 +2146,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // any of data classes, specified by the second argument. case Builtin::BI__builtin_isnan: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) return RValue::get(Result); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2163,7 +2158,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_issignaling: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2173,7 +2168,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_isinf: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) return RValue::get(Result); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2191,7 +2186,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__finitel: case Builtin::BI__builtin_isfinite: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); if (mlir::Value Result = tryUseTestFPKind(*this, BuiltinID, V)) return RValue::get(Result); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2203,7 +2198,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_isnormal: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. 
return RValue::get(builder.createZExtOrBitCast( @@ -2213,7 +2208,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_issubnormal: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2223,7 +2218,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, case Builtin::BI__builtin_iszero: { CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); mlir::Location Loc = getLoc(E->getBeginLoc()); // FIXME: We should use builder.createZExt once createZExt is available. return RValue::get(builder.createZExtOrBitCast( @@ -2237,7 +2232,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, break; CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, E); - mlir::Value V = buildScalarExpr(E->getArg(0)); + mlir::Value V = emitScalarExpr(E->getArg(0)); uint64_t Test = Result.Val.getInt().getLimitedValue(); mlir::Location Loc = getLoc(E->getBeginLoc()); @@ -2251,14 +2246,14 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, // the call using the normal call path, but using the unmangled // version of the function name. if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) - return buildLibraryCall(*this, FD, E, - CGM.getBuiltinLibFunction(FD, BuiltinID)); + return emitLibraryCall(*this, FD, E, + CGM.getBuiltinLibFunction(FD, BuiltinID)); // If this is a predefined lib function (e.g. malloc), emit the call // using exactly the normal call path. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) - return buildLibraryCall(*this, FD, E, - buildScalarExpr(E->getCallee()).getDefiningOp()); + return emitLibraryCall(*this, FD, E, + emitScalarExpr(E->getCallee()).getDefiningOp()); // Check that a call to a target specific builtin has the correct target // features. @@ -2299,7 +2294,7 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, } // Now see if we can emit a target-specific builtin. 
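Aside (not part of the patch): before reaching the target-specific dispatch below, a builtin spelling of a libc function rides the emitLibraryCall fallback above; a minimal repro, assuming -fclangir.

#include <cstddef>

std::size_t length(const char *s) {
  // strlen is a library builtin: when the call cannot be folded, the code
  // above emits it through emitLibraryCall using the unmangled function.
  return __builtin_strlen(s);
}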
- if (auto V = buildTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { + if (auto V = emitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) { switch (EvalKind) { case cir::TEK_Scalar: if (mlir::isa(V.getType())) @@ -2319,12 +2314,12 @@ RValue CIRGenFunction::buildBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return GetUndefRValue(E->getType()); } -mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, - BuiltinCheckKind Kind) { +mlir::Value CIRGenFunction::emitCheckedArgForBuiltin(const Expr *E, + BuiltinCheckKind Kind) { assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) && "Unsupported builtin check kind"); - auto value = buildScalarExpr(E); + auto value = emitScalarExpr(E); if (!SanOpts.has(SanitizerKind::Builtin)) return value; @@ -2332,11 +2327,11 @@ mlir::Value CIRGenFunction::buildCheckedArgForBuiltin(const Expr *E, llvm_unreachable("NYI"); } -static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, - unsigned BuiltinID, - const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch) { +static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *CGF, + unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { // When compiling in HipStdPar mode we have to be conservative in rejecting // target specific features in the FE, and defer the possible error to the // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is @@ -2356,13 +2351,13 @@ static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, case llvm::Triple::aarch64: case llvm::Triple::aarch64_32: case llvm::Triple::aarch64_be: - return CGF->buildAArch64BuiltinExpr(BuiltinID, E, ReturnValue, Arch); + return CGF->emitAArch64BuiltinExpr(BuiltinID, E, ReturnValue, Arch); case llvm::Triple::bpfeb: case llvm::Triple::bpfel: llvm_unreachable("NYI"); case llvm::Triple::x86: case llvm::Triple::x86_64: - return CGF->buildX86BuiltinExpr(BuiltinID, E); + return CGF->emitX86BuiltinExpr(BuiltinID, E); case llvm::Triple::ppc: case llvm::Triple::ppcle: case llvm::Triple::ppc64: @@ -2389,21 +2384,21 @@ static mlir::Value buildTargetArchBuiltinExpr(CIRGenFunction *CGF, } } -mlir::Value -CIRGenFunction::buildTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue) { +mlir::Value CIRGenFunction::emitTargetBuiltinExpr(unsigned BuiltinID, + const CallExpr *E, + ReturnValueSlot ReturnValue) { if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) { assert(getContext().getAuxTargetInfo() && "Missing aux target info"); - return buildTargetArchBuiltinExpr( + return emitTargetArchBuiltinExpr( this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E, ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch()); } - return buildTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, - getTarget().getTriple().getArch()); + return emitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue, + getTarget().getTriple().getArch()); } -void CIRGenFunction::buildVAStartEnd(mlir::Value ArgValue, bool IsStart) { +void CIRGenFunction::emitVAStartEnd(mlir::Value ArgValue, bool IsStart) { // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this // early, defer to LLVM lowering. 
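Aside (not part of the patch): emitVAStartEnd is reached from ordinary varargs code; a standalone repro, assuming -fclangir.

#include <cstdarg>

int sumInts(int n, ...) {
  va_list ap;
  va_start(ap, n); // lowers through emitVAStartEnd with IsStart == true
  int s = 0;
  for (int i = 0; i < n; ++i)
    s += va_arg(ap, int);
  va_end(ap);      // lowers through emitVAStartEnd with IsStart == false
  return s;
}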
if (IsStart) @@ -2448,8 +2443,8 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, auto DIter = LocalDeclMap.find(D); assert(DIter != LocalDeclMap.end()); - return buildLoadOfScalar(DIter->second, /*Volatile=*/false, - getContext().getSizeType(), E->getBeginLoc()); + return emitLoadOfScalar(DIter->second, /*Volatile=*/false, + getContext().getSizeType(), E->getBeginLoc()); } } @@ -2459,7 +2454,7 @@ mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext()))) llvm_unreachable("NYI"); - auto Ptr = EmittedE ? EmittedE : buildScalarExpr(E); + auto Ptr = EmittedE ? EmittedE : emitScalarExpr(E); assert(mlir::isa(Ptr.getType()) && "Non-pointer passed to __builtin_object_size?"); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp index c9eb83434268..d7eb4701c84b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp @@ -1957,11 +1957,11 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags, llvm_unreachable("Unknown vector element type!"); } -static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, - unsigned BuiltinID, - const CallExpr *E, - SmallVectorImpl &Ops, - llvm::Triple::ArchType Arch) { +static mlir::Value emitAArch64TblBuiltinExpr(CIRGenFunction &CGF, + unsigned BuiltinID, + const CallExpr *E, + SmallVectorImpl &Ops, + llvm::Triple::ArchType Arch) { unsigned int Int = 0; [[maybe_unused]] const char *s = nullptr; @@ -2084,16 +2084,16 @@ static mlir::Value buildAArch64TblBuiltinExpr(CIRGenFunction &CGF, llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildAArch64SMEBuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitAArch64SMEBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID, AArch64SMEIntrinsicsProvenSorted); (void)Builtin; llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildAArch64SVEBuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitAArch64SVEBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 && BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64_x4) { llvm_unreachable("NYI"); @@ -2104,12 +2104,12 @@ mlir::Value CIRGenFunction::buildAArch64SVEBuiltinExpr(unsigned BuiltinID, llvm_unreachable("NYI"); } -mlir::Value CIRGenFunction::buildScalarOrConstFoldImmArg(unsigned ICEArguments, - unsigned Idx, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments, + unsigned Idx, + const CallExpr *E) { mlir::Value Arg = {}; if ((ICEArguments & (1 << Idx)) == 0) { - Arg = buildScalarExpr(E->getArg(Idx)); + Arg = emitScalarExpr(E->getArg(Idx)); } else { // If this is required to be a constant, constant fold it so that we // know that the generated intrinsic gets a ConstantInt. 
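Aside (not part of the patch): a dependency-free sketch of the ICEArguments convention that emitScalarOrConstFoldImmArg relies on above; bit i of the mask marks argument i as an integer constant expression that must be folded rather than emitted as a scalar. The mask value here is hypothetical.

#include <cstdio>

static bool mustConstFold(unsigned iceArguments, unsigned idx) {
  // Inverse of the (ICEArguments & (1 << Idx)) == 0 test above.
  return (iceArguments & (1u << idx)) != 0;
}

int main() {
  unsigned mask = 0b010; // hypothetical: only argument 1 is an ICE
  for (unsigned i = 0; i < 3; ++i)
    std::printf("arg %u -> %s\n", i,
                mustConstFold(mask, i) ? "constant fold" : "emitScalarExpr");
  return 0;
}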
@@ -2121,9 +2121,9 @@ mlir::Value CIRGenFunction::buildScalarOrConstFoldImmArg(unsigned ICEArguments, return Arg; } -static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, - const CallExpr *clangCallExpr, - CIRGenFunction &cgf) { +static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID, + const CallExpr *clangCallExpr, + CIRGenFunction &cgf) { StringRef intrinsicName; if (builtinID == clang::AArch64::BI__builtin_arm_ldrex) { intrinsicName = "aarch64.ldxr"; @@ -2131,7 +2131,7 @@ static mlir::Value buildArmLdrexNon128Intrinsic(unsigned int builtinID, llvm_unreachable("Unknown builtinID"); } // Argument - mlir::Value loadAddr = cgf.buildScalarExpr(clangCallExpr->getArg(0)); + mlir::Value loadAddr = cgf.emitScalarExpr(clangCallExpr->getArg(0)); // Get intrinsic call CIRGenBuilderTy &builder = cgf.getBuilder(); QualType clangResTy = clangCallExpr->getType(); @@ -2191,10 +2191,10 @@ static int64_t getIntValueFromConstOp(mlir::Value val) { /// Build a constant shift amount vector of `vecTy` to shift a vector /// Here `shiftVal` is a constant integer that will be splatted into /// a const vector of `vecTy` which is the return of this function -static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, - mlir::Value shiftVal, - cir::VectorType vecTy, - mlir::Location loc, bool neg) { +static mlir::Value emitNeonShiftVector(CIRGenBuilderTy &builder, + mlir::Value shiftVal, + cir::VectorType vecTy, + mlir::Location loc, bool neg) { int shiftAmt = getIntValueFromConstOp(shiftVal); if (neg) shiftAmt = -shiftAmt; @@ -2208,23 +2208,21 @@ static mlir::Value buildNeonShiftVector(CIRGenBuilderTy &builder, } /// Build ShiftOp of vector type whose shift amount is a vector built -/// from a constant integer using `buildNeonShiftVector` function -static mlir::Value buildCommonNeonShift(CIRGenBuilderTy &builder, - mlir::Location loc, - cir::VectorType resTy, - mlir::Value shifTgt, - mlir::Value shiftAmt, bool shiftLeft, - bool negAmt = false) { - shiftAmt = buildNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); +/// from a constant integer using `emitNeonShiftVector` function +static mlir::Value +emitCommonNeonShift(CIRGenBuilderTy &builder, mlir::Location loc, + cir::VectorType resTy, mlir::Value shifTgt, + mlir::Value shiftAmt, bool shiftLeft, bool negAmt = false) { + shiftAmt = emitNeonShiftVector(builder, shiftAmt, resTy, loc, negAmt); return builder.create( loc, resTy, builder.createBitcast(shifTgt, resTy), shiftAmt, shiftLeft); } /// Right-shift a vector by a constant.
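Aside (not part of the patch): source-level intrinsics that reach emitNeonShiftVector / emitCommonNeonShift and the right-shift helper that follows; assumes an AArch64 target and arm_neon.h.

#include <arm_neon.h>

int32x2_t shiftMix(int32x2_t v) {
  int32x2_t l = vshl_n_s32(v, 3); // immediate splatted by emitNeonShiftVector
  int32x2_t r = vshr_n_s32(v, 2); // right shift by constant, handled below
  return vadd_s32(l, r);
}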
-static mlir::Value buildNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, - mlir::Value shiftVal, - cir::VectorType vecTy, bool usgn, - mlir::Location loc) { +static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, + mlir::Value shiftVal, + cir::VectorType vecTy, bool usgn, + mlir::Location loc) { CIRGenBuilderTy &builder = cgf.getBuilder(); int64_t shiftAmt = getIntValueFromConstOp(shiftVal); int eltSize = cgf.CGM.getDataLayout().getTypeSizeInBits(vecTy.getEltType()); @@ -2242,37 +2240,37 @@ static mlir::Value buildNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec, --shiftAmt; shiftVal = builder.getConstInt(loc, vecTy.getEltType(), shiftAmt); } - return buildCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, - false /* right shift */); + return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal, + false /* right shift */); } -mlir::Value buildNeonCall(CIRGenBuilderTy &builder, - llvm::SmallVector argTypes, - llvm::SmallVectorImpl &args, - llvm::StringRef intrinsicName, mlir::Type funcResTy, - mlir::Location loc, - bool isConstrainedFPIntrinsic = false, - unsigned shift = 0, bool rightshift = false) { +mlir::Value emitNeonCall(CIRGenBuilderTy &builder, + llvm::SmallVector argTypes, + llvm::SmallVectorImpl &args, + llvm::StringRef intrinsicName, mlir::Type funcResTy, + mlir::Location loc, + bool isConstrainedFPIntrinsic = false, + unsigned shift = 0, bool rightshift = false) { // TODO: Consider removing the following unreachable when we have - // buildConstrainedFPCall feature implemented - assert(!cir::MissingFeatures::buildConstrainedFPCall()); + // emitConstrainedFPCall feature implemented + assert(!cir::MissingFeatures::emitConstrainedFPCall()); if (isConstrainedFPIntrinsic) llvm_unreachable("isConstrainedFPIntrinsic NYI"); for (unsigned j = 0; j < argTypes.size(); ++j) { if (isConstrainedFPIntrinsic) { - assert(!cir::MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::emitConstrainedFPCall()); } if (shift > 0 && shift == j) { - args[j] = buildNeonShiftVector(builder, args[j], - mlir::cast(argTypes[j]), - loc, rightshift); + args[j] = emitNeonShiftVector(builder, args[j], + mlir::cast(argTypes[j]), + loc, rightshift); } else { args[j] = builder.createBitcast(args[j], argTypes[j]); } } if (isConstrainedFPIntrinsic) { - assert(!cir::MissingFeatures::buildConstrainedFPCall()); + assert(!cir::MissingFeatures::emitConstrainedFPCall()); return nullptr; } return builder @@ -2281,16 +2279,16 @@ mlir::Value buildNeonCall(CIRGenBuilderTy &builder, .getResult(); } -/// This function `buildCommonNeonCallPattern0` implements a common way +/// This function `emitCommonNeonCallPattern0` implements a common way /// to generate a neon intrinsic call that has the following pattern: /// 1. There is a need to cast the result of the intrinsic call back to the /// expression type. /// 2. Function arg types are given, not deduced from actual arg types.
static mlir::Value -buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, - llvm::SmallVector argTypes, - llvm::SmallVectorImpl &ops, - mlir::Type funcResTy, const clang::CallExpr *e) { +emitCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, + llvm::SmallVector argTypes, + llvm::SmallVectorImpl &ops, + mlir::Type funcResTy, const clang::CallExpr *e) { CIRGenBuilderTy &builder = cgf.getBuilder(); if (argTypes.empty()) { // The most common arg types are {funcResTy, funcResTy} for neon intrinsic @@ -2300,13 +2298,13 @@ buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName, argTypes = {funcResTy, funcResTy}; } mlir::Value res = - buildNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, - cgf.getLoc(e->getExprLoc())); + emitNeonCall(builder, std::move(argTypes), ops, intrincsName, funcResTy, + cgf.getLoc(e->getExprLoc())); mlir::Type resultType = cgf.ConvertType(e->getType()); return builder.createBitcast(res, resultType); } -mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( +mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr( unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic, const char *nameHint, unsigned modifier, const CallExpr *e, llvm::SmallVectorImpl &ops, Address ptrOp0, Address ptrOp1, @@ -2362,10 +2360,10 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vpaddlq_v: { // The source operand type has twice as many elements of half the size. cir::VectorType narrowTy = getHalfEltSizeTwiceNumElemsVecType(builder, vTy); - return buildNeonCall(builder, {narrowTy}, ops, - isUnsigned ? "aarch64.neon.uaddlp" - : "aarch64.neon.saddlp", - vTy, getLoc(e->getExprLoc())); + return emitNeonCall(builder, {narrowTy}, ops, + isUnsigned ? "aarch64.neon.uaddlp" + : "aarch64.neon.saddlp", + vTy, getLoc(e->getExprLoc())); } case NEON::BI__builtin_neon_vext_v: case NEON::BI__builtin_neon_vextq_v: { @@ -2392,27 +2390,27 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( cir::VectorType mulVecT = GetNeonType(this, NeonTypeFlags(neonType.getEltType(), false, /*isQuad*/ false)); - return buildNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, - (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || - builtinID == NEON::BI__builtin_neon_vqdmulh_lane_v) - ? "aarch64.neon.sqdmulh.lane" - : "aarch64.neon.sqrdmulh.lane", - resTy, getLoc(e->getExprLoc())); + return emitNeonCall(builder, {resTy, mulVecT, SInt32Ty}, ops, + (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v || + builtinID == NEON::BI__builtin_neon_vqdmulh_lane_v) + ? "aarch64.neon.sqdmulh.lane" + : "aarch64.neon.sqrdmulh.lane", + resTy, getLoc(e->getExprLoc())); } case NEON::BI__builtin_neon_vqshlu_n_v: case NEON::BI__builtin_neon_vqshluq_n_v: { // These intrinsics expect signed vector type as input, but // return unsigned vector type. cir::VectorType srcTy = getSignChangedVectorType(builder, vTy); - return buildNeonCall(builder, {srcTy, srcTy}, ops, "aarch64.neon.sqshlu", - vTy, getLoc(e->getExprLoc()), - false, /* not fp constrained op */ - 1, /* second arg is shift amount */ - false /* leftshift */); + return emitNeonCall(builder, {srcTy, srcTy}, ops, "aarch64.neon.sqshlu", + vTy, getLoc(e->getExprLoc()), + false, /* not fp constrained op */ + 1, /* second arg is shift amount */ + false /* leftshift */); } case NEON::BI__builtin_neon_vrshr_n_v: case NEON::BI__builtin_neon_vrshrq_n_v: { - return buildNeonCall( + return emitNeonCall( builder, {vTy, isUnsigned ?
getSignChangedVectorType(builder, vTy) : vTy}, ops, isUnsigned ? "aarch64.neon.urshl" : "aarch64.neon.srshl", vTy, @@ -2423,7 +2421,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vshl_n_v: case NEON::BI__builtin_neon_vshlq_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); - return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); + return emitCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } case NEON::BI__builtin_neon_vshll_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); @@ -2433,7 +2431,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( ops[0] = builder.createBitcast(ops[0], srcTy); // The following cast will be lowered to SExt or ZExt in LLVM. ops[0] = builder.createIntCast(ops[0], vTy); - return buildCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); + return emitCommonNeonShift(builder, loc, vTy, ops[0], ops[1], true); } case NEON::BI__builtin_neon_vshrn_n_v: { mlir::Location loc = getLoc(e->getExprLoc()); @@ -2441,13 +2439,13 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( vTy, true /* extended */, mlir::cast(vTy.getEltType()).isSigned()); ops[0] = builder.createBitcast(ops[0], srcTy); - ops[0] = buildCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); + ops[0] = emitCommonNeonShift(builder, loc, srcTy, ops[0], ops[1], false); return builder.createIntCast(ops[0], vTy); } case NEON::BI__builtin_neon_vshr_n_v: case NEON::BI__builtin_neon_vshrq_n_v: - return buildNeonRShiftImm(*this, ops[0], ops[1], vTy, isUnsigned, - getLoc(e->getExprLoc())); + return emitNeonRShiftImm(*this, ops[0], ops[1], vTy, isUnsigned, + getLoc(e->getExprLoc())); case NEON::BI__builtin_neon_vtst_v: case NEON::BI__builtin_neon_vtstq_v: { mlir::Location loc = getLoc(e->getExprLoc()); @@ -2540,21 +2538,20 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr( if (intrincsName.empty()) return nullptr; - return buildCommonNeonCallPattern0(*this, intrincsName, argTypes, ops, vTy, - e); + return emitCommonNeonCallPattern0(*this, intrincsName, argTypes, ops, vTy, e); } mlir::Value -CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, - ReturnValueSlot ReturnValue, - llvm::Triple::ArchType Arch) { +CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, + ReturnValueSlot ReturnValue, + llvm::Triple::ArchType Arch) { if (BuiltinID >= clang::AArch64::FirstSVEBuiltin && BuiltinID <= clang::AArch64::LastSVEBuiltin) - return buildAArch64SVEBuiltinExpr(BuiltinID, E); + return emitAArch64SVEBuiltinExpr(BuiltinID, E); if (BuiltinID >= clang::AArch64::FirstSMEBuiltin && BuiltinID <= clang::AArch64::LastSMEBuiltin) - return buildAArch64SMEBuiltinExpr(BuiltinID, E); + return emitAArch64SMEBuiltinExpr(BuiltinID, E); if (BuiltinID == Builtin::BI__builtin_cpu_supports) llvm_unreachable("NYI"); @@ -2683,7 +2680,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex || BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) { - return buildArmLdrexNon128Intrinsic(BuiltinID, E, *this); + return emitArmLdrexNon128Intrinsic(BuiltinID, E, *this); } if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex || @@ -2899,12 +2896,12 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vstl1q_lane_s64: // Get the alignment for the argument in addition to the value; // we'll use it later. 
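Aside (not part of the patch): lane loads such as the cases above carry both a pointer and its alignment, which is why PtrOp0 below comes from emitPointerWithAlignment; assumes an AArch64 target.

#include <arm_neon.h>

int32x2_t loadLane1(const int32_t *p, int32x2_t v) {
  // The pointer operand keeps its natural alignment (PtrOp0); the lane
  // index must be a compile-time constant.
  return vld1_lane_s32(p, v, 1);
}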
- PtrOp0 = buildPointerWithAlignment(E->getArg(0)); + PtrOp0 = emitPointerWithAlignment(E->getArg(0)); Ops.push_back(PtrOp0.emitRawPointer()); continue; } } - Ops.push_back(buildScalarOrConstFoldImmArg(ICEArguments, i, E)); + Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E)); } auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap); @@ -3072,7 +3069,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vsetq_lane_i32: case NEON::BI__builtin_neon_vsetq_lane_i64: case NEON::BI__builtin_neon_vsetq_lane_f32: - Ops.push_back(buildScalarExpr(E->getArg(2))); + Ops.push_back(emitScalarExpr(E->getArg(2))); return builder.create(getLoc(E->getExprLoc()), Ops[1], Ops[0], Ops[2]); case NEON::BI__builtin_neon_vset_lane_bf16: @@ -3093,73 +3090,73 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 8)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i8: case NEON::BI__builtin_neon_vdupb_laneq_i8: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 16)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i16: case NEON::BI__builtin_neon_vduph_lane_i16: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 4)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i16: case NEON::BI__builtin_neon_vduph_laneq_i16: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 8)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i32: case NEON::BI__builtin_neon_vdups_lane_i32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_f32: case NEON::BI__builtin_neon_vdups_lane_f32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_i32: case NEON::BI__builtin_neon_vdups_laneq_i32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 4)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vget_lane_i64: case NEON::BI__builtin_neon_vdupd_lane_i64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 1)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vdupd_lane_f64: case NEON::BI__builtin_neon_vget_lane_f64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 1)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case 
NEON::BI__builtin_neon_vgetq_lane_i64: case NEON::BI__builtin_neon_vdupd_laneq_i64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f32: case NEON::BI__builtin_neon_vdups_laneq_f32: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 4)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vgetq_lane_f64: case NEON::BI__builtin_neon_vdupd_laneq_f64: Ops[0] = builder.createBitcast( Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2)); return builder.create(getLoc(E->getExprLoc()), Ops[0], - buildScalarExpr(E->getArg(1))); + emitScalarExpr(E->getArg(1))); case NEON::BI__builtin_neon_vaddh_f16: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vsubh_f16: @@ -3257,13 +3254,12 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID, AArch64SIMDIntrinsicsProvenSorted); if (Builtin) - return buildCommonNeonBuiltinExpr( + return emitCommonNeonBuiltinExpr( Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic, Builtin->NameHint, Builtin->TypeModifier, E, Ops, /*never use addresses*/ Address::invalid(), Address::invalid(), Arch); - if (mlir::Value V = - buildAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) + if (mlir::Value V = emitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch)) return V; cir::VectorType vTy = ty; @@ -3301,8 +3297,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, name = "aarch64.neon.pmull"; cir::VectorType argTy = builder.getExtendedOrTruncatedElementVectorType( ty, false /* truncated */, !usgn); - return buildNeonCall(builder, {argTy, argTy}, Ops, name, ty, - getLoc(E->getExprLoc())); + return emitNeonCall(builder, {argTy, argTy}, Ops, name, ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vmax_v: case NEON::BI__builtin_neon_vmaxq_v: @@ -3315,8 +3311,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::StringRef name = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin"; if (cir::isFPOrFPVectorTy(ty)) name = "aarch64.neon.fmin"; - return buildNeonCall(builder, {ty, ty}, Ops, name, ty, - getLoc(E->getExprLoc())); + return emitNeonCall(builder, {ty, ty}, Ops, name, ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vminh_f16: { llvm_unreachable("NYI"); @@ -3326,8 +3322,8 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::StringRef name = usgn ? 
"aarch64.neon.uabd" : "aarch64.neon.sabd"; if (cir::isFPOrFPVectorTy(ty)) name = "aarch64.neon.fabd"; - return buildNeonCall(builder, {ty, ty}, Ops, name, ty, - getLoc(E->getExprLoc())); + return emitNeonCall(builder, {ty, ty}, Ops, name, ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { @@ -3361,7 +3357,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vqrshrun_n_v: // The prototype of builtin_neon_vqrshrun_n can be found at // https://developer.arm.com/architectures/instruction-sets/intrinsics/ - return buildNeonCall( + return emitNeonCall( builder, {builder.getExtendedOrTruncatedElementVectorType(ty, true, true), SInt32Ty}, @@ -3369,7 +3365,7 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, case NEON::BI__builtin_neon_vqshrn_n_v: llvm_unreachable("NYI"); case NEON::BI__builtin_neon_vrshrn_n_v: - return buildNeonCall( + return emitNeonCall( builder, {builder.getExtendedOrTruncatedElementVectorType( vTy, true /* extend */, @@ -3383,9 +3379,9 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, } case NEON::BI__builtin_neon_vrnda_v: case NEON::BI__builtin_neon_vrndaq_v: { - assert(!cir::MissingFeatures::buildConstrainedFPCall()); - return buildNeonCall(builder, {ty}, Ops, "round", ty, - getLoc(E->getExprLoc())); + assert(!cir::MissingFeatures::emitConstrainedFPCall()); + return emitNeonCall(builder, {ty}, Ops, "round", ty, + getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndih_f16: { llvm_unreachable("NYI"); @@ -3405,10 +3401,10 @@ CIRGenFunction::buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm_unreachable("NYI"); } case NEON::BI__builtin_neon_vrndns_f32: { - mlir::Value arg0 = buildScalarExpr(E->getArg(0)); + mlir::Value arg0 = emitScalarExpr(E->getArg(0)); args.push_back(arg0); - return buildNeonCall(builder, {arg0.getType()}, args, "roundeven.f32", - getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); + return emitNeonCall(builder, {arg0.getType()}, args, "roundeven.f32", + getCIRGenModule().FloatTy, getLoc(E->getExprLoc())); } case NEON::BI__builtin_neon_vrndph_f16: { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp index 76fe5315009e..0cd8f09f6da3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp @@ -31,7 +31,7 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; -mlir::Value CIRGenFunction::buildX86BuiltinExpr(unsigned BuiltinID, - const CallExpr *E) { +mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { llvm_unreachable("NYI"); } diff --git a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp index beadfbb26a23..7668ef3dd1b7 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXX.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXX.cpp @@ -172,18 +172,18 @@ bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { llvm_unreachable("NYI"); // Create the alias with no name. 
- buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); + emitAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage); return false; } -static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, - Address DeclPtr) { +static void emitDeclInit(CIRGenFunction &CGF, const VarDecl *D, + Address DeclPtr) { assert((D->hasGlobalStorage() || (D->hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) && "VarDecl must have global or local (in the case of OpenCL) storage!"); assert(!D->getType()->isReferenceType() && - "Should not call buildDeclInit on a reference!"); + "Should not call emitDeclInit on a reference!"); QualType type = D->getType(); LValue lv = CGF.makeAddrLValue(DeclPtr, type); @@ -191,21 +191,21 @@ static void buildDeclInit(CIRGenFunction &CGF, const VarDecl *D, const Expr *Init = D->getInit(); switch (CIRGenFunction::getEvaluationKind(type)) { case cir::TEK_Aggregate: - CGF.buildAggExpr( - Init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased, - AggValueSlot::DoesNotOverlap)); + CGF.emitAggExpr(Init, + AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed, + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsNotAliased, + AggValueSlot::DoesNotOverlap)); return; case cir::TEK_Scalar: - CGF.buildScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); + CGF.emitScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false); return; case cir::TEK_Complex: llvm_unreachable("complex evaluation NYI"); } } -static void buildDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { +static void emitDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) { // Honor __attribute__((no_destroy)) and bail instead of attempting // to emit a reference to a possibly nonexistent destructor, which // in turn can cause a crash. This will result in a global constructor @@ -292,12 +292,12 @@ cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) { /// Emit code to cause the variable at the given address to be considered as /// constant from this point onwards. -static void buildDeclInvariant(CIRGenFunction &CGF, const VarDecl *D) { - return CGF.buildInvariantStart( +static void emitDeclInvariant(CIRGenFunction &CGF, const VarDecl *D) { + return CGF.emitInvariantStart( CGF.getContext().getTypeSizeInChars(D->getType())); } -void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { +void CIRGenFunction::emitInvariantStart([[maybe_unused]] CharUnits Size) { // Do not emit the intrinsic if we're not optimizing.
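Aside (not part of the patch): hypothetical global shapes that drive the paths above: a dynamically initialized global with a destructor (emitDeclInit plus emitDeclDestroy) and a const-qualified one whose storage becomes constant after initialization (the emitDeclInvariant branch).

struct Logger {
  Logger() {}  // dynamic initialization: emitDeclInit
  ~Logger() {} // registered teardown: emitDeclDestroy
};
Logger gLogger;

struct Flag {
  Flag() {}
};
const Flag kFlag; // constant storage once initialized: emitDeclInvariant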
if (!CGM.getCodeGenOpts().OptimizationLevel) return; @@ -305,9 +305,9 @@ void CIRGenFunction::buildInvariantStart([[maybe_unused]] CharUnits Size) { assert(!cir::MissingFeatures::createInvariantIntrinsic()); } -void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, - cir::GlobalOp addr, - bool performInit) { +void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl, + cir::GlobalOp addr, + bool performInit) { const Expr *init = varDecl->getInit(); QualType ty = varDecl->getType(); @@ -357,7 +357,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, getASTContext().getDeclAlign(varDecl)); assert(performInit && "cannot have constant initializer which needs " "destruction for reference"); - RValue rv = cgf.buildReferenceBindingToExpr(init); + RValue rv = cgf.emitReferenceBindingToExpr(init); { mlir::OpBuilder::InsertionGuard guard(builder); mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp(); @@ -370,7 +370,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.setInsertionPoint(yield); } } - cgf.buildStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); + cgf.emitStoreOfScalar(rv.getScalarVal(), declAddr, false, ty); } builder.setInsertionPointToEnd(block); builder.create(addr->getLoc()); @@ -390,7 +390,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, builder.setInsertionPointToStart(block); Address declAddr(getAddrOfGlobalVar(varDecl), getASTContext().getDeclAlign(varDecl)); - buildDeclInit(cgf, varDecl, declAddr); + emitDeclInit(cgf, varDecl, declAddr); builder.setInsertionPointToEnd(block); builder.create(addr->getLoc()); } @@ -398,7 +398,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, if (isConstantStorage) { // TODO: this leads to a missing feature at the moment; we probably also need // a LexicalScope to be inserted here. - buildDeclInvariant(cgf, varDecl); + emitDeclInvariant(cgf, varDecl); } else { // If not constant storage we'll emit this regardless of NeedsDtor value. mlir::OpBuilder::InsertionGuard guard(builder); @@ -408,7 +408,7 @@ void CIRGenModule::buildCXXGlobalVarDeclInit(const VarDecl *varDecl, lexScope.setAsGlobalInit(); builder.setInsertionPointToStart(block); - buildDeclDestroy(cgf, varDecl); + emitDeclDestroy(cgf, varDecl); builder.setInsertionPointToEnd(block); if (block->empty()) { block->erase(); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 587a1ce9c880..0a0c1bef4242 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -95,8 +95,8 @@ class CIRGenCXXABI { clang::CXXCtorType Type, bool ForVirtualBase, bool Delegating) = 0; /// Emit the ABI-specific prolog for the function - virtual void buildInstanceFunctionProlog(SourceLocation Loc, - CIRGenFunction &CGF) = 0; + virtual void emitInstanceFunctionProlog(SourceLocation Loc, + CIRGenFunction &CGF) = 0; /// Get the type of the implicit "this" parameter used by a method. May return /// zero if no specific type is applicable, e.g. if the ABI expects the "this" @@ -162,16 +162,15 @@ class CIRGenCXXABI { bool Delegating) = 0; /// Emit constructor variants required by this ABI. - virtual void buildCXXConstructors(const clang::CXXConstructorDecl *D) = 0; + virtual void emitCXXConstructors(const clang::CXXConstructorDecl *D) = 0; /// Emit dtor variants required by this ABI.
- virtual void buildCXXDestructors(const clang::CXXDestructorDecl *D) = 0; + virtual void emitCXXDestructors(const clang::CXXDestructorDecl *D) = 0; /// Emit the destructor call. - virtual void buildDestructorCall(CIRGenFunction &CGF, - const CXXDestructorDecl *DD, - CXXDtorType Type, bool ForVirtualBase, - bool Delegating, Address This, - QualType ThisTy) = 0; + virtual void emitDestructorCall(CIRGenFunction &CGF, + const CXXDestructorDecl *DD, CXXDtorType Type, + bool ForVirtualBase, bool Delegating, + Address This, QualType ThisTy) = 0; /// Emit code to force the execution of a destructor during global /// teardown. The default implementation of this uses atexit. @@ -328,23 +327,23 @@ class CIRGenCXXABI { /// Emit a single constructor/destructor with the given type from a C++ /// constructor Decl. - virtual void buildCXXStructor(clang::GlobalDecl GD) = 0; + virtual void emitCXXStructor(clang::GlobalDecl GD) = 0; - virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0; - virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0; + virtual void emitRethrow(CIRGenFunction &CGF, bool isNoReturn) = 0; + virtual void emitThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) = 0; - virtual void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; + virtual void emitBadCastCall(CIRGenFunction &CGF, mlir::Location loc) = 0; virtual mlir::Value getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &CGF, Address This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) = 0; - virtual mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, - QualType SrcRecordTy, - QualType DestRecordTy, - cir::PointerType DestCIRTy, - bool isRefCast, Address Src) = 0; + virtual mlir::Value emitDynamicCast(CIRGenFunction &CGF, mlir::Location Loc, + QualType SrcRecordTy, + QualType DestRecordTy, + cir::PointerType DestCIRTy, + bool isRefCast, Address Src) = 0; virtual cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy, const CXXMethodDecl *MD) = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 64b4c2f0957f..a8e06467e08d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -287,8 +287,8 @@ CIRGenCallee CIRGenCallee::prepareConcreteCallee(CIRGenFunction &CGF) const { return *this; } -void CIRGenFunction::buildAggregateStore(mlir::Value Val, Address Dest, - bool DestIsVolatile) { +void CIRGenFunction::emitAggregateStore(mlir::Value Val, Address Dest, + bool DestIsVolatile) { // In LLVM codegen: // Function to store a first-class aggregate into memory. We prefer to // store the elements rather than the aggregate to be more friendly to @@ -465,7 +465,7 @@ void CIRGenModule::constructAttributeList(StringRef Name, getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, funcAttrs); } -static cir::CIRCallOpInterface buildCallLikeOp( +static cir::CIRCallOpInterface emitCallLikeOp( CIRGenFunction &CGF, mlir::Location callLoc, cir::FuncType indirectFuncTy, mlir::Value indirectFuncVal, cir::FuncOp directFuncOp, SmallVectorImpl &CIRCallArgs, bool isInvoke, @@ -486,7 +486,7 @@ static cir::CIRCallOpInterface buildCallLikeOp( // Don't emit the code right away for catch clauses; for // now, create the regions and consume the try scope result. // Note that clauses are later populated in - // CIRGenFunction::buildLandingPad. + // CIRGenFunction::emitLandingPad.
[&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &result) { // Since this didn't come from an explicit try, we only need one @@ -551,13 +551,13 @@ static cir::CIRCallOpInterface buildCallLikeOp( extraFnAttrs); } -RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, - const CIRGenCallee &Callee, - ReturnValueSlot ReturnValue, - const CallArgList &CallArgs, - cir::CIRCallOpInterface *callOrTryCall, - bool IsMustTail, mlir::Location loc, - std::optional E) { +RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, + ReturnValueSlot ReturnValue, + const CallArgList &CallArgs, + cir::CIRCallOpInterface *callOrTryCall, + bool IsMustTail, mlir::Location loc, + std::optional E) { auto builder = CGM.getBuilder(); // FIXME: We no longer need the types from CallArgs; lift up and simplify @@ -823,7 +823,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, auto extraFnAttrs = cir::ExtraFuncAttributesAttr::get( &getMLIRContext(), Attrs.getDictionary(&getMLIRContext())); - cir::CIRCallOpInterface callLikeOp = buildCallLikeOp( + cir::CIRCallOpInterface callLikeOp = emitCallLikeOp( *this, callLoc, indirectFuncTy, indirectFuncVal, directFuncOp, CIRCallArgs, isInvoke, callingConv, extraFnAttrs); @@ -880,7 +880,7 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, assert(Results.size() <= 1 && "multiple returns NYI"); SourceLocRAIIObject Loc{*this, callLoc}; - buildAggregateStore(Results[0], DestPtr, DestIsVolatile); + emitAggregateStore(Results[0], DestPtr, DestIsVolatile); return RValue::getAggregate(DestPtr); } case cir::TEK_Scalar: { @@ -921,9 +921,9 @@ RValue CIRGenFunction::buildCall(const CIRGenFunctionInfo &CallInfo, return ret; } -mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, - cir::FuncOp callee, - ArrayRef args) { +mlir::Value CIRGenFunction::emitRuntimeCall(mlir::Location loc, + cir::FuncOp callee, + ArrayRef args) { // TODO(cir): set the calling convention to this runtime call. 
assert(!cir::MissingFeatures::setCallingConv()); @@ -937,8 +937,8 @@ mlir::Value CIRGenFunction::buildRuntimeCall(mlir::Location loc, return call->getResult(0); } -void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, - QualType type) { +void CIRGenFunction::emitCallArg(CallArgList &args, const Expr *E, + QualType type) { // TODO: Add the DisableDebugLocationUpdates helper assert(!dyn_cast(E) && "NYI"); @@ -947,7 +947,7 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, if (E->isGLValue()) { assert(E->getObjectKind() == OK_Ordinary); - return args.add(buildReferenceBindingToExpr(E), type); + return args.add(emitReferenceBindingToExpr(E), type); } bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); @@ -962,13 +962,13 @@ void CIRGenFunction::buildCallArg(CallArgList &args, const Expr *E, if (HasAggregateEvalKind && isa(E) && cast(E)->getCastKind() == CK_LValueToRValue) { - LValue L = buildLValue(cast(E)->getSubExpr()); + LValue L = emitLValue(cast(E)->getSubExpr()); assert(L.isSimple()); args.addUncopiedAggregate(L, type); return; } - args.add(buildAnyExprToTemp(E), type); + args.add(emitAnyExprToTemp(E), type); } QualType CIRGenFunction::getVarArgType(const Expr *Arg) { @@ -989,19 +989,19 @@ QualType CIRGenFunction::getVarArgType(const Expr *Arg) { return Arg->getType(); } -/// Similar to buildAnyExpr(), however, the result will always be accessible +/// Similar to emitAnyExpr(), however, the result will always be accessible /// even if no aggregate location is provided. -RValue CIRGenFunction::buildAnyExprToTemp(const Expr *E) { +RValue CIRGenFunction::emitAnyExprToTemp(const Expr *E) { AggValueSlot AggSlot = AggValueSlot::ignored(); if (hasAggregateEvaluationKind(E->getType())) AggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), getCounterAggTmpAsString()); - return buildAnyExpr(E, AggSlot); + return emitAnyExpr(E, AggSlot); } -void CIRGenFunction::buildCallArgs( +void CIRGenFunction::emitCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { @@ -1076,11 +1076,11 @@ void CIRGenFunction::buildCallArgs( assert(!isa(*Arg) && "NYI"); assert(!isa_and_nonnull(AC.getDecl()) && "NYI"); - buildCallArg(Args, *Arg, ArgTypes[Idx]); + emitCallArg(Args, *Arg, ArgTypes[Idx]); // In particular, we depend on it being the last arg in Args, and the // objectsize bits depend on there only being one arg if !LeftToRight. assert(InitialArgSize + 1 == Args.size() && - "The code below depends on only adding one arg per buildCallArg"); + "The code below depends on only adding one arg per emitCallArg"); (void)InitialArgSize; // Since pointer arguments are never emitted as LValue, it is safe to emit // non-null argument check for r-value only. @@ -1343,11 +1343,11 @@ static bool isInAllocaArgument(CIRGenCXXABI &ABI, QualType type) { ABI.getRecordArgABI(RD) == CIRGenCXXABI::RecordArgABI::DirectInMemory; } -void CIRGenFunction::buildDelegateCallArg(CallArgList &args, - const VarDecl *param, - SourceLocation loc) { +void CIRGenFunction::emitDelegateCallArg(CallArgList &args, + const VarDecl *param, + SourceLocation loc) { // StartFunction converted the ABI-lowered parameter(s) into a local alloca.
- // We need to turn that into an r-value suitable for buildCall + // We need to turn that into an r-value suitable for emitCall Address local = GetAddrOfLocalVar(param); QualType type = param->getType(); @@ -1553,15 +1553,15 @@ RValue CallArg::getRValue(CIRGenFunction &CGF, mlir::Location loc) const { if (!HasLV) return RV; LValue Copy = CGF.makeAddrLValue(CGF.CreateMemTemp(Ty, loc), Ty); - CGF.buildAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, - LV.isVolatile()); + CGF.emitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, + LV.isVolatile()); IsUsed = true; return RValue::getAggregate(Copy.getAddress()); } -void CIRGenFunction::buildNonNullArgCheck(RValue RV, QualType ArgType, - SourceLocation ArgLoc, - AbstractCallee AC, unsigned ParmNum) { +void CIRGenFunction::emitNonNullArgCheck(RValue RV, QualType ArgType, + SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum) { if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || SanOpts.has(SanitizerKind::NullabilityArg))) return; @@ -1572,11 +1572,11 @@ void CIRGenFunction::buildNonNullArgCheck(RValue RV, QualType ArgType, // FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. We // need to decide how to handle va_arg target-specific codegen. -mlir::Value CIRGenFunction::buildVAArg(VAArgExpr *VE, Address &VAListAddr) { +mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *VE, Address &VAListAddr) { assert(!VE->isMicrosoftABI() && "NYI"); auto loc = CGM.getLoc(VE->getExprLoc()); auto type = ConvertType(VE->getType()); - auto vaList = buildVAListRef(VE->getSubExpr()).getPointer(); + auto vaList = emitVAListRef(VE->getSubExpr()).getPointer(); return builder.create(loc, type, vaList); } diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 88e353b421a6..5e2081abda28 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -141,7 +141,7 @@ class FieldMemcpyizer { return MemcpySize; } - void buildMemcpy() { + void emitMemcpy() { // Give the subclass a chance to bail out if it feels the memcpy isn't worth // it (e.g. Hasn't aggregated enough data). if (!FirstField) { @@ -158,7 +158,7 @@ class FieldMemcpyizer { const CXXRecordDecl *ClassDecl; private: - void buildMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { + void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { llvm_unreachable("NYI"); } @@ -200,27 +200,27 @@ class FieldMemcpyizer { unsigned LastAddedFieldIndex; }; -static void buildLValueForAnyFieldInitialization(CIRGenFunction &CGF, - CXXCtorInitializer *MemberInit, - LValue &LHS) { +static void emitLValueForAnyFieldInitialization(CIRGenFunction &CGF, + CXXCtorInitializer *MemberInit, + LValue &LHS) { FieldDecl *Field = MemberInit->getAnyMember(); if (MemberInit->isIndirectMemberInitializer()) { // If we are initializing an anonymous union field, drill down to the field. 
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
     for (const auto *I : IndirectField->chain()) {
       auto *fd = cast<FieldDecl>(I);
-      LHS = CGF.buildLValueForFieldInitialization(LHS, fd, fd->getName());
+      LHS = CGF.emitLValueForFieldInitialization(LHS, fd, fd->getName());
     }
   } else {
-    LHS = CGF.buildLValueForFieldInitialization(LHS, Field, Field->getName());
+    LHS = CGF.emitLValueForFieldInitialization(LHS, Field, Field->getName());
   }
 }
 
-static void buildMemberInitializer(CIRGenFunction &CGF,
-                                   const CXXRecordDecl *ClassDecl,
-                                   CXXCtorInitializer *MemberInit,
-                                   const CXXConstructorDecl *Constructor,
-                                   FunctionArgList &Args) {
+static void emitMemberInitializer(CIRGenFunction &CGF,
+                                  const CXXRecordDecl *ClassDecl,
+                                  CXXCtorInitializer *MemberInit,
+                                  const CXXConstructorDecl *Constructor,
+                                  FunctionArgList &Args) {
   // TODO: ApplyDebugLocation
   assert(MemberInit->isAnyMemberInitializer() &&
          "Must have member initializer!");
@@ -241,7 +241,7 @@ static void buildMemberInitializer(CIRGenFunction &CGF,
   else
     LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
 
-  buildLValueForAnyFieldInitialization(CGF, MemberInit, LHS);
+  emitLValueForAnyFieldInitialization(CGF, MemberInit, LHS);
 
   // Special case: If we are in a copy or move constructor, and we are copying
   // an array of PODs or classes with trivial copy constructors, ignore the AST
@@ -255,7 +255,7 @@ static void buildMemberInitializer(CIRGenFunction &CGF,
     llvm_unreachable("NYI");
   }
 
-  CGF.buildInitializerForField(Field, LHS, MemberInit->getInit());
+  CGF.emitInitializerForField(Field, LHS, MemberInit->getInit());
 }
 
 class ConstructorMemcpyizer : public FieldMemcpyizer {
@@ -296,13 +296,13 @@ class ConstructorMemcpyizer : public FieldMemcpyizer {
       AggregatedInits.push_back(MemberInit);
       addMemcpyableField(MemberInit->getMember());
     } else {
-      buildAggregatedInits();
-      buildMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
-                             ConstructorDecl, Args);
+      emitAggregatedInits();
+      emitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
+                            ConstructorDecl, Args);
     }
   }
 
-  void buildAggregatedInits() {
+  void emitAggregatedInits() {
     if (AggregatedInits.size() <= 1) {
       // This memcpy is too small to be worthwhile. Fall back on default
       // codegen.
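// -- Editorial note (not part of the patch) --------------------------------
// A minimal, hypothetical C++ input for the ConstructorMemcpyizer path
// renamed above: in a copy constructor, consecutive trivially copyable
// members are collected via addMemcpyableField(), and once more than one has
// been aggregated, emitAggregatedInits() folds them into a single memcpy
// instead of per-field initializer emission. The struct is illustrative only.
struct Packet {
  int id;            // trivially copyable: memcpy candidate
  int flags;         // adjacent trivial field: aggregated with `id`
  double payload[4]; // still trivial: extends the aggregated run
  Packet(const Packet &) = default; // member-wise copy the memcpyizer folds
};
// ---------------------------------------------------------------------------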
@@ -314,7 +314,7 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { } pushEHDestructors(); - buildMemcpy(); + emitMemcpy(); AggregatedInits.clear(); } @@ -331,12 +331,12 @@ class ConstructorMemcpyizer : public FieldMemcpyizer { if (!CGF.needsEHCleanup(dtorKind)) continue; LValue FieldLHS = LHS; - buildLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); + emitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); } } - void finish() { buildAggregatedInits(); } + void finish() { emitAggregatedInits(); } private: const CXXConstructorDecl *ConstructorDecl; @@ -435,7 +435,7 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { AggregatedStmts.push_back(S); } else { emitAggregatedStmts(); - if (CGF.buildStmt(S, /*useCurrentScope=*/true).failed()) + if (CGF.emitStmt(S, /*useCurrentScope=*/true).failed()) llvm_unreachable("Should not get here!"); } } @@ -444,14 +444,13 @@ class AssignmentMemcpyizer : public FieldMemcpyizer { if (AggregatedStmts.size() <= 1) { if (!AggregatedStmts.empty()) { CopyingValueRepresentation CVR(CGF); - if (CGF.buildStmt(AggregatedStmts[0], /*useCurrentScope=*/true) - .failed()) + if (CGF.emitStmt(AggregatedStmts[0], /*useCurrentScope=*/true).failed()) llvm_unreachable("Should not get here!"); } reset(); } - buildMemcpy(); + emitMemcpy(); AggregatedStmts.clear(); } @@ -486,8 +485,8 @@ struct CallBaseDtor final : EHScopeStack::Cleanup { Address Addr = CGF.getAddressOfDirectBaseInCompleteClass( *CGF.currSrcLoc, CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, BaseIsVirtual); - CGF.buildCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, - /*Delegating=*/false, Addr, ThisTy); + CGF.emitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, + /*Delegating=*/false, Addr, ThisTy); } }; @@ -540,9 +539,9 @@ Address CIRGenFunction::getAddressOfDirectBaseInCompleteClass( /*assume_not_null=*/true); } -static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, - const CXXRecordDecl *ClassDecl, - CXXCtorInitializer *BaseInit) { +static void emitBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, + const CXXRecordDecl *ClassDecl, + CXXCtorInitializer *BaseInit) { assert(BaseInit->isBaseInitializer() && "Must have base initializer!"); Address ThisPtr = CGF.LoadCXXThisAddress(); @@ -568,7 +567,7 @@ static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, CGF.getOverlapForBaseInit(ClassDecl, BaseClassDecl, isBaseVirtual)); - CGF.buildAggExpr(BaseInit->getInit(), AggSlot); + CGF.emitAggExpr(BaseInit->getInit(), AggSlot); if (CGF.CGM.getLangOpts().Exceptions && !BaseClassDecl->hasTrivialDestructor()) @@ -578,11 +577,11 @@ static void buildBaseInitializer(mlir::Location loc, CIRGenFunction &CGF, /// This routine generates necessary code to initialize base classes and /// non-static data members belonging to this constructor. 
-void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, - CXXCtorType CtorType, - FunctionArgList &Args) { +void CIRGenFunction::emitCtorPrologue(const CXXConstructorDecl *CD, + CXXCtorType CtorType, + FunctionArgList &Args) { if (CD->isDelegatingConstructor()) - return buildDelegatingCXXConstructorCall(CD, Args); + return emitDelegatingCXXConstructorCall(CD, Args); const CXXRecordDecl *ClassDecl = CD->getParent(); @@ -617,7 +616,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CGM.getCodeGenOpts().OptimizationLevel > 0 && isInitializerOfDynamicClass(*B)) llvm_unreachable("NYI"); - buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); + emitBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); } if (BaseCtorContinueBB) { @@ -632,7 +631,7 @@ void CIRGenFunction::buildCtorPrologue(const CXXConstructorDecl *CD, CGM.getCodeGenOpts().OptimizationLevel > 0 && isInitializerOfDynamicClass(*B)) llvm_unreachable("NYI"); - buildBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); + emitBaseInitializer(getLoc(CD->getBeginLoc()), *this, ClassDecl, *B); } CXXThisValue = OldThis; @@ -847,13 +846,13 @@ Address CIRGenFunction::LoadCXXThisAddress() { return Address(LoadCXXThis(), CXXThisAlignment); } -void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, - Expr *Init) { +void CIRGenFunction::emitInitializerForField(FieldDecl *Field, LValue LHS, + Expr *Init) { QualType FieldType = Field->getType(); switch (getEvaluationKind(FieldType)) { case cir::TEK_Scalar: if (LHS.isSimple()) { - buildExprAsInit(Init, Field, LHS, false); + emitExprAsInit(Init, Field, LHS, false); } else { llvm_unreachable("NYI"); } @@ -868,7 +867,7 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS, AggValueSlot::IsNotZeroed, // Checks are made by the code that calls constructor. 
                      AggValueSlot::IsSanitizerChecked);
-    buildAggExpr(Init, Slot);
+    emitAggExpr(Init, Slot);
     break;
   }
 }
@@ -881,7 +880,7 @@ void CIRGenFunction::buildInitializerForField(FieldDecl *Field, LValue LHS,
   llvm_unreachable("NYI");
 }
 
-void CIRGenFunction::buildDelegateCXXConstructorCall(
+void CIRGenFunction::emitDelegateCXXConstructorCall(
     const CXXConstructorDecl *Ctor, CXXCtorType CtorType,
     const FunctionArgList &Args, SourceLocation Loc) {
   CallArgList DelegateArgs;
@@ -904,17 +903,16 @@ void CIRGenFunction::buildDelegateCXXConstructorCall(
   for (; I != E; ++I) {
     const VarDecl *param = *I;
     // FIXME: per-argument source location
-    buildDelegateCallArg(DelegateArgs, param, Loc);
+    emitDelegateCallArg(DelegateArgs, param, Loc);
   }
 
-  buildCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false,
-                          /*Delegating=*/true, This, DelegateArgs,
-                          AggValueSlot::MayOverlap, Loc,
-                          /*NewPointerIsChecked=*/true);
+  emitCXXConstructorCall(Ctor, CtorType, /*ForVirtualBase=*/false,
+                         /*Delegating=*/true, This, DelegateArgs,
+                         AggValueSlot::MayOverlap, Loc,
+                         /*NewPointerIsChecked=*/true);
 }
 
-void CIRGenFunction::buildImplicitAssignmentOperatorBody(
-    FunctionArgList &Args) {
+void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
   const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
   const Stmt *RootS = AssignOp->getBody();
   assert(isa<CompoundStmt>(RootS) &&
@@ -931,7 +929,7 @@ void CIRGenFunction::buildImplicitAssignmentOperatorBody(
   AM.finish();
 }
 
-void CIRGenFunction::buildForwardingCallToLambda(
+void CIRGenFunction::emitForwardingCallToLambda(
     const CXXMethodDecl *callOperator, CallArgList &callArgs) {
   // Get the address of the call operator.
   const auto &calleeFnInfo =
@@ -956,19 +954,19 @@ void CIRGenFunction::buildForwardingCallToLambda(
 
   // Now emit our call.
   auto callee = CIRGenCallee::forDirect(calleePtr, GlobalDecl(callOperator));
-  RValue RV = buildCall(calleeFnInfo, callee, returnSlot, callArgs);
+  RValue RV = emitCall(calleeFnInfo, callee, returnSlot, callArgs);
 
   // If necessary, copy the returned value into the slot.
   if (!resultType->isVoidType() && returnSlot.isNull()) {
     if (getLangOpts().ObjCAutoRefCount && resultType->isObjCRetainableType())
       llvm_unreachable("NYI");
-    buildReturnOfRValue(*currSrcLoc, RV, resultType);
+    emitReturnOfRValue(*currSrcLoc, RV, resultType);
   } else {
     llvm_unreachable("NYI");
   }
 }
 
-void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+void CIRGenFunction::emitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
   const CXXRecordDecl *Lambda = MD->getParent();
 
   // Start building arguments for forwarding call
@@ -982,7 +980,7 @@ void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
 
   // Add the rest of the parameters.
  for (auto *Param : MD->parameters())
-    buildDelegateCallArg(CallArgs, Param, Param->getBeginLoc());
+    emitDelegateCallArg(CallArgs, Param, Param->getBeginLoc());
 
   const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
   // For a generic lambda, find the corresponding call operator specialization
@@ -998,10 +996,10 @@ void CIRGenFunction::buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
     assert(CorrespondingCallOpSpecialization);
     CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
   }
-  buildForwardingCallToLambda(CallOp, CallArgs);
+  emitForwardingCallToLambda(CallOp, CallArgs);
 }
 
-void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
+void CIRGenFunction::emitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
   if (MD->isVariadic()) {
     // Codegen for LLVM doesn't emit code for this as well; it says:
     // FIXME: Making this work correctly is nasty because it requires either
@@ -1010,7 +1008,7 @@ void CIRGenFunction::buildLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
     llvm_unreachable("NYI");
   }
 
-  buildLambdaDelegatingInvokeBody(MD);
+  emitLambdaDelegatingInvokeBody(MD);
 }
 
 void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr,
@@ -1022,8 +1020,8 @@ void CIRGenFunction::destroyCXXObject(CIRGenFunction &CGF, Address addr,
   // dtors which shall be removed on later CIR passes. However, only remove this
   // assertion once we get a testcase to exercise this path.
   assert(!dtor->isTrivial());
-  CGF.buildCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
-                             /*Delegating=*/false, addr, type);
+  CGF.emitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+                            /*Delegating=*/false, addr, type);
 }
 
 static bool FieldHasTrivialDestructorBody(ASTContext &Context,
@@ -1115,7 +1113,7 @@ static bool CanSkipVTablePointerInitialization(CIRGenFunction &CGF,
 }
 
 /// Emits the body of the current destructor.
-void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) {
+void CIRGenFunction::emitDestructorBody(FunctionArgList &Args) {
   const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
   CXXDtorType DtorType = CurGD.getDtorType();
@@ -1148,9 +1146,8 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) {
     EnterDtorCleanups(Dtor, Dtor_Deleting);
     if (HaveInsertPoint()) {
       QualType ThisTy = Dtor->getFunctionObjectParameterType();
-      buildCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
-                             /*Delegating=*/false, LoadCXXThisAddress(),
-                             ThisTy);
+      emitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+                            /*Delegating=*/false, LoadCXXThisAddress(), ThisTy);
     }
     return;
   }
@@ -1188,9 +1185,8 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) {
 
     if (!isTryBody) {
       QualType ThisTy = Dtor->getFunctionObjectParameterType();
-      buildCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
-                             /*Delegating=*/false, LoadCXXThisAddress(),
-                             ThisTy);
+      emitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+                            /*Delegating=*/false, LoadCXXThisAddress(), ThisTy);
       break;
     }
@@ -1217,7 +1213,7 @@ void CIRGenFunction::buildDestructorBody(FunctionArgList &Args) {
   if (isTryBody)
     llvm_unreachable("NYI");
   else if (Body)
-    (void)buildStmt(Body, /*useCurrentScope=*/true);
+    (void)emitStmt(Body, /*useCurrentScope=*/true);
   else {
     assert(Dtor->isImplicit() && "bodyless dtor not implicit");
     // nothing to do besides what's in the epilogue
@@ -1242,7 +1238,7 @@ namespace {
 [[maybe_unused]] mlir::Value LoadThisForDtorDelete(CIRGenFunction &CGF,
                                                    const CXXDestructorDecl *DD) {
   if (Expr *ThisArg = DD->getOperatorDeleteThisArg())
-    return CGF.buildScalarExpr(ThisArg);
+    return CGF.emitScalarExpr(ThisArg);
   return CGF.LoadCXXThis();
 }
 
@@ -1253,9 +1249,9 @@ struct CallDtorDelete final : EHScopeStack::Cleanup {
   void Emit(CIRGenFunction &CGF, Flags flags) override {
     const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
     const CXXRecordDecl *ClassDecl = Dtor->getParent();
-    CGF.buildDeleteCall(Dtor->getOperatorDelete(),
-                        LoadThisForDtorDelete(CGF, Dtor),
-                        CGF.getContext().getTagDeclType(ClassDecl));
+    CGF.emitDeleteCall(Dtor->getOperatorDelete(),
+                       LoadThisForDtorDelete(CGF, Dtor),
+                       CGF.getContext().getTagDeclType(ClassDecl));
   }
 };
 } // namespace
@@ -1276,7 +1272,7 @@ class DestroyField final : public EHScopeStack::Cleanup {
     Address thisValue = CGF.LoadCXXThisAddress();
     QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
     LValue ThisLV = CGF.makeAddrLValue(thisValue, RecordTy);
-    LValue LV = CGF.buildLValueForField(ThisLV, field);
+    LValue LV = CGF.emitLValueForField(ThisLV, field);
     assert(LV.isSimple());
 
     CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
@@ -1417,13 +1413,13 @@ struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup {
     // We are calling the destructor from within the constructor.
     // Therefore, "this" should have the expected type.
QualType ThisTy = Dtor->getFunctionObjectParameterType(); - CGF.buildCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, - /*Delegating=*/true, Addr, ThisTy); + CGF.emitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, + /*Delegating=*/true, Addr, ThisTy); } }; } // end anonymous namespace -void CIRGenFunction::buildDelegatingCXXConstructorCall( +void CIRGenFunction::emitDelegatingCXXConstructorCall( const CXXConstructorDecl *Ctor, const FunctionArgList &Args) { assert(Ctor->isDelegatingConstructor()); @@ -1436,7 +1432,7 @@ void CIRGenFunction::buildDelegatingCXXConstructorCall( // Checks are made by the code that calls constructor. AggValueSlot::IsSanitizerChecked); - buildAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); + emitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); const CXXRecordDecl *ClassDecl = Ctor->getParent(); if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) { @@ -1448,13 +1444,12 @@ void CIRGenFunction::buildDelegatingCXXConstructorCall( } } -void CIRGenFunction::buildCXXDestructorCall(const CXXDestructorDecl *DD, - CXXDtorType Type, - bool ForVirtualBase, - bool Delegating, Address This, - QualType ThisTy) { - CGM.getCXXABI().buildDestructorCall(*this, DD, Type, ForVirtualBase, - Delegating, This, ThisTy); +void CIRGenFunction::emitCXXDestructorCall(const CXXDestructorDecl *DD, + CXXDtorType Type, + bool ForVirtualBase, bool Delegating, + Address This, QualType ThisTy) { + CGM.getCXXABI().emitDestructorCall(*this, DD, Type, ForVirtualBase, + Delegating, This, ThisTy); } mlir::Value CIRGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, @@ -1633,9 +1628,9 @@ bool CIRGenFunction::shouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) { TypeName); } -void CIRGenFunction::buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD, - mlir::Value VTable, - SourceLocation Loc) { +void CIRGenFunction::emitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, + mlir::Value VTable, + SourceLocation Loc) { if (SanOpts.has(SanitizerKind::CFIVCall)) { llvm_unreachable("NYI"); } else if (CGM.getCodeGenOpts().WholeProgramVTables && @@ -1661,7 +1656,7 @@ mlir::Value CIRGenFunction::getVTablePtr(mlir::Location Loc, Address This, return VTable; } -Address CIRGenFunction::buildCXXMemberDataPointerAddress( +Address CIRGenFunction::emitCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo) { assert(!cir::MissingFeatures::cxxABI()); @@ -1744,14 +1739,14 @@ CIRGenModule::getVBaseAlignment(CharUnits actualDerivedAlign, /// \param arrayBegin an arrayType* /// \param zeroInitialize true if each element should be /// zero-initialized before it is constructed -void CIRGenFunction::buildCXXAggrConstructorCall( +void CIRGenFunction::emitCXXAggrConstructorCall( const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) { QualType elementType; - auto numElements = buildArrayLength(arrayType, elementType, arrayBegin); - buildCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, - NewPointerIsChecked, zeroInitialize); + auto numElements = emitArrayLength(arrayType, elementType, arrayBegin); + emitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, + NewPointerIsChecked, zeroInitialize); } /// Emit a loop to call a particular constructor for each of several members @@ -1763,7 +1758,7 @@ void CIRGenFunction::buildCXXAggrConstructorCall( /// \param arrayBase a T*, 
where T is the type constructed by ctor /// \param zeroInitialize true if each element should be /// zero-initialized before it is constructed -void CIRGenFunction::buildCXXAggrConstructorCall( +void CIRGenFunction::emitCXXAggrConstructorCall( const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase, const CXXConstructExpr *E, bool NewPointerIsChecked, bool zeroInitialize) { // It's legal for numElements to be zero. This can happen both @@ -1837,9 +1832,9 @@ void CIRGenFunction::buildCXXAggrConstructorCall( AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed, NewPointerIsChecked ? AggValueSlot::IsSanitizerChecked : AggValueSlot::IsNotSanitizerChecked); - buildCXXConstructorCall(ctor, Ctor_Complete, - /*ForVirtualBase=*/false, - /*Delegating=*/false, currAVS, E); + emitCXXConstructorCall(ctor, Ctor_Complete, + /*ForVirtualBase=*/false, + /*Delegating=*/false, currAVS, E); builder.create(loc); }); } @@ -1872,12 +1867,12 @@ static bool canEmitDelegateCallArgs(CIRGenFunction &CGF, return true; } -void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, - bool ForVirtualBase, - bool Delegating, - AggValueSlot ThisAVS, - const clang::CXXConstructExpr *E) { +void CIRGenFunction::emitCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, + bool ForVirtualBase, + bool Delegating, + AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E) { CallArgList Args; Address This = ThisAVS.getAddress(); LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace(); @@ -1901,15 +1896,15 @@ void CIRGenFunction::buildCXXConstructorCall(const clang::CXXConstructorDecl *D, ? EvaluationOrder::ForceLeftToRight : EvaluationOrder::Default; - buildCallArgs(Args, FPT, E->arguments(), E->getConstructor(), - /*ParamsToSkip*/ 0, Order); + emitCallArgs(Args, FPT, E->arguments(), E->getConstructor(), + /*ParamsToSkip*/ 0, Order); - buildCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, - ThisAVS.mayOverlap(), E->getExprLoc(), - ThisAVS.isSanitizerChecked()); + emitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args, + ThisAVS.mayOverlap(), E->getExprLoc(), + ThisAVS.isSanitizerChecked()); } -void CIRGenFunction::buildCXXConstructorCall( +void CIRGenFunction::emitCXXConstructorCall( const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, Address This, CallArgList &Args, AggValueSlot::Overlap_t Overlap, SourceLocation Loc, @@ -1918,8 +1913,8 @@ void CIRGenFunction::buildCXXConstructorCall( const auto *ClassDecl = D->getParent(); if (!NewPointerIsChecked) - buildTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), - getContext().getRecordType(ClassDecl), CharUnits::Zero()); + emitTypeCheck(CIRGenFunction::TCK_ConstructorCall, Loc, This.getPointer(), + getContext().getRecordType(ClassDecl), CharUnits::Zero()); // If this is a call to a trivial default constructor: // In LLVM: do nothing. 
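// -- Editorial note (not part of the patch) --------------------------------
// Hypothetical inputs for the two paths above: emitCXXAggrConstructorCall
// lowers a non-trivial element constructor into a per-element loop, while
// the trivial-default-constructor check lets arrays like `t` emit no calls.
struct Widget { int n; Widget() : n(0) {} }; // non-trivial default ctor
struct Trivial { int n; };                   // trivial default ctor
void locals() {
  Widget w[8];  // one constructor-call loop over 8 elements
  Trivial t[8]; // trivial default construction: nothing emitted
  (void)w;
  (void)t;
}
// ---------------------------------------------------------------------------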
@@ -1953,7 +1948,7 @@ void CIRGenFunction::buildCXXConstructorCall(
       Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
   CIRGenCallee Callee = CIRGenCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
   cir::CIRCallOpInterface C;
-  buildCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc));
+  emitCall(Info, Callee, ReturnValueSlot(), Args, &C, false, getLoc(Loc));
 
   assert(CGM.getCodeGenOpts().OptimizationLevel == 0 ||
          ClassDecl->isDynamicClass() || Type == Ctor_Base ||
          "vtable assumption loads NYI");
 }
 
-void CIRGenFunction::buildInheritedCXXConstructorCall(
+void CIRGenFunction::emitInheritedCXXConstructorCall(
     const CXXConstructorDecl *D, bool ForVirtualBase, Address This,
     bool InheritedFromVBase, const CXXInheritedCtorInitExpr *E) {
   CallArgList Args;
@@ -1986,24 +1981,23 @@
       assert(getContext().hasSameUnqualifiedType(
           OuterCtor->getParamDecl(Param->getFunctionScopeIndex())->getType(),
           Param->getType()));
-      buildDelegateCallArg(Args, Param, E->getLocation());
+      emitDelegateCallArg(Args, Param, E->getLocation());
 
       // Forward __attribute__(pass_object_size).
       if (Param->hasAttr<PassObjectSizeAttr>()) {
         auto *POSParam = SizeArguments[Param];
         assert(POSParam && "missing pass_object_size value for forwarding");
-        buildDelegateCallArg(Args, POSParam, E->getLocation());
+        emitDelegateCallArg(Args, POSParam, E->getLocation());
       }
     }
   }
 
-  buildCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/ false,
-                          This, Args, AggValueSlot::MayOverlap,
-                          E->getLocation(),
-                          /*NewPointerIsChecked*/ true);
+  emitCXXConstructorCall(D, Ctor_Base, ForVirtualBase, /*Delegating*/ false,
+                         This, Args, AggValueSlot::MayOverlap, E->getLocation(),
+                         /*NewPointerIsChecked*/ true);
 }
 
-void CIRGenFunction::buildInlinedInheritingCXXConstructorCall(
+void CIRGenFunction::emitInlinedInheritingCXXConstructorCall(
     const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase,
     bool Delegating, CallArgList &Args) {
   GlobalDecl GD(Ctor, CtorType);
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 96dce5e2960f..4e0a305a502c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -33,8 +33,8 @@ using namespace cir;
 /// or with the labeled block if already solved.
 ///
 /// Track on a scope basis the goto's we need to fix later.
-cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc,
-                                                    JumpDest Dest) {
+cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
+                                                   JumpDest Dest) {
   // Remove this once we go for making sure unreachable code is
   // well modeled (or not).
   assert(builder.getInsertionBlock() && "not yet implemented");
@@ -47,8 +47,8 @@ cir::BrOp CIRGenFunction::buildBranchThroughCleanup(mlir::Location Loc,
 }
 
 /// Emits all the code to cause the given temporary to be cleaned up.
-void CIRGenFunction::buildCXXTemporary(const CXXTemporary *Temporary,
-                                       QualType TempType, Address Ptr) {
+void CIRGenFunction::emitCXXTemporary(const CXXTemporary *Temporary,
+                                      QualType TempType, Address Ptr) {
   pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
               /*useEHCleanup*/ true);
 }
@@ -248,9 +248,9 @@ static void destroyOptimisticNormalEntry(CIRGenFunction &CGF,
   llvm_unreachable("NYI");
 }
 
-static void buildCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn,
-                         EHScopeStack::Cleanup::Flags flags,
-                         Address ActiveFlag) {
+static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn,
+                        EHScopeStack::Cleanup::Flags flags,
+                        Address ActiveFlag) {
   auto emitCleanup = [&]() {
     // Ask the cleanup to emit itself.
     assert(CGF.HaveInsertPoint() && "expected insertion point");
@@ -409,7 +409,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
     destroyOptimisticNormalEntry(*this, Scope);
     EHStack.popCleanup();
     Scope.markEmitted();
-    buildCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+    emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
 
     // Otherwise, the best approach is to thread everything through
     // the cleanup block and then try to clean up after ourselves.
@@ -454,7 +454,7 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
       auto yield = cast<cir::YieldOp>(ehEntry->getTerminator());
       builder.setInsertionPoint(yield);
-      buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+      emitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
     }
 
     if (CPI)
@@ -478,12 +478,12 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
             currYield->getParentOp()->getParentOfType<cir::TryOp>()) {
       mlir::Block *resumeBlockToPatch = tryToPatch.getCatchUnwindEntryBlock();
-      buildEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch,
-                         tryToPatch.getLoc());
+      emitEHResumeBlock(/*isCleanup=*/true, resumeBlockToPatch,
+                        tryToPatch.getLoc());
       }
     }
 
-    buildCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+    emitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
     currBlock = blockToPatch;
   }
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
index 86366f6bfa15..6b6ed53faafa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCoroutine.cpp
@@ -123,12 +123,12 @@ struct ParamReferenceReplacerRAII {
 } // namespace
 
 // Emit coroutine intrinsic and patch up arguments of the token type.
-RValue CIRGenFunction::buildCoroutineIntrinsic(const CallExpr *E,
-                                               unsigned int IID) {
+RValue CIRGenFunction::emitCoroutineIntrinsic(const CallExpr *E,
+                                              unsigned int IID) {
   llvm_unreachable("NYI");
 }
 
-RValue CIRGenFunction::buildCoroutineFrame() {
+RValue CIRGenFunction::emitCoroutineFrame() {
   if (CurCoro.Data && CurCoro.Data->CoroBegin) {
     return RValue::get(CurCoro.Data->CoroBegin);
   }
@@ -136,10 +136,10 @@
 
 static mlir::LogicalResult
-buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S,
-                        Stmt *Body,
-                        const CIRGenFunction::LexicalScope *currLexScope) {
-  if (CGF.buildStmt(Body, /*useCurrentScope=*/true).failed())
+emitBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S,
+                       Stmt *Body,
+                       const CIRGenFunction::LexicalScope *currLexScope) {
+  if (CGF.emitStmt(Body, /*useCurrentScope=*/true).failed())
     return mlir::failure();
   // Note that LLVM checks CanFallthrough by looking into the availability
   // of the insert block which is kinda brittle and unintuitive, seems to be
@@ -152,14 +152,14 @@ buildBodyAndFallthrough(CIRGenFunction &CGF, const CoroutineBodyStmt &S,
   const bool CanFallthrough = !currLexScope->hasCoreturn();
   if (CanFallthrough)
     if (Stmt *OnFallthrough = S.getFallthroughHandler())
-      if (CGF.buildStmt(OnFallthrough, /*useCurrentScope=*/true).failed())
+      if (CGF.emitStmt(OnFallthrough, /*useCurrentScope=*/true).failed())
         return mlir::failure();
 
   return mlir::success();
 }
 
-cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc,
-                                                   mlir::Value nullPtr) {
+cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
+                                                  mlir::Value nullPtr) {
   auto int32Ty = builder.getUInt32Ty();
 
   auto &TI = CGM.getASTContext().getTargetInfo();
@@ -183,7 +183,7 @@ cir::CallOp CIRGenFunction::buildCoroIDBuiltinCall(mlir::Location loc,
                        nullPtr, nullPtr, nullPtr});
 }
 
-cir::CallOp CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) {
+cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
   auto boolTy = builder.getBoolTy();
   auto int32Ty = builder.getUInt32Ty();
@@ -204,8 +204,8 @@ cir::CallOp CIRGenFunction::buildCoroAllocBuiltinCall(mlir::Location loc) {
 }
 
 cir::CallOp
-CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc,
-                                          mlir::Value coroframeAddr) {
+CIRGenFunction::emitCoroBeginBuiltinCall(mlir::Location loc,
+                                         mlir::Value coroframeAddr) {
   auto int32Ty = builder.getUInt32Ty();
   mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroBegin);
@@ -225,8 +225,8 @@ CIRGenFunction::buildCoroBeginBuiltinCall(mlir::Location loc,
       mlir::ValueRange{CurCoro.Data->CoroId.getResult(), coroframeAddr});
 }
 
-cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc,
-                                                    mlir::Value nullPtr) {
+cir::CallOp CIRGenFunction::emitCoroEndBuiltinCall(mlir::Location loc,
+                                                   mlir::Value nullPtr) {
   auto boolTy = builder.getBoolTy();
   mlir::Operation *builtin = CGM.getGlobalValue(CGM.builtinCoroEnd);
@@ -246,19 +246,19 @@ cir::CallOp CIRGenFunction::buildCoroEndBuiltinCall(mlir::Location loc,
 }
 
 mlir::LogicalResult
-CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) {
+CIRGenFunction::emitCoroutineBody(const CoroutineBodyStmt &S) {
   auto openCurlyLoc = getLoc(S.getBeginLoc());
   auto nullPtrCst = builder.getNullPtr(VoidPtrTy, openCurlyLoc);
 
   auto Fn = dyn_cast<cir::FuncOp>(CurFn);
   assert(Fn && "other callables NYI");
   Fn.setCoroutineAttr(mlir::UnitAttr::get(&getMLIRContext()));
-  auto coroId = buildCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
+  auto coroId =
emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst); createCoroData(*this, CurCoro, coroId); // Backend is allowed to elide memory allocations, to help it, emit // auto mem = coro.alloc() ? 0 : ... allocation code ...; - auto coroAlloc = buildCoroAllocBuiltinCall(openCurlyLoc); + auto coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc); // Initialize address of coroutine frame to null auto astVoidPtrTy = CGM.getASTContext().VoidPtrTy; @@ -275,13 +275,13 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { /*thenBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { builder.CIRBaseBuilderTy::createStore( - loc, buildScalarExpr(S.getAllocate()), + loc, emitScalarExpr(S.getAllocate()), storeAddr); builder.create(loc); }); CurCoro.Data->CoroBegin = - buildCoroBeginBuiltinCall( + emitCoroBeginBuiltinCall( openCurlyLoc, builder.create(openCurlyLoc, allocaTy, storeAddr)) .getResult(); @@ -310,12 +310,12 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // evolution of coroutine TS may allow promise constructor to observe // parameter copies. for (auto *PM : S.getParamMoves()) { - if (buildStmt(PM, /*useCurrentScope=*/true).failed()) + if (emitStmt(PM, /*useCurrentScope=*/true).failed()) return mlir::failure(); ParamReplacer.addCopy(cast(PM)); } - if (buildStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); // ReturnValue should be valid as long as the coroutine's return type @@ -331,22 +331,22 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) { // otherwise the call to get_return_object wouldn't be in front // of initial_suspend. if (ReturnValue.isValid()) { - buildAnyExprToMem(S.getReturnValue(), ReturnValue, - S.getReturnValue()->getType().getQualifiers(), - /*IsInit*/ true); + emitAnyExprToMem(S.getReturnValue(), ReturnValue, + S.getReturnValue()->getType().getQualifiers(), + /*IsInit*/ true); } // FIXME(cir): EHStack.pushCleanup(EHCleanup); CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::init; - if (buildStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInitSuspendStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); CurCoro.Data->CurrentAwaitKind = cir::AwaitKind::user; - // FIXME(cir): wrap buildBodyAndFallthrough with try/catch bits. + // FIXME(cir): wrap emitBodyAndFallthrough with try/catch bits. 
  if (S.getExceptionHandler())
     assert(!cir::MissingFeatures::unhandledException() && "NYI");
-  if (buildBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed())
+  if (emitBodyAndFallthrough(*this, S, S.getBody(), currLexScope).failed())
     return mlir::failure();
 
   // Note that LLVM checks CanFallthrough by looking into the availability
@@ -364,7 +364,7 @@ CIRGenFunction::buildCoroutineBody(const CoroutineBodyStmt &S) {
     {
       mlir::OpBuilder::InsertionGuard guard(builder);
       builder.setInsertionPoint(CurCoro.Data->FinalSuspendInsPoint);
-      if (buildStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true)
+      if (emitStmt(S.getFinalSuspendStmt(), /*useCurrentScope=*/true)
               .failed())
         return mlir::failure();
     }
@@ -406,11 +406,11 @@ struct LValueOrRValue {
 };
 } // namespace
 static LValueOrRValue
-buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro,
-                       CoroutineSuspendExpr const &S, cir::AwaitKind Kind,
-                       AggValueSlot aggSlot, bool ignoreResult,
-                       mlir::Block *scopeParentBlock,
-                       mlir::Value &tmpResumeRValAddr, bool forLValue) {
+emitSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro,
+                      CoroutineSuspendExpr const &S, cir::AwaitKind Kind,
+                      AggValueSlot aggSlot, bool ignoreResult,
+                      mlir::Block *scopeParentBlock,
+                      mlir::Value &tmpResumeRValAddr, bool forLValue) {
   auto *E = S.getCommonExpr();
 
   auto awaitBuild = mlir::success();
@@ -435,7 +435,7 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro,
         // to LLVM dialect (or some other MLIR dialect)
 
         // An invalid suspendRet indicates "void returning await_suspend"
-        auto suspendRet = CGF.buildScalarExpr(S.getSuspendExpr());
+        auto suspendRet = CGF.emitScalarExpr(S.getSuspendExpr());
 
         // Veto suspension if requested by bool returning await_suspend.
         if (suspendRet) {
@@ -461,14 +461,14 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro,
         // FIXME(cir): the alloca for the resume expr should be placed in the
         // enclosing cir.scope instead.
         if (forLValue)
-          awaitRes.LV = CGF.buildLValue(S.getResumeExpr());
+          awaitRes.LV = CGF.emitLValue(S.getResumeExpr());
         else {
           awaitRes.RV =
-              CGF.buildAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
+              CGF.emitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
           if (!awaitRes.RV.isIgnored()) {
             // Create the alloca in the block before the scope wrapping
             // cir.await.
-            tmpResumeRValAddr = CGF.buildAlloca(
+            tmpResumeRValAddr = CGF.emitAlloca(
                 "__coawait_resume_rval", awaitRes.RV.getScalarVal().getType(),
                 loc, CharUnits::One(),
                 builder.getBestAllocaInsertPoint(scopeParentBlock));
@@ -490,10 +490,10 @@ buildSuspendExpression(CIRGenFunction &CGF, CGCoroData &Coro,
   return awaitRes;
 }
 
-static RValue buildSuspendExpr(CIRGenFunction &CGF,
-                               const CoroutineSuspendExpr &E,
-                               cir::AwaitKind kind, AggValueSlot aggSlot,
-                               bool ignoreResult) {
+static RValue emitSuspendExpr(CIRGenFunction &CGF,
+                              const CoroutineSuspendExpr &E,
+                              cir::AwaitKind kind, AggValueSlot aggSlot,
+                              bool ignoreResult) {
   RValue rval;
   auto scopeLoc = CGF.getLoc(E.getSourceRange());
@@ -508,9 +508,9 @@ static RValue buildSuspendExpr(CIRGenFunction &CGF,
 
   // No need to explicitly wrap this into a scope since the AST already uses an
   // ExprWithCleanups, which will wrap this into a cir.scope anyways.
- rval = buildSuspendExpression(CGF, *CGF.CurCoro.Data, E, kind, aggSlot, - ignoreResult, currEntryBlock, tmpResumeRValAddr, - /*forLValue*/ false) + rval = emitSuspendExpression(CGF, *CGF.CurCoro.Data, E, kind, aggSlot, + ignoreResult, currEntryBlock, tmpResumeRValAddr, + /*forLValue*/ false) .RV; if (ignoreResult || rval.isIgnored()) @@ -529,21 +529,21 @@ static RValue buildSuspendExpr(CIRGenFunction &CGF, return rval; } -RValue CIRGenFunction::buildCoawaitExpr(const CoawaitExpr &E, - AggValueSlot aggSlot, - bool ignoreResult) { - return buildSuspendExpr(*this, E, CurCoro.Data->CurrentAwaitKind, aggSlot, - ignoreResult); +RValue CIRGenFunction::emitCoawaitExpr(const CoawaitExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + return emitSuspendExpr(*this, E, CurCoro.Data->CurrentAwaitKind, aggSlot, + ignoreResult); } -RValue CIRGenFunction::buildCoyieldExpr(const CoyieldExpr &E, - AggValueSlot aggSlot, - bool ignoreResult) { - return buildSuspendExpr(*this, E, cir::AwaitKind::yield, aggSlot, - ignoreResult); +RValue CIRGenFunction::emitCoyieldExpr(const CoyieldExpr &E, + AggValueSlot aggSlot, + bool ignoreResult) { + return emitSuspendExpr(*this, E, cir::AwaitKind::yield, aggSlot, + ignoreResult); } -mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { +mlir::LogicalResult CIRGenFunction::emitCoreturnStmt(CoreturnStmt const &S) { ++CurCoro.Data->CoreturnCount; currLexScope->setCoreturn(); @@ -553,9 +553,9 @@ mlir::LogicalResult CIRGenFunction::buildCoreturnStmt(CoreturnStmt const &S) { // with a void expression for side effects. // FIXME(cir): add scope // RunCleanupsScope cleanupScope(*this); - buildIgnoredExpr(RV); + emitIgnoredExpr(RV); } - if (buildStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getPromiseCall(), /*useCurrentScope=*/true).failed()) return mlir::failure(); // Create a new return block (if not existent) and add a branch to // it. The actual return instruction is only inserted during current diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp index 5ed32d800bbb..e6bcb0d6bf04 100644 --- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp @@ -33,8 +33,8 @@ using namespace clang; using namespace clang::CIRGen; CIRGenFunction::AutoVarEmission -CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, - mlir::OpBuilder::InsertPoint ip) { +CIRGenFunction::emitAutoVarAlloca(const VarDecl &D, + mlir::OpBuilder::InsertPoint ip) { QualType Ty = D.getType(); assert( Ty.getAddressSpace() == LangAS::Default || @@ -51,7 +51,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, // If the type is variably-modified, emit all the VLA sizes for it. if (Ty->isVariablyModifiedType()) - buildVariablyModifiedType(Ty); + emitVariablyModifiedType(Ty); assert(!cir::MissingFeatures::generateDebugInfo()); assert(!cir::MissingFeatures::cxxABI()); @@ -92,7 +92,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D, (!NRVO && !D.isEscapingByref() && CGM.isTypeConstant(Ty, /*ExcludeCtor=*/true, /*ExcludeDtor=*/false))) { - buildStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); + emitStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); // Signal this condition to later callbacks. 
    emission.Addr = Address::invalid();
@@ -199,7 +199,7 @@ CIRGenFunction::buildAutoVarAlloca(const VarDecl &D,
   assert(!cir::MissingFeatures::generateDebugInfo());
 
   if (D.hasAttr<AnnotateAttr>())
-    buildVarAnnotations(&D, address.emitRawPointer());
+    emitVarAnnotations(&D, address.emitRawPointer());
 
   // TODO(cir): in LLVM this calls @llvm.lifetime.end.
   assert(!cir::MissingFeatures::shouldEmitLifetimeMarkers());
@@ -254,7 +254,7 @@ static void emitStoresForConstant(CIRGenModule &CGM, const VarDecl &D,
   builder.createStore(loc, builder.getConstant(loc, constant), addr);
 }
 
-void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) {
+void CIRGenFunction::emitAutoVarInit(const AutoVarEmission &emission) {
   assert(emission.Variable && "emission was not valid!");
 
   // If this was emitted as a global constant, we're done.
@@ -328,7 +328,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) {
   if (!constant || isa<CXXTemporaryObjectExpr>(Init)) {
     initializeWhatIsTechnicallyUninitialized(Loc);
     LValue lv = LValue::makeAddr(Loc, type, AlignmentSource::Decl);
-    buildExprAsInit(Init, &D, lv);
+    emitExprAsInit(Init, &D, lv);
     // In case lv has uses, it means we indeed initialized something
     // out of it while trying to build the expression; mark it as such.
     auto addr = lv.getAddress().getPointer();
@@ -350,7 +350,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) {
     assert(Init && "expected initializer");
     auto initLoc = getLoc(Init->getSourceRange());
     lv.setNonGC(true);
-    return buildStoreThroughLValue(
+    return emitStoreThroughLValue(
         RValue::get(builder.getConstant(initLoc, typedConstant)), lv);
   }
 
@@ -358,7 +358,7 @@ void CIRGenFunction::buildAutoVarInit(const AutoVarEmission &emission) {
                        typedConstant, /*IsAutoInit=*/false);
 }
 
-void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) {
+void CIRGenFunction::emitAutoVarCleanups(const AutoVarEmission &emission) {
   assert(emission.Variable && "emission was not valid!");
 
   // If this was emitted as a global constant, we're done.
@@ -372,7 +372,7 @@ void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) {
 
   // Check the type for a cleanup.
   if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
-    buildAutoVarTypeCleanup(emission, dtorKind);
+    emitAutoVarTypeCleanup(emission, dtorKind);
 
   // In GC mode, honor objc_precise_lifetime.
   if (getContext().getLangOpts().getGC() != LangOptions::NonGC &&
@@ -389,13 +389,13 @@ void CIRGenFunction::buildAutoVarCleanups(const AutoVarEmission &emission) {
 /// Emit code and set up symbol table for a variable declaration with auto,
 /// register, or no storage class specifier. These turn into simple stack
 /// objects, or globals, depending on the target.
-void CIRGenFunction::buildAutoVarDecl(const VarDecl &D) {
-  AutoVarEmission emission = buildAutoVarAlloca(D);
-  buildAutoVarInit(emission);
-  buildAutoVarCleanups(emission);
+void CIRGenFunction::emitAutoVarDecl(const VarDecl &D) {
+  AutoVarEmission emission = emitAutoVarAlloca(D);
+  emitAutoVarInit(emission);
+  emitAutoVarCleanups(emission);
 }
 
-void CIRGenFunction::buildVarDecl(const VarDecl &D) {
+void CIRGenFunction::emitVarDecl(const VarDecl &D) {
   if (D.hasExternalStorage()) {
     // Don't emit it now, allow it to be emitted lazily on its first use.
     return;
@@ -415,16 +415,16 @@ void CIRGenFunction::buildVarDecl(const VarDecl &D) {
   // some variables even if we can constant-evaluate them because
   // we can't guarantee every translation unit will constant-evaluate them.
- return buildStaticVarDecl(D, Linkage); + return emitStaticVarDecl(D, Linkage); } if (D.getType().getAddressSpace() == LangAS::opencl_local) - return CGM.getOpenCLRuntime().buildWorkGroupLocalVarDecl(*this, D); + return CGM.getOpenCLRuntime().emitWorkGroupLocalVarDecl(*this, D); assert(D.hasLocalStorage()); CIRGenFunction::VarDeclContext varDeclCtx{*this, &D}; - return buildAutoVarDecl(D); + return emitAutoVarDecl(D); } static std::string getStaticDeclName(CIRGenModule &CGM, const VarDecl &D) { @@ -618,8 +618,8 @@ cir::GlobalOp CIRGenFunction::addInitializerToStaticVarDecl( return GV; } -void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, - cir::GlobalLinkageKind Linkage) { +void CIRGenFunction::emitStaticVarDecl(const VarDecl &D, + cir::GlobalLinkageKind Linkage) { // Check to see if we already have a global variable for this // declaration. This can happen when double-emitting function // bodies, e.g. with complete and base constructors. @@ -701,34 +701,34 @@ void CIRGenFunction::buildStaticVarDecl(const VarDecl &D, } } -void CIRGenFunction::buildNullabilityCheck(LValue LHS, mlir::Value RHS, - SourceLocation Loc) { +void CIRGenFunction::emitNullabilityCheck(LValue LHS, mlir::Value RHS, + SourceLocation Loc) { if (!SanOpts.has(SanitizerKind::NullabilityAssign)) return; llvm_unreachable("NYI"); } -void CIRGenFunction::buildScalarInit(const Expr *init, mlir::Location loc, - LValue lvalue, bool capturedByInit) { +void CIRGenFunction::emitScalarInit(const Expr *init, mlir::Location loc, + LValue lvalue, bool capturedByInit) { Qualifiers::ObjCLifetime lifetime = Qualifiers::ObjCLifetime::OCL_None; assert(!cir::MissingFeatures::objCLifetime()); if (!lifetime) { SourceLocRAIIObject Loc{*this, loc}; - mlir::Value value = buildScalarExpr(init); + mlir::Value value = emitScalarExpr(init); if (capturedByInit) llvm_unreachable("NYI"); assert(!cir::MissingFeatures::emitNullabilityCheck()); - buildStoreThroughLValue(RValue::get(value), lvalue, true); + emitStoreThroughLValue(RValue::get(value), lvalue, true); return; } llvm_unreachable("NYI"); } -void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, - LValue lvalue, bool capturedByInit) { +void CIRGenFunction::emitExprAsInit(const Expr *init, const ValueDecl *D, + LValue lvalue, bool capturedByInit) { SourceLocRAIIObject Loc{*this, getLoc(init->getSourceRange())}; if (capturedByInit) llvm_unreachable("NYI"); @@ -736,22 +736,22 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, QualType type = D->getType(); if (type->isReferenceType()) { - RValue rvalue = buildReferenceBindingToExpr(init); + RValue rvalue = emitReferenceBindingToExpr(init); if (capturedByInit) llvm_unreachable("NYI"); - buildStoreThroughLValue(rvalue, lvalue); + emitStoreThroughLValue(rvalue, lvalue); return; } switch (CIRGenFunction::getEvaluationKind(type)) { case cir::TEK_Scalar: - buildScalarInit(init, getLoc(D->getSourceRange()), lvalue); + emitScalarInit(init, getLoc(D->getSourceRange()), lvalue); return; case cir::TEK_Complex: { - mlir::Value complex = buildComplexExpr(init); + mlir::Value complex = emitComplexExpr(init); if (capturedByInit) llvm_unreachable("NYI"); - buildStoreOfComplex(getLoc(init->getExprLoc()), complex, lvalue, - /*init*/ true); + emitStoreOfComplex(getLoc(init->getExprLoc()), complex, lvalue, + /*init*/ true); return; } case cir::TEK_Aggregate: @@ -764,16 +764,16 @@ void CIRGenFunction::buildExprAsInit(const Expr *init, const ValueDecl *D, else assert(false && "Only VarDecl implemented so far"); // TODO: 
how can we delay here if D is captured by its initializer?
-    buildAggExpr(init,
-                 AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
-                                         AggValueSlot::DoesNotNeedGCBarriers,
-                                         AggValueSlot::IsNotAliased, Overlap));
+    emitAggExpr(init,
+                AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
+                                        AggValueSlot::DoesNotNeedGCBarriers,
+                                        AggValueSlot::IsNotAliased, Overlap));
     return;
   }
   llvm_unreachable("bad evaluation kind");
 }
 
-void CIRGenFunction::buildDecl(const Decl &D) {
+void CIRGenFunction::emitDecl(const Decl &D) {
   switch (D.getKind()) {
   case Decl::ImplicitConceptSpecialization:
   case Decl::HLSLBuffer:
@@ -875,11 +875,11 @@ void CIRGenFunction::buildDecl(const Decl &D) {
     const VarDecl &VD = cast<VarDecl>(D);
     assert(VD.isLocalVarDecl() &&
            "Should not see file-scope variables inside a function!");
-    buildVarDecl(VD);
+    emitVarDecl(VD);
     if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
       for (auto *B : DD->bindings())
         if (auto *HD = B->getHoldingVar())
-          buildVarDecl(*HD);
+          emitVarDecl(*HD);
     return;
   }
 
@@ -893,7 +893,7 @@ void CIRGenFunction::buildDecl(const Decl &D) {
     if (auto *DI = getDebugInfo())
       assert(!cir::MissingFeatures::generateDebugInfo());
     if (Ty->isVariablyModifiedType())
-      buildVariablyModifiedType(Ty);
+      emitVariablyModifiedType(Ty);
     return;
   }
 }
@@ -1095,12 +1095,11 @@ void CIRGenFunction::pushRegularPartialArrayCleanup(mlir::Value arrayBegin,
 /// \param useEHCleanup - whether to push an EH cleanup to destroy
 ///   the remaining elements in case the destruction of a single
 ///   element throws
-void CIRGenFunction::buildArrayDestroy(mlir::Value begin, mlir::Value end,
-                                       QualType elementType,
-                                       CharUnits elementAlign,
-                                       Destroyer *destroyer,
-                                       bool checkZeroLength,
-                                       bool useEHCleanup) {
+void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end,
+                                      QualType elementType,
+                                      CharUnits elementAlign,
+                                      Destroyer *destroyer,
+                                      bool checkZeroLength, bool useEHCleanup) {
   assert(!elementType->isArrayType());
   if (checkZeroLength) {
     llvm_unreachable("NYI");
   }
@@ -1148,7 +1147,7 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type,
   if (!arrayType)
     return destroyer(*this, addr, type);
 
-  auto length = buildArrayLength(arrayType, type, addr);
+  auto length = emitArrayLength(arrayType, type, addr);
 
   CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement(
       getContext().getTypeSizeInChars(type));
@@ -1170,8 +1169,8 @@ void CIRGenFunction::emitDestroy(Address addr, QualType type,
   auto begin = addr.getPointer();
   mlir::Value end; // Use this for future non-constant counts.
-  buildArrayDestroy(begin, end, type, elementAlign, destroyer, checkZeroLength,
-                    useEHCleanupForArray);
+  emitArrayDestroy(begin, end, type, elementAlign, destroyer, checkZeroLength,
+                   useEHCleanupForArray);
   if (constantCount.use_empty())
     constantCount.erase();
 }
@@ -1196,7 +1195,7 @@ void CIRGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
 }
 
 /// Enter a destroy cleanup for the given local variable.
-void CIRGenFunction::buildAutoVarTypeCleanup(
+void CIRGenFunction::emitAutoVarTypeCleanup(
     const CIRGenFunction::AutoVarEmission &emission,
     QualType::DestructionKind dtorKind) {
   assert(dtorKind != QualType::DK_none);
diff --git a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp
index 007a5a3b2932..0b9fa80536de 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDeclCXX.cpp
@@ -20,7 +20,7 @@ using namespace clang;
 using namespace clang::CIRGen;
 using namespace cir;
 
-void CIRGenModule::buildCXXGlobalInitFunc() {
+void CIRGenModule::emitCXXGlobalInitFunc() {
   while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
     CXXGlobalInits.pop_back();
 
@@ -31,9 +31,9 @@ void CIRGenModule::buildCXXGlobalInitFunc() {
   assert(0 && "NYE");
 }
 
-void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D,
-                                                 cir::GlobalOp Addr,
-                                                 bool PerformInit) {
+void CIRGenModule::emitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+                                                cir::GlobalOp Addr,
+                                                bool PerformInit) {
   // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
   // __constant__ and __shared__ variables defined in namespace scope,
   // that are of class type, cannot have a non-empty constructor. All
@@ -49,5 +49,5 @@ void CIRGenModule::buildCXXGlobalVarDeclInitFunc(const VarDecl *D,
   if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
     return;
 
-  buildCXXGlobalVarDeclInit(D, Addr, PerformInit);
+  emitCXXGlobalVarDeclInit(D, Addr, PerformInit);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenException.cpp b/clang/lib/CIR/CodeGen/CIRGenException.cpp
index 84a4176b36e9..b7a10fb4ef96 100644
--- a/clang/lib/CIR/CodeGen/CIRGenException.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenException.cpp
@@ -191,16 +191,16 @@ const EHPersonality &EHPersonality::get(CIRGenFunction &CGF) {
   return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(FD));
 }
 
-void CIRGenFunction::buildCXXThrowExpr(const CXXThrowExpr *E) {
+void CIRGenFunction::emitCXXThrowExpr(const CXXThrowExpr *E) {
   if (const Expr *SubExpr = E->getSubExpr()) {
     QualType ThrowType = SubExpr->getType();
     if (ThrowType->isObjCObjectPointerType()) {
       llvm_unreachable("NYI");
     } else {
-      CGM.getCXXABI().buildThrow(*this, E);
+      CGM.getCXXABI().emitThrow(*this, E);
     }
   } else {
-    CGM.getCXXABI().buildRethrow(*this, /*isNoReturn=*/true);
+    CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true);
   }
 
   // In LLVM codegen the expression emitters expect to leave this
@@ -225,10 +225,10 @@ struct FreeException final : EHScopeStack::Cleanup {
 } // end anonymous namespace
 
 // Emits an exception expression into the given location. This
-// differs from buildAnyExprToMem only in that, if a final copy-ctor
+// differs from emitAnyExprToMem only in that, if a final copy-ctor
 // call is required, an exception within that copy ctor causes
 // std::terminate to be invoked.
-void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) {
+void CIRGenFunction::emitAnyExprToExn(const Expr *e, Address addr) {
   // Make sure the exception object is cleaned up if there's an
   // exception during initialization.
   pushFullExprCleanup<FreeException>(EHCleanup, addr.getPointer());
@@ -247,8 +247,8 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) {
   // evaluated but before the exception is caught. But the best way
   // to handle that is to teach EmitAggExpr to do the final copy
   // differently if it can't be elided.
-  buildAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
-                    /*IsInit*/ true);
+  emitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+                   /*IsInit*/ true);
 
   // Deactivate the cleanup block.
   auto op = typedAddr.getPointer().getDefiningOp();
@@ -257,9 +257,9 @@ void CIRGenFunction::buildAnyExprToExn(const Expr *e, Address addr) {
   DeactivateCleanupBlock(cleanup, op);
 }
 
-void CIRGenFunction::buildEHResumeBlock(bool isCleanup,
-                                        mlir::Block *ehResumeBlock,
-                                        mlir::Location loc) {
+void CIRGenFunction::emitEHResumeBlock(bool isCleanup,
+                                       mlir::Block *ehResumeBlock,
+                                       mlir::Location loc) {
   auto ip = getBuilder().saveInsertionPoint();
   getBuilder().setInsertionPointToStart(ehResumeBlock);
 
@@ -293,11 +293,11 @@ mlir::Block *CIRGenFunction::getEHResumeBlock(bool isCleanup,
   if (!ehResumeBlock->empty())
     return ehResumeBlock;
 
-  buildEHResumeBlock(isCleanup, ehResumeBlock, tryOp.getLoc());
+  emitEHResumeBlock(isCleanup, ehResumeBlock, tryOp.getLoc());
   return ehResumeBlock;
 }
 
-mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) {
+mlir::LogicalResult CIRGenFunction::emitCXXTryStmt(const CXXTryStmt &S) {
   auto loc = getLoc(S.getSourceRange());
   mlir::OpBuilder::InsertPoint scopeIP;
 
@@ -313,14 +313,14 @@ mlir::LogicalResult CIRGenFunction::buildCXXTryStmt(const CXXTryStmt &S) {
   {
     mlir::OpBuilder::InsertionGuard guard(getBuilder());
     getBuilder().restoreInsertionPoint(scopeIP);
-    r = buildCXXTryStmtUnderScope(S);
+    r = emitCXXTryStmtUnderScope(S);
     getBuilder().create(loc);
   }
   return r;
 }
 
 mlir::LogicalResult
-CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) {
+CIRGenFunction::emitCXXTryStmtUnderScope(const CXXTryStmt &S) {
   const llvm::Triple &T = getTarget().getTriple();
   // If we encounter a try statement in an OpenMP target region offloaded to
   // a GPU, we treat it as a basic block.
@@ -354,7 +354,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) {
       // Don't emit the code right away for catch clauses, for
       // now create the regions and consume the try scope result.
      // Note that clauses are later populated in
-      // CIRGenFunction::buildLandingPad.
+      // CIRGenFunction::emitLandingPad.
       [&](mlir::OpBuilder &b, mlir::Location loc,
           mlir::OperationState &result) {
         mlir::OpBuilder::InsertionGuard guard(b);
@@ -384,7 +384,7 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) {
   {
     CIRGenFunction::LexicalScope tryBodyScope{
         *this, loc, getBuilder().getInsertionBlock()};
-    if (buildStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed())
+    if (emitStmt(S.getTryBlock(), /*useCurrentScope=*/true).failed())
       return mlir::failure();
   }
 }
@@ -402,9 +402,8 @@ CIRGenFunction::buildCXXTryStmtUnderScope(const CXXTryStmt &S) {
 
 /// Emit the structure of the dispatch block for the given catch scope.
 /// It is an invariant that the dispatch block already exists.
-static void buildCatchDispatchBlock(CIRGenFunction &CGF,
-                                    EHCatchScope &catchScope,
-                                    cir::TryOp tryOp) {
+static void emitCatchDispatchBlock(CIRGenFunction &CGF,
+                                   EHCatchScope &catchScope, cir::TryOp tryOp) {
   if (EHPersonality::get(CGF).isWasmPersonality())
     llvm_unreachable("NYI");
   if (EHPersonality::get(CGF).usesFuncletPads())
@@ -522,7 +521,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
   }
 
   // Emit the structure of the EH dispatch for this catch.
-  buildCatchDispatchBlock(*this, CatchScope, tryOp);
+  emitCatchDispatchBlock(*this, CatchScope, tryOp);
 
   // Copy the handler blocks off before we pop the EH stack. Emitting
   // the handlers might scribble on this memory.
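// -- Editorial note (not part of the patch) --------------------------------
// Hypothetical source shape for the try-statement emission above: the try
// body is emitted under its own scope (emitCXXTryStmtUnderScope) and each
// handler becomes a catch region whose clause emitLandingPad populates later.
struct Err { int code; };
int guarded(int x) {
  try {
    if (x < 0)
      throw Err{x}; // routed through emitCXXThrowExpr
    return x;
  } catch (const Err &e) {
    return e.code;  // handler body emitted by exitCXXTryStmt
  }
}
// ---------------------------------------------------------------------------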
@@ -569,7 +568,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
     assert(!cir::MissingFeatures::incrementProfileCounter());
 
     // Perform the body of the catch.
-    (void)buildStmt(C->getHandlerBlock(), /*useCurrentScope=*/true);
+    (void)emitStmt(C->getHandlerBlock(), /*useCurrentScope=*/true);
 
     // [except.handle]p11:
     //   The currently handled exception is rethrown if control
@@ -620,7 +619,7 @@ void CIRGenFunction::exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
   llvm_unreachable("Invalid EHScope Kind!");
 }
 
-mlir::Operation *CIRGenFunction::buildLandingPad(cir::TryOp tryOp) {
+mlir::Operation *CIRGenFunction::emitLandingPad(cir::TryOp tryOp) {
   assert(EHStack.requiresLandingPad());
   assert(!CGM.getLangOpts().IgnoreExceptions &&
          "LandingPad should not be emitted when -fignore-exceptions are in "
@@ -881,7 +880,7 @@ mlir::Operation *CIRGenFunction::getInvokeDestImpl(cir::TryOp tryOp) {
     llvm::errs() << "PersonalityFn: " << Personality.PersonalityFn << "\n";
     llvm_unreachable("NYI");
   } else {
-    LP = buildLandingPad(tryOp);
+    LP = emitLandingPad(tryOp);
   }
 
   assert(LP);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 94ed2c54f1ae..7844c3571128 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -43,7 +43,7 @@ using namespace clang;
 using namespace clang::CIRGen;
 using namespace cir;
 
-static cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) {
+static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) {
   const auto *FD = cast<FunctionDecl>(GD.getDecl());
 
   if (FD->hasAttr<WeakRefAttr>()) {
@@ -56,17 +56,17 @@ static cir::FuncOp buildFunctionDeclPointer(CIRGenModule &CGM, GlobalDecl GD) {
   return V;
 }
 
-static Address buildPreserveStructAccess(CIRGenFunction &CGF, LValue base,
-                                         Address addr, const FieldDecl *field) {
+static Address emitPreserveStructAccess(CIRGenFunction &CGF, LValue base,
+                                        Address addr, const FieldDecl *field) {
   llvm_unreachable("NYI");
 }
 
 /// Get the address of a zero-sized field within a record. The resulting address
 /// doesn't necessarily have the right type.
-static Address buildAddrOfFieldStorage(CIRGenFunction &CGF, Address Base,
-                                       const FieldDecl *field,
-                                       llvm::StringRef fieldName,
-                                       unsigned fieldIndex) {
+static Address emitAddrOfFieldStorage(CIRGenFunction &CGF, Address Base,
+                                      const FieldDecl *field,
+                                      llvm::StringRef fieldName,
+                                      unsigned fieldIndex) {
   if (field->isZeroSize(CGF.getContext()))
     llvm_unreachable("NYI");
 
@@ -112,11 +112,11 @@ static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
   return false;
 }
 
-static Address buildPointerWithAlignment(const Expr *expr,
-                                         LValueBaseInfo *baseInfo,
-                                         TBAAAccessInfo *tbaaInfo,
-                                         KnownNonNull_t isKnownNonNull,
-                                         CIRGenFunction &cgf) {
+static Address emitPointerWithAlignment(const Expr *expr,
+                                        LValueBaseInfo *baseInfo,
+                                        TBAAAccessInfo *tbaaInfo,
+                                        KnownNonNull_t isKnownNonNull,
+                                        CIRGenFunction &cgf) {
   // We allow this with ObjC object pointers because of fragile ABIs.
   assert(expr->getType()->isPointerType() ||
          expr->getType()->isObjCObjectPointerType());
@@ -125,7 +125,7 @@ static Address buildPointerWithAlignment(const Expr *expr,
   // Casts:
   if (const CastExpr *CE = dyn_cast<CastExpr>(expr)) {
     if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
-      cgf.CGM.buildExplicitCastExprType(ECE, &cgf);
+      cgf.CGM.emitExplicitCastExprType(ECE, &cgf);
 
     switch (CE->getCastKind()) {
     // Non-converting casts (but not C's implicit conversion from void*).
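// -- Editorial note (not part of the patch) --------------------------------
// Hypothetical expressions covering the cast cases emitPointerWithAlignment
// dispatches on above: a plain pointer conversion, an array-to-pointer decay
// (CK_ArrayToPointerDecay), and a derived-to-base adjustment that goes
// through getAddressOfBaseClass (CK_DerivedToBase).
struct BaseRec { int b; };
struct DerivedRec : BaseRec { int d; };
void pointerCases(DerivedRec *dp, int (&arr)[4]) {
  void *v = dp;     // pointer cast: alignment taken from the subexpression
  int *q = arr;     // array-to-pointer decay
  BaseRec *r = dp;  // derived-to-base: base-class address computation
  (void)v;
  (void)q;
  (void)r;
}
// ---------------------------------------------------------------------------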
@@ -139,7 +139,7 @@ static Address buildPointerWithAlignment(const Expr *expr,
       assert(!cir::MissingFeatures::tbaa());

       LValueBaseInfo innerBaseInfo;
-      Address addr = cgf.buildPointerWithAlignment(
+      Address addr = cgf.emitPointerWithAlignment(
           CE->getSubExpr(), &innerBaseInfo, tbaaInfo, isKnownNonNull);
       if (baseInfo)
         *baseInfo = innerBaseInfo;
@@ -181,7 +181,7 @@ static Address buildPointerWithAlignment(const Expr *expr,

     // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
     case CK_ArrayToPointerDecay:
-      return cgf.buildArrayToPointerDecay(CE->getSubExpr());
+      return cgf.emitArrayToPointerDecay(CE->getSubExpr());

     case CK_UncheckedDerivedToBase:
     case CK_DerivedToBase: {
@@ -189,7 +189,7 @@ static Address buildPointerWithAlignment(const Expr *expr,
       // conservatively pretend that the complete object is of the base class
       // type.
       assert(!cir::MissingFeatures::tbaa());
-      Address Addr = cgf.buildPointerWithAlignment(CE->getSubExpr(), baseInfo);
+      Address Addr = cgf.emitPointerWithAlignment(CE->getSubExpr(), baseInfo);
       auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
       return cgf.getAddressOfBaseClass(
           Addr, Derived, CE->path_begin(), CE->path_end(),
@@ -207,7 +207,7 @@ static Address buildPointerWithAlignment(const Expr *expr,
   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(expr)) {
     // TODO(cir): maybe we should use cir.unary for pointers here instead.
     if (UO->getOpcode() == UO_AddrOf) {
-      LValue LV = cgf.buildLValue(UO->getSubExpr());
+      LValue LV = cgf.emitLValue(UO->getSubExpr());
       if (baseInfo)
         *baseInfo = LV.getBaseInfo();
       assert(!cir::MissingFeatures::tbaa());
@@ -232,7 +232,7 @@ static Address buildPointerWithAlignment(const Expr *expr,

   // Otherwise, use the alignment of the type.
   return cgf.makeNaturalAddressForPointer(
-      cgf.buildScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
+      cgf.emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
       /*ForPointeeType=*/true, baseInfo, tbaaInfo, isKnownNonNull);
 }

@@ -264,8 +264,8 @@ static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
       .isVolatileQualified();
 }

-LValue CIRGenFunction::buildLValueForBitField(LValue base,
-                                              const FieldDecl *field) {
+LValue CIRGenFunction::emitLValueForBitField(LValue base,
+                                             const FieldDecl *field) {
   LValueBaseInfo BaseInfo = base.getBaseInfo();
   const RecordDecl *rec = field->getParent();
@@ -294,12 +294,11 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base,
                         TBAAAccessInfo());
 }

-LValue CIRGenFunction::buildLValueForField(LValue base,
-                                           const FieldDecl *field) {
+LValue CIRGenFunction::emitLValueForField(LValue base, const FieldDecl *field) {
   LValueBaseInfo BaseInfo = base.getBaseInfo();

   if (field->isBitField())
-    return buildLValueForBitField(base, field);
+    return emitLValueForBitField(base, field);

   // Fields of may-alias structures are may-alias themselves.
   // FIXME: this should get propagated down through anonymous structs and unions.
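// Illustrative only: the field kinds that select between the paths above. A
// bit-field member makes emitLValueForField forward to emitLValueForBitField,
// while an ordinary member gets its address via emitAddrOfFieldStorage.
struct FieldDemo {
  unsigned flag : 1; // bit-field path
  int plain;         // ordinary field path
};
int field_demo(FieldDemo &s) { return s.flag + s.plain; }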
@@ -334,7 +333,7 @@ LValue CIRGenFunction::buildLValueForField(LValue base, unsigned fieldIndex = field->getFieldIndex(); if (CGM.LambdaFieldToName.count(field)) fieldName = CGM.LambdaFieldToName[field]; - addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); + addr = emitAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); if (CGM.getCodeGenOpts().StrictVTablePointers && hasAnyVptr(FieldType, getContext())) @@ -358,10 +357,10 @@ LValue CIRGenFunction::buildLValueForField(LValue base, if (CGM.LambdaFieldToName.count(field)) fieldName = CGM.LambdaFieldToName[field]; - addr = buildAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); + addr = emitAddrOfFieldStorage(*this, addr, field, fieldName, fieldIndex); } else // Remember the original struct field index - addr = buildPreserveStructAccess(*this, base, addr, field); + addr = emitPreserveStructAccess(*this, base, addr, field); } // If this is a reference field, load the reference right now. @@ -370,8 +369,8 @@ LValue CIRGenFunction::buildLValueForField(LValue base, LValue RefLVal = makeAddrLValue(addr, FieldType, FieldBaseInfo); if (RecordCVR & Qualifiers::Volatile) RefLVal.getQuals().addVolatile(); - addr = buildLoadOfReference(RefLVal, getLoc(field->getSourceRange()), - &FieldBaseInfo); + addr = emitLoadOfReference(RefLVal, getLoc(field->getSourceRange()), + &FieldBaseInfo); // Qualifiers on the struct don't apply to the referencee. RecordCVR = 0; @@ -401,18 +400,18 @@ LValue CIRGenFunction::buildLValueForField(LValue base, return LV; } -LValue CIRGenFunction::buildLValueForFieldInitialization( +LValue CIRGenFunction::emitLValueForFieldInitialization( LValue Base, const clang::FieldDecl *Field, llvm::StringRef FieldName) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) - return buildLValueForField(Base, Field); + return emitLValueForField(Base, Field); auto &layout = CGM.getTypes().getCIRGenRecordLayout(Field->getParent()); unsigned FieldIndex = layout.getCIRFieldNo(Field); - Address V = buildAddrOfFieldStorage(*this, Base.getAddress(), Field, - FieldName, FieldIndex); + Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field, FieldName, + FieldIndex); // Make sure that the address is pointing to the right type. auto memTy = getTypes().convertTypeForMem(FieldType); @@ -428,8 +427,7 @@ LValue CIRGenFunction::buildLValueForFieldInitialization( return makeAddrLValue(V, FieldType, FieldBaseInfo); } -LValue -CIRGenFunction::buildCompoundLiteralLValue(const CompoundLiteralExpr *E) { +LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *E) { if (E->isFileScope()) { llvm_unreachable("NYI"); } @@ -443,8 +441,8 @@ CIRGenFunction::buildCompoundLiteralLValue(const CompoundLiteralExpr *E) { const Expr *InitExpr = E->getInitializer(); LValue Result = makeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); - buildAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), - /*Init*/ true); + emitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), + /*Init*/ true); // Block-scope compound literals are destroyed at the end of the enclosing // scope in C. 
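// Illustrative C input (not part of this patch): a block-scope compound
// literal reaches emitCompoundLiteralLValue above, and its initializer is
// evaluated into the temporary with emitAnyExprToMem.
struct CLPoint { int x, y; };
int cl_demo(void) {
  struct CLPoint *p = &(struct CLPoint){1, 2}; // compound literal l-value
  return p->x + p->y;
}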
@@ -465,7 +463,7 @@ static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) { return true; } -static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { +static CIRGenCallee emitDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { const auto *FD = cast(GD.getDecl()); if (auto builtinID = FD->getBuiltinID()) { @@ -502,7 +500,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { return CIRGenCallee::forBuiltin(builtinID, FD); } - auto CalleePtr = buildFunctionDeclPointer(CGM, GD); + auto CalleePtr = emitFunctionDeclPointer(CGM, GD); assert(!CGM.getLangOpts().CUDA && "NYI"); @@ -524,25 +522,25 @@ bool CIRGenFunction::hasBooleanRepresentation(QualType Ty) { return false; } -CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { +CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *E) { E = E->IgnoreParens(); // Look through function-to-pointer decay. if (const auto *ICE = dyn_cast(E)) { if (ICE->getCastKind() == CK_FunctionToPointerDecay || ICE->getCastKind() == CK_BuiltinFnToFnPtr) { - return buildCallee(ICE->getSubExpr()); + return emitCallee(ICE->getSubExpr()); } // Resolve direct calls. } else if (const auto *DRE = dyn_cast(E)) { const auto *FD = dyn_cast(DRE->getDecl()); assert(FD && "DeclRef referring to FunctionDecl only thing supported so far"); - return buildDirectCallee(CGM, FD); + return emitDirectCallee(CGM, FD); } else if (auto ME = dyn_cast(E)) { if (auto FD = dyn_cast(ME->getMemberDecl())) { - buildIgnoredExpr(ME->getBase()); - return buildDirectCallee(CGM, FD); + emitIgnoredExpr(ME->getBase()); + return emitDirectCallee(CGM, FD); } } @@ -553,11 +551,11 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { mlir::Value calleePtr; QualType functionType; if (auto ptrType = E->getType()->getAs()) { - calleePtr = buildScalarExpr(E); + calleePtr = emitScalarExpr(E); functionType = ptrType->getPointeeType(); } else { functionType = E->getType(); - calleePtr = buildLValue(E).getPointer(); + calleePtr = emitLValue(E).getPointer(); } assert(functionType->isFunctionType()); @@ -573,30 +571,30 @@ CIRGenCallee CIRGenFunction::buildCallee(const clang::Expr *E) { assert(false && "Nothing else supported yet!"); } -mlir::Value CIRGenFunction::buildToMemory(mlir::Value Value, QualType Ty) { +mlir::Value CIRGenFunction::emitToMemory(mlir::Value Value, QualType Ty) { // Bool has a different representation in memory than in registers. 
return Value; } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue) { +void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue) { // TODO: constant matrix type, no init, non temporal, TBAA - buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), lvalue.getBaseInfo(), - lvalue.getTBAAInfo(), false, false); + emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), false, false); } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, - bool isVolatile, QualType ty, - LValueBaseInfo baseInfo, - TBAAAccessInfo tbaaInfo, bool isInit, - bool isNontemporal) { - value = buildToMemory(value, ty); +void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr, + bool isVolatile, QualType ty, + LValueBaseInfo baseInfo, + TBAAAccessInfo tbaaInfo, bool isInit, + bool isNontemporal) { + value = emitToMemory(value, ty); LValue atomicLValue = LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo); if (ty->isAtomicType() || (!isInit && LValueIsSuitableForInlineAtomic(atomicLValue))) { - buildAtomicStore(RValue::get(value), atomicLValue, isInit); + emitAtomicStore(RValue::get(value), atomicLValue, isInit); return; } @@ -640,29 +638,29 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, Address addr, llvm_unreachable("NYI"); } -void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, - bool isInit) { +void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue, + bool isInit) { if (lvalue.getType()->isConstantMatrixType()) { llvm_unreachable("NYI"); } - buildStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), - lvalue.getType(), lvalue.getBaseInfo(), - lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); + emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), lvalue.getBaseInfo(), + lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); } /// Given an expression that represents a value lvalue, this /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. -RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { +RValue CIRGenFunction::emitLoadOfLValue(LValue LV, SourceLocation Loc) { assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); if (LV.isBitField()) - return buildLoadOfBitfieldLValue(LV, Loc); + return emitLoadOfBitfieldLValue(LV, Loc); if (LV.isSimple()) - return RValue::get(buildLoadOfScalar(LV, Loc)); + return RValue::get(emitLoadOfScalar(LV, Loc)); if (LV.isVectorElt()) { auto load = builder.createLoad(getLoc(Loc), LV.getVectorAddress()); @@ -671,7 +669,7 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { } if (LV.isExtVectorElt()) { - return buildLoadOfExtVectorElementLValue(LV); + return emitLoadOfExtVectorElementLValue(LV); } llvm_unreachable("NYI"); @@ -686,7 +684,7 @@ int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx, // If this is a reference to a subset of the elements of a vector, create an // appropriate shufflevector. 
-RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) {
+RValue CIRGenFunction::emitLoadOfExtVectorElementLValue(LValue LV) {
   mlir::Location loc = LV.getExtVectorPointer().getLoc();
   mlir::Value Vec = builder.createLoad(loc, LV.getExtVectorAddress());
@@ -719,8 +717,7 @@ RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) {
   return RValue::get(Vec);
 }

-RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
-                                                 SourceLocation Loc) {
+RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc) {
   const CIRGenBitFieldInfo &info = LV.getBitFieldInfo();

   // Get the output type.
@@ -737,8 +734,8 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
   return RValue::get(field);
 }

-void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src,
-                                                               LValue Dst) {
+void CIRGenFunction::emitStoreThroughExtVectorComponentLValue(RValue Src,
+                                                              LValue Dst) {
   mlir::Location loc = Dst.getExtVectorPointer().getLoc();

   // HLSL allows storing to scalar values through ExtVector component LValues.
@@ -811,8 +808,8 @@ void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src,
                       Dst.isVolatileQualified());
 }

-void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst,
-                                             bool isInit) {
+void CIRGenFunction::emitStoreThroughLValue(RValue Src, LValue Dst,
+                                            bool isInit) {
   if (!Dst.isSimple()) {
     if (Dst.isVectorElt()) {
       // Read/modify/write the vector, inserting the new element
@@ -825,11 +822,11 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst,
     }

     if (Dst.isExtVectorElt())
-      return buildStoreThroughExtVectorComponentLValue(Src, Dst);
+      return emitStoreThroughExtVectorComponentLValue(Src, Dst);

     assert(Dst.isBitField() && "NYI LValue type");
     mlir::Value result;
-    return buildStoreThroughBitfieldLValue(Src, Dst, result);
+    return emitStoreThroughBitfieldLValue(Src, Dst, result);
   }

   assert(Dst.isSimple() && "only implemented simple");
@@ -847,11 +844,11 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst,
   }

   assert(Src.isScalar() && "Can't emit an agg store with this method");
-  buildStoreOfScalar(Src.getScalarVal(), Dst, isInit);
+  emitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
 }

-void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
-                                                     mlir::Value &Result) {
+void CIRGenFunction::emitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                                    mlir::Value &Result) {
   // According to the AAPCS:
   // When a volatile bit-field is written, and its container does not overlap
   // with any non-bit-field member, its container must be read exactly once
@@ -876,8 +873,8 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
       Src.getScalarVal(), info, Dst.isVolatileQualified(), useVolatile);
 }

-static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
-                                       const VarDecl *VD) {
+static LValue emitGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
+                                      const VarDecl *VD) {
   QualType T = E->getType();

   // If it's thread_local, emit a call to its wrapper function instead.
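// Illustrative stores (not from this patch) that select the non-simple
// branches in emitStoreThroughLValue above: a vector element takes the
// read/modify/write path, and a bit-field goes through
// emitStoreThroughBitfieldLValue.
typedef int vec4 __attribute__((vector_size(16)));
struct BitsDemo { unsigned n : 3; };
void store_demo(vec4 &v, BitsDemo &b) {
  v[1] = 5; // vector-element l-value
  b.n = 2;  // bit-field l-value
}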
@@ -918,17 +915,17 @@ static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
   return LV;
 }

-static LValue buildCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD,
-                                       mlir::Value ThisValue) {
+static LValue emitCapturedFieldLValue(CIRGenFunction &CGF, const FieldDecl *FD,
+                                      mlir::Value ThisValue) {
   QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
   LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
-  return CGF.buildLValueForField(LV, FD);
+  return CGF.emitLValueForField(LV, FD);
 }

-static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E,
-                                      GlobalDecl GD) {
+static LValue emitFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E,
+                                     GlobalDecl GD) {
   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
-  auto funcOp = buildFunctionDeclPointer(CGF.CGM, GD);
+  auto funcOp = emitFunctionDeclPointer(CGF.CGM, GD);
   auto loc = CGF.getLoc(E->getSourceRange());
   CharUnits align = CGF.getContext().getDeclAlign(FD);
@@ -950,7 +947,7 @@ static LValue buildFunctionDeclLValue(CIRGenFunction &CGF, const Expr *E,
                               AlignmentSource::Decl);
 }

-LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) {
+LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *E) {
   const NamedDecl *ND = E->getDecl();
   QualType T = E->getType();
@@ -969,7 +966,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) {
     if (E->refersToEnclosingVariableOrCapture()) {
       VD = VD->getCanonicalDecl();
       if (auto *FD = LambdaCaptureFields.lookup(VD))
-        return buildCapturedFieldLValue(*this, FD, CXXABIThisValue);
+        return emitCapturedFieldLValue(*this, FD, CXXABIThisValue);
       assert(!cir::MissingFeatures::CGCapturedStmtInfo() && "NYI");
       // TODO[OpenMP]: Find the appropriate captured variable value and return
       // it.
@@ -995,7 +992,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) {
   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
     // Check if this is a global variable
     if (VD->hasLinkage() || VD->isStaticDataMember())
-      return buildGlobalVarDeclLValue(*this, E, VD);
+      return emitGlobalVarDeclLValue(*this, E, VD);

     Address addr = Address::invalid();
@@ -1034,8 +1031,8 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) {
     // Drill into reference types.
     LValue LV =
         VD->getType()->isReferenceType()
-            ? buildLoadOfReferenceLValue(addr, getLoc(E->getSourceRange()),
-                                         VD->getType(), AlignmentSource::Decl)
+            ? emitLoadOfReferenceLValue(addr, getLoc(E->getSourceRange()),
+                                        VD->getType(), AlignmentSource::Decl)
             : makeAddrLValue(addr, T, AlignmentSource::Decl);

     // Statics are defined as globals, so they are not included in the function's
@@ -1067,7 +1064,7 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) {
   }

   if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
-    LValue LV = buildFunctionDeclLValue(*this, E, FD);
+    LValue LV = emitFunctionDeclLValue(*this, E, FD);

     // Emit debuginfo for the function declaration if the target wants to.
if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) @@ -1082,9 +1079,9 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { if (const auto *BD = dyn_cast(ND)) { if (E->refersToEnclosingVariableOrCapture()) { auto *FD = LambdaCaptureFields.lookup(BD); - return buildCapturedFieldLValue(*this, FD, CXXABIThisValue); + return emitCapturedFieldLValue(*this, FD, CXXABIThisValue); } - return buildLValue(BD->getBinding()); + return emitLValue(BD->getBinding()); } // We can form DeclRefExprs naming GUID declarations when reconstituting @@ -1099,31 +1096,30 @@ LValue CIRGenFunction::buildDeclRefLValue(const DeclRefExpr *E) { } LValue -CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) { +CIRGenFunction::emitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { assert((E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) && "unexpected binary operator opcode"); auto baseAddr = Address::invalid(); if (E->getOpcode() == BO_PtrMemD) - baseAddr = buildLValue(E->getLHS()).getAddress(); + baseAddr = emitLValue(E->getLHS()).getAddress(); else - baseAddr = buildPointerWithAlignment(E->getLHS()); + baseAddr = emitPointerWithAlignment(E->getLHS()); const auto *memberPtrTy = E->getRHS()->getType()->castAs(); - auto memberPtr = buildScalarExpr(E->getRHS()); + auto memberPtr = emitScalarExpr(E->getRHS()); LValueBaseInfo baseInfo; // TODO(cir): add TBAA assert(!cir::MissingFeatures::tbaa()); - auto memberAddr = buildCXXMemberDataPointerAddress(E, baseAddr, memberPtr, - memberPtrTy, &baseInfo); + auto memberAddr = emitCXXMemberDataPointerAddress(E, baseAddr, memberPtr, + memberPtrTy, &baseInfo); return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo); } -LValue -CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { +LValue CIRGenFunction::emitExtVectorElementExpr(const ExtVectorElementExpr *E) { // Emit the base vector as an l-value. LValue base; @@ -1134,7 +1130,7 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { LValueBaseInfo BaseInfo; // TODO(cir): Support TBAA assert(!cir::MissingFeatures::tbaa()); - Address Ptr = buildPointerWithAlignment(E->getBase(), &BaseInfo); + Address Ptr = emitPointerWithAlignment(E->getBase(), &BaseInfo); const auto *PT = E->getBase()->getType()->castAs(); base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo); base.getQuals().removeObjCGCAttr(); @@ -1142,12 +1138,12 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. assert(E->getBase()->getType()->isVectorType()); - base = buildLValue(E->getBase()); + base = emitLValue(E->getBase()); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. assert(E->getBase()->getType()->isVectorType() && "Result must be a vector"); - mlir::Value Vec = buildScalarExpr(E->getBase()); + mlir::Value Vec = emitScalarExpr(E->getBase()); // Store the vector to memory (because LValue wants an address). QualType BaseTy = E->getBase()->getType(); @@ -1187,15 +1183,15 @@ CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) { base.getBaseInfo(), base.getTBAAInfo()); } -LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { +LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. 
if (E->getOpcode() == BO_Comma) { - buildIgnoredExpr(E->getLHS()); - return buildLValue(E->getRHS()); + emitIgnoredExpr(E->getLHS()); + return emitLValue(E->getRHS()); } if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) - return buildPointerToDataMemberBinaryExpr(E); + return emitPointerToDataMemberBinaryExpr(E); assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); @@ -1208,15 +1204,15 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { clang::Qualifiers::ObjCLifetime::OCL_None && "not implemented"); - RValue RV = buildAnyExpr(E->getRHS()); - LValue LV = buildLValue(E->getLHS()); + RValue RV = emitAnyExpr(E->getRHS()); + LValue LV = emitLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; if (LV.isBitField()) { mlir::Value result; - buildStoreThroughBitfieldLValue(RV, LV, result); + emitStoreThroughBitfieldLValue(RV, LV, result); } else { - buildStoreThroughLValue(RV, LV); + emitStoreThroughLValue(RV, LV); } if (getLangOpts().OpenMP) CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this, @@ -1225,7 +1221,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { } case cir::TEK_Complex: - return buildComplexAssignmentLValue(E); + return emitComplexAssignmentLValue(E); case cir::TEK_Aggregate: assert(0 && "not implemented"); } @@ -1234,11 +1230,11 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { /// Given an expression of pointer type, try to /// derive a more accurate bound on the alignment of the pointer. -Address CIRGenFunction::buildPointerWithAlignment( +Address CIRGenFunction::emitPointerWithAlignment( const Expr *expr, LValueBaseInfo *baseInfo, TBAAAccessInfo *tbaaInfo, KnownNonNull_t isKnownNonNull) { - Address addr = ::buildPointerWithAlignment(expr, baseInfo, tbaaInfo, - isKnownNonNull, *this); + Address addr = ::emitPointerWithAlignment(expr, baseInfo, tbaaInfo, + isKnownNonNull, *this); if (isKnownNonNull && !addr.isKnownNonNull()) addr.setKnownNonNull(); return addr; @@ -1256,15 +1252,15 @@ mlir::Value CIRGenFunction::evaluateExprAsBool(const Expr *E) { SourceLocation Loc = E->getExprLoc(); // TODO(cir): CGFPOptionsRAII for FP stuff. if (!E->getType()->isAnyComplexType()) - return buildScalarConversion(buildScalarExpr(E), E->getType(), BoolTy, Loc); + return emitScalarConversion(emitScalarExpr(E), E->getType(), BoolTy, Loc); llvm_unreachable("complex to scalar not implemented"); } -LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { +LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *E) { // __extension__ doesn't affect lvalue-ness. if (E->getOpcode() == UO_Extension) - return buildLValue(E->getSubExpr()); + return emitLValue(E->getSubExpr()); QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); switch (E->getOpcode()) { @@ -1276,7 +1272,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { LValueBaseInfo BaseInfo; // TODO: add TBAAInfo - Address Addr = buildPointerWithAlignment(E->getSubExpr(), &BaseInfo); + Address Addr = emitPointerWithAlignment(E->getSubExpr(), &BaseInfo); // Tag 'load' with deref attribute. if (auto loadOp = @@ -1291,7 +1287,7 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { } case UO_Real: case UO_Imag: { - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); assert(LV.isSimple() && "real/imag on non-ordinary l-value"); // __real is valid on scalars. This is a faster way of testing that. 
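// Illustrative only: unary operators that yield l-values, matching the cases
// handled by emitUnaryOpLValue around this hunk. GNU __real is a clang
// extension; _Complex in C++ likewise.
void unary_demo(int *p, _Complex double z) {
  *p = 1;         // UO_Deref: address via emitPointerWithAlignment
  __real z = 0.0; // UO_Real: component address via emitAddrOfRealComponent
  ++*p;           // UO_PreInc: emitScalarPrePostIncDec on the l-value
}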
@@ -1307,8 +1303,8 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { auto Loc = getLoc(E->getExprLoc()); Address Component = (E->getOpcode() == UO_Real - ? buildAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) - : buildAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); + ? emitAddrOfRealComponent(Loc, LV.getAddress(), LV.getType()) + : emitAddrOfImagComponent(Loc, LV.getAddress(), LV.getType())); // TODO(cir): TBAA info. assert(!cir::MissingFeatures::tbaa()); LValue ElemLV = makeAddrLValue(Component, T, LV.getBaseInfo()); @@ -1319,12 +1315,12 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { case UO_PreDec: { bool isInc = E->isIncrementOp(); bool isPre = E->isPrefix(); - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); if (E->getType()->isAnyComplexType()) { - buildComplexPrePostIncDec(E, LV, isInc, true /*isPre*/); + emitComplexPrePostIncDec(E, LV, isInc, true /*isPre*/); } else { - buildScalarPrePostIncDec(E, LV, isInc, isPre); + emitScalarPrePostIncDec(E, LV, isInc, isPre); } return LV; @@ -1334,46 +1330,46 @@ LValue CIRGenFunction::buildUnaryOpLValue(const UnaryOperator *E) { /// Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. -RValue CIRGenFunction::buildAnyExpr(const Expr *E, AggValueSlot aggSlot, - bool ignoreResult) { +RValue CIRGenFunction::emitAnyExpr(const Expr *E, AggValueSlot aggSlot, + bool ignoreResult) { switch (CIRGenFunction::getEvaluationKind(E->getType())) { case cir::TEK_Scalar: - return RValue::get(buildScalarExpr(E)); + return RValue::get(emitScalarExpr(E)); case cir::TEK_Complex: - return RValue::getComplex(buildComplexExpr(E)); + return RValue::getComplex(emitComplexExpr(E)); case cir::TEK_Aggregate: { if (!ignoreResult && aggSlot.isIgnored()) aggSlot = CreateAggTemp(E->getType(), getLoc(E->getSourceRange()), getCounterAggTmpAsString()); - buildAggExpr(E, aggSlot); + emitAggExpr(E, aggSlot); return aggSlot.asRValue(); } } llvm_unreachable("bad evaluation kind"); } -RValue CIRGenFunction::buildCallExpr(const clang::CallExpr *E, - ReturnValueSlot ReturnValue) { +RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue) { assert(!E->getCallee()->getType()->isBlockPointerType() && "ObjC Blocks NYI"); if (const auto *CE = dyn_cast(E)) - return buildCXXMemberCallExpr(CE, ReturnValue); + return emitCXXMemberCallExpr(CE, ReturnValue); assert(!dyn_cast(E) && "CUDA NYI"); if (const auto *CE = dyn_cast(E)) if (const CXXMethodDecl *MD = dyn_cast_or_null(CE->getCalleeDecl())) - return buildCXXOperatorMemberCallExpr(CE, MD, ReturnValue); + return emitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); - CIRGenCallee callee = buildCallee(E->getCallee()); + CIRGenCallee callee = emitCallee(E->getCallee()); if (callee.isBuiltin()) - return buildBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E, - ReturnValue); + return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), E, + ReturnValue); assert(!callee.isPsuedoDestructor() && "NYI"); - return buildCall(E->getCallee()->getType(), callee, E, ReturnValue); + return emitCall(E->getCallee()->getType(), callee, E, ReturnValue); } RValue CIRGenFunction::GetUndefRValue(QualType ty) { @@ -1398,18 +1394,18 @@ RValue CIRGenFunction::GetUndefRValue(QualType ty) { llvm_unreachable("bad evaluation kind"); } -LValue CIRGenFunction::buildStmtExprLValue(const StmtExpr *E) { +LValue CIRGenFunction::emitStmtExprLValue(const StmtExpr 
*E) { // Can only get l-value for message expression returning aggregate type - RValue RV = buildAnyExprToTemp(E); + RValue RV = emitAnyExprToTemp(E); return makeAddrLValue(RV.getAggregateAddress(), E->getType(), AlignmentSource::Decl); } -RValue CIRGenFunction::buildCall(clang::QualType CalleeType, - const CIRGenCallee &OrigCallee, - const clang::CallExpr *E, - ReturnValueSlot ReturnValue, - mlir::Value Chain) { +RValue CIRGenFunction::emitCall(clang::QualType CalleeType, + const CIRGenCallee &OrigCallee, + const clang::CallExpr *E, + ReturnValueSlot ReturnValue, + mlir::Value Chain) { // Get the actual function type. The callee type will always be a pointer to // function type or a block pointer type. assert(CalleeType->isFunctionPointerType() && @@ -1461,8 +1457,8 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, } } - buildCallArgs(Args, dyn_cast(FnType), E->arguments(), - E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); + emitCallArgs(Args, dyn_cast(FnType), E->arguments(), + E->getDirectCallee(), /*ParamsToSkip*/ 0, Order); const CIRGenFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( Args, FnType, /*ChainCall=*/Chain.getAsOpaquePointer()); @@ -1513,8 +1509,8 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, assert(!MustTailCall && "Must tail NYI"); cir::CIRCallOpInterface callOP; - RValue Call = buildCall(FnInfo, Callee, ReturnValue, Args, &callOP, - E == MustTailCall, getLoc(E->getExprLoc()), E); + RValue Call = emitCall(FnInfo, Callee, ReturnValue, Args, &callOP, + E == MustTailCall, getLoc(E->getExprLoc()), E); assert(!getDebugInfo() && "Debug Info NYI"); @@ -1522,21 +1518,21 @@ RValue CIRGenFunction::buildCall(clang::QualType CalleeType, } /// Emit code to compute the specified expression, ignoring the result. -void CIRGenFunction::buildIgnoredExpr(const Expr *E) { +void CIRGenFunction::emitIgnoredExpr(const Expr *E) { if (E->isPRValue()) - return (void)buildAnyExpr(E, AggValueSlot::ignored(), true); + return (void)emitAnyExpr(E, AggValueSlot::ignored(), true); // Just emit it as an l-value and drop the result. - buildLValue(E); + emitLValue(E); } -Address CIRGenFunction::buildArrayToPointerDecay(const Expr *E, - LValueBaseInfo *BaseInfo) { +Address CIRGenFunction::emitArrayToPointerDecay(const Expr *E, + LValueBaseInfo *BaseInfo) { assert(E->getType()->isArrayType() && "Array to pointer decay must have array source type!"); // Expressions of array type can't be bitfields or vector elements. 
- LValue LV = buildLValue(E); + LValue LV = emitLValue(E); Address Addr = LV.getAddress(); // If the array type was an incomplete type, we need to make sure @@ -1648,11 +1644,11 @@ static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, } static mlir::Value -buildArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, - mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, - ArrayRef indices, bool inbounds, - bool signedIndices, bool shouldDecay, - const llvm::Twine &name = "arrayidx") { +emitArraySubscriptPtr(CIRGenFunction &CGF, mlir::Location beginLoc, + mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, + ArrayRef indices, bool inbounds, + bool signedIndices, bool shouldDecay, + const llvm::Twine &name = "arrayidx") { assert(indices.size() == 1 && "cannot handle multiple indices yet"); auto idx = indices.back(); auto &CGM = CGF.getCIRGenModule(); @@ -1673,7 +1669,7 @@ static QualType getFixedSizeElementType(const ASTContext &ctx, return eltType; } -static Address buildArraySubscriptPtr( +static Address emitArraySubscriptPtr( CIRGenFunction &CGF, mlir::Location beginLoc, mlir::Location endLoc, Address addr, ArrayRef indices, QualType eltType, bool inbounds, bool signedIndices, mlir::Location loc, bool shouldDecay, @@ -1694,9 +1690,9 @@ static Address buildArraySubscriptPtr( auto LastIndex = getConstantIndexOrNull(indices.back()); if (!LastIndex || (!CGF.IsInPreservedAIRegion && !isPreserveAIArrayBase(CGF, Base))) { - eltPtr = buildArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), - addr.getElementType(), indices, inbounds, - signedIndices, shouldDecay, name); + eltPtr = emitArraySubscriptPtr(CGF, beginLoc, endLoc, addr.getPointer(), + addr.getElementType(), indices, inbounds, + signedIndices, shouldDecay, name); } else { // assert(!UnimplementedFeature::generateDebugInfo() && "NYI"); // assert(indices.size() == 1 && "cannot handle multiple indices yet"); @@ -1711,18 +1707,18 @@ static Address buildArraySubscriptPtr( return Address(eltPtr, CGF.getTypes().convertTypeForMem(eltType), eltAlign); } -LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, - bool Accessed) { +LValue CIRGenFunction::emitArraySubscriptExpr(const ArraySubscriptExpr *E, + bool Accessed) { // The index must always be an integer, which is not an aggregate. Emit it // in lexical order (this complexity is, sadly, required by C++17). mlir::Value IdxPre = - (E->getLHS() == E->getIdx()) ? buildScalarExpr(E->getIdx()) : nullptr; + (E->getLHS() == E->getIdx()) ? emitScalarExpr(E->getIdx()) : nullptr; bool SignedIndices = false; auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> mlir::Value { mlir::Value Idx = IdxPre; if (E->getLHS() != E->getIdx()) { assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); - Idx = buildScalarExpr(E->getIdx()); + Idx = emitScalarExpr(E->getIdx()); } QualType IdxTy = E->getIdx()->getType(); @@ -1745,7 +1741,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { - LValue lhs = buildLValue(E->getBase()); + LValue lhs = emitLValue(E->getBase()); auto index = EmitIdxAfterBase(/*Promote=*/false); return LValue::MakeVectorElt(lhs.getAddress(), index, E->getBase()->getType(), lhs.getBaseInfo(), @@ -1767,7 +1763,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // The base must be a pointer, which is not an aggregate. Emit // it. 
It needs to be emitted first in case it's what captures // the VLA bounds. - Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); + Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); // The element count here is the total number of non-VLA elements. @@ -1777,7 +1773,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, Idx = builder.createMul(Idx, numElements); QualType ptrType = E->getBase()->getType(); - Addr = buildArraySubscriptPtr( + Addr = emitArraySubscriptPtr( *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false, @@ -1796,14 +1792,14 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // For simple multidimensional array indexing, set the 'accessed' flag // for better bounds-checking of the base expression. if (const auto *ASE = dyn_cast(Array)) - ArrayLV = buildArraySubscriptExpr(ASE, /*Accessed=*/true); + ArrayLV = emitArraySubscriptExpr(ASE, /*Accessed=*/true); else - ArrayLV = buildLValue(Array); + ArrayLV = emitLValue(Array); auto Idx = EmitIdxAfterBase(/*Promote=*/true); // Propagate the alignment from the array itself to the result. QualType arrayType = Array->getType(); - Addr = buildArraySubscriptPtr( + Addr = emitArraySubscriptPtr( *this, CGM.getLoc(Array->getBeginLoc()), CGM.getLoc(Array->getEndLoc()), ArrayLV.getAddress(), {Idx}, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, @@ -1816,10 +1812,10 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // The base must be a pointer; emit it with an estimate of its alignment. // TODO(cir): EltTBAAInfo assert(!cir::MissingFeatures::tbaa() && "TBAA is NYI"); - Addr = buildPointerWithAlignment(E->getBase(), &EltBaseInfo); + Addr = emitPointerWithAlignment(E->getBase(), &EltBaseInfo); auto Idx = EmitIdxAfterBase(/*Promote*/ true); QualType ptrType = E->getBase()->getType(); - Addr = buildArraySubscriptPtr( + Addr = emitArraySubscriptPtr( *this, CGM.getLoc(E->getBeginLoc()), CGM.getLoc(E->getEndLoc()), Addr, Idx, E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices, CGM.getLoc(E->getExprLoc()), /*shouldDecay=*/false, @@ -1835,7 +1831,7 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, return LV; } -LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { +LValue CIRGenFunction::emitStringLiteralLValue(const StringLiteral *E) { auto sym = CGM.getAddrOfConstantStringFromLiteral(E).getSymbol(); auto cstGlobal = mlir::SymbolTable::lookupSymbolIn(CGM.getModule(), sym); @@ -1861,7 +1857,7 @@ LValue CIRGenFunction::buildStringLiteralLValue(const StringLiteral *E) { /// we need the address of an aggregate in order to access one of its members. /// This can happen for all the reasons that casts are permitted with aggregate /// result, including noop aggregate casts, and cast from scalar to union. 
-LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { +LValue CIRGenFunction::emitCastLValue(const CastExpr *E) { switch (E->getCastKind()) { case CK_HLSLArrayRValue: case CK_HLSLVectorTruncation: @@ -1925,10 +1921,10 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { assert(0 && "NYI"); case CK_Dynamic: { - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); Address V = LV.getAddress(); const auto *DCE = cast(E); - return MakeNaturalAlignAddrLValue(buildDynamicCast(V, DCE), E->getType()); + return MakeNaturalAlignAddrLValue(emitDynamicCast(V, DCE), E->getType()); } case CK_ConstructorConversion: @@ -1936,12 +1932,12 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_LValueToRValue: - return buildLValue(E->getSubExpr()); + return emitLValue(E->getSubExpr()); case CK_NoOp: { // CK_NoOp can model a qualification conversion, which can remove an array // bound and change the IR type. - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); // Propagate the volatile qualifier to LValue, if exists in E. if (E->changesVolatileQualification()) llvm_unreachable("NYI"); @@ -1963,7 +1959,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { E->getSubExpr()->getType()->castAs(); auto *DerivedClassDecl = cast(DerivedClassTy->getDecl()); - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); Address This = LV.getAddress(); // Perform the derived-to-base conversion @@ -1986,7 +1982,7 @@ LValue CIRGenFunction::buildCastLValue(const CastExpr *E) { assert(0 && "NYI"); } case CK_AddressSpaceConversion: { - LValue LV = buildLValue(E->getSubExpr()); + LValue LV = emitLValue(E->getSubExpr()); QualType DestTy = getContext().getPointerType(E->getType()); auto SrcAS = builder.getAddrSpaceAttr(E->getSubExpr()->getType().getAddressSpace()); @@ -2021,12 +2017,12 @@ static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &CGF, return nullptr; } -LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { +LValue CIRGenFunction::emitCheckedLValue(const Expr *E, TypeCheckKind TCK) { LValue LV; if (SanOpts.has(SanitizerKind::ArrayBounds) && isa(E)) assert(0 && "not implemented"); else - LV = buildLValue(E); + LV = emitLValue(E); if (!isa(E) && !LV.isBitField() && LV.isSimple()) { SanitizerSet SkippedChecks; if (const auto *ME = dyn_cast(E)) { @@ -2036,8 +2032,8 @@ LValue CIRGenFunction::buildCheckedLValue(const Expr *E, TypeCheckKind TCK) { if (IsBaseCXXThis || isa(ME->getBase())) SkippedChecks.set(SanitizerKind::Null, true); } - buildTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), E->getType(), - LV.getAlignment(), SkippedChecks); + emitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(), E->getType(), + LV.getAlignment(), SkippedChecks); } return LV; } @@ -2066,10 +2062,10 @@ bool CIRGenFunction::isWrappedCXXThis(const Expr *Obj) { return true; } -LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { +LValue CIRGenFunction::emitMemberExpr(const MemberExpr *E) { if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { - buildIgnoredExpr(E->getBase()); - return buildDeclRefLValue(DRE); + emitIgnoredExpr(E->getBase()); + return emitDeclRefLValue(DRE); } Expr *BaseExpr = E->getBase(); @@ -2077,7 +2073,7 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { LValue BaseLV; if (E->isArrow()) { LValueBaseInfo BaseInfo; - Address Addr = 
buildPointerWithAlignment(BaseExpr, &BaseInfo); + Address Addr = emitPointerWithAlignment(BaseExpr, &BaseInfo); QualType PtrTy = BaseExpr->getType()->getPointeeType(); SanitizerSet SkippedChecks; bool IsBaseCXXThis = isWrappedCXXThis(BaseExpr); @@ -2085,15 +2081,15 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { SkippedChecks.set(SanitizerKind::Alignment, true); if (IsBaseCXXThis || isa(BaseExpr)) SkippedChecks.set(SanitizerKind::Null, true); - buildTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, - /*Alignment=*/CharUnits::Zero(), SkippedChecks); + emitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, + /*Alignment=*/CharUnits::Zero(), SkippedChecks); BaseLV = makeAddrLValue(Addr, PtrTy, BaseInfo); } else - BaseLV = buildCheckedLValue(BaseExpr, TCK_MemberAccess); + BaseLV = emitCheckedLValue(BaseExpr, TCK_MemberAccess); NamedDecl *ND = E->getMemberDecl(); if (auto *Field = dyn_cast(ND)) { - LValue LV = buildLValueForField(BaseLV, Field); + LValue LV = emitLValueForField(BaseLV, Field); assert(!cir::MissingFeatures::setObjCGCLValueClass() && "NYI"); if (getLangOpts().OpenMP) { // If the member was explicitly marked as nontemporal, mark it as @@ -2110,8 +2106,8 @@ LValue CIRGenFunction::buildMemberExpr(const MemberExpr *E) { llvm_unreachable("Unhandled member declaration!"); } -LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { - RValue RV = buildCallExpr(E); +LValue CIRGenFunction::emitCallExprLValue(const CallExpr *E) { + RValue RV = emitCallExpr(E); if (!RV.isScalar()) return makeAddrLValue(RV.getAggregateAddress(), E->getType(), @@ -2125,8 +2121,8 @@ LValue CIRGenFunction::buildCallExprLValue(const CallExpr *E) { } /// Evaluate an expression into a given memory location. -void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, - Qualifiers Quals, bool IsInit) { +void CIRGenFunction::emitAnyExprToMem(const Expr *E, Address Location, + Qualifiers Quals, bool IsInit) { // FIXME: This function should take an LValue as an argument. 
switch (getEvaluationKind(E->getType())) { case cir::TEK_Complex: @@ -2134,18 +2130,18 @@ void CIRGenFunction::buildAnyExprToMem(const Expr *E, Address Location, return; case cir::TEK_Aggregate: { - buildAggExpr(E, AggValueSlot::forAddr(Location, Quals, - AggValueSlot::IsDestructed_t(IsInit), - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsAliased_t(!IsInit), - AggValueSlot::MayOverlap)); + emitAggExpr(E, AggValueSlot::forAddr(Location, Quals, + AggValueSlot::IsDestructed_t(IsInit), + AggValueSlot::DoesNotNeedGCBarriers, + AggValueSlot::IsAliased_t(!IsInit), + AggValueSlot::MayOverlap)); return; } case cir::TEK_Scalar: { - RValue RV = RValue::get(buildScalarExpr(E)); + RValue RV = RValue::get(emitScalarExpr(E)); LValue LV = makeAddrLValue(Location, E->getType()); - buildStoreThroughLValue(RV, LV); + emitStoreThroughLValue(RV, LV); return; } } @@ -2262,7 +2258,7 @@ static void pushTemporaryCleanup(CIRGenFunction &CGF, } } -LValue CIRGenFunction::buildMaterializeTemporaryExpr( +LValue CIRGenFunction::emitMaterializeTemporaryExpr( const MaterializeTemporaryExpr *M) { const Expr *E = M->getSubExpr(); @@ -2270,7 +2266,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( !cast(M->getExtendingDecl())->isARCPseudoStrong()) && "Reference should never be pseudo-strong!"); - // FIXME: ideally this would use buildAnyExprToMem, however, we cannot do so + // FIXME: ideally this would use emitAnyExprToMem, however, we cannot do so // as that will cause the lifetime adjustment to be lost for ARC auto ownership = M->getType().getObjCLifetime(); if (ownership != Qualifiers::OCL_None && @@ -2283,7 +2279,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); for (const auto &Ignored : CommaLHSs) - buildIgnoredExpr(Ignored); + emitIgnoredExpr(Ignored); if (const auto *opaque = dyn_cast(E)) assert(0 && "NYI"); @@ -2312,7 +2308,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( break; } - buildAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true); + emitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true); } pushTemporaryCleanup(*this, M, E, Object); @@ -2327,7 +2323,7 @@ LValue CIRGenFunction::buildMaterializeTemporaryExpr( return makeAddrLValue(Object, M->getType(), AlignmentSource::Decl); } -LValue CIRGenFunction::buildOpaqueValueLValue(const OpaqueValueExpr *e) { +LValue CIRGenFunction::emitOpaqueValueLValue(const OpaqueValueExpr *e) { assert(OpaqueValueMappingData::shouldBindAsLValue(e)); return getOrCreateOpaqueLValueMapping(e); } @@ -2343,7 +2339,7 @@ CIRGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { return it->second; assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); - return buildLValue(e->getSourceExpr()); + return emitLValue(e->getSourceExpr()); } RValue @@ -2357,7 +2353,7 @@ CIRGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) { return it->second; assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted"); - return buildAnyExpr(e->getSourceExpr()); + return emitAnyExpr(e->getSourceExpr()); } namespace { @@ -2382,7 +2378,7 @@ std::optional HandleConditionalOperatorLValueSimpleCase( if (auto *ThrowExpr = dyn_cast(Live->IgnoreParens())) { llvm_unreachable("NYI"); } - return CGF.buildLValue(Live); + return CGF.emitLValue(Live); } } return std::nullopt; @@ -2392,21 +2388,21 @@ std::optional HandleConditionalOperatorLValueSimpleCase( /// Emit the operand of a glvalue conditional operator. 
This is either a glvalue /// or a (possibly-parenthesized) throw-expression. If this is a throw, no /// LValue is returned and the current block has been terminated. -static std::optional buildLValueOrThrowExpression(CIRGenFunction &CGF, - const Expr *Operand) { +static std::optional emitLValueOrThrowExpression(CIRGenFunction &CGF, + const Expr *Operand) { if (auto *ThrowExpr = dyn_cast(Operand->IgnoreParens())) { llvm_unreachable("NYI"); } - return CGF.buildLValue(Operand); + return CGF.emitLValue(Operand); } // Create and generate the 3 blocks for a conditional operator. // Leaves the 'current block' in the continuation basic block. template CIRGenFunction::ConditionalInfo -CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, - const FuncTy &BranchGenFunc) { +CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *E, + const FuncTy &BranchGenFunc) { ConditionalInfo Info; auto &CGF = *this; ConditionalEvaluation eval(CGF); @@ -2415,7 +2411,7 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, auto *trueExpr = E->getTrueExpr(); auto *falseExpr = E->getFalseExpr(); - mlir::Value condV = CGF.buildOpOnBoolExpr(loc, E->getCond()); + mlir::Value condV = CGF.emitOpOnBoolExpr(loc, E->getCond()); SmallVector insertPoints{}; mlir::Type yieldTy{}; @@ -2494,13 +2490,13 @@ CIRGenFunction::buildConditionalBlocks(const AbstractConditionalOperator *E, return Info; } -LValue CIRGenFunction::buildConditionalOperatorLValue( +LValue CIRGenFunction::emitConditionalOperatorLValue( const AbstractConditionalOperator *expr) { if (!expr->isGLValue()) { // ?: here should be an aggregate. assert(hasAggregateEvaluationKind(expr->getType()) && "Unexpected conditional operator!"); - return buildAggExprToLValue(expr); + return emitAggExprToLValue(expr); } OpaqueValueMapping binding(*this, expr); @@ -2509,8 +2505,8 @@ LValue CIRGenFunction::buildConditionalOperatorLValue( return *Res; ConditionalInfo Info = - buildConditionalBlocks(expr, [](CIRGenFunction &CGF, const Expr *E) { - return buildLValueOrThrowExpression(CGF, E); + emitConditionalBlocks(expr, [](CIRGenFunction &CGF, const Expr *E) { + return emitLValueOrThrowExpression(CGF, E); }); if ((Info.LHS && !Info.LHS->isSimple()) || @@ -2535,7 +2531,7 @@ LValue CIRGenFunction::buildConditionalOperatorLValue( /// Emit code to compute a designator that specifies the location /// of the expression. /// FIXME: document this function better. 
-LValue CIRGenFunction::buildLValue(const Expr *E) { +LValue CIRGenFunction::emitLValue(const Expr *E) { // FIXME: ApplyDebugLocation DL(*this, E); switch (E->getStmtClass()) { default: { @@ -2544,27 +2540,26 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { assert(0 && "not implemented"); } case Expr::ConditionalOperatorClass: - return buildConditionalOperatorLValue(cast(E)); + return emitConditionalOperatorLValue(cast(E)); case Expr::ArraySubscriptExprClass: - return buildArraySubscriptExpr(cast(E)); + return emitArraySubscriptExpr(cast(E)); case Expr::ExtVectorElementExprClass: - return buildExtVectorElementExpr(cast(E)); + return emitExtVectorElementExpr(cast(E)); case Expr::BinaryOperatorClass: - return buildBinaryOperatorLValue(cast(E)); + return emitBinaryOperatorLValue(cast(E)); case Expr::CompoundAssignOperatorClass: { QualType Ty = E->getType(); if (const AtomicType *AT = Ty->getAs()) assert(0 && "not yet implemented"); if (!Ty->isAnyComplexType()) - return buildCompoundAssignmentLValue(cast(E)); - return buildComplexCompoundAssignmentLValue( - cast(E)); + return emitCompoundAssignmentLValue(cast(E)); + return emitComplexCompoundAssignmentLValue(cast(E)); } case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: case Expr::UserDefinedLiteralClass: - return buildCallExprLValue(cast(E)); + return emitCallExprLValue(cast(E)); case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast(E); LValue LV; @@ -2576,7 +2571,7 @@ LValue CIRGenFunction::buildLValue(const Expr *E) { CIRGenFunction::LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - LV = buildLValue(cleanups->getSubExpr()); + LV = emitLValue(cleanups->getSubExpr()); if (LV.isSimple()) { // Defend against branches out of gnu statement expressions // surrounded by cleanups. 
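// For reference, a few illustrative expressions and the emitLValue cases they
// dispatch to in the switch around this hunk (sketch, not exhaustive):
//   a[i]     -> emitArraySubscriptExpr
//   a = b    -> emitBinaryOperatorLValue (BO_Assign)
//   s.member -> emitMemberExpr
//   (x)      -> emitLValue on the ParenExpr sub-expression
//   "text"   -> emitStringLiteralLValue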
@@ -2593,19 +2588,19 @@ LValue CIRGenFunction::buildLValue(const Expr *E) {
     return LV;
   }
   case Expr::ParenExprClass:
-    return buildLValue(cast<ParenExpr>(E)->getSubExpr());
+    return emitLValue(cast<ParenExpr>(E)->getSubExpr());
   case Expr::DeclRefExprClass:
-    return buildDeclRefLValue(cast<DeclRefExpr>(E));
+    return emitDeclRefLValue(cast<DeclRefExpr>(E));
   case Expr::UnaryOperatorClass:
-    return buildUnaryOpLValue(cast<UnaryOperator>(E));
+    return emitUnaryOpLValue(cast<UnaryOperator>(E));
   case Expr::StringLiteralClass:
-    return buildStringLiteralLValue(cast<StringLiteral>(E));
+    return emitStringLiteralLValue(cast<StringLiteral>(E));
   case Expr::MemberExprClass:
-    return buildMemberExpr(cast<MemberExpr>(E));
+    return emitMemberExpr(cast<MemberExpr>(E));
   case Expr::CompoundLiteralExprClass:
-    return buildCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+    return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
   case Expr::PredefinedExprClass:
-    return buildPredefinedLValue(cast<PredefinedExpr>(E));
+    return emitPredefinedLValue(cast<PredefinedExpr>(E));
   case Expr::CXXFunctionalCastExprClass:
   case Expr::CXXReinterpretCastExprClass:
   case Expr::CXXConstCastExprClass:
@@ -2613,22 +2608,22 @@ LValue CIRGenFunction::buildLValue(const Expr *E) {
   case Expr::ObjCBridgedCastExprClass:
     emitError(getLoc(E->getExprLoc()), "l-value not implemented for '")
         << E->getStmtClassName() << "'";
-    assert(0 && "Use buildCastLValue below, remove me when adding testcase");
+    assert(0 && "Use emitCastLValue below, remove me when adding testcase");
   case Expr::CStyleCastExprClass:
   case Expr::CXXStaticCastExprClass:
   case Expr::CXXDynamicCastExprClass:
   case Expr::ImplicitCastExprClass:
-    return buildCastLValue(cast<CastExpr>(E));
+    return emitCastLValue(cast<CastExpr>(E));
   case Expr::OpaqueValueExprClass:
-    return buildOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+    return emitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
   case Expr::MaterializeTemporaryExprClass:
-    return buildMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
+    return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

   case Expr::ObjCPropertyRefExprClass:
     llvm_unreachable("cannot emit a property reference directly");

   case Expr::StmtExprClass:
-    return buildStmtExprLValue(cast<StmtExpr>(E));
+    return emitStmtExprLValue(cast<StmtExpr>(E));
   }

   return LValue::makeAddr(Address::invalid(), E->getType());
@@ -2644,7 +2639,7 @@ RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type,
   case cir::TEK_Aggregate:
     return lvalue.asAggregateRValue();
   case cir::TEK_Scalar:
-    return RValue::get(buildLoadOfScalar(lvalue, loc));
+    return RValue::get(emitLoadOfScalar(lvalue, loc));
   }
   llvm_unreachable("NYI");
 }
@@ -2661,9 +2656,9 @@ bool CIRGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {

 /// Emit an `if` on a boolean condition, filling `then` and `else` into
 /// appropriate regions.
-mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond,
-                                                      const Stmt *thenS,
-                                                      const Stmt *elseS) {
+mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
+                                                     const Stmt *thenS,
+                                                     const Stmt *elseS) {
   // Attempt to be as accurate as possible with IfOp location, generate
   // one fused location that has either 2 or 4 total locations, depending
   // on else's availability.
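// Illustrative input (not part of this patch): an if statement whose
// condition is folded to a bool by emitOpOnBoolExpr and whose branches are
// emitted into the cir.if regions created by emitIfOnBoolExpr below.
int if_demo(int x) {
  if (x > 0)
    return x; // 'then' region, emitted via emitStmt
  return 0;   // fall-through / 'else' path
}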
@@ -2678,18 +2673,18 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond,
     elseLoc = getStmtLoc(*elseS);

   mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
-  buildIfOnBoolExpr(
+  emitIfOnBoolExpr(
       cond, /*thenBuilder=*/
       [&](mlir::OpBuilder &, mlir::Location) {
         LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
-        resThen = buildStmt(thenS, /*useCurrentScope=*/true);
+        resThen = emitStmt(thenS, /*useCurrentScope=*/true);
       },
       thenLoc,
       /*elseBuilder=*/
       [&](mlir::OpBuilder &, mlir::Location) {
         assert(elseLoc && "Invalid location for elseS.");
         LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
-        resElse = buildStmt(elseS, /*useCurrentScope=*/true);
+        resElse = emitStmt(elseS, /*useCurrentScope=*/true);
       },
       elseLoc);

@@ -2699,7 +2694,7 @@ mlir::LogicalResult CIRGenFunction::buildIfOnBoolExpr(const Expr *cond,

 /// Emit an `if` on a boolean condition, filling `then` and `else` into
 /// appropriate regions.
-cir::IfOp CIRGenFunction::buildIfOnBoolExpr(
+cir::IfOp CIRGenFunction::emitIfOnBoolExpr(
     const clang::Expr *cond,
     llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> thenBuilder,
     mlir::Location thenLoc,
@@ -2712,7 +2707,7 @@ cir::IfOp CIRGenFunction::buildIfOnBoolExpr(
   auto loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);

   // Emit the code with the fully general case.
-  mlir::Value condV = buildOpOnBoolExpr(loc, cond);
+  mlir::Value condV = emitOpOnBoolExpr(loc, cond);
   return builder.create<cir::IfOp>(loc, condV, elseLoc.has_value(),
                                    /*thenBuilder=*/thenBuilder,
                                    /*elseBuilder=*/elseBuilder);
 }

 /// TODO(cir): PGO data
 /// TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
-mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc,
-                                              const Expr *cond) {
+mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
+                                             const Expr *cond) {
   // TODO(CIR): scoped ApplyDebugLocation DL(*this, Cond);
   // TODO(CIR): __builtin_unpredictable and profile counts?
cond = cond->IgnoreParens(); @@ -2741,25 +2736,25 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, if (const ConditionalOperator *CondOp = dyn_cast(cond)) { auto *trueExpr = CondOp->getTrueExpr(); auto *falseExpr = CondOp->getFalseExpr(); - mlir::Value condV = buildOpOnBoolExpr(loc, CondOp->getCond()); + mlir::Value condV = emitOpOnBoolExpr(loc, CondOp->getCond()); auto ternaryOpRes = builder .create( loc, condV, /*thenBuilder=*/ [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = buildScalarExpr(trueExpr); + auto lhs = emitScalarExpr(trueExpr); b.create(loc, lhs); }, /*elseBuilder=*/ [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = buildScalarExpr(falseExpr); + auto rhs = emitScalarExpr(falseExpr); b.create(loc, rhs); }) .getResult(); - return buildScalarConversion(ternaryOpRes, CondOp->getType(), - getContext().BoolTy, CondOp->getExprLoc()); + return emitScalarConversion(ternaryOpRes, CondOp->getType(), + getContext().BoolTy, CondOp->getExprLoc()); } if (const CXXThrowExpr *Throw = dyn_cast(cond)) { @@ -2778,10 +2773,10 @@ mlir::Value CIRGenFunction::buildOpOnBoolExpr(mlir::Location loc, return evaluateExprAsBool(cond); } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, - mlir::Location loc, CharUnits alignment, - bool insertIntoFnEntryBlock, - mlir::Value arraySize) { +mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty, + mlir::Location loc, CharUnits alignment, + bool insertIntoFnEntryBlock, + mlir::Value arraySize) { mlir::Block *entryBlock = insertIntoFnEntryBlock ? getCurFunctionEntryBlock() : currLexScope->getEntryBlock(); @@ -2796,14 +2791,14 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, entryBlock = &scopeOp.getRegion().front(); } - return buildAlloca(name, ty, loc, alignment, - builder.getBestAllocaInsertPoint(entryBlock), arraySize); + return emitAlloca(name, ty, loc, alignment, + builder.getBestAllocaInsertPoint(entryBlock), arraySize); } -mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty, - mlir::Location loc, CharUnits alignment, - mlir::OpBuilder::InsertPoint ip, - mlir::Value arraySize) { +mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty, + mlir::Location loc, CharUnits alignment, + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize) { // CIR uses its own alloca AS rather than follow the target data layout like // original CodeGen. The data layout awareness should be done in the lowering // pass instead. 
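// Illustrative only: each local below becomes a cir.alloca created through
// the emitAlloca overloads around this hunk, placed at the best insertion
// point of the function (or enclosing cir.scope) entry block rather than at
// its point of declaration.
int alloca_demo(int n) {
  int a = n;     // alloca hoisted to the entry block
  int b = a + 1; // likewise
  return b;
}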
@@ -2824,29 +2819,29 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, mlir::Type ty,
   return addr;
 }
 
-mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
-                                        mlir::Location loc, CharUnits alignment,
-                                        bool insertIntoFnEntryBlock,
-                                        mlir::Value arraySize) {
-  return buildAlloca(name, getCIRType(ty), loc, alignment,
-                     insertIntoFnEntryBlock, arraySize);
+mlir::Value CIRGenFunction::emitAlloca(StringRef name, QualType ty,
+                                       mlir::Location loc, CharUnits alignment,
+                                       bool insertIntoFnEntryBlock,
+                                       mlir::Value arraySize) {
+  return emitAlloca(name, getCIRType(ty), loc, alignment,
+                    insertIntoFnEntryBlock, arraySize);
 }
 
-mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
-                                              SourceLocation loc) {
-  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
-                           lvalue.getType(), getLoc(loc), lvalue.getBaseInfo(),
-                           lvalue.getTBAAInfo(), lvalue.isNontemporal());
+mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
+                                             SourceLocation loc) {
+  return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                          lvalue.getType(), getLoc(loc), lvalue.getBaseInfo(),
+                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
 }
 
-mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
-                                              mlir::Location loc) {
-  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
-                           lvalue.getType(), loc, lvalue.getBaseInfo(),
-                           lvalue.getTBAAInfo(), lvalue.isNontemporal());
+mlir::Value CIRGenFunction::emitLoadOfScalar(LValue lvalue,
+                                             mlir::Location loc) {
+  return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                          lvalue.getType(), loc, lvalue.getBaseInfo(),
+                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
 }
 
-mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) {
+mlir::Value CIRGenFunction::emitFromMemory(mlir::Value Value, QualType Ty) {
   if (!Ty->isBooleanType() && hasBooleanRepresentation(Ty)) {
     llvm_unreachable("NIY");
   }
@@ -2854,20 +2849,20 @@ mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) {
   return Value;
 }
 
-mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile,
-                                              QualType ty, SourceLocation loc,
-                                              LValueBaseInfo baseInfo,
-                                              TBAAAccessInfo tbaaInfo,
-                                              bool isNontemporal) {
-  return buildLoadOfScalar(addr, isVolatile, ty, getLoc(loc), baseInfo,
-                           tbaaInfo, isNontemporal);
+mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
+                                             QualType ty, SourceLocation loc,
+                                             LValueBaseInfo baseInfo,
+                                             TBAAAccessInfo tbaaInfo,
+                                             bool isNontemporal) {
+  return emitLoadOfScalar(addr, isVolatile, ty, getLoc(loc), baseInfo, tbaaInfo,
+                          isNontemporal);
 }
 
-mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile,
-                                              QualType ty, mlir::Location loc,
-                                              LValueBaseInfo baseInfo,
-                                              TBAAAccessInfo tbaaInfo,
-                                              bool isNontemporal) {
+mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
+                                             QualType ty, mlir::Location loc,
+                                             LValueBaseInfo baseInfo,
+                                             TBAAAccessInfo tbaaInfo,
+                                             bool isNontemporal) {
   // Atomic operations have to be done on integral types
   LValue atomicLValue =
       LValue::makeAddr(addr, ty, getContext(), baseInfo, tbaaInfo);
@@ -2891,7 +2886,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile,
     // Shuffle vector to get vec3.
       V = builder.createVecShuffle(loc, V, ArrayRef<int64_t>{0, 1, 2});
-      return buildFromMemory(V, ty);
+      return emitFromMemory(V, ty);
     }
   }
 
@@ -2912,18 +2907,18 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address addr, bool isVolatile,
   assert(!cir::MissingFeatures::tbaa() && "NYI");
   assert(!cir::MissingFeatures::emitScalarRangeCheck() && "NYI");
 
-  return buildFromMemory(Load, ty);
+  return emitFromMemory(Load, ty);
 }
 
 // Note: this function also emit constructor calls to support a MSVC extensions
 // allowing explicit constructor function call.
-RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE,
-                                              ReturnValueSlot ReturnValue) {
+RValue CIRGenFunction::emitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+                                             ReturnValueSlot ReturnValue) {
   const Expr *callee = CE->getCallee()->IgnoreParens();
 
   if (isa<BinaryOperator>(callee))
-    return buildCXXMemberPointerCallExpr(CE, ReturnValue);
+    return emitCXXMemberPointerCallExpr(CE, ReturnValue);
 
   const auto *ME = cast<MemberExpr>(callee);
   const auto *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
@@ -2937,13 +2932,13 @@ RValue CIRGenFunction::buildCXXMemberCallExpr(const CXXMemberCallExpr *CE,
   bool IsArrow = ME->isArrow();
   const Expr *Base = ME->getBase();
 
-  return buildCXXMemberOrOperatorMemberCallExpr(
+  return emitCXXMemberOrOperatorMemberCallExpr(
       CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
 }
 
-RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) {
+RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *E) {
   // Emit the expression as an lvalue.
-  LValue LV = buildLValue(E);
+  LValue LV = emitLValue(E);
   assert(LV.isSimple());
   auto Value = LV.getPointer();
 
@@ -2954,9 +2949,9 @@ RValue CIRGenFunction::buildReferenceBindingToExpr(const Expr *E) {
   return RValue::get(Value);
 }
 
-Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc,
-                                             LValueBaseInfo *pointeeBaseInfo,
-                                             TBAAAccessInfo *pointeeTBAAInfo) {
+Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc,
+                                            LValueBaseInfo *pointeeBaseInfo,
+                                            TBAAAccessInfo *pointeeTBAAInfo) {
   assert(!refLVal.isVolatile() && "NYI");
   cir::LoadOp load =
       builder.create<cir::LoadOp>(loc, refLVal.getAddress().getElementType(),
@@ -2972,15 +2967,15 @@ Address CIRGenFunction::buildLoadOfReference(LValue refLVal, mlir::Location loc,
   return Address(load, getTypes().convertTypeForMem(pointeeType), align);
 }
 
-LValue CIRGenFunction::buildLoadOfReferenceLValue(LValue RefLVal,
-                                                  mlir::Location Loc) {
+LValue CIRGenFunction::emitLoadOfReferenceLValue(LValue RefLVal,
+                                                 mlir::Location Loc) {
   LValueBaseInfo PointeeBaseInfo;
-  Address PointeeAddr = buildLoadOfReference(RefLVal, Loc, &PointeeBaseInfo);
+  Address PointeeAddr = emitLoadOfReference(RefLVal, Loc, &PointeeBaseInfo);
   return makeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
                         PointeeBaseInfo);
 }
 
-void CIRGenFunction::buildUnreachable(SourceLocation Loc) {
+void CIRGenFunction::emitUnreachable(SourceLocation Loc) {
   if (SanOpts.has(SanitizerKind::Unreachable))
     llvm_unreachable("NYI");
   builder.create<cir::UnreachableOp>(getLoc(Loc));
@@ -3054,8 +3049,8 @@ cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty,
                                  const Twine &Name, mlir::Value ArraySize,
                                  bool insertIntoFnEntryBlock) {
-  return cast<cir::AllocaOp>(buildAlloca(Name.str(), Ty, Loc, CharUnits(),
-                                         insertIntoFnEntryBlock, ArraySize)
+  return cast<cir::AllocaOp>(emitAlloca(Name.str(), Ty, Loc, CharUnits(),
+                                        insertIntoFnEntryBlock, ArraySize)
                                  .getDefiningOp());
 }
 
@@ -3067,7 +3062,7 @@ cir::AllocaOp CIRGenFunction::CreateTempAlloca(mlir::Type Ty,
                                  mlir::Value ArraySize) {
   assert(ip.isSet() && "Insertion point is not set");
   return cast<cir::AllocaOp>(
-      buildAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize)
+      emitAlloca(Name.str(), Ty, Loc, CharUnits(), ip, ArraySize)
           .getDefiningOp());
 }
 
@@ -3210,10 +3205,10 @@ CIRGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
   // This should probably fire even for
   if (isa<VarDecl>(value)) {
     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
-      buildDeclRefExprDbgValue(refExpr, result.Val);
+      emitDeclRefExprDbgValue(refExpr, result.Val);
   } else {
     assert(isa<EnumConstantDecl>(value));
-    buildDeclRefExprDbgValue(refExpr, result.Val);
+    emitDeclRefExprDbgValue(refExpr, result.Val);
   }
 
   // If we emitted a reference constant, we need to dereference that.
@@ -3228,17 +3223,17 @@ CIRGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
   llvm_unreachable("NYI");
 }
 
-mlir::Value CIRGenFunction::buildScalarConstant(
+mlir::Value CIRGenFunction::emitScalarConstant(
     const CIRGenFunction::ConstantEmission &Constant, Expr *E) {
   assert(Constant && "not a constant");
   if (Constant.isReference())
-    return buildLoadOfLValue(Constant.getReferenceLValue(*this, E),
-                             E->getExprLoc())
+    return emitLoadOfLValue(Constant.getReferenceLValue(*this, E),
+                            E->getExprLoc())
         .getScalarVal();
   return builder.getConstant(getLoc(E->getSourceRange()), Constant.getValue());
 }
 
-LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) {
+LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *E) {
   const auto *SL = E->getFunctionName();
   assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
   auto Fn = dyn_cast<cir::FuncOp>(CurFn);
@@ -3253,5 +3248,5 @@ LValue CIRGenFunction::buildPredefinedLValue(const PredefinedExpr *E) {
     llvm_unreachable("NYI");
   }
 
-  return buildStringLiteralLValue(SL);
+  return emitStringLiteralLValue(SL);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp
index 2218838ac7d6..f13cb8600f9a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAgg.cpp
@@ -135,23 +135,23 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   /// Given an expression with aggregate type that represents a value lvalue,
   /// this method emits the address of the lvalue, then loads the result into
   /// DestPtr.
-  void buildAggLoadOfLValue(const Expr *E);
+  void emitAggLoadOfLValue(const Expr *E);
 
   enum ExprValueKind { EVK_RValue, EVK_NonRValue };
 
   /// Perform the final copy to DestPtr, if desired.
-  void buildFinalDestCopy(QualType type, RValue src);
+  void emitFinalDestCopy(QualType type, RValue src);
 
   /// Perform the final copy to DestPtr, if desired. SrcIsRValue is true if
   /// source comes from an RValue.
-  void buildFinalDestCopy(QualType type, const LValue &src,
-                          ExprValueKind SrcValueKind = EVK_NonRValue);
-  void buildCopy(QualType type, const AggValueSlot &dest,
-                 const AggValueSlot &src);
+  void emitFinalDestCopy(QualType type, const LValue &src,
+                         ExprValueKind SrcValueKind = EVK_NonRValue);
+  void emitCopy(QualType type, const AggValueSlot &dest,
+                const AggValueSlot &src);
 
-  void buildArrayInit(Address DestPtr, cir::ArrayType AType, QualType ArrayQTy,
-                      Expr *ExprToVisit, ArrayRef<Expr *> Args,
-                      Expr *ArrayFiller);
+  void emitArrayInit(Address DestPtr, cir::ArrayType AType, QualType ArrayQTy,
+                     Expr *ExprToVisit, ArrayRef<Expr *> Args,
+                     Expr *ArrayFiller);
 
   AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
     if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
@@ -182,7 +182,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     llvm_unreachable("NYI");
   }
   void VisitCoawaitExpr(CoawaitExpr *E) {
-    CGF.buildCoawaitExpr(*E, Dest, IsResultUnused);
+    CGF.emitCoawaitExpr(*E, Dest, IsResultUnused);
   }
   void VisitCoyieldExpr(CoyieldExpr *E) { llvm_unreachable("NYI"); }
   void VisitUnaryCoawait(UnaryOperator *E) { llvm_unreachable("NYI"); }
@@ -193,13 +193,13 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   void VisitConstantExpr(ConstantExpr *E) { llvm_unreachable("NYI"); }
 
   // l-values
-  void VisitDeclRefExpr(DeclRefExpr *E) { buildAggLoadOfLValue(E); }
-  void VisitMemberExpr(MemberExpr *E) { buildAggLoadOfLValue(E); }
-  void VisitUnaryDeref(UnaryOperator *E) { buildAggLoadOfLValue(E); }
+  void VisitDeclRefExpr(DeclRefExpr *E) { emitAggLoadOfLValue(E); }
+  void VisitMemberExpr(MemberExpr *E) { emitAggLoadOfLValue(E); }
+  void VisitUnaryDeref(UnaryOperator *E) { emitAggLoadOfLValue(E); }
   void VisitStringLiteral(StringLiteral *E) { llvm_unreachable("NYI"); }
   void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
-    buildAggLoadOfLValue(E);
+    emitAggLoadOfLValue(E);
   }
   void VisitPredefinedExpr(const PredefinedExpr *E) { llvm_unreachable("NYI"); }
 
@@ -209,7 +209,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   void VisitStmtExpr(const StmtExpr *E) {
     assert(!cir::MissingFeatures::stmtExprEvaluation() && "NYI");
-    CGF.buildCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest);
+    CGF.emitCompoundStmt(*E->getSubStmt(), /*getLast=*/true, Dest);
   }
 
   void VisitBinaryOperator(const BinaryOperator *E) { llvm_unreachable("NYI"); }
@@ -229,7 +229,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
       llvm_unreachable("NYI");
     }
 
-    LValue lhs = CGF.buildLValue(E->getLHS());
+    LValue lhs = CGF.emitLValue(E->getLHS());
 
     // If we have an atomic type, evaluate into the destination and then
     // do an atomic copy.
@@ -248,10 +248,10 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     if (!lhsSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
       assert(!cir::MissingFeatures::atomicTypes());
 
-    CGF.buildAggExpr(E->getRHS(), lhsSlot);
+    CGF.emitAggExpr(E->getRHS(), lhsSlot);
 
     // Copy into the destination if the assignment isn't ignored.
-    buildFinalDestCopy(E->getType(), lhs);
+    emitFinalDestCopy(E->getType(), lhs);
 
     if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
         E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
@@ -302,7 +302,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
         CGF, CGF.getLoc(E->getSourceRange())};
     // Emit an array containing the elements. The array is externally
     // destructed if the std::initializer_list object is.
-    LValue Array = CGF.buildLValue(E->getSubExpr());
+    LValue Array = CGF.emitLValue(E->getSubExpr());
     assert(Array.isSimple() && "initializer_list array not a simple lvalue");
     Address ArrayPtr = Array.getAddress();
 
@@ -321,9 +321,9 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     AggValueSlot Dest = EnsureSlot(loc, E->getType());
     LValue DestLV = CGF.makeAddrLValue(Dest.getAddress(), E->getType());
     LValue Start =
-        CGF.buildLValueForFieldInitialization(DestLV, *Field, Field->getName());
+        CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName());
     mlir::Value ArrayStart = ArrayPtr.emitRawPointer();
-    CGF.buildStoreThroughLValue(RValue::get(ArrayStart), Start);
+    CGF.emitStoreThroughLValue(RValue::get(ArrayStart), Start);
     ++Field;
     assert(Field != Record->field_end() &&
            "Expected std::initializer_list to have two fields");
@@ -335,10 +335,10 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     mlir::Value Size = sizeOp.getRes();
     Builder.getUIntNTy(ArrayType->getSizeBitWidth());
     LValue EndOrLength =
-        CGF.buildLValueForFieldInitialization(DestLV, *Field, Field->getName());
+        CGF.emitLValueForFieldInitialization(DestLV, *Field, Field->getName());
     if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
       // Length.
-      CGF.buildStoreThroughLValue(RValue::get(Size), EndOrLength);
+      CGF.emitStoreThroughLValue(RValue::get(Size), EndOrLength);
     } else {
       // End pointer.
       assert(Field->getType()->isPointerType() &&
@@ -349,7 +349,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
       auto ArrayEnd =
           Builder.getArrayElement(loc, loc, ArrayPtr.getPointer(),
                                   ArrayPtr.getElementType(), Size, false);
-      CGF.buildStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
+      CGF.emitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
     }
     assert(++Field == Record->field_end() &&
            "Expected std::initializer_list to only have two fields");
@@ -367,9 +367,9 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
 
   void VisitVAArgExpr(VAArgExpr *E) { llvm_unreachable("NYI"); }
 
-  void buildInitializationToLValue(Expr *E, LValue LV);
+  void emitInitializationToLValue(Expr *E, LValue LV);
 
-  void buildNullInitializationToLValue(mlir::Location loc, LValue Address);
+  void emitNullInitializationToLValue(mlir::Location loc, LValue Address);
   void VisitCXXThrowExpr(const CXXThrowExpr *E) { llvm_unreachable("NYI"); }
   void VisitAtomicExpr(AtomicExpr *E) { llvm_unreachable("NYI"); }
 };
 
@@ -381,27 +381,27 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
 
 /// Given an expression with aggregate type that represents a value lvalue, this
 /// method emits the address of the lvalue, then loads the result into DestPtr.
-void AggExprEmitter::buildAggLoadOfLValue(const Expr *E) {
-  LValue LV = CGF.buildLValue(E);
+void AggExprEmitter::emitAggLoadOfLValue(const Expr *E) {
+  LValue LV = CGF.emitLValue(E);
 
   // If the type of the l-value is atomic, then do an atomic load.
   if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV) ||
       cir::MissingFeatures::atomicTypes())
     llvm_unreachable("atomic load is NYI");
 
-  buildFinalDestCopy(E->getType(), LV);
+  emitFinalDestCopy(E->getType(), LV);
 }
 
 /// Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::buildFinalDestCopy(QualType type, RValue src) {
+void AggExprEmitter::emitFinalDestCopy(QualType type, RValue src) {
   assert(src.isAggregate() && "value must be aggregate value!");
   LValue srcLV = CGF.makeAddrLValue(src.getAggregateAddress(), type);
-  buildFinalDestCopy(type, srcLV, EVK_RValue);
+  emitFinalDestCopy(type, srcLV, EVK_RValue);
 }
 
 /// Perform the final copy to DestPtr, if desired.
-void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src,
-                                        ExprValueKind SrcValueKind) {
+void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src,
+                                       ExprValueKind SrcValueKind) {
   // If Dest is ignored, then we're evaluating an aggregate expression
   // in a context that doesn't care about the result.  Note that loads
   // from volatile l-values force the existence of a non-ignored
@@ -425,15 +425,15 @@ void AggExprEmitter::buildFinalDestCopy(QualType type, const LValue &src,
   AggValueSlot srcAgg = AggValueSlot::forLValue(
       src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
       AggValueSlot::MayOverlap);
-  buildCopy(type, Dest, srcAgg);
+  emitCopy(type, Dest, srcAgg);
 }
 
 /// Perform a copy from the source into the destination.
 ///
 /// \param type - the type of the aggregate being copied; qualifiers are
 ///   ignored
-void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest,
-                               const AggValueSlot &src) {
+void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
+                              const AggValueSlot &src) {
   if (dest.requiresGCollection())
     llvm_unreachable("garbage collection is NYI");
 
@@ -442,8 +442,8 @@ void AggExprEmitter::buildCopy(QualType type, const AggValueSlot &dest,
   // the two sides.
   LValue DestLV = CGF.makeAddrLValue(dest.getAddress(), type);
   LValue SrcLV = CGF.makeAddrLValue(src.getAddress(), type);
-  CGF.buildAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
-                         dest.isVolatile() || src.isVolatile());
+  CGF.emitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
+                        dest.isVolatile() || src.isVolatile());
 }
 
 // FIXME(cir): This function could be shared with traditional LLVM codegen
@@ -470,9 +470,9 @@ static bool isTrivialFiller(Expr *E) {
   return false;
 }
 
-void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType,
-                                    QualType ArrayQTy, Expr *ExprToVisit,
-                                    ArrayRef<Expr *> Args, Expr *ArrayFiller) {
+void AggExprEmitter::emitArrayInit(Address DestPtr, cir::ArrayType AType,
+                                   QualType ArrayQTy, Expr *ExprToVisit,
+                                   ArrayRef<Expr *> Args, Expr *ArrayFiller) {
   uint64_t NumInitElements = Args.size();
 
   uint64_t NumArrayElements = AType.getSize();
@@ -539,7 +539,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType,
     LValue elementLV = CGF.makeAddrLValue(
         Address(element, cirElementType, elementAlign), elementType);
-    buildInitializationToLValue(Args[i], elementLV);
+    emitInitializationToLValue(Args[i], elementLV);
   }
 
   // Check whether there's a non-trivial array-fill expression.
@@ -572,7 +572,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType,
     auto tmpAddr = CGF.CreateTempAlloca(
         cirElementPtrType, CGF.getPointerAlign(), loc, "arrayinit.temp");
     LValue tmpLV = CGF.makeAddrLValue(tmpAddr, elementPtrType);
-    CGF.buildStoreThroughLValue(RValue::get(element), tmpLV);
+    CGF.emitStoreThroughLValue(RValue::get(element), tmpLV);
 
     // Compute the end of array
     auto numArrayElementsConst = builder.getConstInt(
@@ -602,9 +602,9 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType,
           Address(currentElement, cirElementType, elementAlign), elementType);
       if (ArrayFiller)
-        buildInitializationToLValue(ArrayFiller, elementLV);
+        emitInitializationToLValue(ArrayFiller, elementLV);
       else
-        buildNullInitializationToLValue(loc, elementLV);
+        emitNullInitializationToLValue(loc, elementLV);
 
       // Tell the EH cleanup that we finished with the last element.
       assert(!endOfInit.isValid() && "destructed types NIY");
 
@@ -614,7 +614,7 @@ void AggExprEmitter::buildArrayInit(Address DestPtr, cir::ArrayType AType,
           loc, mlir::cast<cir::IntType>(CGF.PtrDiffTy), 1);
       auto nextElement = builder.create<cir::PtrStrideOp>(
           loc, cirElementPtrType, currentElement, one);
-      CGF.buildStoreThroughLValue(RValue::get(nextElement), tmpLV);
+      CGF.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
 
       builder.createYield(loc);
     });
@@ -777,8 +777,8 @@ static bool isSimpleZero(const Expr *E, CIRGenFunction &CGF) {
   return false;
 }
 
-void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc,
-                                                     LValue lv) {
+void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
+                                                    LValue lv) {
   QualType type = lv.getType();
 
   // If the destination slot is already zeroed out before the aggregate is
@@ -788,25 +788,25 @@ void AggExprEmitter::buildNullInitializationToLValue(mlir::Location loc,
 
   if (CGF.hasScalarEvaluationKind(type)) {
     // For non-aggregates, we can store the appropriate null constant.
-    auto null = CGF.CGM.buildNullConstant(type, loc);
+    auto null = CGF.CGM.emitNullConstant(type, loc);
     // Note that the following is not equivalent to
     // EmitStoreThroughBitfieldLValue for ARC types.
     if (lv.isBitField()) {
       mlir::Value result;
-      CGF.buildStoreThroughBitfieldLValue(RValue::get(null), lv, result);
+      CGF.emitStoreThroughBitfieldLValue(RValue::get(null), lv, result);
     } else {
       assert(lv.isSimple());
-      CGF.buildStoreOfScalar(null, lv, /* isInitialization */ true);
+      CGF.emitStoreOfScalar(null, lv, /* isInitialization */ true);
     }
   } else {
     // There's a potential optimization opportunity in combining
     // memsets; that would be easy for arrays, but relatively
     // difficult for structures with the current code.
-    CGF.buildNullInitialization(loc, lv.getAddress(), lv.getType());
+    CGF.emitNullInitialization(loc, lv.getAddress(), lv.getType());
   }
 }
 
-void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) {
+void AggExprEmitter::emitInitializationToLValue(Expr *E, LValue LV) {
   QualType type = LV.getType();
   // FIXME: Ignore result?
   // FIXME: Are initializers affected by volatile?
@@ -821,13 +821,13 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) {
   if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
     auto loc = E->getSourceRange().isValid() ? CGF.getLoc(E->getSourceRange())
                                              : *CGF.currSrcLoc;
-    return buildNullInitializationToLValue(loc, LV);
+    return emitNullInitializationToLValue(loc, LV);
   } else if (isa<NoInitExpr>(E)) {
     // Do nothing.
     return;
   } else if (type->isReferenceType()) {
-    RValue RV = CGF.buildReferenceBindingToExpr(E);
-    return CGF.buildStoreThroughLValue(RV, LV);
+    RValue RV = CGF.emitReferenceBindingToExpr(E);
+    return CGF.emitStoreThroughLValue(RV, LV);
   }
 
   switch (CGF.getEvaluationKind(type)) {
@@ -835,7 +835,7 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) {
     llvm_unreachable("NYI");
     return;
   case cir::TEK_Aggregate:
-    CGF.buildAggExpr(
+    CGF.emitAggExpr(
         E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
@@ -843,9 +843,9 @@ void AggExprEmitter::buildInitializationToLValue(Expr *E, LValue LV) {
     return;
   case cir::TEK_Scalar:
     if (LV.isSimple()) {
-      CGF.buildScalarInit(E, CGF.getLoc(E->getSourceRange()), LV);
+      CGF.emitScalarInit(E, CGF.getLoc(E->getSourceRange()), LV);
     } else {
-      CGF.buildStoreThroughLValue(RValue::get(CGF.buildScalarExpr(E)), LV);
+      CGF.emitStoreThroughLValue(RValue::get(CGF.emitScalarExpr(E)), LV);
     }
     return;
   }
@@ -859,14 +859,14 @@ void AggExprEmitter::VisitMaterializeTemporaryExpr(
 
 void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
   AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType());
-  CGF.buildCXXConstructExpr(E, Slot);
+  CGF.emitCXXConstructExpr(E, Slot);
 }
 
 void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
   if (Dest.isPotentiallyAliased() && E->getType().isPODType(CGF.getContext())) {
     // For a POD type, just emit a load of the lvalue + a copy, because our
     // compound literal might alias the destination.
-    buildAggLoadOfLValue(E);
+    emitAggLoadOfLValue(E);
     return;
   }
 
@@ -879,7 +879,7 @@ void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
   if (Destruct)
     Slot.setExternallyDestructed();
 
-  CGF.buildAggExpr(E->getInitializer(), Slot);
+  CGF.emitAggExpr(E->getInitializer(), Slot);
 
   if (Destruct)
     if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
@@ -934,12 +934,12 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
 
     // Emit initialization
     LValue LV =
-        CGF.buildLValueForFieldInitialization(SlotLV, *CurField, fieldName);
+        CGF.emitLValueForFieldInitialization(SlotLV, *CurField, fieldName);
     if (CurField->hasCapturedVLAType()) {
       llvm_unreachable("NYI");
     }
 
-    buildInitializationToLValue(captureInit, LV);
+    emitInitializationToLValue(captureInit, LV);
 
     // Push a destructor if necessary.
     if (QualType::DestructionKind DtorKind =
@@ -954,16 +954,16 @@ void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
 
 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
-    CGF.CGM.buildExplicitCastExprType(ECE, &CGF);
+    CGF.CGM.emitExplicitCastExprType(ECE, &CGF);
   switch (E->getCastKind()) {
   case CK_LValueToRValueBitCast: {
     if (Dest.isIgnored()) {
-      CGF.buildAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
-                       /*ignoreResult=*/true);
+      CGF.emitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
+                      /*ignoreResult=*/true);
       break;
     }
 
-    LValue SourceLV = CGF.buildLValue(E->getSubExpr());
+    LValue SourceLV = CGF.emitLValue(E->getSubExpr());
     Address SourceAddress = SourceLV.getAddress();
     Address DestAddress = Dest.getAddress();
 
@@ -984,16 +984,16 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   case CK_ToUnion: {
     // Evaluate even if the destination is ignored.
     if (Dest.isIgnored()) {
-      CGF.buildAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
-                       /*ignoreResult=*/true);
+      CGF.emitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
+                      /*ignoreResult=*/true);
       break;
     }
 
     // GCC union extension
     QualType Ty = E->getSubExpr()->getType();
     Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
-    buildInitializationToLValue(E->getSubExpr(),
-                                CGF.makeAddrLValue(CastPtr, Ty));
+    emitInitializationToLValue(E->getSubExpr(),
+                               CGF.makeAddrLValue(CastPtr, Ty));
     break;
   }
 
@@ -1099,7 +1099,7 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
   }
 
   withReturnValueSlot(
-      E, [&](ReturnValueSlot Slot) { return CGF.buildCallExpr(E, Slot); });
+      E, [&](ReturnValueSlot Slot) { return CGF.emitCallExpr(E, Slot); });
 }
 
 void AggExprEmitter::withReturnValueSlot(
@@ -1136,7 +1136,7 @@ void AggExprEmitter::withReturnValueSlot(
     return;
 
   assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
-  buildFinalDestCopy(E->getType(), Src);
+  emitFinalDestCopy(E->getType(), Src);
 
   if (!RequiresDestruction) {
     // If there's no dtor to run, the copy was the last use of our temporary.
@@ -1166,8 +1166,8 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
   if (E->getType()->isAnyComplexType())
     llvm_unreachable("NYI");
 
-  auto LHS = CGF.buildAnyExpr(E->getLHS()).getScalarVal();
-  auto RHS = CGF.buildAnyExpr(E->getRHS()).getScalarVal();
+  auto LHS = CGF.emitAnyExpr(E->getLHS()).getScalarVal();
+  auto RHS = CGF.emitAnyExpr(E->getRHS()).getScalarVal();
 
   mlir::Value ResultScalar;
   if (ArgTy->isNullPtrType()) {
@@ -1196,9 +1196,9 @@ void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
   // Emit the address of the first (and only) field in the comparison category
   // type, and initialize it from the constant integer value produced above.
   const FieldDecl *ResultField = *CmpInfo.Record->field_begin();
-  LValue FieldLV = CGF.buildLValueForFieldInitialization(
-      DestLV, ResultField, ResultField->getName());
-  CGF.buildStoreThroughLValue(RValue::get(ResultScalar), FieldLV);
+  LValue FieldLV = CGF.emitLValueForFieldInitialization(DestLV, ResultField,
+                                                        ResultField->getName());
+  CGF.emitStoreThroughLValue(RValue::get(ResultScalar), FieldLV);
 
   // All done! The result is in the Dest slot.
 }
 
@@ -1249,8 +1249,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
   // Handle initialization of an array.
   if (ExprToVisit->getType()->isConstantArrayType()) {
     auto AType = cast<cir::ArrayType>(Dest.getAddress().getElementType());
-    buildArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(),
-                   ExprToVisit, InitExprs, ArrayFiller);
+    emitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
+                  InitExprs, ArrayFiller);
     return;
   } else if (ExprToVisit->getType()->isVariableArrayType()) {
     llvm_unreachable("variable arrays NYI");
@@ -1311,14 +1311,14 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
 
     FieldDecl *Field = InitializedFieldInUnion;
     LValue FieldLoc =
-        CGF.buildLValueForFieldInitialization(DestLV, Field, Field->getName());
+        CGF.emitLValueForFieldInitialization(DestLV, Field, Field->getName());
     if (NumInitElements) {
       // Store the initializer into the field
-      buildInitializationToLValue(InitExprs[0], FieldLoc);
+      emitInitializationToLValue(InitExprs[0], FieldLoc);
     } else {
       // Default-initialize to null.
-      buildNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()),
-                                      FieldLoc);
+      emitNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()),
+                                     FieldLoc);
     }
 
     return;
@@ -1342,7 +1342,7 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
         CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
       break;
 
     LValue LV =
-        CGF.buildLValueForFieldInitialization(DestLV, field, field->getName());
+        CGF.emitLValueForFieldInitialization(DestLV, field, field->getName());
     // We never generate write-barries for initialized fields.
     assert(!cir::MissingFeatures::setNonGC());
 
@@ -1350,11 +1350,11 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
       // Store the initializer into the field.
       CIRGenFunction::SourceLocRAIIObject loc{
          CGF, CGF.getLoc(record->getSourceRange())};
-      buildInitializationToLValue(InitExprs[curInitIndex++], LV);
+      emitInitializationToLValue(InitExprs[curInitIndex++], LV);
     } else {
       // We're out of initializers; default-initialize to null
-      buildNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()),
-                                      LV);
+      emitNullInitializationToLValue(CGF.getLoc(ExprToVisit->getSourceRange()),
+                                     LV);
     }
 
     // Push a destructor if necessary.
@@ -1389,7 +1389,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
 
   // Push that destructor we promised.
   if (!wasExternallyDestructed)
-    CGF.buildCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
+    CGF.emitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
 }
 
 void AggExprEmitter::VisitAbstractConditionalOperator(
@@ -1409,7 +1409,7 @@ void AggExprEmitter::VisitAbstractConditionalOperator(
       E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
   isExternallyDestructed |= destructNonTrivialCStruct;
 
-  CGF.buildIfOnBoolExpr(
+  CGF.emitIfOnBoolExpr(
       E->getCond(), /*thenBuilder=*/
       [&](mlir::OpBuilder &, mlir::Location) {
         eval.begin(CGF);
@@ -1447,24 +1447,23 @@ void AggExprEmitter::VisitAbstractConditionalOperator(
 }
 
 void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
-  CGF.buildIgnoredExpr(E->getLHS());
+  CGF.emitIgnoredExpr(E->getLHS());
   Visit(E->getRHS());
 }
 
 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
     const CXXInheritedCtorInitExpr *E) {
   AggValueSlot Slot = EnsureSlot(CGF.getLoc(E->getSourceRange()), E->getType());
-  CGF.buildInheritedCXXConstructorCall(E->getConstructor(),
-                                       E->constructsVBase(), Slot.getAddress(),
-                                       E->inheritedFromVBase(), E);
+  CGF.emitInheritedCXXConstructorCall(E->getConstructor(), E->constructsVBase(),
+                                      Slot.getAddress(),
+                                      E->inheritedFromVBase(), E);
 }
 
 void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
   QualType T = E->getType();
   mlir::Location loc = CGF.getLoc(E->getSourceRange());
   AggValueSlot Slot = EnsureSlot(loc, T);
-  buildNullInitializationToLValue(loc,
-                                  CGF.makeAddrLValue(Slot.getAddress(), T));
+  emitNullInitializationToLValue(loc, CGF.makeAddrLValue(Slot.getAddress(), T));
 }
 
 //===----------------------------------------------------------------------===//
@@ -1599,7 +1598,7 @@ AggValueSlot::Overlap_t CIRGenFunction::getOverlapForBaseInit(
   return AggValueSlot::MayOverlap;
 }
 
-void CIRGenFunction::buildAggExpr(const Expr *E, AggValueSlot Slot) {
+void CIRGenFunction::emitAggExpr(const Expr *E, AggValueSlot Slot) {
   assert(E && CIRGenFunction::hasAggregateEvaluationKind(E->getType()) &&
          "Invalid aggregate expression to emit");
   assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
@@ -1611,9 +1610,9 @@
   AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
 }
 
-void CIRGenFunction::buildAggregateCopy(LValue Dest, LValue Src, QualType Ty,
-                                        AggValueSlot::Overlap_t MayOverlap,
-                                        bool isVolatile) {
+void CIRGenFunction::emitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
+                                       AggValueSlot::Overlap_t MayOverlap,
+                                       bool isVolatile) {
   // TODO(cir): this function needs improvements, commented code for now since
   // this will be touched again soon.
   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
@@ -1737,13 +1736,13 @@ CIRGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
   return AggValueSlot::MayOverlap;
 }
 
-LValue CIRGenFunction::buildAggExprToLValue(const Expr *E) {
+LValue CIRGenFunction::emitAggExprToLValue(const Expr *E) {
   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
   Address Temp = CreateMemTemp(E->getType(), getLoc(E->getSourceRange()));
   LValue LV = makeAddrLValue(Temp, E->getType());
-  buildAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
-                                          AggValueSlot::DoesNotNeedGCBarriers,
-                                          AggValueSlot::IsNotAliased,
-                                          AggValueSlot::DoesNotOverlap));
+  emitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+                                         AggValueSlot::DoesNotNeedGCBarriers,
+                                         AggValueSlot::IsNotAliased,
+                                         AggValueSlot::DoesNotOverlap));
   return LV;
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
index 136480f9e277..be3ec6071def 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprCXX.cpp
@@ -30,10 +30,10 @@ struct MemberCallInfo {
 };
 } // namespace
 
-static RValue buildNewDeleteCall(CIRGenFunction &CGF,
-                                 const FunctionDecl *CalleeDecl,
-                                 const FunctionProtoType *CalleeType,
-                                 const CallArgList &Args);
+static RValue emitNewDeleteCall(CIRGenFunction &CGF,
+                                const FunctionDecl *CalleeDecl,
+                                const FunctionProtoType *CalleeType,
+                                const CallArgList &Args);
 
 static MemberCallInfo
 commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD,
@@ -68,8 +68,8 @@ commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD,
   } else if (CE) {
     // Special case: skip first argument of CXXOperatorCall (it is "this").
     unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
-    CGF.buildCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
-                      CE->getDirectCallee());
+    CGF.emitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
+                     CE->getDirectCallee());
   } else {
     assert(
         FPT->getNumParams() == 0 &&
@@ -79,7 +79,7 @@ commonBuildCXXMemberOrOperatorCall(CIRGenFunction &CGF, const CXXMethodDecl *MD,
   return {required, PrefixSize};
 }
 
-RValue CIRGenFunction::buildCXXMemberOrOperatorCall(
+RValue CIRGenFunction::emitCXXMemberOrOperatorCall(
     const CXXMethodDecl *MD, const CIRGenCallee &Callee,
     ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam,
    QualType ImplicitParamTy, const CallExpr *CE, CallArgList *RtlArgs) {
@@ -92,8 +92,8 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorCall(
       Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
   assert((CE || currSrcLoc) && "expected source location");
   mlir::Location loc = CE ? getLoc(CE->getExprLoc()) : *currSrcLoc;
-  return buildCall(FnInfo, Callee, ReturnValue, Args, nullptr,
-                   CE && CE == MustTailCall, loc, CE);
+  return emitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
+                  CE && CE == MustTailCall, loc, CE);
 }
 
 // TODO(cir): this can be shared with LLVM codegen
@@ -106,8 +106,8 @@ static CXXRecordDecl *getCXXRecord(const Expr *E) {
 }
 
 RValue
-CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
-                                              ReturnValueSlot ReturnValue) {
+CIRGenFunction::emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+                                             ReturnValueSlot ReturnValue) {
   const BinaryOperator *BO =
       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
   const Expr *BaseExpr = BO->getLHS();
 
@@ -119,15 +119,15 @@ CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
   // Emit the 'this' pointer.
   Address This = Address::invalid();
   if (BO->getOpcode() == BO_PtrMemI)
-    This = buildPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
+    This = emitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
   else
-    This = buildLValue(BaseExpr).getAddress();
+    This = emitLValue(BaseExpr).getAddress();
 
-  buildTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(),
-                 QualType(MPT->getClass(), 0));
+  emitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(),
+                QualType(MPT->getClass(), 0));
 
   // Get the member function pointer.
-  mlir::Value MemFnPtr = buildScalarExpr(MemFnExpr);
+  mlir::Value MemFnPtr = emitScalarExpr(MemFnExpr);
 
   // Resolve the member function pointer to the actual callee and adjust the
   // "this" pointer for call.
@@ -138,19 +138,19 @@ CIRGenFunction::buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
 
   // Prepare the call arguments.
   CallArgList ArgsList;
   ArgsList.add(RValue::get(AdjustedThis), getContext().VoidPtrTy);
-  buildCallArgs(ArgsList, FPT, E->arguments());
+  emitCallArgs(ArgsList, FPT, E->arguments());
 
   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
 
   // Build the call.
   CIRGenCallee Callee(FPT, CalleePtr.getDefiningOp());
-  return buildCall(CGM.getTypes().arrangeCXXMethodCall(ArgsList, FPT, required,
-                                                       /*PrefixSize=*/0),
-                   Callee, ReturnValue, ArgsList, nullptr, E == MustTailCall,
-                   Loc);
+  return emitCall(CGM.getTypes().arrangeCXXMethodCall(ArgsList, FPT, required,
+                                                      /*PrefixSize=*/0),
+                  Callee, ReturnValue, ArgsList, nullptr, E == MustTailCall,
+                  Loc);
 }
 
-RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
+RValue CIRGenFunction::emitCXXMemberOrOperatorMemberCallExpr(
     const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
     bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
     const Expr *Base) {
@@ -183,7 +183,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
       // one or the one of the full expression, we would have to build
       // a derived-to-base cast to compute the correct this pointer, but
       // we don't have support for that yet, so do a virtual call.
-      assert(!cir::MissingFeatures::buildDerivedToBaseCastForDevirt());
+      assert(!cir::MissingFeatures::emitDerivedToBaseCastForDevirt());
       DevirtualizedMethod = nullptr;
     }
   }
@@ -206,12 +206,12 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
       // See further note on TrivialAssignment, we don't handle this during
       // codegen, differently than LLVM, which early optimizes like this:
       //  if (TrivialAssignment) {
-      //    TrivialAssignmentRHS = buildLValue(CE->getArg(1));
+      //    TrivialAssignmentRHS = emitLValue(CE->getArg(1));
       //  } else {
       RtlArgs = &RtlArgStorage;
-      buildCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
-                    drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
-                    /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
+      emitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
+                   drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
+                   /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
     }
   }
 
@@ -219,10 +219,10 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
   if (IsArrow) {
     LValueBaseInfo BaseInfo;
     assert(!cir::MissingFeatures::tbaa());
-    Address ThisValue = buildPointerWithAlignment(Base, &BaseInfo);
+    Address ThisValue = emitPointerWithAlignment(Base, &BaseInfo);
     This = makeAddrLValue(ThisValue, Base->getType(), BaseInfo);
   } else {
-    This = buildLValue(Base);
+    This = emitLValue(Base);
   }
 
   if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
@@ -244,8 +244,8 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
       // We don't early optimize like LLVM does:
       // LValue RHS = isa<CXXOperatorCallExpr>(CE) ? TrivialAssignmentRHS
       //                                           :
-      //                                           buildLValue(*CE->arg_begin());
-      // buildAggregateAssign(This, RHS, CE->getType());
+      //                                           emitLValue(*CE->arg_begin());
+      // emitAggregateAssign(This, RHS, CE->getType());
       // return RValue::get(This.getPointer());
     } else {
       assert(MD->getParent()->mayInsertExtraPadding() &&
@@ -284,7 +284,7 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
     SkippedChecks.set(SanitizerKind::Null, true);
   }
 
-  if (cir::MissingFeatures::buildTypeCheck())
+  if (cir::MissingFeatures::emitTypeCheck())
     llvm_unreachable("NYI");
 
   // C++ [class.virtual]p12:
@@ -318,9 +318,9 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
         IsArrow ? Base->getType()->getPointeeType() : Base->getType();
     // CIRGen does not pass CallOrInvoke here (different from OG LLVM codegen)
     // because in practice it always null even in OG.
-    buildCXXDestructorCall(globalDecl, Callee, This.getPointer(), thisTy,
-                           /*ImplicitParam=*/nullptr,
-                           /*ImplicitParamTy=*/QualType(), CE);
+    emitCXXDestructorCall(globalDecl, Callee, This.getPointer(), thisTy,
+                          /*ImplicitParam=*/nullptr,
+                          /*ImplicitParamTy=*/QualType(), CE);
   }
   return RValue::get(nullptr);
 }
 
@@ -355,25 +355,25 @@ RValue CIRGenFunction::buildCXXMemberOrOperatorMemberCallExpr(
     This.setAddress(NewThisAddr);
   }
 
-  return buildCXXMemberOrOperatorCall(
+  return emitCXXMemberOrOperatorCall(
       CalleeDecl, Callee, ReturnValue, This.getPointer(),
       /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
 }
 
 RValue
-CIRGenFunction::buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
-                                               const CXXMethodDecl *MD,
-                                               ReturnValueSlot ReturnValue) {
+CIRGenFunction::emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+                                              const CXXMethodDecl *MD,
+                                              ReturnValueSlot ReturnValue) {
   assert(MD->isInstance() &&
          "Trying to emit a member call expr on a static method!");
-  return buildCXXMemberOrOperatorMemberCallExpr(
+  return emitCXXMemberOrOperatorMemberCallExpr(
       E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
       /*IsArrow=*/false, E->getArg(0));
 }
 
-static void buildNullBaseClassInitialization(CIRGenFunction &CGF,
-                                             Address DestPtr,
-                                             const CXXRecordDecl *Base) {
+static void emitNullBaseClassInitialization(CIRGenFunction &CGF,
+                                            Address DestPtr,
+                                            const CXXRecordDecl *Base) {
   if (Base->isEmpty())
     return;
 
@@ -422,7 +422,7 @@ static void buildNullBaseClassInitialization(CIRGenFunction &CGF,
   // TODO(cir): `nullConstantForBase` might be better off as a value instead
   // of an mlir::TypedAttr? Once this moves out of skeleton, make sure to double
   // check on what's better.
-  mlir::Attribute nullConstantForBase = CGF.CGM.buildNullConstantForBase(Base);
+  mlir::Attribute nullConstantForBase = CGF.CGM.emitNullConstantForBase(Base);
   if (!CGF.getBuilder().isNullValue(nullConstantForBase)) {
     llvm_unreachable("NYI");
     // Otherwise, just memset the whole thing to zero.  This is legal
@@ -433,8 +433,8 @@ static void buildNullBaseClassInitialization(CIRGenFunction &CGF,
   }
 }
 
-void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E,
-                                           AggValueSlot Dest) {
+void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *E,
+                                          AggValueSlot Dest) {
   assert(!Dest.isIgnored() && "Must have a destination!");
   const auto *CD = E->getConstructor();
 
@@ -446,13 +446,13 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E,
     switch (E->getConstructionKind()) {
     case CXXConstructionKind::Delegating:
     case CXXConstructionKind::Complete:
-      buildNullInitialization(getLoc(E->getSourceRange()), Dest.getAddress(),
-                              E->getType());
+      emitNullInitialization(getLoc(E->getSourceRange()), Dest.getAddress(),
+                             E->getType());
       break;
     case CXXConstructionKind::VirtualBase:
     case CXXConstructionKind::NonVirtualBase:
-      buildNullBaseClassInitialization(*this, Dest.getAddress(),
-                                       CD->getParent());
+      emitNullBaseClassInitialization(*this, Dest.getAddress(),
+                                      CD->getParent());
       break;
     }
   }
@@ -475,13 +475,13 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E,
     assert(SrcObj->isTemporaryObject(getContext(), CD->getParent()));
     assert(
         getContext().hasSameUnqualifiedType(E->getType(), SrcObj->getType()));
-    buildAggExpr(SrcObj, Dest);
+    emitAggExpr(SrcObj, Dest);
     return;
   }
 
   if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) {
-    buildCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
-                                Dest.isSanitizerChecked());
+    emitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
+                               Dest.isSanitizerChecked());
   } else {
     clang::CXXCtorType Type = Ctor_Complete;
     bool ForVirtualBase = false;
@@ -504,7 +504,7 @@ void CIRGenFunction::buildCXXConstructExpr(const CXXConstructExpr *E,
       break;
     }
 
-    buildCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
+    emitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
   }
 }
 
@@ -549,11 +549,10 @@ static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
   return Params;
 }
 
-static mlir::Value buildCXXNewAllocSize(CIRGenFunction &CGF,
-                                        const CXXNewExpr *e,
-                                        unsigned minElements,
-                                        mlir::Value &numElements,
-                                        mlir::Value &sizeWithoutCookie) {
+static mlir::Value emitCXXNewAllocSize(CIRGenFunction &CGF, const CXXNewExpr *e,
+                                       unsigned minElements,
+                                       mlir::Value &numElements,
+                                       mlir::Value &sizeWithoutCookie) {
   QualType type = e->getAllocatedType();
 
   if (!e->isArray()) {
@@ -655,7 +654,7 @@ class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
     }
 
     // Call 'operator delete'.
-    buildNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+    emitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
   }
 };
 } // namespace
 
@@ -724,11 +723,11 @@ static void EnterNewDeleteCleanup(CIRGenFunction &CGF, const CXXNewExpr *E,
 static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init,
                                     QualType AllocType, Address NewPtr,
                                     AggValueSlot::Overlap_t MayOverlap) {
-  // FIXME: Refactor with buildExprAsInit.
+  // FIXME: Refactor with emitExprAsInit.
   switch (CGF.getEvaluationKind(AllocType)) {
   case cir::TEK_Scalar:
-    CGF.buildScalarInit(Init, CGF.getLoc(Init->getSourceRange()),
-                        CGF.makeAddrLValue(NewPtr, AllocType), false);
+    CGF.emitScalarInit(Init, CGF.getLoc(Init->getSourceRange()),
+                       CGF.makeAddrLValue(NewPtr, AllocType), false);
     return;
   case cir::TEK_Complex:
     llvm_unreachable("NYI");
@@ -739,17 +738,17 @@ static void StoreAnyExprIntoOneUnit(CIRGenFunction &CGF, const Expr *Init,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased, MayOverlap,
                               AggValueSlot::IsNotZeroed,
                               AggValueSlot::IsSanitizerChecked);
-    CGF.buildAggExpr(Init, Slot);
+    CGF.emitAggExpr(Init, Slot);
     return;
   }
   }
   llvm_unreachable("bad evaluation kind");
 }
 
-static void buildNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E,
-                                QualType ElementType, mlir::Type ElementTy,
-                                Address NewPtr, mlir::Value NumElements,
-                                mlir::Value AllocSizeWithoutCookie) {
+static void emitNewInitializer(CIRGenFunction &CGF, const CXXNewExpr *E,
+                               QualType ElementType, mlir::Type ElementTy,
+                               Address NewPtr, mlir::Value NumElements,
+                               mlir::Value AllocSizeWithoutCookie) {
   assert(!cir::MissingFeatures::generateDebugInfo());
   if (E->isArray()) {
     llvm_unreachable("NYI");
@@ -785,7 +784,7 @@ struct CallObjectDelete final : EHScopeStack::Cleanup {
       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
 
   void Emit(CIRGenFunction &CGF, Flags flags) override {
-    CGF.buildDeleteCall(OperatorDelete, Ptr, ElementType);
+    CGF.emitDeleteCall(OperatorDelete, Ptr, ElementType);
   }
 };
 } // namespace
 
@@ -800,8 +799,8 @@ static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE,
   // dynamic type, the static type shall be a base class of the dynamic type
   // of the object to be deleted and the static type shall have a virtual
   // destructor or the behavior is undefined.
-  CGF.buildTypeCheck(CIRGenFunction::TCK_MemberCall, DE->getExprLoc(),
-                     Ptr.getPointer(), ElementType);
+  CGF.emitTypeCheck(CIRGenFunction::TCK_MemberCall, DE->getExprLoc(),
+                    Ptr.getPointer(), ElementType);
 
   const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
   assert(!OperatorDelete->isDestroyingOperatorDelete());
@@ -878,9 +877,9 @@ static bool EmitObjectDelete(CIRGenFunction &CGF, const CXXDeleteExpr *DE,
   return false;
 }
 
-void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) {
+void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *E) {
   const Expr *Arg = E->getArgument();
-  Address Ptr = buildPointerWithAlignment(Arg);
+  Address Ptr = emitPointerWithAlignment(Arg);
 
   // Null check the pointer.
   //
@@ -918,7 +917,7 @@ void CIRGenFunction::buildCXXDeleteExpr(const CXXDeleteExpr *E) {
   }
 }
 
-mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
+mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *E) {
   // The element type being allocated.
   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
 
@@ -940,7 +939,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
 
   mlir::Value numElements = nullptr;
   mlir::Value allocSizeWithoutCookie = nullptr;
-  mlir::Value allocSize = buildCXXNewAllocSize(
+  mlir::Value allocSize = emitCXXNewAllocSize(
       *this, E, minElements, numElements, allocSizeWithoutCookie);
   CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
 
@@ -954,7 +953,7 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
     const Expr *arg = *E->placement_arguments().begin();
 
     LValueBaseInfo BaseInfo;
-    allocation = buildPointerWithAlignment(arg, &BaseInfo);
+    allocation = emitPointerWithAlignment(arg, &BaseInfo);
 
     // The pointer expression will, in many cases, be an opaque void*.
     // In these cases, discard the computed alignment and use the
@@ -989,13 +988,13 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
     }
 
     // FIXME: Why do we not pass a CalleeDecl here?
-    buildCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
-                  /*AC*/
-                  AbstractCallee(),
-                  /*ParamsToSkip*/
-                  ParamsToSkip);
+    emitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
+                 /*AC*/
+                 AbstractCallee(),
+                 /*ParamsToSkip*/
+                 ParamsToSkip);
     RValue RV =
-        buildNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+        emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
 
     // Set !heapallocsite metadata on the call to operator new.
     assert(!cir::MissingFeatures::generateDebugInfo());
@@ -1116,13 +1115,13 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
   // we'll null check the wrong pointer here.
   SanitizerSet SkippedChecks;
   SkippedChecks.set(SanitizerKind::Null, nullCheck);
-  buildTypeCheck(CIRGenFunction::TCK_ConstructorCall,
-                 E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
-                 result.getPointer(), allocType, result.getAlignment(),
-                 SkippedChecks, numElements);
+  emitTypeCheck(CIRGenFunction::TCK_ConstructorCall,
+                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+                result.getPointer(), allocType, result.getAlignment(),
+                SkippedChecks, numElements);
 
-  buildNewInitializer(*this, E, allocType, elementTy, result, numElements,
-                      allocSizeWithoutCookie);
+  emitNewInitializer(*this, E, allocType, elementTy, result, numElements,
+                     allocSizeWithoutCookie);
   auto resultPtr = result.getPointer();
   if (E->isArray()) {
     llvm_unreachable("NYI");
@@ -1151,12 +1150,12 @@ mlir::Value CIRGenFunction::buildCXXNewExpr(const CXXNewExpr *E) {
   return resultPtr;
 }
 
-RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor,
-                                              const CIRGenCallee &Callee,
-                                              mlir::Value This, QualType ThisTy,
-                                              mlir::Value ImplicitParam,
-                                              QualType ImplicitParamTy,
-                                              const CallExpr *CE) {
+RValue CIRGenFunction::emitCXXDestructorCall(GlobalDecl Dtor,
+                                             const CIRGenCallee &Callee,
+                                             mlir::Value This, QualType ThisTy,
+                                             mlir::Value ImplicitParam,
+                                             QualType ImplicitParamTy,
+                                             const CallExpr *CE) {
   const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
 
   assert(!ThisTy.isNull());
@@ -1173,25 +1172,25 @@ RValue CIRGenFunction::buildCXXDestructorCall(GlobalDecl Dtor,
   commonBuildCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
                                      ImplicitParamTy, CE, Args, nullptr);
   assert((CE || Dtor.getDecl()) && "expected source location provider");
-  return buildCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
-                   ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
-                   CE ? getLoc(CE->getExprLoc())
-                      : getLoc(Dtor.getDecl()->getSourceRange()));
+  return emitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
+                  ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
+                  CE ? getLoc(CE->getExprLoc())
+                     : getLoc(Dtor.getDecl()->getSourceRange()));
 }
 
 /// Emit a call to an operator new or operator delete function, as implicitly
 /// created by new-expressions and delete-expressions.
-static RValue buildNewDeleteCall(CIRGenFunction &CGF,
-                                 const FunctionDecl *CalleeDecl,
-                                 const FunctionProtoType *CalleeType,
-                                 const CallArgList &Args) {
+static RValue emitNewDeleteCall(CIRGenFunction &CGF,
+                                const FunctionDecl *CalleeDecl,
+                                const FunctionProtoType *CalleeType,
+                                const CallArgList &Args) {
   cir::CIRCallOpInterface CallOrTryCall;
   auto CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
   CIRGenCallee Callee =
       CIRGenCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
-  RValue RV = CGF.buildCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
-                                Args, CalleeType, /*ChainCall=*/false),
-                            Callee, ReturnValueSlot(), Args, &CallOrTryCall);
+  RValue RV = CGF.emitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+                               Args, CalleeType, /*ChainCall=*/false),
+                           Callee, ReturnValueSlot(), Args, &CallOrTryCall);
 
   /// C++1y [expr.new]p10:
   ///   [In a new-expression,] an implementation is allowed to omit a call
@@ -1202,11 +1201,11 @@ static RValue buildNewDeleteCall(CIRGenFunction &CGF,
   return RV;
 }
 
-RValue CIRGenFunction::buildBuiltinNewDeleteCall(const FunctionProtoType *type,
-                                                 const CallExpr *theCall,
-                                                 bool isDelete) {
+RValue CIRGenFunction::emitBuiltinNewDeleteCall(const FunctionProtoType *type,
+                                                const CallExpr *theCall,
+                                                bool isDelete) {
   CallArgList args;
-  buildCallArgs(args, type, theCall->arguments());
+  emitCallArgs(args, type, theCall->arguments());
 
   // Find the allocation or deallocation function that we're calling.
   ASTContext &ctx = getContext();
   DeclarationName name =
@@ -1215,14 +1214,14 @@ RValue CIRGenFunction::buildBuiltinNewDeleteCall(const FunctionProtoType *type,
   for (auto *decl : ctx.getTranslationUnitDecl()->lookup(name))
     if (auto *fd = dyn_cast<FunctionDecl>(decl))
       if (ctx.hasSameType(fd->getType(), QualType(type, 0)))
-        return buildNewDeleteCall(*this, fd, type, args);
+        return emitNewDeleteCall(*this, fd, type, args);
   llvm_unreachable("predeclared global operator new/delete is missing");
 }
 
-void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD,
-                                     mlir::Value Ptr, QualType DeleteTy,
-                                     mlir::Value NumElements,
-                                     CharUnits CookieSize) {
+void CIRGenFunction::emitDeleteCall(const FunctionDecl *DeleteFD,
+                                    mlir::Value Ptr, QualType DeleteTy,
+                                    mlir::Value NumElements,
+                                    CharUnits CookieSize) {
   assert((!NumElements && CookieSize.isZero()) ||
          DeleteFD->getOverloadedOperator() == OO_Array_Delete);
 
@@ -1281,7 +1280,7 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD,
          "unknown parameter to usual delete function");
 
   // Emit the call to delete.
-  buildNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
+  emitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
 
   // If call argument lowering didn't use the destroying_delete_t alloca,
   // remove it again.
@@ -1290,8 +1289,8 @@ void CIRGenFunction::buildDeleteCall(const FunctionDecl *DeleteFD,
   }
 }
 
-static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF,
-                                          mlir::Location Loc, QualType DestTy) {
+static mlir::Value emitDynamicCastToNull(CIRGenFunction &CGF,
+                                         mlir::Location Loc, QualType DestTy) {
   mlir::Type DestCIRTy = CGF.ConvertType(DestTy);
   assert(mlir::isa<cir::PointerType>(DestCIRTy) &&
          "result of dynamic_cast should be a ptr");
 
@@ -1302,7 +1301,7 @@ static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF,
     auto *CurrentRegion = CGF.getBuilder().getBlock()->getParent();
     /// C++ [expr.dynamic.cast]p9:
     ///   A failed cast to reference type throws std::bad_cast
-    CGF.CGM.getCXXABI().buildBadCastCall(CGF, Loc);
+    CGF.CGM.getCXXABI().emitBadCastCall(CGF, Loc);
 
     // The call to bad_cast will terminate the current block. Create a new block
     // to hold any follow up code.
@@ -1312,11 +1311,11 @@ static mlir::Value buildDynamicCastToNull(CIRGenFunction &CGF,
   return NullPtrValue;
 }
 
-mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr,
-                                             const CXXDynamicCastExpr *DCE) {
+mlir::Value CIRGenFunction::emitDynamicCast(Address ThisAddr,
+                                            const CXXDynamicCastExpr *DCE) {
   auto loc = getLoc(DCE->getSourceRange());
 
-  CGM.buildExplicitCastExprType(DCE, this);
+  CGM.emitExplicitCastExprType(DCE, this);
   QualType destTy = DCE->getTypeAsWritten();
   QualType srcTy = DCE->getSubExpr()->getType();
@@ -1340,13 +1339,13 @@ mlir::Value CIRGenFunction::buildDynamicCast(Address ThisAddr,
   }
 
   assert(srcRecordTy->isRecordType() && "source type must be a record type!");
-  buildTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
-                 srcRecordTy);
+  emitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
+                srcRecordTy);
 
   if (DCE->isAlwaysNull())
-    return buildDynamicCastToNull(*this, loc, destTy);
+    return emitDynamicCastToNull(*this, loc, destTy);
 
   auto destCirTy = mlir::cast<cir::PointerType>(ConvertType(destTy));
-  return CGM.getCXXABI().buildDynamicCast(*this, loc, srcRecordTy, destRecordTy,
-                                          destCirTy, isRefCast, ThisAddr);
+  return CGM.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
+                                         destCirTy, isRefCast, ThisAddr);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 61b1979f0ebf..df4aab399cfc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -31,23 +31,23 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   /// Given an expression with complex type that represents a value l-value,
   /// this method emits the address of the l-value, then loads and returns the
   /// result.
-  mlir::Value buildLoadOfLValue(const Expr *E) {
-    return buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc());
+  mlir::Value emitLoadOfLValue(const Expr *E) {
+    return emitLoadOfLValue(CGF.emitLValue(E), E->getExprLoc());
   }
 
-  mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc);
+  mlir::Value emitLoadOfLValue(LValue LV, SourceLocation Loc);
 
   /// EmitStoreOfComplex - Store the specified real/imag parts into the
   /// specified value pointer.
-  void buildStoreOfComplex(mlir::Location Loc, mlir::Value Val, LValue LV,
-                           bool isInit);
+  void emitStoreOfComplex(mlir::Location Loc, mlir::Value Val, LValue LV,
+                          bool isInit);
 
   /// Emit a cast from complex value Val to DestType.
-  mlir::Value buildComplexToComplexCast(mlir::Value Val, QualType SrcType,
-                                        QualType DestType, SourceLocation Loc);
-  /// Emit a cast from scalar value Val to DestType.
- mlir::Value buildScalarToComplexCast(mlir::Value Val, QualType SrcType, + mlir::Value emitComplexToComplexCast(mlir::Value Val, QualType SrcType, QualType DestType, SourceLocation Loc); + /// Emit a cast from scalar value Val to DestType. + mlir::Value emitScalarToComplexCast(mlir::Value Val, QualType SrcType, + QualType DestType, SourceLocation Loc); //===--------------------------------------------------------------------===// // Visitor Methods @@ -89,8 +89,8 @@ class ComplexExprEmitter : public StmtVisitor { Expr *E) { assert(Constant && "not a constant"); if (Constant.isReference()) - return buildLoadOfLValue(Constant.getReferenceLValue(CGF, E), - E->getExprLoc()); + return emitLoadOfLValue(Constant.getReferenceLValue(CGF, E), + E->getExprLoc()); auto valueAttr = Constant.getValue(); return Builder.getConstant(CGF.getLoc(E->getSourceRange()), valueAttr); @@ -100,7 +100,7 @@ class ComplexExprEmitter : public StmtVisitor { mlir::Value VisitDeclRefExpr(DeclRefExpr *E) { if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) return emitConstant(Constant, E); - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } mlir::Value VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { llvm_unreachable("NYI"); @@ -120,13 +120,13 @@ class ComplexExprEmitter : public StmtVisitor { // FIXME: CompoundLiteralExpr - mlir::Value buildCast(CastKind CK, Expr *Op, QualType DestTy); + mlir::Value emitCast(CastKind CK, Expr *Op, QualType DestTy); mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *E) { // Unlike for scalars, we don't have to worry about function->ptr demotion // here. if (E->changesVolatileQualification()) - return buildLoadOfLValue(E); - return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); + return emitLoadOfLValue(E); + return emitCast(E->getCastKind(), E->getSubExpr(), E->getType()); } mlir::Value VisitCastExpr(CastExpr *E); mlir::Value VisitCallExpr(const CallExpr *E); @@ -189,22 +189,22 @@ class ComplexExprEmitter : public StmtVisitor { FPOptions FPFeatures{}; }; - BinOpInfo buildBinOps(const BinaryOperator *E, - QualType PromotionTy = QualType()); - mlir::Value buildPromoted(const Expr *E, QualType PromotionTy); - mlir::Value buildPromotedComplexOperand(const Expr *E, QualType PromotionTy); + BinOpInfo emitBinOps(const BinaryOperator *E, + QualType PromotionTy = QualType()); + mlir::Value emitPromoted(const Expr *E, QualType PromotionTy); + mlir::Value emitPromotedComplexOperand(const Expr *E, QualType PromotionTy); - LValue buildCompoundAssignLValue( + LValue emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val); - mlir::Value buildCompoundAssign( + mlir::Value emitCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &)); - mlir::Value buildBinAdd(const BinOpInfo &Op); - mlir::Value buildBinSub(const BinOpInfo &Op); - mlir::Value buildBinMul(const BinOpInfo &Op); - mlir::Value buildBinDiv(const BinOpInfo &Op); + mlir::Value emitBinAdd(const BinOpInfo &Op); + mlir::Value emitBinSub(const BinOpInfo &Op); + mlir::Value emitBinMul(const BinOpInfo &Op); + mlir::Value emitBinDiv(const BinOpInfo &Op); QualType HigherPrecisionTypeForComplexArithmetic(QualType ElementType, bool IsDivOpCode) { @@ -254,9 +254,9 @@ class ComplexExprEmitter : public StmtVisitor { QualType promotionTy = getPromotionType( \ E->getType(), \ (E->getOpcode() == BinaryOperatorKind::BO_Div) ? 
true : false); \ - mlir::Value result = buildBin##OP(buildBinOps(E, promotionTy)); \ + mlir::Value result = emitBin##OP(emitBinOps(E, promotionTy)); \ if (!promotionTy.isNull()) \ - result = CGF.buildUnPromotedValue(result, E->getType()); \ + result = CGF.emitUnPromotedValue(result, E->getType()); \ return result; \ } @@ -272,16 +272,16 @@ class ComplexExprEmitter : public StmtVisitor { // Compound assignments. mlir::Value VisitBinAddAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinAdd); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinAdd); } mlir::Value VisitBinSubAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinSub); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinSub); } mlir::Value VisitBinMulAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinMul); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinMul); } mlir::Value VisitBinDivAssign(const CompoundAssignOperator *E) { - return buildCompoundAssign(E, &ComplexExprEmitter::buildBinDiv); + return emitCompoundAssign(E, &ComplexExprEmitter::emitBinDiv); } // GCC rejects rem/and/or/xor for integer complex. @@ -289,10 +289,10 @@ class ComplexExprEmitter : public StmtVisitor { // No comparisons produce a complex result. - LValue buildBinAssignLValue(const BinaryOperator *E, mlir::Value &Val); + LValue emitBinAssignLValue(const BinaryOperator *E, mlir::Value &Val); mlir::Value VisitBinAssign(const BinaryOperator *E) { mlir::Value Val; - LValue LV = buildBinAssignLValue(E, Val); + LValue LV = emitBinAssignLValue(E, Val); // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) @@ -303,7 +303,7 @@ class ComplexExprEmitter : public StmtVisitor { if (!LV.isVolatileQualified()) return Val; - return buildLoadOfLValue(LV, E->getExprLoc()); + return emitLoadOfLValue(LV, E->getExprLoc()); }; mlir::Value VisitBinComma(const BinaryOperator *E) { llvm_unreachable("NYI"); @@ -345,8 +345,8 @@ static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder, return builder.createComplexCreate(loc, real, imag); } -mlir::Value ComplexExprEmitter::buildLoadOfLValue(LValue LV, - SourceLocation Loc) { +mlir::Value ComplexExprEmitter::emitLoadOfLValue(LValue LV, + SourceLocation Loc) { assert(LV.isSimple() && "non-simple complex l-value?"); if (LV.getType()->isAtomicType()) llvm_unreachable("NYI"); @@ -355,9 +355,8 @@ mlir::Value ComplexExprEmitter::buildLoadOfLValue(LValue LV, return Builder.createLoad(CGF.getLoc(Loc), SrcPtr, LV.isVolatileQualified()); } -void ComplexExprEmitter::buildStoreOfComplex(mlir::Location Loc, - mlir::Value Val, LValue LV, - bool isInit) { +void ComplexExprEmitter::emitStoreOfComplex(mlir::Location Loc, mlir::Value Val, + LValue LV, bool isInit) { if (LV.getType()->isAtomicType() || (!isInit && CGF.LValueIsSuitableForInlineAtomic(LV))) llvm_unreachable("NYI"); @@ -366,10 +365,10 @@ void ComplexExprEmitter::buildStoreOfComplex(mlir::Location Loc, Builder.createStore(Loc, Val, DestAddr, LV.isVolatileQualified()); } -mlir::Value ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, - QualType SrcType, - QualType DestType, - SourceLocation Loc) { +mlir::Value ComplexExprEmitter::emitComplexToComplexCast(mlir::Value Val, + QualType SrcType, + QualType DestType, + SourceLocation Loc) { if (SrcType == DestType) return Val; @@ -393,10 +392,10 @@ mlir::Value 
ComplexExprEmitter::buildComplexToComplexCast(mlir::Value Val, CGF.ConvertType(DestType)); } -mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, - QualType SrcType, - QualType DestType, - SourceLocation Loc) { +mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value Val, + QualType SrcType, + QualType DestType, + SourceLocation Loc) { cir::CastKind CastOpKind; if (SrcType->isFloatingType()) CastOpKind = cir::CastKind::float_to_complex; @@ -409,8 +408,8 @@ mlir::Value ComplexExprEmitter::buildScalarToComplexCast(mlir::Value Val, CGF.ConvertType(DestType)); } -mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, - QualType DestTy) { +mlir::Value ComplexExprEmitter::emitCast(CastKind CK, Expr *Op, + QualType DestTy) { switch (CK) { case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); @@ -490,8 +489,8 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, case CK_FloatingRealToComplex: case CK_IntegralRealToComplex: { assert(!cir::MissingFeatures::CGFPOptionsRAII()); - return buildScalarToComplexCast(CGF.buildScalarExpr(Op), Op->getType(), - DestTy, Op->getExprLoc()); + return emitScalarToComplexCast(CGF.emitScalarExpr(Op), Op->getType(), + DestTy, Op->getExprLoc()); } case CK_FloatingComplexCast: @@ -499,8 +498,8 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: { assert(!cir::MissingFeatures::CGFPOptionsRAII()); - return buildComplexToComplexCast(Visit(Op), Op->getType(), DestTy, - Op->getExprLoc()); + return emitComplexToComplexCast(Visit(Op), Op->getType(), DestTy, + Op->getExprLoc()); } } @@ -509,23 +508,23 @@ mlir::Value ComplexExprEmitter::buildCast(CastKind CK, Expr *Op, mlir::Value ComplexExprEmitter::VisitCastExpr(CastExpr *E) { if (const auto *ECE = dyn_cast(E)) - CGF.CGM.buildExplicitCastExprType(ECE, &CGF); + CGF.CGM.emitExplicitCastExprType(ECE, &CGF); if (E->changesVolatileQualification()) - return buildLoadOfLValue(E); - return buildCast(E->getCastKind(), E->getSubExpr(), E->getType()); + return emitLoadOfLValue(E); + return emitCast(E->getCastKind(), E->getSubExpr(), E->getType()); } mlir::Value ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType(CGF.getContext())->isReferenceType()) - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); - return CGF.buildCallExpr(E).getComplexVal(); + return CGF.emitCallExpr(E).getComplexVal(); } mlir::Value ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) { - LValue LV = CGF.buildLValue(E->getSubExpr()); - return CGF.buildComplexPrePostIncDec(E, LV, isInc, isPre); + LValue LV = CGF.emitLValue(E->getSubExpr()); + return CGF.emitComplexPrePostIncDec(E, LV, isInc, isPre); } mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E, @@ -535,7 +534,7 @@ mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E, : PromotionType; mlir::Value result = VisitPlus(E, promotionTy); if (!promotionTy.isNull()) - return CGF.buildUnPromotedValue(result, E->getSubExpr()->getType()); + return CGF.emitUnPromotedValue(result, E->getSubExpr()->getType()); return result; } @@ -543,7 +542,7 @@ mlir::Value ComplexExprEmitter::VisitPlus(const UnaryOperator *E, QualType PromotionType) { mlir::Value Op; if (!PromotionType.isNull()) - Op = CGF.buildPromotedComplexExpr(E->getSubExpr(), PromotionType); + Op = CGF.emitPromotedComplexExpr(E->getSubExpr(), PromotionType); else Op = Visit(E->getSubExpr()); @@ -558,7 
+557,7 @@ mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E, : PromotionType; mlir::Value result = VisitMinus(E, promotionTy); if (!promotionTy.isNull()) - return CGF.buildUnPromotedValue(result, E->getSubExpr()->getType()); + return CGF.emitUnPromotedValue(result, E->getSubExpr()->getType()); return result; } @@ -566,7 +565,7 @@ mlir::Value ComplexExprEmitter::VisitMinus(const UnaryOperator *E, QualType PromotionType) { mlir::Value Op; if (!PromotionType.isNull()) - Op = CGF.buildPromotedComplexExpr(E->getSubExpr(), PromotionType); + Op = CGF.emitPromotedComplexExpr(E->getSubExpr(), PromotionType); else Op = Visit(E->getSubExpr()); @@ -581,11 +580,11 @@ mlir::Value ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) { } ComplexExprEmitter::BinOpInfo -ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { +ComplexExprEmitter::emitBinOps(const BinaryOperator *E, QualType PromotionTy) { BinOpInfo Ops{CGF.getLoc(E->getExprLoc())}; - Ops.LHS = buildPromotedComplexOperand(E->getLHS(), PromotionTy); - Ops.RHS = buildPromotedComplexOperand(E->getRHS(), PromotionTy); + Ops.LHS = emitPromotedComplexOperand(E->getLHS(), PromotionTy); + Ops.RHS = emitPromotedComplexOperand(E->getRHS(), PromotionTy); if (!PromotionTy.isNull()) Ops.Ty = PromotionTy; else @@ -594,14 +593,14 @@ ComplexExprEmitter::buildBinOps(const BinaryOperator *E, QualType PromotionTy) { return Ops; } -mlir::Value ComplexExprEmitter::buildPromoted(const Expr *E, - QualType PromotionTy) { +mlir::Value ComplexExprEmitter::emitPromoted(const Expr *E, + QualType PromotionTy) { E = E->IgnoreParens(); if (const auto *BO = dyn_cast(E)) { switch (BO->getOpcode()) { #define HANDLE_BINOP(OP) \ case BO_##OP: \ - return buildBin##OP(buildBinOps(BO, PromotionTy)); + return emitBin##OP(emitBinOps(BO, PromotionTy)); HANDLE_BINOP(Add) HANDLE_BINOP(Sub) HANDLE_BINOP(Mul) @@ -622,16 +621,16 @@ mlir::Value ComplexExprEmitter::buildPromoted(const Expr *E, } auto result = Visit(const_cast(E)); if (!PromotionTy.isNull()) - return CGF.buildPromotedValue(result, PromotionTy); + return CGF.emitPromotedValue(result, PromotionTy); return result; } mlir::Value -ComplexExprEmitter::buildPromotedComplexOperand(const Expr *E, - QualType PromotionTy) { +ComplexExprEmitter::emitPromotedComplexOperand(const Expr *E, + QualType PromotionTy) { if (E->getType()->isAnyComplexType()) { if (!PromotionTy.isNull()) - return CGF.buildPromotedComplexExpr(E, PromotionTy); + return CGF.emitPromotedComplexExpr(E, PromotionTy); return Visit(const_cast(E)); } @@ -639,15 +638,15 @@ ComplexExprEmitter::buildPromotedComplexOperand(const Expr *E, if (!PromotionTy.isNull()) { QualType ComplexElementTy = PromotionTy->castAs()->getElementType(); - Real = CGF.buildPromotedScalarExpr(E, ComplexElementTy); + Real = CGF.emitPromotedScalarExpr(E, ComplexElementTy); } else - Real = CGF.buildScalarExpr(E); + Real = CGF.emitScalarExpr(E); return createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), Real); } -LValue ComplexExprEmitter::buildCompoundAssignLValue( +LValue ComplexExprEmitter::emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &), RValue &Val) { QualType LHSTy = E->getLHS()->getType(); @@ -676,19 +675,19 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( if (!PromotionTypeRHS.isNull()) OpInfo.RHS = createComplexFromReal( CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), - CGF.buildPromotedScalarExpr(E->getRHS(), PromotionTypeRHS)); + 
CGF.emitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS)); else { assert(CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType())); OpInfo.RHS = createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), - CGF.buildScalarExpr(E->getRHS())); + CGF.emitScalarExpr(E->getRHS())); } } else { if (!PromotionTypeRHS.isNull()) { OpInfo.RHS = createComplexFromReal( CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), - CGF.buildPromotedComplexExpr(E->getRHS(), PromotionTypeRHS)); + CGF.emitPromotedComplexExpr(E->getRHS(), PromotionTypeRHS)); } else { assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType())); @@ -696,20 +695,20 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( } } - LValue LHS = CGF.buildLValue(E->getLHS()); + LValue LHS = CGF.emitLValue(E->getLHS()); // Load from the l-value and convert it. SourceLocation Loc = E->getExprLoc(); QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType()); if (LHSTy->isAnyComplexType()) { - mlir::Value LHSVal = buildLoadOfLValue(LHS, Loc); + mlir::Value LHSVal = emitLoadOfLValue(LHS, Loc); if (!PromotionTypeLHS.isNull()) OpInfo.LHS = - buildComplexToComplexCast(LHSVal, LHSTy, PromotionTypeLHS, Loc); + emitComplexToComplexCast(LHSVal, LHSTy, PromotionTypeLHS, Loc); else - OpInfo.LHS = buildComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); + OpInfo.LHS = emitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); } else { - mlir::Value LHSVal = CGF.buildLoadOfScalar(LHS, Loc); + mlir::Value LHSVal = CGF.emitLoadOfScalar(LHS, Loc); // For floating point real operands we can directly pass the scalar form // to the binary operator emission and potentially get more efficient code. if (LHSTy->isRealFloatingType()) { @@ -719,17 +718,17 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( cast(PromotionTypeLHS)->getElementType(); if (!CGF.getContext().hasSameUnqualifiedType(PromotedComplexElementTy, PromotionTypeLHS)) - LHSVal = CGF.buildScalarConversion(LHSVal, LHSTy, - PromotedComplexElementTy, Loc); + LHSVal = CGF.emitScalarConversion(LHSVal, LHSTy, + PromotedComplexElementTy, Loc); } else { if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy)) LHSVal = - CGF.buildScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc); + CGF.emitScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc); } OpInfo.LHS = createComplexFromReal(CGF.getBuilder(), CGF.getLoc(E->getExprLoc()), LHSVal); } else { - OpInfo.LHS = buildScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); + OpInfo.LHS = emitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc); } } @@ -739,25 +738,25 @@ LValue ComplexExprEmitter::buildCompoundAssignLValue( // Truncate the result and store it into the LHS lvalue. 
if (LHSTy->isAnyComplexType()) { mlir::Value ResVal = - buildComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc); - buildStoreOfComplex(CGF.getLoc(E->getExprLoc()), ResVal, LHS, - /*isInit*/ false); + emitComplexToComplexCast(Result, OpInfo.Ty, LHSTy, Loc); + emitStoreOfComplex(CGF.getLoc(E->getExprLoc()), ResVal, LHS, + /*isInit*/ false); Val = RValue::getComplex(ResVal); } else { mlir::Value ResVal = - CGF.buildComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc); - CGF.buildStoreOfScalar(ResVal, LHS, /*isInit*/ false); + CGF.emitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy, Loc); + CGF.emitStoreOfScalar(ResVal, LHS, /*isInit*/ false); Val = RValue::get(ResVal); } return LHS; } -mlir::Value ComplexExprEmitter::buildCompoundAssign( +mlir::Value ComplexExprEmitter::emitCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ComplexExprEmitter::*Func)(const BinOpInfo &)) { RValue Val; - LValue LV = buildCompoundAssignLValue(E, Func, Val); + LValue LV = emitCompoundAssignLValue(E, Func, Val); // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) @@ -767,15 +766,15 @@ mlir::Value ComplexExprEmitter::buildCompoundAssign( if (!LV.isVolatileQualified()) return Val.getComplexVal(); - return buildLoadOfLValue(LV, E->getExprLoc()); + return emitLoadOfLValue(LV, E->getExprLoc()); } -mlir::Value ComplexExprEmitter::buildBinAdd(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexAdd(Op.Loc, Op.LHS, Op.RHS); } -mlir::Value ComplexExprEmitter::buildBinSub(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexSub(Op.Loc, Op.LHS, Op.RHS); } @@ -796,22 +795,22 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) { } } -mlir::Value ComplexExprEmitter::buildBinMul(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexMul( Op.Loc, Op.LHS, Op.RHS, getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); } -mlir::Value ComplexExprEmitter::buildBinDiv(const BinOpInfo &Op) { +mlir::Value ComplexExprEmitter::emitBinDiv(const BinOpInfo &Op) { assert(!cir::MissingFeatures::CGFPOptionsRAII()); return CGF.getBuilder().createComplexDiv( Op.Loc, Op.LHS, Op.RHS, getComplexRangeAttr(Op.FPFeatures.getComplexRange()), FPHasBeenPromoted); } -LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, - mlir::Value &Val) { +LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *E, + mlir::Value &Val) { assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), E->getRHS()->getType()) && "Invalid assignment"); @@ -820,10 +819,10 @@ LValue ComplexExprEmitter::buildBinAssignLValue(const BinaryOperator *E, Val = Visit(E->getRHS()); // Compute the address to store into. - LValue LHS = CGF.buildLValue(E->getLHS()); + LValue LHS = CGF.emitLValue(E->getLHS()); // Store the result value into the LHS lvalue. 
- buildStoreOfComplex(CGF.getLoc(E->getExprLoc()), Val, LHS, /*isInit*/ false); + emitStoreOfComplex(CGF.getLoc(E->getExprLoc()), Val, LHS, /*isInit*/ false); return LHS; } @@ -855,8 +854,8 @@ ComplexExprEmitter::VisitImaginaryLiteral(const ImaginaryLiteral *IL) { mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { if (E->getNumInits() == 2) { - mlir::Value Real = CGF.buildScalarExpr(E->getInit(0)); - mlir::Value Imag = CGF.buildScalarExpr(E->getInit(1)); + mlir::Value Real = CGF.emitScalarExpr(E->getInit(0)); + mlir::Value Imag = CGF.emitScalarExpr(E->getInit(1)); return Builder.createComplexCreate(CGF.getLoc(E->getExprLoc()), Real, Imag); } @@ -869,13 +868,13 @@ mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) { return Builder.getZero(CGF.getLoc(E->getExprLoc()), CGF.ConvertType(Ty)); } -mlir::Value CIRGenFunction::buildPromotedComplexExpr(const Expr *E, - QualType PromotionType) { - return ComplexExprEmitter(*this).buildPromoted(E, PromotionType); +mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *E, + QualType PromotionType) { + return ComplexExprEmitter(*this).emitPromoted(E, PromotionType); } -mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, - QualType PromotionType) { +mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result, + QualType PromotionType) { assert(mlir::isa( mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); @@ -883,8 +882,8 @@ mlir::Value CIRGenFunction::buildPromotedValue(mlir::Value result, ConvertType(PromotionType)); } -mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, - QualType UnPromotionType) { +mlir::Value CIRGenFunction::emitUnPromotedValue(mlir::Value result, + QualType UnPromotionType) { assert(mlir::isa( mlir::cast(result.getType()).getElementTy()) && "integral complex will never be promoted"); @@ -892,43 +891,43 @@ mlir::Value CIRGenFunction::buildUnPromotedValue(mlir::Value result, ConvertType(UnPromotionType)); } -mlir::Value CIRGenFunction::buildComplexExpr(const Expr *E) { +mlir::Value CIRGenFunction::emitComplexExpr(const Expr *E) { assert(E && getComplexType(E->getType()) && "Invalid complex expression to emit"); return ComplexExprEmitter(*this).Visit(const_cast(E)); } -void CIRGenFunction::buildComplexExprIntoLValue(const Expr *E, LValue dest, - bool isInit) { +void CIRGenFunction::emitComplexExprIntoLValue(const Expr *E, LValue dest, + bool isInit) { assert(E && getComplexType(E->getType()) && "Invalid complex expression to emit"); ComplexExprEmitter Emitter(*this); mlir::Value Val = Emitter.Visit(const_cast(E)); - Emitter.buildStoreOfComplex(getLoc(E->getExprLoc()), Val, dest, isInit); + Emitter.emitStoreOfComplex(getLoc(E->getExprLoc()), Val, dest, isInit); } -void CIRGenFunction::buildStoreOfComplex(mlir::Location Loc, mlir::Value V, - LValue dest, bool isInit) { - ComplexExprEmitter(*this).buildStoreOfComplex(Loc, V, dest, isInit); +void CIRGenFunction::emitStoreOfComplex(mlir::Location Loc, mlir::Value V, + LValue dest, bool isInit) { + ComplexExprEmitter(*this).emitStoreOfComplex(Loc, V, dest, isInit); } -Address CIRGenFunction::buildAddrOfRealComponent(mlir::Location loc, - Address addr, - QualType complexType) { +Address CIRGenFunction::emitAddrOfRealComponent(mlir::Location loc, + Address addr, + QualType complexType) { return builder.createRealPtr(loc, addr); } -Address CIRGenFunction::buildAddrOfImagComponent(mlir::Location loc, - Address addr, - QualType complexType) { +Address 
CIRGenFunction::emitAddrOfImagComponent(mlir::Location loc, + Address addr, + QualType complexType) { return builder.createImagPtr(loc, addr); } -LValue CIRGenFunction::buildComplexAssignmentLValue(const BinaryOperator *E) { +LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *E) { assert(E->getOpcode() == BO_Assign); mlir::Value Val; // ignored - LValue LVal = ComplexExprEmitter(*this).buildBinAssignLValue(E, Val); + LValue LVal = ComplexExprEmitter(*this).emitBinAssignLValue(E, Val); if (getLangOpts().OpenMP) llvm_unreachable("NYI"); return LVal; @@ -940,36 +939,36 @@ using CompoundFunc = static CompoundFunc getComplexOp(BinaryOperatorKind Op) { switch (Op) { case BO_MulAssign: - return &ComplexExprEmitter::buildBinMul; + return &ComplexExprEmitter::emitBinMul; case BO_DivAssign: - return &ComplexExprEmitter::buildBinDiv; + return &ComplexExprEmitter::emitBinDiv; case BO_SubAssign: - return &ComplexExprEmitter::buildBinSub; + return &ComplexExprEmitter::emitBinSub; case BO_AddAssign: - return &ComplexExprEmitter::buildBinAdd; + return &ComplexExprEmitter::emitBinAdd; default: llvm_unreachable("unexpected complex compound assignment"); } } -LValue CIRGenFunction::buildComplexCompoundAssignmentLValue( +LValue CIRGenFunction::emitComplexCompoundAssignmentLValue( const CompoundAssignOperator *E) { CompoundFunc Op = getComplexOp(E->getOpcode()); RValue Val; - return ComplexExprEmitter(*this).buildCompoundAssignLValue(E, Op, Val); + return ComplexExprEmitter(*this).emitCompoundAssignLValue(E, Op, Val); } -mlir::Value CIRGenFunction::buildComplexPrePostIncDec(const UnaryOperator *E, - LValue LV, bool isInc, - bool isPre) { - mlir::Value InVal = buildLoadOfComplex(LV, E->getExprLoc()); +mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { + mlir::Value InVal = emitLoadOfComplex(LV, E->getExprLoc()); auto Loc = getLoc(E->getExprLoc()); auto OpKind = isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; mlir::Value IncVal = builder.createUnaryOp(Loc, OpKind, InVal); // Store the updated result through the lvalue. - buildStoreOfComplex(Loc, IncVal, LV, /*init*/ false); + emitStoreOfComplex(Loc, IncVal, LV, /*init*/ false); if (getLangOpts().OpenMP) llvm_unreachable("NYI"); @@ -978,6 +977,6 @@ mlir::Value CIRGenFunction::buildComplexPrePostIncDec(const UnaryOperator *E, return isPre ? 
IncVal : InVal; } -mlir::Value CIRGenFunction::buildLoadOfComplex(LValue src, SourceLocation loc) { - return ComplexExprEmitter(*this).buildLoadOfLValue(src, loc); +mlir::Value CIRGenFunction::emitLoadOfComplex(LValue src, SourceLocation loc) { + return ComplexExprEmitter(*this).emitLoadOfLValue(src, loc); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp index d4d031158d90..ae42f2ff411a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprConst.cpp @@ -44,10 +44,10 @@ namespace { class ConstExprEmitter; static mlir::Attribute -buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, - mlir::Type CommonElementType, unsigned ArrayBound, - SmallVectorImpl &Elements, - mlir::TypedAttr Filler); +emitArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::TypedAttr Filler); struct ConstantAggregateBuilderUtils { CIRGenModule &CGM; @@ -905,7 +905,7 @@ class ConstExprEmitter mlir::Attribute VisitCastExpr(CastExpr *E, QualType destType) { if (const auto *ECE = dyn_cast(E)) - CGM.buildExplicitCastExprType(ECE, Emitter.CGF); + CGM.emitExplicitCastExprType(ECE, Emitter.CGF); Expr *subExpr = E->getSubExpr(); switch (E->getCastKind()) { @@ -1057,8 +1057,8 @@ class ConstExprEmitter auto typedFiller = llvm::dyn_cast_or_null(Filler); if (Filler && !typedFiller) llvm_unreachable("We shouldn't be receiving untyped attrs here"); - return buildArrayConstant(CGM, desiredType, CommonElementType, NumElements, - Elts, typedFiller); + return emitArrayConstant(CGM, desiredType, CommonElementType, NumElements, + Elts, typedFiller); } mlir::Attribute EmitRecordInitialization(InitListExpr *ILE, QualType T) { @@ -1163,10 +1163,10 @@ class ConstExprEmitter }; static mlir::Attribute -buildArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, - mlir::Type CommonElementType, unsigned ArrayBound, - SmallVectorImpl &Elements, - mlir::TypedAttr Filler) { +emitArrayConstant(CIRGenModule &CGM, mlir::Type DesiredType, + mlir::Type CommonElementType, unsigned ArrayBound, + SmallVectorImpl &Elements, + mlir::TypedAttr Filler) { auto &builder = CGM.getBuilder(); // Figure out how long the initial prefix of non-zero elements is. 
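Note: emitArrayConstant (above) scans the initializer for the longest non-zero prefix and represents any trailing zero run with a single typed filler attribute rather than materializing every element. A hypothetical input showing when that matters (illustrative only, not part of the patch):

// Only the three-element prefix needs explicit attributes; the remaining
// 1021 zero elements can collapse into one filler (zero) attribute.
int sparse[1024] = {7, 8, 9};

// A fully non-zero initializer, by contrast, keeps all elements explicit.
int dense[4] = {1, 2, 3, 4};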
@@ -1847,8 +1847,8 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value,
     if (Filler && !typedFiller)
       llvm_unreachable("this should always be typed");
-    return buildArrayConstant(CGM, Desired, CommonElementType, NumElements,
-                              Elts, typedFiller);
+    return emitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
+                             typedFiller);
   }
   case APValue::Vector: {
     const QualType ElementType =
@@ -1896,7 +1896,7 @@ mlir::Attribute ConstantEmitter::tryEmitPrivate(const APValue &Value,
   llvm_unreachable("Unknown APValue kind");
 }

-mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) {
+mlir::Value CIRGenModule::emitNullConstant(QualType T, mlir::Location loc) {
   if (T->getAs<PointerType>()) {
     return builder.getNullPtr(getTypes().convertTypeForMem(T), loc);
   }
@@ -1919,7 +1919,7 @@ mlir::Value CIRGenModule::buildNullConstant(QualType T, mlir::Location loc) {
   return {};
 }

-mlir::Value CIRGenModule::buildMemberPointerConstant(const UnaryOperator *E) {
+mlir::Value CIRGenModule::emitMemberPointerConstant(const UnaryOperator *E) {
   assert(!cir::MissingFeatures::cxxABI());

   auto loc = getLoc(E->getSourceRange());
@@ -1975,14 +1975,14 @@ mlir::Attribute ConstantEmitter::emitNullForMemory(mlir::Location loc,
                                                    CIRGenModule &CGM,
                                                    QualType T) {
   auto cstOp =
-      dyn_cast<cir::ConstantOp>(CGM.buildNullConstant(T, loc).getDefiningOp());
+      dyn_cast<cir::ConstantOp>(CGM.emitNullConstant(T, loc).getDefiningOp());
   assert(cstOp && "expected cir.const op");
   return emitForMemory(CGM, cstOp.getValue(), T);
 }

-static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM,
-                                         const RecordDecl *record,
-                                         bool asCompleteObject) {
+static mlir::TypedAttr emitNullConstant(CIRGenModule &CGM,
+                                        const RecordDecl *record,
+                                        bool asCompleteObject) {
   const CIRGenRecordLayout &layout =
       CGM.getTypes().getCIRGenRecordLayout(record);
   mlir::Type ty = (asCompleteObject ? layout.getCIRType()
@@ -2045,6 +2045,6 @@ static mlir::TypedAttr buildNullConstant(CIRGenModule &CGM,
 }

 mlir::TypedAttr
-CIRGenModule::buildNullConstantForBase(const CXXRecordDecl *Record) {
-  return ::buildNullConstant(*this, Record, false);
+CIRGenModule::emitNullConstantForBase(const CXXRecordDecl *Record) {
+  return ::emitNullConstant(*this, Record, false);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 6763bbccd089..6c4441ba0a1c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -108,26 +108,26 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   }

   mlir::Type ConvertType(QualType T) { return CGF.ConvertType(T); }
-  LValue buildLValue(const Expr *E) { return CGF.buildLValue(E); }
-  LValue buildCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) {
-    return CGF.buildCheckedLValue(E, TCK);
+  LValue emitLValue(const Expr *E) { return CGF.emitLValue(E); }
+  LValue emitCheckedLValue(const Expr *E, CIRGenFunction::TypeCheckKind TCK) {
+    return CGF.emitCheckedLValue(E, TCK);
   }

-  mlir::Value buildComplexToScalarConversion(mlir::Location Loc, mlir::Value V,
-                                             CastKind Kind, QualType DestTy);
+  mlir::Value emitComplexToScalarConversion(mlir::Location Loc, mlir::Value V,
+                                            CastKind Kind, QualType DestTy);

   /// Emit a value that corresponds to null for the given type.
-  mlir::Value buildNullValue(QualType Ty, mlir::Location loc);
+  mlir::Value emitNullValue(QualType Ty, mlir::Location loc);

-  mlir::Value buildPromotedValue(mlir::Value result, QualType PromotionType) {
+  mlir::Value emitPromotedValue(mlir::Value result, QualType PromotionType) {
     return Builder.createFloatingCast(result, ConvertType(PromotionType));
   }

-  mlir::Value buildUnPromotedValue(mlir::Value result, QualType ExprType) {
+  mlir::Value emitUnPromotedValue(mlir::Value result, QualType ExprType) {
     return Builder.createFloatingCast(result, ConvertType(ExprType));
   }

-  mlir::Value buildPromoted(const Expr *E, QualType PromotionType);
+  mlir::Value emitPromoted(const Expr *E, QualType PromotionType);

   //===--------------------------------------------------------------------===//
   //                            Visitor Methods
   //===--------------------------------------------------------------------===//
@@ -161,10 +161,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     llvm_unreachable("NYI");
   }
   mlir::Value VisitCoawaitExpr(CoawaitExpr *S) {
-    return CGF.buildCoawaitExpr(*S).getScalarVal();
+    return CGF.emitCoawaitExpr(*S).getScalarVal();
   }
   mlir::Value VisitCoyieldExpr(CoyieldExpr *S) {
-    return CGF.buildCoyieldExpr(*S).getScalarVal();
+    return CGF.emitCoyieldExpr(*S).getScalarVal();
   }
   mlir::Value VisitUnaryCoawait(const UnaryOperator *E) {
     llvm_unreachable("NYI");
@@ -208,7 +208,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     if (E->getType()->isVoidType())
       return nullptr;

-    return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
+    return emitNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
   }
   mlir::Value VisitGNUNullExpr(const GNUNullExpr *E) {
     llvm_unreachable("NYI");
@@ -246,22 +246,22 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   }

   /// Emits the address of the l-value, then loads and returns the result.
-  mlir::Value buildLoadOfLValue(const Expr *E) {
-    LValue LV = CGF.buildLValue(E);
+  mlir::Value emitLoadOfLValue(const Expr *E) {
+    LValue LV = CGF.emitLValue(E);
     // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V);
-    return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
+    return CGF.emitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
   }

-  mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) {
-    return CGF.buildLoadOfLValue(LV, Loc).getScalarVal();
+  mlir::Value emitLoadOfLValue(LValue LV, SourceLocation Loc) {
+    return CGF.emitLoadOfLValue(LV, Loc).getScalarVal();
   }

   // l-values
   mlir::Value VisitDeclRefExpr(DeclRefExpr *E) {
     if (CIRGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
-      return CGF.buildScalarConstant(Constant, E);
+      return CGF.emitScalarConstant(Constant, E);
     }
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);
   }

   mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
@@ -295,7 +295,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     }

     // Just load the lvalue formed by the subscript expression.
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);
   }

   mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
@@ -330,18 +330,16 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *E) {
     // __builtin_convertvector is an element-wise cast, and is implemented as a
    // regular cast. The back end handles casts of vectors correctly.
-    return buildScalarConversion(Visit(E->getSrcExpr()),
-                                 E->getSrcExpr()->getType(), E->getType(),
-                                 E->getSourceRange().getBegin());
+    return emitScalarConversion(Visit(E->getSrcExpr()),
+                                E->getSrcExpr()->getType(), E->getType(),
+                                E->getSourceRange().getBegin());
   }

-  mlir::Value VisitExtVectorElementExpr(Expr *E) {
-    return buildLoadOfLValue(E);
-  }
+  mlir::Value VisitExtVectorElementExpr(Expr *E) { return emitLoadOfLValue(E); }

   mlir::Value VisitMemberExpr(MemberExpr *E);
   mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);
   }

   mlir::Value VisitInitListExpr(InitListExpr *E);
@@ -351,7 +349,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   }

   mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
-    return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
+    return emitNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
   }
   mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *E) {
     return VisitCastExpr(E);
@@ -362,7 +360,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   mlir::Value VisitStmtExpr(StmtExpr *E) {
     assert(!cir::MissingFeatures::stmtExprEvaluation() && "NYI");
     Address retAlloca =
-        CGF.buildCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType());
+        CGF.emitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType());
     if (!retAlloca.isValid())
       return {};

@@ -373,29 +371,29 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     CGF.getBuilder().hoistAllocaToParentRegion(
         cast<cir::AllocaOp>(retAlloca.getDefiningOp()));

-    return CGF.buildLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()),
-                                 E->getExprLoc());
+    return CGF.emitLoadOfScalar(CGF.makeAddrLValue(retAlloca, E->getType()),
+                                E->getExprLoc());
   }

   // Unary Operators.
   mlir::Value VisitUnaryPostDec(const UnaryOperator *E) {
-    LValue LV = buildLValue(E->getSubExpr());
-    return buildScalarPrePostIncDec(E, LV, false, false);
+    LValue LV = emitLValue(E->getSubExpr());
+    return emitScalarPrePostIncDec(E, LV, false, false);
   }
   mlir::Value VisitUnaryPostInc(const UnaryOperator *E) {
-    LValue LV = buildLValue(E->getSubExpr());
-    return buildScalarPrePostIncDec(E, LV, true, false);
+    LValue LV = emitLValue(E->getSubExpr());
+    return emitScalarPrePostIncDec(E, LV, true, false);
   }
   mlir::Value VisitUnaryPreDec(const UnaryOperator *E) {
-    LValue LV = buildLValue(E->getSubExpr());
-    return buildScalarPrePostIncDec(E, LV, false, true);
+    LValue LV = emitLValue(E->getSubExpr());
+    return emitScalarPrePostIncDec(E, LV, false, true);
   }
   mlir::Value VisitUnaryPreInc(const UnaryOperator *E) {
-    LValue LV = buildLValue(E->getSubExpr());
-    return buildScalarPrePostIncDec(E, LV, true, true);
+    LValue LV = emitLValue(E->getSubExpr());
+    return emitScalarPrePostIncDec(E, LV, true, true);
   }
-  mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
-                                       bool isInc, bool isPre) {
+  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+                                      bool isInc, bool isPre) {
     assert(!CGF.getLangOpts().OpenMP && "Not implemented");
     QualType type = E->getSubExpr()->getType();

@@ -407,7 +405,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
       llvm_unreachable("no atomics inc/dec yet");
     } else {
-      value = buildLoadOfLValue(LV, E->getExprLoc());
+      value = emitLoadOfLValue(LV, E->getExprLoc());
       input = value;
     }

@@ -460,7 +458,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
         llvm_unreachable(
             "perform lossy demotion case for inc/dec not implemented yet");
       } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
-        value = buildIncDecConsiderOverflowBehavior(E, value, isInc);
+        value = emitIncDecConsiderOverflowBehavior(E, value, isInc);
       } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
                  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
         llvm_unreachable(
@@ -469,7 +467,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
         auto Kind =
             E->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
         // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
-        value = buildUnaryOp(E, Kind, input);
+        value = emitUnaryOp(E, Kind, input);
       }
       // Next most common: pointer increment.
     } else if (const PointerType *ptr = type->getAs<PointerType>()) {
@@ -517,7 +515,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
       // Create the inc/dec operation.
       // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
       auto kind = (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec);
-      value = buildUnaryOp(E, kind, value);
+      value = emitUnaryOp(E, kind, value);
     } else {
       // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or
       // __float128. Convert from float.
@@ -570,34 +568,34 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {

     // Store the updated result through the lvalue
     if (LV.isBitField())
-      CGF.buildStoreThroughBitfieldLValue(RValue::get(value), LV, value);
+      CGF.emitStoreThroughBitfieldLValue(RValue::get(value), LV, value);
     else
-      CGF.buildStoreThroughLValue(RValue::get(value), LV);
+      CGF.emitStoreThroughLValue(RValue::get(value), LV);

     // If this is a postinc, return the value read from memory, otherwise use
     // the updated value.
     return isPre ? value : input;
   }

-  mlir::Value buildIncDecConsiderOverflowBehavior(const UnaryOperator *E,
-                                                  mlir::Value InVal,
-                                                  bool IsInc) {
+  mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
+                                                 mlir::Value InVal,
+                                                 bool IsInc) {
    // NOTE(CIR): The SignedOverflowBehavior is attached to the global ModuleOp
    // and the nsw behavior is handled during lowering.
     auto Kind =
         E->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
     switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
     case LangOptions::SOB_Defined:
-      return buildUnaryOp(E, Kind, InVal);
+      return emitUnaryOp(E, Kind, InVal);
     case LangOptions::SOB_Undefined:
       if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
-        return buildUnaryOp(E, Kind, InVal);
+        return emitUnaryOp(E, Kind, InVal);
       llvm_unreachable(
           "inc/dec overflow behavior SOB_Undefined not implemented yet");
       break;
     case LangOptions::SOB_Trapping:
       if (!E->canOverflow())
-        return buildUnaryOp(E, Kind, InVal);
+        return emitUnaryOp(E, Kind, InVal);
       llvm_unreachable(
           "inc/dec overflow behavior SOB_Trapping not implemented yet");
       break;
@@ -606,15 +604,15 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {

   mlir::Value VisitUnaryAddrOf(const UnaryOperator *E) {
     if (llvm::isa<MemberPointerType>(E->getType()))
-      return CGF.CGM.buildMemberPointerConstant(E);
+      return CGF.CGM.emitMemberPointerConstant(E);

-    return CGF.buildLValue(E->getSubExpr()).getPointer();
+    return CGF.emitLValue(E->getSubExpr()).getPointer();
   }

   mlir::Value VisitUnaryDeref(const UnaryOperator *E) {
     if (E->getType()->isVoidType())
       return Visit(E->getSubExpr()); // the actual value should be unused
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);
   }
   mlir::Value VisitUnaryPlus(const UnaryOperator *E,
                              QualType PromotionType = QualType()) {
@@ -623,7 +621,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
                               : PromotionType;
     auto result = VisitPlus(E, promotionTy);
     if (result && !promotionTy.isNull())
-      return buildUnPromotedValue(result, E->getType());
+      return emitUnPromotedValue(result, E->getType());
     return result;
   }

@@ -634,11 +632,11 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {

     mlir::Value operand;
     if (!PromotionType.isNull())
-      operand = CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType);
+      operand = CGF.emitPromotedScalarExpr(E->getSubExpr(), PromotionType);
     else
       operand = Visit(E->getSubExpr());

-    return buildUnaryOp(E, cir::UnaryOpKind::Plus, operand);
+    return emitUnaryOp(E, cir::UnaryOpKind::Plus, operand);
   }

   mlir::Value VisitUnaryMinus(const UnaryOperator *E,
                               QualType PromotionType = QualType()) {
@@ -648,7 +646,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
                               : PromotionType;
     auto result = VisitMinus(E, promotionTy);
     if (result && !promotionTy.isNull())
-      return buildUnPromotedValue(result, E->getType());
+      return emitUnPromotedValue(result, E->getType());
     return result;
   }

@@ -657,19 +655,19 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     mlir::Value operand;
     if (!PromotionType.isNull())
-      operand = CGF.buildPromotedScalarExpr(E->getSubExpr(), PromotionType);
+      operand = CGF.emitPromotedScalarExpr(E->getSubExpr(), PromotionType);
     else
       operand = Visit(E->getSubExpr());

     // NOTE: LLVM codegen will lower this directly to either a FNeg
     // or a Sub instruction.  In CIR this will be handled later in LowerToLLVM.
-    return buildUnaryOp(E, cir::UnaryOpKind::Minus, operand);
+    return emitUnaryOp(E, cir::UnaryOpKind::Minus, operand);
   }

   mlir::Value VisitUnaryNot(const UnaryOperator *E) {
     TestAndClearIgnoreResultAssign();
     mlir::Value op = Visit(E->getSubExpr());
-    return buildUnaryOp(E, cir::UnaryOpKind::Not, op);
+    return emitUnaryOp(E, cir::UnaryOpKind::Not, op);
   }

   mlir::Value VisitUnaryLNot(const UnaryOperator *E);
@@ -685,8 +683,8 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     return Visit(E->getSubExpr());
   }

-  mlir::Value buildUnaryOp(const UnaryOperator *E, cir::UnaryOpKind kind,
-                           mlir::Value input) {
+  mlir::Value emitUnaryOp(const UnaryOperator *E, cir::UnaryOpKind kind,
+                          mlir::Value input) {
     return Builder.create<cir::UnaryOp>(
         CGF.getLoc(E->getSourceRange().getBegin()), input.getType(), kind,
         input);
@@ -710,10 +708,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   mlir::Value VisitExprWithCleanups(ExprWithCleanups *E);
   mlir::Value VisitCXXNewExpr(const CXXNewExpr *E) {
-    return CGF.buildCXXNewExpr(E);
+    return CGF.emitCXXNewExpr(E);
   }
   mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
-    CGF.buildCXXDeleteExpr(E);
+    CGF.emitCXXDeleteExpr(E);
     return {};
   }
   mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *E) {
@@ -736,10 +734,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     llvm_unreachable("NYI");
   }
   mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
-    return buildNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
+    return emitNullValue(E->getType(), CGF.getLoc(E->getSourceRange()));
   }
   mlir::Value VisitCXXThrowExpr(CXXThrowExpr *E) {
-    CGF.buildCXXThrowExpr(E);
+    CGF.emitCXXThrowExpr(E);
     return nullptr;
   }
   mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
@@ -747,7 +745,7 @@
   }

   /// Perform a pointer to boolean conversion.
-  mlir::Value buildPointerToBoolConversion(mlir::Value V, QualType QT) {
+  mlir::Value emitPointerToBoolConversion(mlir::Value V, QualType QT) {
     // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
     // We might want to have a separate pass for these types of conversions.
     return CGF.getBuilder().createPtrToBoolCast(V);
   }

   // Comparisons.
#define VISITCOMP(CODE)                                                        \
-  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return buildCmp(E); }
+  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
   VISITCOMP(LT)
   VISITCOMP(GT)
   VISITCOMP(LE)
@@ -768,17 +766,17 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   mlir::Value VisitBinLAnd(const BinaryOperator *B);
   mlir::Value VisitBinLOr(const BinaryOperator *B);
   mlir::Value VisitBinComma(const BinaryOperator *E) {
-    CGF.buildIgnoredExpr(E->getLHS());
+    CGF.emitIgnoredExpr(E->getLHS());
     // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
     return Visit(E->getRHS());
   }

   mlir::Value VisitBinPtrMemD(const BinaryOperator *E) {
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);
   }

   mlir::Value VisitBinPtrMemI(const BinaryOperator *E) {
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);
   }

   mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
@@ -803,7 +801,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
   }
   mlir::Value VisitAsTypeExpr(AsTypeExpr *E) { llvm_unreachable("NYI"); }
   mlir::Value VisitAtomicExpr(AtomicExpr *E) {
-    return CGF.buildAtomicExpr(E).getScalarVal();
+    return CGF.emitAtomicExpr(E).getScalarVal();
   }

   // Emit a conversion from the specified type to the specified destination
@@ -825,15 +823,15 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
           EmitImplicitIntegerSignChangeChecks(
               SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
   };
-  mlir::Value buildScalarCast(mlir::Value Src, QualType SrcType,
-                              QualType DstType, mlir::Type SrcTy,
-                              mlir::Type DstTy, ScalarConversionOpts Opts);
+  mlir::Value emitScalarCast(mlir::Value Src, QualType SrcType,
+                             QualType DstType, mlir::Type SrcTy,
+                             mlir::Type DstTy, ScalarConversionOpts Opts);

-  BinOpInfo buildBinOps(const BinaryOperator *E,
-                        QualType PromotionType = QualType()) {
+  BinOpInfo emitBinOps(const BinaryOperator *E,
+                       QualType PromotionType = QualType()) {
     BinOpInfo Result;
-    Result.LHS = CGF.buildPromotedScalarExpr(E->getLHS(), PromotionType);
-    Result.RHS = CGF.buildPromotedScalarExpr(E->getRHS(), PromotionType);
+    Result.LHS = CGF.emitPromotedScalarExpr(E->getLHS(), PromotionType);
+    Result.RHS = CGF.emitPromotedScalarExpr(E->getRHS(), PromotionType);
     if (!PromotionType.isNull())
       Result.FullType = PromotionType;
     else
@@ -850,24 +848,24 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
     return Result;
   }

-  mlir::Value buildMul(const BinOpInfo &Ops);
-  mlir::Value buildDiv(const BinOpInfo &Ops);
-  mlir::Value buildRem(const BinOpInfo &Ops);
-  mlir::Value buildAdd(const BinOpInfo &Ops);
-  mlir::Value buildSub(const BinOpInfo &Ops);
-  mlir::Value buildShl(const BinOpInfo &Ops);
-  mlir::Value buildShr(const BinOpInfo &Ops);
-  mlir::Value buildAnd(const BinOpInfo &Ops);
-  mlir::Value buildXor(const BinOpInfo &Ops);
-  mlir::Value buildOr(const BinOpInfo &Ops);
+  mlir::Value emitMul(const BinOpInfo &Ops);
+  mlir::Value emitDiv(const BinOpInfo &Ops);
+  mlir::Value emitRem(const BinOpInfo &Ops);
+  mlir::Value emitAdd(const BinOpInfo &Ops);
+  mlir::Value emitSub(const BinOpInfo &Ops);
+  mlir::Value emitShl(const BinOpInfo &Ops);
+  mlir::Value emitShr(const BinOpInfo &Ops);
+  mlir::Value emitAnd(const BinOpInfo &Ops);
+  mlir::Value emitXor(const BinOpInfo &Ops);
+  mlir::Value emitOr(const BinOpInfo &Ops);

-  LValue buildCompoundAssignLValue(
+  LValue emitCompoundAssignLValue(
       const CompoundAssignOperator *E,
       mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &),
       mlir::Value &Result);
   mlir::Value
-  buildCompoundAssign(const CompoundAssignOperator *E,
-                      mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &));
+  emitCompoundAssign(const CompoundAssignOperator *E,
+                     mlir::Value (ScalarExprEmitter::*F)(const BinOpInfo &));

   // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
   // codegen.
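Note: the emitBinOps/emitCompoundAssign pair declared above implements the usual load/compute/store decomposition, with the PromotionType parameter widening the computation when the operand type is narrower than the preferred arithmetic type. A hypothetical input where that path can be taken (illustrative sketch only; whether _Float16 is actually promoted to float is target-dependent, and this is not part of the patch):

_Float16 scale(_Float16 a, _Float16 b) {
  a *= b;   // VisitBinMulAssign -> emitCompoundAssign(E, &ScalarExprEmitter::emitMul)
  return a; // computed in float when promoted, then narrowed back via
            // emitUnPromotedValue before the store
}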
@@ -887,13 +885,13 @@
#define HANDLEBINOP(OP)                                                        \
   mlir::Value VisitBin##OP(const BinaryOperator *E) {                          \
     QualType promotionTy = getPromotionType(E->getType());                     \
-    auto result = build##OP(buildBinOps(E, promotionTy));                     \
+    auto result = emit##OP(emitBinOps(E, promotionTy));                       \
     if (result && !promotionTy.isNull())                                       \
-      result = buildUnPromotedValue(result, E->getType());                    \
+      result = emitUnPromotedValue(result, E->getType());                     \
     return result;                                                             \
   }                                                                            \
   mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *E) {          \
-    return buildCompoundAssign(E, &ScalarExprEmitter::build##OP);             \
+    return emitCompoundAssign(E, &ScalarExprEmitter::emit##OP);               \
   }

   HANDLEBINOP(Mul)
@@ -908,7 +906,7 @@
   HANDLEBINOP(Or)
#undef HANDLEBINOP

-  mlir::Value buildCmp(const BinaryOperator *E) {
+  mlir::Value emitCmp(const BinaryOperator *E) {
     mlir::Value Result;
     QualType LHSTy = E->getLHS()->getType();
     QualType RHSTy = E->getRHS()->getType();
@@ -936,7 +934,7 @@
     if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
       assert(0 && "not implemented");
     } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
-      BinOpInfo BOInfo = buildBinOps(E);
+      BinOpInfo BOInfo = emitBinOps(E);
       mlir::Value LHS = BOInfo.LHS;
       mlir::Value RHS = BOInfo.RHS;

@@ -976,17 +974,17 @@
       assert(0 && "not implemented");
     }

-    return buildScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
-                                 E->getExprLoc());
+    return emitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
+                                E->getExprLoc());
   }

-  mlir::Value buildFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
+  mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
     auto boolTy = Builder.getBoolTy();
     return Builder.create<cir::CastOp>(loc, boolTy,
                                        cir::CastKind::float_to_bool, src);
   }

-  mlir::Value buildIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
+  mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
     // Because of the type rules of C, we often end up computing a
     // logical value, then zero extending it to int, then wanting it
     // as a logical value again.
@@ -999,21 +997,21 @@

   /// Convert the specified expression value to a boolean (!cir.bool) truth
   /// value. This is equivalent to "Val != 0".
-  mlir::Value buildConversionToBool(mlir::Value Src, QualType SrcType,
-                                    mlir::Location loc) {
+  mlir::Value emitConversionToBool(mlir::Value Src, QualType SrcType,
+                                   mlir::Location loc) {
     assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

     if (SrcType->isRealFloatingType())
-      return buildFloatToBoolConversion(Src, loc);
+      return emitFloatToBoolConversion(Src, loc);

     if (auto *MPT = llvm::dyn_cast<MemberPointerType>(SrcType))
       assert(0 && "not implemented");

     if (SrcType->isIntegerType())
-      return buildIntToBoolConversion(Src, loc);
+      return emitIntToBoolConversion(Src, loc);

     assert(::mlir::isa<cir::PointerType>(Src.getType()));
-    return buildPointerToBoolConversion(Src, SrcType);
+    return emitPointerToBoolConversion(Src, SrcType);
   }

   /// Emit a conversion from the specified type to the specified destination
   /// type, both of which are CIR scalar types.
   /// TODO: do we need ScalarConversionOpts here? Should be done in another
   /// pass.
   mlir::Value
-  buildScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType,
-                        SourceLocation Loc,
-                        ScalarConversionOpts Opts = ScalarConversionOpts()) {
+  emitScalarConversion(mlir::Value Src, QualType SrcType, QualType DstType,
+                       SourceLocation Loc,
+                       ScalarConversionOpts Opts = ScalarConversionOpts()) {
     // All conversions involving fixed point types should be handled by the
-    // buildFixedPoint family functions. This is done to prevent bloating up
+    // emitFixedPoint family functions. This is done to prevent bloating up
     // this function more, and although fixed point numbers are represented by
     // integers, we do not want to follow any logic that assumes they should be
     // treated as integers.
@@ -1050,7 +1048,7 @@
     // Handle conversions to bool first, they are special: comparisons against
     // 0.
     if (DstType->isBooleanType())
-      return buildConversionToBool(Src, SrcType, CGF.getLoc(Loc));
+      return emitConversionToBool(Src, SrcType, CGF.getLoc(Loc));

     mlir::Type DstTy = ConvertType(DstType);

@@ -1141,7 +1139,7 @@
       DstTy = CGF.FloatTy;
     }

-    Res = buildScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
+    Res = emitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

     if (DstTy != ResTy) {
       if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
@@ -1166,17 +1164,17 @@

 /// Emit the computation of the specified expression of scalar type,
 /// ignoring the result.
-mlir::Value CIRGenFunction::buildScalarExpr(const Expr *E) {
+mlir::Value CIRGenFunction::emitScalarExpr(const Expr *E) {
   assert(E && hasScalarEvaluationKind(E->getType()) &&
          "Invalid scalar expression to emit");

   return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(E));
 }

-mlir::Value CIRGenFunction::buildPromotedScalarExpr(const Expr *E,
-                                                    QualType PromotionType) {
+mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *E,
+                                                   QualType PromotionType) {
   if (!PromotionType.isNull())
-    return ScalarExprEmitter(*this, builder).buildPromoted(E, PromotionType);
+    return ScalarExprEmitter(*this, builder).emitPromoted(E, PromotionType);
   return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(E));
 }

@@ -1251,9 +1249,9 @@ static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
 }

 /// Emit pointer + index arithmetic.
-static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF,
-                                          const BinOpInfo &op,
-                                          bool isSubtraction) {
+static mlir::Value emitPointerArithmetic(CIRGenFunction &CGF,
+                                         const BinOpInfo &op,
+                                         bool isSubtraction) {
   // Must have binary (not unary) expr here. Unary pointer
   // increment/decrement doesn't use this path.
   const BinaryOperator *expr = cast<BinaryOperator>(op.E);
@@ -1327,8 +1325,8 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF,
       pointer = CGF.getBuilder().create<cir::PtrStrideOp>(
          CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index);
     } else {
-      pointer = CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
-                                            isSubtraction, op.E->getExprLoc());
+      pointer = CGF.emitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+                                           isSubtraction, op.E->getExprLoc());
     }
     return pointer;
   }
@@ -1345,11 +1343,11 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF,
     return CGF.getBuilder().create<cir::PtrStrideOp>(
         CGF.getLoc(op.E->getExprLoc()), pointer.getType(), pointer, index);

-  return CGF.buildCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
-                                     isSubtraction, op.E->getExprLoc());
+  return CGF.emitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+                                    isSubtraction, op.E->getExprLoc());
 }

-mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &Ops) {
   if (Ops.CompType->isSignedIntegerOrEnumerationType()) {
     switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
     case LangOptions::SOB_Defined:
@@ -1386,21 +1384,21 @@ mlir::Value ScalarExprEmitter::buildMul(const BinOpInfo &Ops) {
                                     CGF.getCIRType(Ops.FullType),
                                     cir::BinOpKind::Mul, Ops.LHS, Ops.RHS);
 }

-mlir::Value ScalarExprEmitter::buildDiv(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &Ops) {
   return Builder.create<cir::BinOp>(CGF.getLoc(Ops.Loc),
                                     CGF.getCIRType(Ops.FullType),
                                     cir::BinOpKind::Div, Ops.LHS, Ops.RHS);
 }

-mlir::Value ScalarExprEmitter::buildRem(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &Ops) {
   return Builder.create<cir::BinOp>(CGF.getLoc(Ops.Loc),
                                     CGF.getCIRType(Ops.FullType),
                                     cir::BinOpKind::Rem, Ops.LHS, Ops.RHS);
 }

-mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &Ops) {
   if (mlir::isa<cir::PointerType>(Ops.LHS.getType()) ||
       mlir::isa<cir::PointerType>(Ops.RHS.getType()))
-    return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/false);
+    return emitPointerArithmetic(CGF, Ops, /*isSubtraction=*/false);
   if (Ops.CompType->isSignedIntegerOrEnumerationType()) {
     switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
     case LangOptions::SOB_Defined:
@@ -1440,7 +1438,7 @@ mlir::Value ScalarExprEmitter::buildAdd(const BinOpInfo &Ops) {
                                     cir::BinOpKind::Add, Ops.LHS, Ops.RHS);
 }

-mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &Ops) {
   // The LHS is always a pointer if either side is.
   if (!mlir::isa<cir::PointerType>(Ops.LHS.getType())) {
     if (Ops.CompType->isSignedIntegerOrEnumerationType()) {
@@ -1486,7 +1484,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) {
   // If the RHS is not a pointer, then we have normal pointer
   // arithmetic.
   if (!mlir::isa<cir::PointerType>(Ops.RHS.getType()))
-    return buildPointerArithmetic(CGF, Ops, /*isSubtraction=*/true);
+    return emitPointerArithmetic(CGF, Ops, /*isSubtraction=*/true);

   // Otherwise, this is a pointer subtraction

@@ -1501,7 +1499,7 @@ mlir::Value ScalarExprEmitter::buildSub(const BinOpInfo &Ops) {
                                     Ops.LHS, Ops.RHS);
 }

-mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &Ops) {
   // TODO: This misses out on the sanitizer check below.
   if (Ops.isFixedPointOp())
     llvm_unreachable("NYI");
@@ -1533,7 +1531,7 @@ mlir::Value ScalarExprEmitter::buildShl(const BinOpInfo &Ops) {
                                       Ops.RHS, CGF.getBuilder().getUnitAttr());
 }

-mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &Ops) {
   // TODO: This misses out on the sanitizer check below.
   if (Ops.isFixedPointOp())
     llvm_unreachable("NYI");
@@ -1556,17 +1554,17 @@ mlir::Value ScalarExprEmitter::buildShr(const BinOpInfo &Ops) {
   return Builder.create<cir::ShiftOp>(
       CGF.getLoc(Ops.Loc), CGF.getCIRType(Ops.FullType), Ops.LHS, Ops.RHS);
 }

-mlir::Value ScalarExprEmitter::buildAnd(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &Ops) {
   return Builder.create<cir::BinOp>(CGF.getLoc(Ops.Loc),
                                     CGF.getCIRType(Ops.FullType),
                                     cir::BinOpKind::And, Ops.LHS, Ops.RHS);
 }
-mlir::Value ScalarExprEmitter::buildXor(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &Ops) {
   return Builder.create<cir::BinOp>(CGF.getLoc(Ops.Loc),
                                     CGF.getCIRType(Ops.FullType),
                                     cir::BinOpKind::Xor, Ops.LHS, Ops.RHS);
 }
-mlir::Value ScalarExprEmitter::buildOr(const BinOpInfo &Ops) {
+mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &Ops) {
   return Builder.create<cir::BinOp>(CGF.getLoc(Ops.Loc),
                                     CGF.getCIRType(Ops.FullType),
                                     cir::BinOpKind::Or, Ops.LHS, Ops.RHS);
@@ -1598,7 +1596,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
   case CK_LValueBitCast:
   case CK_ObjCObjectLValueCast:
   case CK_LValueToRValueBitCast: {
-    LValue SourceLVal = CGF.buildLValue(E);
+    LValue SourceLVal = CGF.emitLValue(E);
     Address SourceAddr = SourceLVal.getAddress();

     mlir::Type DestElemTy = CGF.convertTypeForMem(DestTy);
@@ -1613,7 +1611,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     if (Kind == CK_LValueToRValueBitCast)
       assert(!cir::MissingFeatures::tbaa());

-    return buildLoadOfLValue(DestLVal, CE->getExprLoc());
+    return emitLoadOfLValue(DestLVal, CE->getExprLoc());
   }

   case CK_CPointerToObjCPointerCast:
@@ -1665,7 +1663,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     // eliminate the useless instructions emitted during translating E.
     if (Result.HasSideEffects)
       Visit(E);
-    return CGF.CGM.buildNullConstant(DestTy, CGF.getLoc(E->getExprLoc()));
+    return CGF.CGM.emitNullConstant(DestTy, CGF.getLoc(E->getExprLoc()));
   }
   // Since target may map different address spaces in AST to the same address
   // space, an address space conversion may end up as a bitcast.
@@ -1696,7 +1694,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
   case CK_BaseToDerived: {
     const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
     assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
-    Address Base = CGF.buildPointerWithAlignment(E);
+    Address Base = CGF.emitPointerWithAlignment(E);
     Address Derived = CGF.getAddressOfDerivedClass(
         Base, DerivedClassDecl, CE->path_begin(), CE->path_end(),
         CGF.shouldNullCheckClassCastValue(CE));
@@ -1714,17 +1712,17 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
   case CK_DerivedToBase: {
     // The EmitPointerWithAlignment path does this fine; just discard
     // the alignment.
-    return CGF.buildPointerWithAlignment(CE).getPointer();
+    return CGF.emitPointerWithAlignment(CE).getPointer();
   }
   case CK_Dynamic: {
-    Address V = CGF.buildPointerWithAlignment(E);
+    Address V = CGF.emitPointerWithAlignment(E);
     const auto *DCE = cast<CXXDynamicCastExpr>(CE);
-    return CGF.buildDynamicCast(V, DCE);
+    return CGF.emitDynamicCast(V, DCE);
   }
   case CK_ArrayToPointerDecay:
-    return CGF.buildArrayToPointerDecay(E).getPointer();
+    return CGF.emitArrayToPointerDecay(E).getPointer();
   case CK_FunctionToPointerDecay:
-    return buildLValue(E).getPointer();
+    return emitLValue(E).getPointer();

   case CK_NullToPointer: {
     // FIXME: use MustVisitNullValue(E) and evaluate expr.
@@ -1736,7 +1734,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {

   case CK_NullToMemberPointer: {
     if (MustVisitNullValue(E))
-      CGF.buildIgnoredExpr(E);
+      CGF.emitIgnoredExpr(E);

     assert(!cir::MissingFeatures::cxxABI());

@@ -1810,7 +1808,7 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     return Builder.createPtrToInt(Visit(E), ConvertType(DestTy));
   }
   case CK_ToVoid: {
-    CGF.buildIgnoredExpr(E);
+    CGF.emitIgnoredExpr(E);
     return nullptr;
   }
   case CK_MatrixCast:
@@ -1836,8 +1834,8 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
       if (!ICE->isPartOfExplicitCast())
         Opts = ScalarConversionOpts(CGF.SanOpts);
     }
-    return buildScalarConversion(Visit(E), E->getType(), DestTy,
-                                 CE->getExprLoc(), Opts);
+    return emitScalarConversion(Visit(E), E->getType(), DestTy,
+                                CE->getExprLoc(), Opts);
   }

   case CK_IntegralToFloating:
@@ -1848,29 +1846,29 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
     if (Kind == CK_FixedPointToFloating || Kind == CK_FloatingToFixedPoint)
       llvm_unreachable("Fixed point casts are NYI.");
     CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(CGF, CE);
-    return buildScalarConversion(Visit(E), E->getType(), DestTy,
-                                 CE->getExprLoc());
+    return emitScalarConversion(Visit(E), E->getType(), DestTy,
+                                CE->getExprLoc());
   }
   case CK_BooleanToSignedIntegral:
     llvm_unreachable("NYI");
   case CK_IntegralToBoolean: {
-    return buildIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange()));
+    return emitIntToBoolConversion(Visit(E), CGF.getLoc(CE->getSourceRange()));
   }
   case CK_PointerToBoolean:
-    return buildPointerToBoolConversion(Visit(E), E->getType());
+    return emitPointerToBoolConversion(Visit(E), E->getType());
   case CK_FloatingToBoolean:
-    return buildFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc()));
+    return emitFloatToBoolConversion(Visit(E), CGF.getLoc(E->getExprLoc()));
   case CK_MemberPointerToBoolean:
     llvm_unreachable("NYI");
   case CK_FloatingComplexToReal:
   case CK_IntegralComplexToReal:
   case CK_FloatingComplexToBoolean:
   case CK_IntegralComplexToBoolean: {
-    mlir::Value V = CGF.buildComplexExpr(E);
-    return buildComplexToScalarConversion(CGF.getLoc(CE->getExprLoc()), V, Kind,
-                                          DestTy);
+    mlir::Value V = CGF.emitComplexExpr(E);
+    return emitComplexToScalarConversion(CGF.getLoc(CE->getExprLoc()), V, Kind,
+                                         DestTy);
   }
   case CK_ZeroToOCLOpaqueType:
     llvm_unreachable("NYI");
@@ -1888,10 +1886,10 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {

 mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *E) {
   if (E->getCallReturnType(CGF.getContext())->isReferenceType())
-    return buildLoadOfLValue(E);
+    return emitLoadOfLValue(E);

-  auto V = CGF.buildCallExpr(E).getScalarVal();
-  assert(!cir::MissingFeatures::buildLValueAlignmentAssumption());
+  auto V = CGF.emitCallExpr(E).getScalarVal();
+  assert(!cir::MissingFeatures::emitLValueAlignmentAssumption());
   return V;
 }

@@
-1902,29 +1900,28 @@ mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) { Expr::EvalResult Result; if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) { llvm::APSInt Value = Result.Val.getInt(); - CGF.buildIgnoredExpr(E->getBase()); + CGF.emitIgnoredExpr(E->getBase()); return Builder.getConstInt(CGF.getLoc(E->getExprLoc()), Value); } - return buildLoadOfLValue(E); + return emitLoadOfLValue(E); } /// Emit a conversion from the specified type to the specified destination /// type, both of which are CIR scalar types. -mlir::Value CIRGenFunction::buildScalarConversion(mlir::Value Src, - QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { +mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value Src, + QualType SrcTy, QualType DstTy, + SourceLocation Loc) { assert(CIRGenFunction::hasScalarEvaluationKind(SrcTy) && CIRGenFunction::hasScalarEvaluationKind(DstTy) && "Invalid scalar expression to emit"); return ScalarExprEmitter(*this, builder) - .buildScalarConversion(Src, SrcTy, DstTy, Loc); + .emitScalarConversion(Src, SrcTy, DstTy, Loc); } -mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, - QualType SrcTy, - QualType DstTy, - SourceLocation Loc) { +mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value Src, + QualType SrcTy, + QualType DstTy, + SourceLocation Loc) { assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && "Invalid complex -> scalar conversion"); @@ -1941,7 +1938,7 @@ mlir::Value CIRGenFunction::buildComplexToScalarConversion(mlir::Value Src, : cir::CastKind::int_complex_to_real; auto Real = builder.createCast(getLoc(Loc), Kind, Src, ConvertType(ComplexElemTy)); - return buildScalarConversion(Real, ComplexElemTy, DstTy, Loc); + return emitScalarConversion(Real, ComplexElemTy, DstTy, Loc); } /// If the specified expression does not fold @@ -1992,7 +1989,7 @@ mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { if (NumInitElements == 0) { // C++11 value-initialization for the scalar. - return buildNullValue(E->getType(), CGF.getLoc(E->getExprLoc())); + return emitNullValue(E->getType(), CGF.getLoc(E->getExprLoc())); } return Visit(E->getInit(0)); @@ -2031,7 +2028,7 @@ mlir::Value ScalarExprEmitter::VisitReal(const UnaryOperator *E) { // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. if (E->isGLValue()) - return CGF.buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()) + return CGF.emitLoadOfLValue(CGF.emitLValue(E), E->getExprLoc()) .getScalarVal(); // Otherwise, calculate and project. llvm_unreachable("NYI"); @@ -2049,7 +2046,7 @@ mlir::Value ScalarExprEmitter::VisitImag(const UnaryOperator *E) { // Note that we have to ask E because Op might be an l-value that // this won't work for, e.g. an Obj-C property. if (E->isGLValue()) - return CGF.buildLoadOfLValue(CGF.buildLValue(E), E->getExprLoc()) + return CGF.emitLoadOfLValue(CGF.emitLValue(E), E->getExprLoc()) .getScalarVal(); // Otherwise, calculate and project. llvm_unreachable("NYI"); @@ -2062,9 +2059,11 @@ mlir::Value ScalarExprEmitter::VisitImag(const UnaryOperator *E) { // floating-point. Conversions involving other types are handled elsewhere. // Conversion to bool is handled elsewhere because that's a comparison against // zero, not a simple cast. This handles both individual scalars and vectors. 
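  // For illustration, a hedged example of that split (variable names are
  // invented):
  //
  //   int i = 42;
  //   double d = i; // reaches this function: one integer-to-floating cast
  //   bool b = d;   // never reaches it: emitted earlier as `d != 0`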
-mlir::Value ScalarExprEmitter::buildScalarCast( - mlir::Value Src, QualType SrcType, QualType DstType, mlir::Type SrcTy, - mlir::Type DstTy, ScalarConversionOpts Opts) { +mlir::Value ScalarExprEmitter::emitScalarCast(mlir::Value Src, QualType SrcType, + QualType DstType, + mlir::Type SrcTy, + mlir::Type DstTy, + ScalarConversionOpts Opts) { assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && "Internal error: matrix types not handled by this function."); if (mlir::isa(SrcTy) || @@ -2079,7 +2078,7 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } assert(!mlir::isa(SrcTy) && !mlir::isa(DstTy) && - "buildScalarCast given a vector type and a non-vector type"); + "emitScalarCast given a vector type and a non-vector type"); std::optional CastKind; @@ -2126,14 +2125,14 @@ mlir::Value ScalarExprEmitter::buildScalarCast( } LValue -CIRGenFunction::buildCompoundAssignmentLValue(const CompoundAssignOperator *E) { +CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *E) { ScalarExprEmitter Scalar(*this, builder); mlir::Value Result; switch (E->getOpcode()) { #define COMPOUND_OP(Op) \ case BO_##Op##Assign: \ - return Scalar.buildCompoundAssignLValue(E, &ScalarExprEmitter::build##Op, \ - Result) + return Scalar.emitCompoundAssignLValue(E, &ScalarExprEmitter::emit##Op, \ + Result) COMPOUND_OP(Mul); COMPOUND_OP(Div); COMPOUND_OP(Rem); @@ -2174,7 +2173,7 @@ CIRGenFunction::buildCompoundAssignmentLValue(const CompoundAssignOperator *E) { llvm_unreachable("Unhandled compound assignment operator"); } -LValue ScalarExprEmitter::buildCompoundAssignLValue( +LValue ScalarExprEmitter::emitCompoundAssignLValue( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &), mlir::Value &Result) { @@ -2195,7 +2194,7 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType()); if (!PromotionTypeRHS.isNull()) - OpInfo.RHS = CGF.buildPromotedScalarExpr(E->getRHS(), PromotionTypeRHS); + OpInfo.RHS = CGF.emitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS); else OpInfo.RHS = Visit(E->getRHS()); @@ -2210,40 +2209,40 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( OpInfo.Loc = E->getSourceRange(); // Load/convert the LHS - LValue LHSLV = CGF.buildLValue(E->getLHS()); + LValue LHSLV = CGF.emitLValue(E->getLHS()); if (const AtomicType *atomicTy = LHSTy->getAs()) { assert(0 && "not implemented"); } - OpInfo.LHS = buildLoadOfLValue(LHSLV, E->getExprLoc()); + OpInfo.LHS = emitLoadOfLValue(LHSLV, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject sourceloc{ CGF, CGF.getLoc(E->getSourceRange())}; SourceLocation Loc = E->getExprLoc(); if (!PromotionTypeLHS.isNull()) - OpInfo.LHS = buildScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS, - E->getExprLoc()); + OpInfo.LHS = emitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS, + E->getExprLoc()); else - OpInfo.LHS = buildScalarConversion(OpInfo.LHS, LHSTy, - E->getComputationLHSType(), Loc); + OpInfo.LHS = emitScalarConversion(OpInfo.LHS, LHSTy, + E->getComputationLHSType(), Loc); // Expand the binary operator. Result = (this->*Func)(OpInfo); // Convert the result back to the LHS type, // potentially with Implicit Conversion sanitizer check. - Result = buildScalarConversion(Result, PromotionTypeCR, LHSTy, Loc, - ScalarConversionOpts(CGF.SanOpts)); + Result = emitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc, + ScalarConversionOpts(CGF.SanOpts)); // Store the result value into the LHS lvalue. 
Bit-fields are handled // specially because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHSLV.isBitField()) - CGF.buildStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, Result); + CGF.emitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, Result); else - CGF.buildStoreThroughLValue(RValue::get(Result), LHSLV); + CGF.emitStoreThroughLValue(RValue::get(Result), LHSLV); if (CGF.getLangOpts().OpenMP) CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, @@ -2251,8 +2250,10 @@ LValue ScalarExprEmitter::buildCompoundAssignLValue( return LHSLV; } -mlir::Value ScalarExprEmitter::buildComplexToScalarConversion( - mlir::Location Loc, mlir::Value V, CastKind Kind, QualType DestTy) { +mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location Loc, + mlir::Value V, + CastKind Kind, + QualType DestTy) { cir::CastKind CastOpKind; switch (Kind) { case CK_FloatingComplexToReal: @@ -2274,18 +2275,18 @@ mlir::Value ScalarExprEmitter::buildComplexToScalarConversion( return Builder.createCast(Loc, CastOpKind, V, CGF.ConvertType(DestTy)); } -mlir::Value ScalarExprEmitter::buildNullValue(QualType Ty, mlir::Location loc) { - return CGF.buildFromMemory(CGF.CGM.buildNullConstant(Ty, loc), Ty); +mlir::Value ScalarExprEmitter::emitNullValue(QualType Ty, mlir::Location loc) { + return CGF.emitFromMemory(CGF.CGM.emitNullConstant(Ty, loc), Ty); } -mlir::Value ScalarExprEmitter::buildPromoted(const Expr *E, - QualType PromotionType) { +mlir::Value ScalarExprEmitter::emitPromoted(const Expr *E, + QualType PromotionType) { E = E->IgnoreParens(); if (const auto *BO = dyn_cast(E)) { switch (BO->getOpcode()) { #define HANDLE_BINOP(OP) \ case BO_##OP: \ - return build##OP(buildBinOps(BO, PromotionType)); + return emit##OP(emitBinOps(BO, PromotionType)); HANDLE_BINOP(Add) HANDLE_BINOP(Sub) HANDLE_BINOP(Mul) @@ -2310,19 +2311,19 @@ mlir::Value ScalarExprEmitter::buildPromoted(const Expr *E, auto result = Visit(const_cast(E)); if (result) { if (!PromotionType.isNull()) - return buildPromotedValue(result, PromotionType); - return buildUnPromotedValue(result, E->getType()); + return emitPromotedValue(result, PromotionType); + return emitUnPromotedValue(result, E->getType()); } return result; } -mlir::Value ScalarExprEmitter::buildCompoundAssign( +mlir::Value ScalarExprEmitter::emitCompoundAssign( const CompoundAssignOperator *E, mlir::Value (ScalarExprEmitter::*Func)(const BinOpInfo &)) { bool Ignore = TestAndClearIgnoreResultAssign(); mlir::Value RHS; - LValue LHS = buildCompoundAssignLValue(E, Func, RHS); + LValue LHS = emitCompoundAssignLValue(E, Func, RHS); // If the result is clearly ignored, return now. if (Ignore) @@ -2337,7 +2338,7 @@ mlir::Value ScalarExprEmitter::buildCompoundAssign( return RHS; // Otherwise, reload the value. - return buildLoadOfLValue(LHS, E->getExprLoc()); + return emitLoadOfLValue(LHS, E->getExprLoc()); } mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { @@ -2381,19 +2382,19 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // __block variables need to have the rhs evaluated first, plus this should // improve codegen just a little. RHS = Visit(E->getRHS()); - LHS = buildCheckedLValue(E->getLHS(), CIRGenFunction::TCK_Store); + LHS = emitCheckedLValue(E->getLHS(), CIRGenFunction::TCK_Store); // Store the value into the LHS. 
Bit-fields are handled specially because // the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); + CGF.emitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); } else { - CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); + CGF.emitNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, CGF.getLoc(E->getSourceRange())}; - CGF.buildStoreThroughLValue(RValue::get(RHS), LHS); + CGF.emitStoreThroughLValue(RValue::get(RHS), LHS); } } @@ -2410,7 +2411,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { return RHS; // Otherwise, reload the value. - return buildLoadOfLValue(LHS, E->getExprLoc()); + return emitLoadOfLValue(LHS, E->getExprLoc()); } /// Return true if the specified expression is cheap enough and side-effect-free @@ -2517,7 +2518,7 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( .getResult(); } - mlir::Value condV = CGF.buildOpOnBoolExpr(loc, condExpr); + mlir::Value condV = CGF.emitOpOnBoolExpr(loc, condExpr); CIRGenFunction::ConditionalEvaluation eval(CGF); SmallVector insertPoints{}; mlir::Type yieldTy{}; @@ -2591,11 +2592,11 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( .getResult(); } -mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, - LValue LV, bool isInc, - bool isPre) { +mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *E, + LValue LV, bool isInc, + bool isPre) { return ScalarExprEmitter(*this, builder) - .buildScalarPrePostIncDec(E, LV, isInc, isPre); + .emitScalarPrePostIncDec(E, LV, isInc, isPre); } mlir::Value ScalarExprEmitter::VisitBinLAnd(const clang::BinaryOperator *E) { @@ -2772,7 +2773,7 @@ mlir::Value ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { assert(!cir::MissingFeatures::variablyModifiedTypeEmission() && "NYI"); Address ArgValue = Address::invalid(); - mlir::Value Val = CGF.buildVAArg(VE, ArgValue); + mlir::Value Val = CGF.emitVAArg(VE, ArgValue); return Val; } @@ -2788,11 +2789,11 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( if (E->isArgumentType()) { // sizeof(type) - make sure to emit the VLA size. - CGF.buildVariablyModifiedType(TypeToSize); + CGF.emitVariablyModifiedType(TypeToSize); } else { // C99 6.5.3.4p2: If the argument is an expression of type // VLA, it is evaluated. 
- CGF.buildIgnoredExpr(E->getArgumentExpr()); + CGF.emitIgnoredExpr(E->getArgumentExpr()); } auto VlaSize = CGF.getVLASize(VAT); @@ -2815,7 +2816,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( E->EvaluateKnownConstInt(CGF.getContext())); } -mlir::Value CIRGenFunction::buildCheckedInBoundsGEP( +mlir::Value CIRGenFunction::emitCheckedInBoundsGEP( mlir::Type ElemTy, mlir::Value Ptr, ArrayRef IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc) { mlir::Type PtrTy = Ptr.getType(); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index 3523ca861e47..b31a4ba325ae 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -219,12 +219,11 @@ bool CIRGenFunction::sanitizePerformTypeCheck() const { SanOpts.has(SanitizerKind::Vptr); } -void CIRGenFunction::buildTypeCheck(TypeCheckKind TCK, - clang::SourceLocation Loc, mlir::Value V, - clang::QualType Type, - clang::CharUnits Alignment, - clang::SanitizerSet SkippedChecks, - std::optional ArraySize) { +void CIRGenFunction::emitTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, + mlir::Value V, clang::QualType Type, + clang::CharUnits Alignment, + clang::SanitizerSet SkippedChecks, + std::optional ArraySize) { if (!sanitizePerformTypeCheck()) return; @@ -271,8 +270,8 @@ static bool endsWithReturn(const Decl *F) { return false; } -void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, - CharUnits alignment) { +void CIRGenFunction::emitAndUpdateRetAlloca(QualType ty, mlir::Location loc, + CharUnits alignment) { if (ty->isVoidType()) { // Void type; nothing to return. @@ -290,7 +289,7 @@ void CIRGenFunction::buildAndUpdateRetAlloca(QualType ty, mlir::Location loc, cir::ABIArgInfo::InAlloca) { llvm_unreachable("NYI"); } else { - auto addr = buildAlloca("__retval", ty, loc, alignment); + auto addr = emitAlloca("__retval", ty, loc, alignment); FnRetAlloca = addr; ReturnValue = Address(addr, alignment); @@ -310,7 +309,7 @@ mlir::LogicalResult CIRGenFunction::declare(const Decl *var, QualType ty, assert(namedVar && "Needs a named decl"); assert(!symbolTable.count(var) && "not supposed to be available just yet"); - addr = buildAlloca(namedVar->getName(), ty, loc, alignment); + addr = emitAlloca(namedVar->getName(), ty, loc, alignment); auto allocaOp = cast(addr.getDefiningOp()); if (isParam) allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext())); @@ -364,7 +363,7 @@ void CIRGenFunction::LexicalScope::cleanup() { builder.setInsertionPointToEnd(retBlock); mlir::Location retLoc = *localScope->getRetLocs()[curLoc]; curLoc++; - (void)buildReturn(retLoc); + (void)emitReturn(retLoc); } auto insertCleanupAndLeave = [&](mlir::Block *InsPt) { @@ -385,7 +384,7 @@ void CIRGenFunction::LexicalScope::cleanup() { } if (localScope->Depth == 0) { - buildImplicitReturn(); + emitImplicitReturn(); return; } @@ -442,15 +441,15 @@ void CIRGenFunction::LexicalScope::cleanup() { insertCleanupAndLeave(currBlock); } -cir::ReturnOp CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { +cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) { auto &builder = CGF.getBuilder(); // If we are on a coroutine, add the coro_end builtin call. 
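  // For illustration (a hedged sketch; `task` and `f` are invented names):
  // for a coroutine such as
  //
  //   task f() { co_return; }
  //
  // the return path below emits the coro_end builtin call with a null frame
  // pointer, marking where the coroutine body ends for later lowering.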
auto Fn = dyn_cast(CGF.CurFn); assert(Fn && "other callables NYI"); if (Fn.getCoroutine()) - CGF.buildCoroEndBuiltinCall( - loc, builder.getNullPtr(builder.getVoidPtrTy(), loc)); + CGF.emitCoroEndBuiltinCall(loc, + builder.getNullPtr(builder.getVoidPtrTy(), loc)); if (CGF.FnRetCIRTy.has_value()) { // If there's anything to return, load it first. @@ -460,7 +459,7 @@ cir::ReturnOp CIRGenFunction::LexicalScope::buildReturn(mlir::Location loc) { return builder.create(loc); } -void CIRGenFunction::LexicalScope::buildImplicitReturn() { +void CIRGenFunction::LexicalScope::emitImplicitReturn() { auto &builder = CGF.getBuilder(); auto *localScope = CGF.currLexScope; @@ -497,7 +496,7 @@ void CIRGenFunction::LexicalScope::buildImplicitReturn() { } } - (void)buildReturn(localScope->EndLoc); + (void)emitReturn(localScope->EndLoc); } cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() { @@ -711,9 +710,9 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, // TODO: PGO.assignRegionCounters assert(!cir::MissingFeatures::shouldInstrumentFunction()); if (isa(FD)) - buildDestructorBody(Args); + emitDestructorBody(Args); else if (isa(FD)) - buildConstructorBody(Args); + emitConstructorBody(Args); else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice && FD->hasAttr()) llvm_unreachable("NYI"); @@ -722,15 +721,15 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn, // The lambda static invoker function is special, because it forwards or // clones the body of the function call operator (but is actually // static). - buildLambdaStaticInvokeBody(cast(FD)); + emitLambdaStaticInvokeBody(cast(FD)); } else if (FD->isDefaulted() && isa(FD) && (cast(FD)->isCopyAssignmentOperator() || cast(FD)->isMoveAssignmentOperator())) { // Implicit copy-assignment gets the same special treatment as implicit // copy-constructors. - buildImplicitAssignmentOperatorBody(Args); + emitImplicitAssignmentOperatorBody(Args); } else if (Body) { - if (mlir::failed(buildFunctionBody(Body))) { + if (mlir::failed(emitFunctionBody(Body))) { Fn.erase(); return nullptr; } @@ -759,7 +758,7 @@ mlir::Value CIRGenFunction::createLoad(const VarDecl *VD, const char *Name) { addr.getElementType(), addr.getPointer()); } -void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { +void CIRGenFunction::emitConstructorBody(FunctionArgList &Args) { assert(!cir::MissingFeatures::emitAsanPrologueOrEpilogue()); const auto *Ctor = cast(CurGD.getDecl()); auto CtorType = CurGD.getCtorType(); @@ -772,7 +771,7 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { // optimization. if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && CGM.getTarget().getCXXABI().hasConstructorVariants()) { - buildDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc()); + emitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getEndLoc()); return; } @@ -794,7 +793,7 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { // complete ctor and then delegate to the base ctor. // Emit the constructor prologue, i.e. the base and member initializers. - buildCtorPrologue(Ctor, CtorType, Args); + emitCtorPrologue(Ctor, CtorType, Args); // Emit the body of the statement. if (IsTryBody) @@ -802,7 +801,7 @@ void CIRGenFunction::buildConstructorBody(FunctionArgList &Args) { else { // TODO: propagate this result via mlir::logical result. Just unreachable // now just to have it handled. 
-    if (mlir::failed(buildStmt(Body, true)))
+    if (mlir::failed(emitStmt(Body, true)))
       llvm_unreachable("NYI");
   }
@@ -886,7 +885,7 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
   return true;
 }

-/// TODO: this should live in `buildFunctionProlog`
+/// TODO: this should live in `emitFunctionProlog`
 /// An argument came in as a promoted argument; demote it back to its
 /// declared type.
 static mlir::Value emitArgumentDemotion(CIRGenFunction &CGF, const VarDecl *var,
@@ -1119,7 +1118,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
        ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
         getLangOpts().CUDAIsDevice))) {
     // Add metadata for a kernel function.
-    buildKernelMetadata(FD, Fn);
+    emitKernelMetadata(FD, Fn);
   }

   if (FD && FD->hasAttr()) {
@@ -1134,7 +1133,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
   // If we're checking nullability, we need to know whether we can check the
   // return value. Initialize the flag to 'true' and refine it in
-  // buildParmDecl.
+  // emitParmDecl.
   if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
     llvm_unreachable("NYI");
   }
@@ -1220,7 +1219,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
     llvm_unreachable("NYI");
   }

-  // TODO: buildFunctionProlog
+  // TODO: emitFunctionProlog

   {
     // Set the insertion point in the builder to the beginning of the
@@ -1228,7 +1227,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
     // operations in this function.
     builder.setInsertionPointToStart(EntryBB);

-    // TODO: this should live in `buildFunctionProlog`
+    // TODO: this should live in `emitFunctionProlog`
     // Declare all the function arguments in the symbol table.
     for (const auto nameValue : llvm::zip(Args, EntryBB->getArguments())) {
       auto *paramVar = std::get<0>(nameValue);
@@ -1245,7 +1244,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
       auto address = Address(addr, alignment);
       setAddrOfLocalVar(paramVar, address);

-      // TODO: this should live in `buildFunctionProlog`
+      // TODO: this should live in `emitFunctionProlog`
       bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                         cast<ParmVarDecl>(paramVar)->isKNRPromoted();
       assert(!cir::MissingFeatures::constructABIArgDirectExtend());
@@ -1264,12 +1263,12 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
     // When the current function is not void, create an address to store the
     // result value.
     if (FnRetCIRTy.has_value())
-      buildAndUpdateRetAlloca(FnRetQualTy, FnEndLoc,
-                              CGM.getNaturalTypeAlignment(FnRetQualTy));
+      emitAndUpdateRetAlloca(FnRetQualTy, FnEndLoc,
+                             CGM.getNaturalTypeAlignment(FnRetQualTy));
   }

   if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
-    CGM.getCXXABI().buildInstanceFunctionProlog(Loc, *this);
+    CGM.getCXXABI().emitInstanceFunctionProlog(Loc, *this);
     const auto *MD = cast<CXXMethodDecl>(D);
     if (MD->getParent()->isLambda() &&
         MD->getOverloadedOperator() == OO_Call) {
@@ -1311,7 +1310,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
         MD->getParent()->getLambdaCaptureDefault() == LCD_None)
       SkippedChecks.set(SanitizerKind::Null, true);

-    assert(!cir::MissingFeatures::buildTypeCheck() && "NYI");
+    assert(!cir::MissingFeatures::emitTypeCheck() && "NYI");
   }
 }

@@ -1330,7 +1329,7 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
       ty = vd->getType();

     if (ty->isVariablyModifiedType())
-      buildVariablyModifiedType(ty);
+      emitVariablyModifiedType(ty);
   }
 }
 // Emit a location at the end of the prologue.
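A hedged sketch of the return-slot handling referenced above (the function below is invented for illustration): when the return type is non-void, the prologue allocates a "__retval" slot, each `return` stores into it, and the shared return block loads it back for the final cir.return.

  int answer() {
    return 42; // stored to the __retval alloca here; the function's return
               // block reloads it and feeds it to cir.return
  }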
@@ -1358,7 +1357,7 @@ bool CIRGenFunction::ShouldInstrumentFunction() { llvm_unreachable("NYI"); } -mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { +mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *Body) { // TODO: incrementProfileCounter(Body); // We start with function level scope for variables. @@ -1366,9 +1365,9 @@ mlir::LogicalResult CIRGenFunction::buildFunctionBody(const clang::Stmt *Body) { auto result = mlir::LogicalResult::success(); if (const CompoundStmt *S = dyn_cast(Body)) - buildCompoundStmtWithoutScope(*S); + emitCompoundStmtWithoutScope(*S); else - result = buildStmt(Body, /*useCurrentScope*/ true); + result = emitStmt(Body, /*useCurrentScope*/ true); // This is checked after emitting the function body so we know if there are // any permitted infinite loops. @@ -1435,8 +1434,8 @@ std::string CIRGenFunction::getCounterRefTmpAsString() { return getVersionedTmpName("ref.tmp", CounterRefTmp++); } -void CIRGenFunction::buildNullInitialization(mlir::Location loc, - Address DestPtr, QualType Ty) { +void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address DestPtr, + QualType Ty) { // Ignore empty classes in C++. if (getLangOpts().CPlusPlus) { if (const RecordType *RT = Ty->getAs()) { @@ -1558,15 +1557,15 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *CE) { return true; } -void CIRGenFunction::buildDeclRefExprDbgValue(const DeclRefExpr *E, - const APValue &Init) { +void CIRGenFunction::emitDeclRefExprDbgValue(const DeclRefExpr *E, + const APValue &Init) { assert(!cir::MissingFeatures::generateDebugInfo()); } -Address CIRGenFunction::buildVAListRef(const Expr *E) { +Address CIRGenFunction::emitVAListRef(const Expr *E) { if (getContext().getBuiltinVaListType()->isArrayType()) - return buildPointerWithAlignment(E); - return buildLValue(E).getAddress(); + return emitPointerWithAlignment(E); + return emitLValue(E).getAddress(); } // Emits an error if we don't have a valid set of target features for the @@ -1683,7 +1682,7 @@ CIRGenFunction::getVLASize(const VariableArrayType *type) { // TODO(cir): most part of this function can be shared between CIRGen // and traditional LLVM codegen -void CIRGenFunction::buildVariablyModifiedType(QualType type) { +void CIRGenFunction::emitVariablyModifiedType(QualType type) { assert(type->isVariablyModifiedType() && "Must pass variably modified type to EmitVLASizes!"); @@ -1771,7 +1770,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { // e.g. with a typedef and a pointer to it. mlir::Value &entry = VLASizeMap[sizeExpr]; if (!entry) { - mlir::Value size = buildScalarExpr(sizeExpr); + mlir::Value size = emitScalarExpr(sizeExpr); assert(!cir::MissingFeatures::sanitizeVLABound()); // Always zexting here would be wrong if it weren't @@ -1809,7 +1808,7 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { case Type::TypeOfExpr: // Stop walking: emit typeof expression. - buildIgnoredExpr(cast(ty)->getUnderlyingExpr()); + emitIgnoredExpr(cast(ty)->getUnderlyingExpr()); return; case Type::Atomic: @@ -1826,8 +1825,8 @@ void CIRGenFunction::buildVariablyModifiedType(QualType type) { /// Computes the length of an array in elements, as well as the base /// element type and a properly-typed first element pointer. 
mlir::Value -CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, - QualType &baseType, Address &addr) { +CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType, + QualType &baseType, Address &addr) { const auto *arrayType = origArrayType; // If it's a VLA, we have to load the stored size. Note that @@ -1873,7 +1872,7 @@ CIRGenFunction::buildArrayLength(const clang::ArrayType *origArrayType, return numElements; } -mlir::Value CIRGenFunction::buildAlignmentAssumption( +mlir::Value CIRGenFunction::emitAlignmentAssumption( mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, mlir::IntegerAttr alignment, mlir::Value offsetValue) { @@ -1883,20 +1882,20 @@ mlir::Value CIRGenFunction::buildAlignmentAssumption( alignment, offsetValue); } -mlir::Value CIRGenFunction::buildAlignmentAssumption( +mlir::Value CIRGenFunction::emitAlignmentAssumption( mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc, mlir::IntegerAttr alignment, mlir::Value offsetValue) { QualType ty = expr->getType(); SourceLocation loc = expr->getExprLoc(); - return buildAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment, - offsetValue); + return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment, + offsetValue); } -void CIRGenFunction::buildVarAnnotations(const VarDecl *decl, mlir::Value val) { +void CIRGenFunction::emitVarAnnotations(const VarDecl *decl, mlir::Value val) { assert(decl->hasAttr() && "no annotate attribute"); llvm::SmallVector annotations; for (const auto *annot : decl->specific_attrs()) { - annotations.push_back(CGM.buildAnnotateAttr(annot)); + annotations.push_back(CGM.emitAnnotateAttr(annot)); } auto allocaOp = dyn_cast_or_null(val.getDefiningOp()); assert(allocaOp && "expects available alloca"); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 0185b5370642..8d4fabeff642 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -105,7 +105,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// Add OpenCL kernel arg metadata and the kernel attribute metadata to /// the function metadata. 
- void buildKernelMetadata(const FunctionDecl *FD, cir::FuncOp Fn); + void emitKernelMetadata(const FunctionDecl *FD, cir::FuncOp Fn); public: /// A non-RAII class containing all the information about a bound @@ -137,8 +137,8 @@ class CIRGenFunction : public CIRGenTypeCache { static OpaqueValueMappingData bind(CIRGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e) { if (shouldBindAsLValue(ov)) - return bind(CGF, ov, CGF.buildLValue(e)); - return bind(CGF, ov, CGF.buildAnyExpr(e)); + return bind(CGF, ov, CGF.emitLValue(e)); + return bind(CGF, ov, CGF.emitAnyExpr(e)); } static OpaqueValueMappingData @@ -252,22 +252,22 @@ class CIRGenFunction : public CIRGenTypeCache { public: // FIXME(cir): move this to CIRGenBuider.h - mlir::Value buildAlloca(llvm::StringRef name, clang::QualType ty, - mlir::Location loc, clang::CharUnits alignment, - bool insertIntoFnEntryBlock = false, - mlir::Value arraySize = nullptr); - mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, - mlir::Location loc, clang::CharUnits alignment, - bool insertIntoFnEntryBlock = false, - mlir::Value arraySize = nullptr); - mlir::Value buildAlloca(llvm::StringRef name, mlir::Type ty, - mlir::Location loc, clang::CharUnits alignment, - mlir::OpBuilder::InsertPoint ip, - mlir::Value arraySize = nullptr); + mlir::Value emitAlloca(llvm::StringRef name, clang::QualType ty, + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); + mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + bool insertIntoFnEntryBlock = false, + mlir::Value arraySize = nullptr); + mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, + mlir::Location loc, clang::CharUnits alignment, + mlir::OpBuilder::InsertPoint ip, + mlir::Value arraySize = nullptr); private: - void buildAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, - clang::CharUnits alignment); + void emitAndUpdateRetAlloca(clang::QualType ty, mlir::Location loc, + clang::CharUnits alignment); // Track current variable initialization (if there's one) const clang::VarDecl *currVarDecl = nullptr; @@ -438,12 +438,12 @@ class CIRGenFunction : public CIRGenTypeCache { LambdaCaptureFields; clang::FieldDecl *LambdaThisCaptureField = nullptr; - void buildForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, - CallArgList &CallArgs); - void buildLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); - void buildLambdaStaticInvokeBody(const CXXMethodDecl *MD); + void emitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator, + CallArgList &CallArgs); + void emitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD); + void emitLambdaStaticInvokeBody(const CXXMethodDecl *MD); - LValue buildPredefinedLValue(const PredefinedExpr *E); + LValue emitPredefinedLValue(const PredefinedExpr *E); /// When generating code for a C++ member function, this will /// hold the implicit 'this' declaration. @@ -596,7 +596,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenDebugInfo *getDebugInfo() { return debugInfo; } - void buildReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty); + void emitReturnOfRValue(mlir::Location loc, RValue RV, QualType Ty); /// Set the address of a local variable. void setAddrOfLocalVar(const clang::VarDecl *VD, Address Addr) { @@ -618,131 +618,130 @@ class CIRGenFunction : public CIRGenTypeCache { } /// Whether any type-checking sanitizers are enabled. If \c false, calls to - /// buildTypeCheck can be skipped. 
+ /// emitTypeCheck can be skipped. bool sanitizePerformTypeCheck() const; - void buildTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, - mlir::Value V, clang::QualType Type, - clang::CharUnits Alignment = clang::CharUnits::Zero(), - clang::SanitizerSet SkippedChecks = clang::SanitizerSet(), - std::optional ArraySize = std::nullopt); + void emitTypeCheck(TypeCheckKind TCK, clang::SourceLocation Loc, + mlir::Value V, clang::QualType Type, + clang::CharUnits Alignment = clang::CharUnits::Zero(), + clang::SanitizerSet SkippedChecks = clang::SanitizerSet(), + std::optional ArraySize = std::nullopt); - void buildAggExpr(const clang::Expr *E, AggValueSlot Slot); + void emitAggExpr(const clang::Expr *E, AggValueSlot Slot); /// Emit the computation of the specified expression of complex type, /// returning the result. - mlir::Value buildComplexExpr(const Expr *E); + mlir::Value emitComplexExpr(const Expr *E); - void buildComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit); + void emitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit); - void buildStoreOfComplex(mlir::Location Loc, mlir::Value V, LValue dest, - bool isInit); + void emitStoreOfComplex(mlir::Location Loc, mlir::Value V, LValue dest, + bool isInit); - Address buildAddrOfRealComponent(mlir::Location loc, Address complex, - QualType complexType); - Address buildAddrOfImagComponent(mlir::Location loc, Address complex, - QualType complexType); + Address emitAddrOfRealComponent(mlir::Location loc, Address complex, + QualType complexType); + Address emitAddrOfImagComponent(mlir::Location loc, Address complex, + QualType complexType); - LValue buildComplexAssignmentLValue(const BinaryOperator *E); - LValue buildComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); + LValue emitComplexAssignmentLValue(const BinaryOperator *E); + LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E); /// Emits a reference binding to the passed in expression. - RValue buildReferenceBindingToExpr(const Expr *E); + RValue emitReferenceBindingToExpr(const Expr *E); - LValue buildCastLValue(const CastExpr *E); + LValue emitCastLValue(const CastExpr *E); - void buildCXXConstructExpr(const clang::CXXConstructExpr *E, - AggValueSlot Dest); + void emitCXXConstructExpr(const clang::CXXConstructExpr *E, + AggValueSlot Dest); /// Emit a call to an inheriting constructor (that is, one that invokes a /// constructor inherited from a base class) by inlining its definition. This /// is necessary if the ABI does not support forwarding the arguments to the /// base class constructor (because they're variadic or similar). - void buildInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, - CXXCtorType CtorType, - bool ForVirtualBase, - bool Delegating, - CallArgList &Args); + void emitInlinedInheritingCXXConstructorCall(const CXXConstructorDecl *Ctor, + CXXCtorType CtorType, + bool ForVirtualBase, + bool Delegating, + CallArgList &Args); /// Emit a call to a constructor inherited from a base class, passing the /// current constructor's arguments along unmodified (without even making /// a copy). 
- void buildInheritedCXXConstructorCall(const CXXConstructorDecl *D, - bool ForVirtualBase, Address This, - bool InheritedFromVBase, - const CXXInheritedCtorInitExpr *E); - - void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, bool ForVirtualBase, - bool Delegating, AggValueSlot ThisAVS, - const clang::CXXConstructExpr *E); - - void buildCXXConstructorCall(const clang::CXXConstructorDecl *D, - clang::CXXCtorType Type, bool ForVirtualBase, - bool Delegating, Address This, CallArgList &Args, - AggValueSlot::Overlap_t Overlap, - clang::SourceLocation Loc, - bool NewPointerIsChecked); - - RValue buildCXXMemberOrOperatorCall( + void emitInheritedCXXConstructorCall(const CXXConstructorDecl *D, + bool ForVirtualBase, Address This, + bool InheritedFromVBase, + const CXXInheritedCtorInitExpr *E); + + void emitCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, AggValueSlot ThisAVS, + const clang::CXXConstructExpr *E); + + void emitCXXConstructorCall(const clang::CXXConstructorDecl *D, + clang::CXXCtorType Type, bool ForVirtualBase, + bool Delegating, Address This, CallArgList &Args, + AggValueSlot::Overlap_t Overlap, + clang::SourceLocation Loc, + bool NewPointerIsChecked); + + RValue emitCXXMemberOrOperatorCall( const clang::CXXMethodDecl *Method, const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, mlir::Value This, mlir::Value ImplicitParam, clang::QualType ImplicitParamTy, const clang::CallExpr *E, CallArgList *RtlArgs); - RValue buildCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, - ReturnValueSlot ReturnValue); - RValue buildCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, - ReturnValueSlot ReturnValue); - RValue buildCXXMemberOrOperatorMemberCallExpr( + RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue); + RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, + ReturnValueSlot ReturnValue); + RValue emitCXXMemberOrOperatorMemberCallExpr( const clang::CallExpr *CE, const clang::CXXMethodDecl *MD, ReturnValueSlot ReturnValue, bool HasQualifier, clang::NestedNameSpecifier *Qualifier, bool IsArrow, const clang::Expr *Base); - RValue buildCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, - const CXXMethodDecl *MD, - ReturnValueSlot ReturnValue); - void buildNullInitialization(mlir::Location loc, Address DestPtr, - QualType Ty); + RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, + const CXXMethodDecl *MD, + ReturnValueSlot ReturnValue); + void emitNullInitialization(mlir::Location loc, Address DestPtr, QualType Ty); bool shouldNullCheckClassCastValue(const CastExpr *CE); - void buildCXXTemporary(const CXXTemporary *Temporary, QualType TempType, - Address Ptr); - mlir::Value buildCXXNewExpr(const CXXNewExpr *E); - void buildCXXDeleteExpr(const CXXDeleteExpr *E); + void emitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, + Address Ptr); + mlir::Value emitCXXNewExpr(const CXXNewExpr *E); + void emitCXXDeleteExpr(const CXXDeleteExpr *E); - void buildCXXAggrConstructorCall(const CXXConstructorDecl *D, - const clang::ArrayType *ArrayTy, - Address ArrayPtr, const CXXConstructExpr *E, - bool NewPointerIsChecked, - bool ZeroInitialization = false); + void emitCXXAggrConstructorCall(const CXXConstructorDecl *D, + const clang::ArrayType *ArrayTy, + Address ArrayPtr, const CXXConstructExpr *E, + bool NewPointerIsChecked, + bool ZeroInitialization = false); - void buildCXXAggrConstructorCall(const 
CXXConstructorDecl *ctor, - mlir::Value numElements, Address arrayBase, - const CXXConstructExpr *E, - bool NewPointerIsChecked, - bool zeroInitialize); + void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, + mlir::Value numElements, Address arrayBase, + const CXXConstructExpr *E, + bool NewPointerIsChecked, + bool zeroInitialize); /// Compute the length of an array, even if it's a VLA, and drill down to the /// base element type. - mlir::Value buildArrayLength(const clang::ArrayType *arrayType, - QualType &baseType, Address &addr); + mlir::Value emitArrayLength(const clang::ArrayType *arrayType, + QualType &baseType, Address &addr); - void buildDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, - QualType DeleteTy, mlir::Value NumElements = nullptr, - CharUnits CookieSize = CharUnits()); + void emitDeleteCall(const FunctionDecl *DeleteFD, mlir::Value Ptr, + QualType DeleteTy, mlir::Value NumElements = nullptr, + CharUnits CookieSize = CharUnits()); - RValue buildBuiltinNewDeleteCall(const FunctionProtoType *type, - const CallExpr *theCallExpr, bool isDelete); + RValue emitBuiltinNewDeleteCall(const FunctionProtoType *type, + const CallExpr *theCallExpr, bool isDelete); - mlir::Value buildDynamicCast(Address ThisAddr, const CXXDynamicCastExpr *DCE); + mlir::Value emitDynamicCast(Address ThisAddr, const CXXDynamicCastExpr *DCE); mlir::Value createLoad(const clang::VarDecl *VD, const char *Name); - mlir::Value buildScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + mlir::Value emitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre); + mlir::Value emitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); - mlir::Value buildComplexPrePostIncDec(const UnaryOperator *E, LValue LV, - bool isInc, bool isPre); // Wrapper for function prototype sources. Wraps either a FunctionProtoType or // an ObjCMethodDecl. @@ -794,16 +793,16 @@ class CIRGenFunction : public CIRGenTypeCache { // Build a "reference" to a va_list; this is either the address or the value // of the expression, depending on how va_list is defined. - Address buildVAListRef(const Expr *E); + Address emitVAListRef(const Expr *E); /// Emits a CIR variable-argument operation, either /// \c cir.va.start or \c cir.va.end. /// /// \param ArgValue A reference to the \c va_list as emitted by either - /// \c buildVAListRef or \c buildMSVAListRef. + /// \c emitVAListRef or \c emitMSVAListRef. /// /// \param IsStart If \c true, emits \c cir.va.start, otherwise \c cir.va.end. - void buildVAStartEnd(mlir::Value ArgValue, bool IsStart); + void emitVAStartEnd(mlir::Value ArgValue, bool IsStart); /// Generate code to get an argument from the passed in pointer /// and update it accordingly. @@ -811,12 +810,12 @@ class CIRGenFunction : public CIRGenTypeCache { /// \param VE The \c VAArgExpr for which to generate code. /// /// \param VAListAddr Receives a reference to the \c va_list as emitted by - /// either \c buildVAListRef or \c buildMSVAListRef. + /// either \c emitVAListRef or \c emitMSVAListRef. /// /// \returns SSA value with the argument. 
-  mlir::Value buildVAArg(VAArgExpr *VE, Address &VAListAddr);
+  mlir::Value emitVAArg(VAArgExpr *VE, Address &VAListAddr);

-  void buildVariablyModifiedType(QualType Ty);
+  void emitVariablyModifiedType(QualType Ty);

   struct VlaSizePair {
     mlir::Value NumElts;
@@ -828,7 +827,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Returns an MLIR value that corresponds to the size,
   /// in non-variably-sized elements, of a variable length array type,
   /// plus that largest non-variably-sized element type. Assumes that
-  /// the type has already been emitted with buildVariablyModifiedType.
+  /// the type has already been emitted with emitVariablyModifiedType.
   VlaSizePair getVLASize(const VariableArrayType *vla);
   VlaSizePair getVLASize(QualType vla);
@@ -843,61 +842,58 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Given an expression that represents a value lvalue, this method emits
   /// the address of the lvalue, then loads the result as an rvalue,
   /// returning the rvalue.
-  RValue buildLoadOfLValue(LValue LV, SourceLocation Loc);
-  mlir::Value buildLoadOfScalar(Address addr, bool isVolatile,
-                                clang::QualType ty, clang::SourceLocation loc,
-                                LValueBaseInfo baseInfo,
-                                TBAAAccessInfo tbaaInfo,
-                                bool isNontemporal = false);
-  mlir::Value buildLoadOfScalar(Address addr, bool isVolatile,
-                                clang::QualType ty, mlir::Location loc,
-                                LValueBaseInfo baseInfo,
-                                TBAAAccessInfo tbaaInfo,
-                                bool isNontemporal = false);
+  RValue emitLoadOfLValue(LValue LV, SourceLocation Loc);
+  mlir::Value emitLoadOfScalar(Address addr, bool isVolatile,
+                               clang::QualType ty, clang::SourceLocation loc,
+                               LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo,
+                               bool isNontemporal = false);
+  mlir::Value emitLoadOfScalar(Address addr, bool isVolatile,
+                               clang::QualType ty, mlir::Location loc,
+                               LValueBaseInfo baseInfo, TBAAAccessInfo tbaaInfo,
+                               bool isNontemporal = false);

   int64_t getAccessedFieldNo(unsigned idx, const mlir::ArrayAttr elts);

-  RValue buildLoadOfExtVectorElementLValue(LValue LV);
+  RValue emitLoadOfExtVectorElementLValue(LValue LV);

-  void buildStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
+  void emitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);

-  RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);
+  RValue emitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc);

   /// Load a scalar value from an address, taking care to appropriately convert
   /// from the memory representation to CIR value representation.
-  mlir::Value buildLoadOfScalar(Address addr, bool isVolatile,
-                                clang::QualType ty, clang::SourceLocation loc,
-                                AlignmentSource source = AlignmentSource::Type,
-                                bool isNontemporal = false) {
-    return buildLoadOfScalar(addr, isVolatile, ty, loc, LValueBaseInfo(source),
-                             CGM.getTBAAAccessInfo(ty), isNontemporal);
+  mlir::Value emitLoadOfScalar(Address addr, bool isVolatile,
+                               clang::QualType ty, clang::SourceLocation loc,
+                               AlignmentSource source = AlignmentSource::Type,
+                               bool isNontemporal = false) {
+    return emitLoadOfScalar(addr, isVolatile, ty, loc, LValueBaseInfo(source),
+                            CGM.getTBAAAccessInfo(ty), isNontemporal);
   }

   /// Load a scalar value from an address, taking care to appropriately convert
   /// from the memory representation to the CIR value representation. The
   /// l-value must be a simple l-value.
- mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); - mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); + mlir::Value emitLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + mlir::Value emitLoadOfScalar(LValue lvalue, mlir::Location Loc); /// Load a complex number from the specified l-value. - mlir::Value buildLoadOfComplex(LValue src, SourceLocation loc); + mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc); - Address buildLoadOfReference(LValue refLVal, mlir::Location loc, - LValueBaseInfo *pointeeBaseInfo = nullptr, - TBAAAccessInfo *pointeeTBAAInfo = nullptr); - LValue buildLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); + Address emitLoadOfReference(LValue refLVal, mlir::Location loc, + LValueBaseInfo *pointeeBaseInfo = nullptr, + TBAAAccessInfo *pointeeTBAAInfo = nullptr); + LValue emitLoadOfReferenceLValue(LValue RefLVal, mlir::Location Loc); LValue - buildLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, - QualType RefTy, - AlignmentSource Source = AlignmentSource::Type) { + emitLoadOfReferenceLValue(Address RefAddr, mlir::Location Loc, QualType RefTy, + AlignmentSource Source = AlignmentSource::Type) { LValue RefLVal = makeAddrLValue(RefAddr, RefTy, LValueBaseInfo(Source)); - return buildLoadOfReferenceLValue(RefLVal, Loc); + return emitLoadOfReferenceLValue(RefLVal, Loc); } - void buildImplicitAssignmentOperatorBody(FunctionArgList &Args); + void emitImplicitAssignmentOperatorBody(FunctionArgList &Args); - void buildAggregateStore(mlir::Value Val, Address Dest, bool DestIsVolatile); + void emitAggregateStore(mlir::Value Val, Address Dest, bool DestIsVolatile); - void buildCallArgs( + void emitCallArgs( CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range ArgRange, AbstractCallee AC = AbstractCallee(), unsigned ParamsToSkip = 0, @@ -906,39 +902,39 @@ class CIRGenFunction : public CIRGenTypeCache { void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl); void checkTargetFeatures(SourceLocation Loc, const FunctionDecl *TargetDecl); - LValue buildStmtExprLValue(const StmtExpr *E); + LValue emitStmtExprLValue(const StmtExpr *E); - LValue buildPointerToDataMemberBinaryExpr(const BinaryOperator *E); + LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *E); /// TODO: Add TBAAAccessInfo - Address buildCXXMemberDataPointerAddress( + Address emitCXXMemberDataPointerAddress( const Expr *E, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo); /// Generate a call of the given function, expecting the given /// result type, and using the given argument list which specifies both the /// LLVM arguments and the types they were derived from. 
- RValue buildCall(const CIRGenFunctionInfo &CallInfo, - const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, - cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, - mlir::Location loc, - std::optional E = std::nullopt); - RValue buildCall(const CIRGenFunctionInfo &CallInfo, - const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, - const CallArgList &Args, - cir::CIRCallOpInterface *callOrTryCall = nullptr, - bool IsMustTail = false) { + RValue emitCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, + const CallArgList &Args, + cir::CIRCallOpInterface *callOrTryCall, bool IsMustTail, + mlir::Location loc, + std::optional E = std::nullopt); + RValue emitCall(const CIRGenFunctionInfo &CallInfo, + const CIRGenCallee &Callee, ReturnValueSlot ReturnValue, + const CallArgList &Args, + cir::CIRCallOpInterface *callOrTryCall = nullptr, + bool IsMustTail = false) { assert(currSrcLoc && "source location must have been set"); - return buildCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall, - IsMustTail, *currSrcLoc, std::nullopt); + return emitCall(CallInfo, Callee, ReturnValue, Args, callOrTryCall, + IsMustTail, *currSrcLoc, std::nullopt); } - RValue buildCall(clang::QualType FnType, const CIRGenCallee &Callee, - const clang::CallExpr *E, ReturnValueSlot returnValue, - mlir::Value Chain = nullptr); + RValue emitCall(clang::QualType FnType, const CIRGenCallee &Callee, + const clang::CallExpr *E, ReturnValueSlot returnValue, + mlir::Value Chain = nullptr); - RValue buildCallExpr(const clang::CallExpr *E, - ReturnValueSlot ReturnValue = ReturnValueSlot()); + RValue emitCallExpr(const clang::CallExpr *E, + ReturnValueSlot ReturnValue = ReturnValueSlot()); Address getAsNaturalAddressOf(Address Addr, QualType PointeeTy); @@ -946,26 +942,26 @@ class CIRGenFunction : public CIRGenTypeCache { return getAsNaturalAddressOf(Addr, PointeeType).getBasePointer(); } - mlir::Value buildRuntimeCall(mlir::Location loc, cir::FuncOp callee, - llvm::ArrayRef args = {}); + mlir::Value emitRuntimeCall(mlir::Location loc, cir::FuncOp callee, + llvm::ArrayRef args = {}); - void buildInvariantStart(CharUnits Size); + void emitInvariantStart(CharUnits Size); /// Create a check for a function parameter that may potentially be /// declared as non-null. - void buildNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, - AbstractCallee AC, unsigned ParmNum); + void emitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, + AbstractCallee AC, unsigned ParmNum); - void buildCallArg(CallArgList &args, const clang::Expr *E, - clang::QualType ArgType); + void emitCallArg(CallArgList &args, const clang::Expr *E, + clang::QualType ArgType); - LValue buildCallExprLValue(const CallExpr *E); + LValue emitCallExprLValue(const CallExpr *E); - /// Similarly to buildAnyExpr(), however, the result will always be accessible + /// Similarly to emitAnyExpr(), however, the result will always be accessible /// even if no aggregate location is provided. - RValue buildAnyExprToTemp(const clang::Expr *E); + RValue emitAnyExprToTemp(const clang::Expr *E); - CIRGenCallee buildCallee(const clang::Expr *E); + CIRGenCallee emitCallee(const clang::Expr *E); void finishFunction(SourceLocation EndLoc); @@ -973,87 +969,87 @@ class CIRGenFunction : public CIRGenTypeCache { /// result is returned as an RValue struct. If this is an aggregate /// expression, the aggloc/agglocvolatile arguments indicate where the result /// should be returned. 
-  RValue buildAnyExpr(const clang::Expr *E,
-                      AggValueSlot aggSlot = AggValueSlot::ignored(),
-                      bool ignoreResult = false);
-
-  mlir::LogicalResult buildFunctionBody(const clang::Stmt *Body);
-  mlir::LogicalResult buildCoroutineBody(const CoroutineBodyStmt &S);
-  mlir::LogicalResult buildCoreturnStmt(const CoreturnStmt &S);
-
-  cir::CallOp buildCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
-  cir::CallOp buildCoroAllocBuiltinCall(mlir::Location loc);
-  cir::CallOp buildCoroBeginBuiltinCall(mlir::Location loc,
-                                        mlir::Value coroframeAddr);
-  cir::CallOp buildCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
-
-  RValue buildCoawaitExpr(const CoawaitExpr &E,
-                          AggValueSlot aggSlot = AggValueSlot::ignored(),
-                          bool ignoreResult = false);
-  RValue buildCoyieldExpr(const CoyieldExpr &E,
-                          AggValueSlot aggSlot = AggValueSlot::ignored(),
-                          bool ignoreResult = false);
-  RValue buildCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
-  RValue buildCoroutineFrame();
+  RValue emitAnyExpr(const clang::Expr *E,
+                     AggValueSlot aggSlot = AggValueSlot::ignored(),
+                     bool ignoreResult = false);
+
+  mlir::LogicalResult emitFunctionBody(const clang::Stmt *Body);
+  mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &S);
+  mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &S);
+
+  cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
+  cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc);
+  cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc,
+                                       mlir::Value coroframeAddr);
+  cir::CallOp emitCoroEndBuiltinCall(mlir::Location loc, mlir::Value nullPtr);
+
+  RValue emitCoawaitExpr(const CoawaitExpr &E,
+                         AggValueSlot aggSlot = AggValueSlot::ignored(),
+                         bool ignoreResult = false);
+  RValue emitCoyieldExpr(const CoyieldExpr &E,
+                         AggValueSlot aggSlot = AggValueSlot::ignored(),
+                         bool ignoreResult = false);
+  RValue emitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
+  RValue emitCoroutineFrame();
   enum class MSVCIntrin;
-  mlir::Value buildARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
-                                     ReturnValueSlot ReturnValue,
-                                     llvm::Triple::ArchType Arch);
-  mlir::Value buildARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
-                                     ReturnValueSlot ReturnValue,
-                                     llvm::Triple::ArchType Arch);
-  mlir::Value buildCommonNeonBuiltinExpr(
+  mlir::Value emitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                    ReturnValueSlot ReturnValue,
+                                    llvm::Triple::ArchType Arch);
+  mlir::Value emitARMCDEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                    ReturnValueSlot ReturnValue,
+                                    llvm::Triple::ArchType Arch);
+  mlir::Value emitCommonNeonBuiltinExpr(
       unsigned builtinID, unsigned llvmIntrinsic, unsigned altLLVMIntrinsic,
       const char *nameHint, unsigned modifier, const CallExpr *e,
       llvm::SmallVectorImpl<mlir::Value> &ops, Address ptrOp0, Address ptrOp1,
      llvm::Triple::ArchType arch);
-  mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, QualType ty,
-                                       SourceLocation loc,
-                                       SourceLocation assumptionLoc,
-                                       mlir::IntegerAttr alignment,
-                                       mlir::Value offsetValue = nullptr);
+  mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty,
+                                      SourceLocation loc,
+                                      SourceLocation assumptionLoc,
+                                      mlir::IntegerAttr alignment,
+                                      mlir::Value offsetValue = nullptr);
-  mlir::Value buildAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
-                                       SourceLocation assumptionLoc,
-                                       mlir::IntegerAttr alignment,
-                                       mlir::Value offsetValue = nullptr);
+  mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, const Expr *expr,
+                                      SourceLocation assumptionLoc,
+                                      mlir::IntegerAttr alignment,
+                                      mlir::Value offsetValue = nullptr);
   /// Build a debug stoppoint if we are emitting debug info.
-  void buildStopPoint(const Stmt *S);
+  void emitStopPoint(const Stmt *S);
   // Build CIR for a statement. useCurrentScope should be true if no
   // new scopes need be created when finding a compound statement.
   mlir::LogicalResult
-  buildStmt(const clang::Stmt *S, bool useCurrentScope,
-            llvm::ArrayRef<const Attr *> Attrs = std::nullopt);
+  emitStmt(const clang::Stmt *S, bool useCurrentScope,
+           llvm::ArrayRef<const Attr *> Attrs = std::nullopt);
-  mlir::LogicalResult buildSimpleStmt(const clang::Stmt *S,
-                                      bool useCurrentScope);
+  mlir::LogicalResult emitSimpleStmt(const clang::Stmt *S,
+                                     bool useCurrentScope);
-  mlir::LogicalResult buildForStmt(const clang::ForStmt &S);
-  mlir::LogicalResult buildWhileStmt(const clang::WhileStmt &S);
-  mlir::LogicalResult buildDoStmt(const clang::DoStmt &S);
+  mlir::LogicalResult emitForStmt(const clang::ForStmt &S);
+  mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &S);
+  mlir::LogicalResult emitDoStmt(const clang::DoStmt &S);
   mlir::LogicalResult
-  buildCXXForRangeStmt(const CXXForRangeStmt &S,
-                       llvm::ArrayRef<const Attr *> Attrs = std::nullopt);
-  mlir::LogicalResult buildSwitchStmt(const clang::SwitchStmt &S);
+  emitCXXForRangeStmt(const CXXForRangeStmt &S,
+                      llvm::ArrayRef<const Attr *> Attrs = std::nullopt);
+  mlir::LogicalResult emitSwitchStmt(const clang::SwitchStmt &S);
-  mlir::LogicalResult buildCXXTryStmtUnderScope(const clang::CXXTryStmt &S);
-  mlir::LogicalResult buildCXXTryStmt(const clang::CXXTryStmt &S);
+  mlir::LogicalResult emitCXXTryStmtUnderScope(const clang::CXXTryStmt &S);
+  mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &S);
   void enterCXXTryStmt(const CXXTryStmt &S, cir::TryOp catchOp,
                        bool IsFnTryBlock = false);
   void exitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
-  Address buildCompoundStmt(const clang::CompoundStmt &S, bool getLast = false,
-                            AggValueSlot slot = AggValueSlot::ignored());
+  Address emitCompoundStmt(const clang::CompoundStmt &S, bool getLast = false,
+                           AggValueSlot slot = AggValueSlot::ignored());
   Address
-  buildCompoundStmtWithoutScope(const clang::CompoundStmt &S,
-                                bool getLast = false,
-                                AggValueSlot slot = AggValueSlot::ignored());
+  emitCompoundStmtWithoutScope(const clang::CompoundStmt &S,
+                               bool getLast = false,
+                               AggValueSlot slot = AggValueSlot::ignored());
   GlobalDecl CurSEHParent;
   bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
@@ -1073,12 +1069,12 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Emit code to compute the specified expression,
   /// ignoring the result.
-  void buildIgnoredExpr(const clang::Expr *E);
+  void emitIgnoredExpr(const clang::Expr *E);
-  LValue buildArraySubscriptExpr(const clang::ArraySubscriptExpr *E,
-                                 bool Accessed = false);
+  LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *E,
+                                bool Accessed = false);
-  mlir::LogicalResult buildDeclStmt(const clang::DeclStmt &S);
+  mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &S);
   /// Determine whether a return value slot may overlap some other object.
   AggValueSlot::Overlap_t getOverlapForReturnValue() {
@@ -1100,53 +1096,52 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// addressed later.
   RValue GetUndefRValue(clang::QualType Ty);
-  mlir::Value buildFromMemory(mlir::Value Value, clang::QualType Ty);
+  mlir::Value emitFromMemory(mlir::Value Value, clang::QualType Ty);
   mlir::Type convertType(clang::QualType T);
-  mlir::LogicalResult buildAsmStmt(const clang::AsmStmt &S);
+  mlir::LogicalResult emitAsmStmt(const clang::AsmStmt &S);
   std::pair<mlir::Value, mlir::Type>
-  buildAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
-                      QualType InputType, std::string &ConstraintStr,
-                      SourceLocation Loc);
+  emitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+                     QualType InputType, std::string &ConstraintStr,
+                     SourceLocation Loc);
   std::pair<mlir::Value, mlir::Type>
-  buildAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
-                std::string &ConstraintStr);
+  emitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
+               std::string &ConstraintStr);
-  mlir::LogicalResult buildIfStmt(const clang::IfStmt &S);
+  mlir::LogicalResult emitIfStmt(const clang::IfStmt &S);
-  mlir::LogicalResult buildReturnStmt(const clang::ReturnStmt &S);
+  mlir::LogicalResult emitReturnStmt(const clang::ReturnStmt &S);
-  mlir::LogicalResult buildGotoStmt(const clang::GotoStmt &S);
+  mlir::LogicalResult emitGotoStmt(const clang::GotoStmt &S);
-  mlir::LogicalResult buildLabel(const clang::LabelDecl *D);
-  mlir::LogicalResult buildLabelStmt(const clang::LabelStmt &S);
+  mlir::LogicalResult emitLabel(const clang::LabelDecl *D);
+  mlir::LogicalResult emitLabelStmt(const clang::LabelStmt &S);
-  mlir::LogicalResult buildAttributedStmt(const AttributedStmt &S);
+  mlir::LogicalResult emitAttributedStmt(const AttributedStmt &S);
-  mlir::LogicalResult buildBreakStmt(const clang::BreakStmt &S);
-  mlir::LogicalResult buildContinueStmt(const clang::ContinueStmt &S);
+  mlir::LogicalResult emitBreakStmt(const clang::BreakStmt &S);
+  mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &S);
   // OpenMP gen functions:
-  mlir::LogicalResult buildOMPParallelDirective(const OMPParallelDirective &S);
-  mlir::LogicalResult buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
-  mlir::LogicalResult
-  buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
-  mlir::LogicalResult buildOMPBarrierDirective(const OMPBarrierDirective &S);
+  mlir::LogicalResult emitOMPParallelDirective(const OMPParallelDirective &S);
+  mlir::LogicalResult emitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
+  mlir::LogicalResult emitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
+  mlir::LogicalResult emitOMPBarrierDirective(const OMPBarrierDirective &S);
-  LValue buildOpaqueValueLValue(const OpaqueValueExpr *e);
+  LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
   /// Emit code to compute a designator that specifies the location
   /// of the expression.
   /// FIXME: document this function better.
-  LValue buildLValue(const clang::Expr *E);
+  LValue emitLValue(const clang::Expr *E);
-  void buildDecl(const clang::Decl &D);
+  void emitDecl(const clang::Decl &D);
   /// Emit local annotations for the local variable V, declared by D.
-  void buildVarAnnotations(const VarDecl *decl, mlir::Value val);
+  void emitVarAnnotations(const VarDecl *decl, mlir::Value val);
   /// If the specified expression does not fold to a constant, or if it does but
   /// contains a label, return false. If it constant folds return true and set
@@ -1168,20 +1163,19 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// times we expect the condition to evaluate to true based on PGO data. We
   /// might decide to leave this as a separate pass (see EmitBranchOnBoolExpr
   /// for extra ideas).
-  mlir::LogicalResult buildIfOnBoolExpr(const clang::Expr *cond,
-                                        const clang::Stmt *thenS,
-                                        const clang::Stmt *elseS);
-  cir::IfOp buildIfOnBoolExpr(
+  mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond,
+                                       const clang::Stmt *thenS,
+                                       const clang::Stmt *elseS);
+  cir::IfOp emitIfOnBoolExpr(
       const clang::Expr *cond,
       llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> thenBuilder,
       mlir::Location thenLoc,
       llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)> elseBuilder,
       std::optional<mlir::Location> elseLoc = {});
-  mlir::Value buildTernaryOnBoolExpr(const clang::Expr *cond,
-                                     mlir::Location loc,
-                                     const clang::Stmt *thenS,
-                                     const clang::Stmt *elseS);
-  mlir::Value buildOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
+  mlir::Value emitTernaryOnBoolExpr(const clang::Expr *cond, mlir::Location loc,
+                                    const clang::Stmt *thenS,
+                                    const clang::Stmt *elseS);
+  mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond);
   class ConstantEmission {
     // Cannot use mlir::TypedAttr directly here because of bit availability.
@@ -1223,14 +1217,14 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Emit the computation of the specified expression of scalar type,
   /// ignoring the result.
-  mlir::Value buildScalarExpr(const clang::Expr *E);
-  mlir::Value buildScalarConstant(const ConstantEmission &Constant, Expr *E);
+  mlir::Value emitScalarExpr(const clang::Expr *E);
+  mlir::Value emitScalarConstant(const ConstantEmission &Constant, Expr *E);
-  mlir::Value buildPromotedComplexExpr(const Expr *E, QualType PromotionType);
-  mlir::Value buildPromotedScalarExpr(const clang::Expr *E,
-                                      QualType PromotionType);
-  mlir::Value buildPromotedValue(mlir::Value result, QualType PromotionType);
-  mlir::Value buildUnPromotedValue(mlir::Value result, QualType PromotionType);
+  mlir::Value emitPromotedComplexExpr(const Expr *E, QualType PromotionType);
+  mlir::Value emitPromotedScalarExpr(const clang::Expr *E,
+                                     QualType PromotionType);
+  mlir::Value emitPromotedValue(mlir::Value result, QualType PromotionType);
+  mlir::Value emitUnPromotedValue(mlir::Value result, QualType PromotionType);
   mlir::Type getCIRType(const clang::QualType &type);
@@ -1238,23 +1232,23 @@
                     mlir::ArrayAttr &value, cir::CaseOpKind &kind);
   template <typename T>
-  mlir::LogicalResult
-  buildCaseDefaultCascade(const T *stmt, mlir::Type condType,
-                          mlir::ArrayAttr value, cir::CaseOpKind kind,
-                          bool buildingTopLevelCase);
-
-  mlir::LogicalResult buildCaseStmt(const clang::CaseStmt &S,
-                                    mlir::Type condType,
-                                    bool buildingTopLevelCase);
+  mlir::LogicalResult emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
+                                             mlir::ArrayAttr value,
+                                             cir::CaseOpKind kind,
+                                             bool buildingTopLevelCase);
-  mlir::LogicalResult buildDefaultStmt(const clang::DefaultStmt &S,
-                                       mlir::Type condType,
-                                       bool buildingTopLevelCase);
+  mlir::LogicalResult emitCaseStmt(const clang::CaseStmt &S,
+                                   mlir::Type condType,
+                                   bool buildingTopLevelCase);
-  mlir::LogicalResult buildSwitchCase(const clang::SwitchCase &S,
+  mlir::LogicalResult emitDefaultStmt(const clang::DefaultStmt &S,
+                                      mlir::Type condType,
                                       bool buildingTopLevelCase);
-  mlir::LogicalResult buildSwitchBody(const clang::Stmt *S);
+  mlir::LogicalResult emitSwitchCase(const clang::SwitchCase &S,
+                                     bool buildingTopLevelCase);
+
+  mlir::LogicalResult emitSwitchBody(const clang::Stmt *S);
   cir::FuncOp generateCode(clang::GlobalDecl GD, cir::FuncOp Fn,
                            const CIRGenFunctionInfo &FnInfo);
@@ -1305,86 +1299,85 @@ class CIRGenFunction : public CIRGenTypeCache {
   }
   };
-  LValue buildMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+  LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
   /// Emit the alloca and debug information for a
   /// local variable. Does not emit initialization or destruction.
-  AutoVarEmission buildAutoVarAlloca(const clang::VarDecl &D,
-                                     mlir::OpBuilder::InsertPoint = {});
-
-  void buildAutoVarInit(const AutoVarEmission &emission);
-  void buildAutoVarCleanups(const AutoVarEmission &emission);
-  void buildAutoVarTypeCleanup(const AutoVarEmission &emission,
-                               clang::QualType::DestructionKind dtorKind);
-
-  void buildStoreOfScalar(mlir::Value value, LValue lvalue);
-  void buildStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
-                          clang::QualType ty, LValueBaseInfo baseInfo,
-                          TBAAAccessInfo tbaaInfo, bool isInit = false,
-                          bool isNontemporal = false);
-  void buildStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
-                          QualType ty,
-                          AlignmentSource source = AlignmentSource::Type,
-                          bool isInit = false, bool isNontemporal = false) {
-    buildStoreOfScalar(value, addr, isVolatile, ty, LValueBaseInfo(source),
-                       CGM.getTBAAAccessInfo(ty), isInit, isNontemporal);
+  AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &D,
+                                    mlir::OpBuilder::InsertPoint = {});
+
+  void emitAutoVarInit(const AutoVarEmission &emission);
+  void emitAutoVarCleanups(const AutoVarEmission &emission);
+  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+                              clang::QualType::DestructionKind dtorKind);
+
+  void emitStoreOfScalar(mlir::Value value, LValue lvalue);
+  void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
+                         clang::QualType ty, LValueBaseInfo baseInfo,
+                         TBAAAccessInfo tbaaInfo, bool isInit = false,
+                         bool isNontemporal = false);
+  void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile,
+                         QualType ty,
+                         AlignmentSource source = AlignmentSource::Type,
+                         bool isInit = false, bool isNontemporal = false) {
+    emitStoreOfScalar(value, addr, isVolatile, ty, LValueBaseInfo(source),
+                      CGM.getTBAAAccessInfo(ty), isInit, isNontemporal);
   }
-  void buildStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
+  void emitStoreOfScalar(mlir::Value value, LValue lvalue, bool isInit);
-  mlir::Value buildToMemory(mlir::Value Value, clang::QualType Ty);
-  void buildDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
+  mlir::Value emitToMemory(mlir::Value Value, clang::QualType Ty);
+  void emitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
   /// Store the specified rvalue into the specified
   /// lvalue, where both are guaranteed to have the same type, and that type
   /// is 'Ty'.
-  void buildStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
+  void emitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
-  void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
-                                       mlir::Value &Result);
+  void emitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                      mlir::Value &Result);
-  cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest);
+  cir::BrOp emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest);
   /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
   /// nonnull, if \p LHS is marked _Nonnull.
-  void buildNullabilityCheck(LValue LHS, mlir::Value RHS,
-                             clang::SourceLocation Loc);
+  void emitNullabilityCheck(LValue LHS, mlir::Value RHS,
+                            clang::SourceLocation Loc);
   /// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
   /// detect undefined behavior when the pointer overflow sanitizer is enabled.
   /// \p SignedIndices indicates whether any of the GEP indices are signed.
   /// \p IsSubtraction indicates whether the expression used to form the GEP
   /// is a subtraction.
-  mlir::Value buildCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr,
-                                      llvm::ArrayRef<mlir::Value> IdxList,
-                                      bool SignedIndices, bool IsSubtraction,
-                                      SourceLocation Loc);
-
-  void buildScalarInit(const clang::Expr *init, mlir::Location loc,
-                       LValue lvalue, bool capturedByInit = false);
-
-  LValue buildDeclRefLValue(const clang::DeclRefExpr *E);
-  LValue buildExtVectorElementExpr(const ExtVectorElementExpr *E);
-  LValue buildBinaryOperatorLValue(const clang::BinaryOperator *E);
-  LValue buildCompoundAssignmentLValue(const clang::CompoundAssignOperator *E);
-  LValue buildUnaryOpLValue(const clang::UnaryOperator *E);
-  LValue buildStringLiteralLValue(const StringLiteral *E);
-  RValue buildBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID,
-                          const clang::CallExpr *E,
-                          ReturnValueSlot ReturnValue);
-  RValue buildRotate(const CallExpr *E, bool IsRotateRight);
-  mlir::Value buildTargetBuiltinExpr(unsigned BuiltinID,
-                                     const clang::CallExpr *E,
-                                     ReturnValueSlot ReturnValue);
+  mlir::Value emitCheckedInBoundsGEP(mlir::Type ElemTy, mlir::Value Ptr,
+                                     llvm::ArrayRef<mlir::Value> IdxList,
+                                     bool SignedIndices, bool IsSubtraction,
+                                     SourceLocation Loc);
+
+  void emitScalarInit(const clang::Expr *init, mlir::Location loc,
+                      LValue lvalue, bool capturedByInit = false);
+
+  LValue emitDeclRefLValue(const clang::DeclRefExpr *E);
+  LValue emitExtVectorElementExpr(const ExtVectorElementExpr *E);
+  LValue emitBinaryOperatorLValue(const clang::BinaryOperator *E);
+  LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *E);
+  LValue emitUnaryOpLValue(const clang::UnaryOperator *E);
+  LValue emitStringLiteralLValue(const StringLiteral *E);
+  RValue emitBuiltinExpr(const clang::GlobalDecl GD, unsigned BuiltinID,
+                         const clang::CallExpr *E, ReturnValueSlot ReturnValue);
+  RValue emitRotate(const CallExpr *E, bool IsRotateRight);
+  mlir::Value emitTargetBuiltinExpr(unsigned BuiltinID,
+                                    const clang::CallExpr *E,
+                                    ReturnValueSlot ReturnValue);
   // Target specific builtin emission
-  mlir::Value buildScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
-                                           const CallExpr *E);
-  mlir::Value buildAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
-                                      ReturnValueSlot ReturnValue,
-                                      llvm::Triple::ArchType Arch);
-  mlir::Value buildAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
-  mlir::Value buildAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
-  mlir::Value buildX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  mlir::Value emitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
+                                          const CallExpr *E);
+  mlir::Value emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+                                     ReturnValueSlot ReturnValue,
+                                     llvm::Triple::ArchType Arch);
+  mlir::Value emitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  mlir::Value emitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  mlir::Value emitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   /// Given an expression with a pointer type, emit the value and compute our
   /// best estimate of the alignment of the pointee.
@@ -1403,13 +1396,12 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// reasonable to just ignore the returned alignment when it isn't from an
   /// explicit source.
   Address
-  buildPointerWithAlignment(const clang::Expr *expr,
-                            LValueBaseInfo *baseInfo = nullptr,
-                            TBAAAccessInfo *tbaaInfo = nullptr,
-                            KnownNonNull_t isKnownNonNull = NotKnownNonNull);
+  emitPointerWithAlignment(const clang::Expr *expr,
+                           LValueBaseInfo *baseInfo = nullptr,
+                           TBAAAccessInfo *tbaaInfo = nullptr,
+                           KnownNonNull_t isKnownNonNull = NotKnownNonNull);
-  LValue
-  buildConditionalOperatorLValue(const AbstractConditionalOperator *expr);
+  LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
   /// Emit an expression as an initializer for an object (variable, field, etc.)
   /// at the given location. The expression is not necessarily the normal
@@ -1421,39 +1413,39 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// \param lvalue the lvalue to initialize
   /// \param capturedByInit true if \p D is a __block variable whose address is
   /// potentially changed by the initializer
-  void buildExprAsInit(const clang::Expr *init, const clang::ValueDecl *D,
-                       LValue lvalue, bool capturedByInit = false);
+  void emitExprAsInit(const clang::Expr *init, const clang::ValueDecl *D,
+                      LValue lvalue, bool capturedByInit = false);
   /// Emit code and set up symbol table for a variable declaration with auto,
   /// register, or no storage class specifier. These turn into simple stack
   /// objects, or globals, depending on the target.
-  void buildAutoVarDecl(const clang::VarDecl &D);
+  void emitAutoVarDecl(const clang::VarDecl &D);
   /// This method handles emission of any variable declaration
   /// inside a function, including static vars etc.
-  void buildVarDecl(const clang::VarDecl &D);
+  void emitVarDecl(const clang::VarDecl &D);
   cir::GlobalOp addInitializerToStaticVarDecl(const VarDecl &D,
                                               cir::GlobalOp GV,
                                               cir::GetGlobalOp GVAddr);
-  void buildStaticVarDecl(const VarDecl &D, cir::GlobalLinkageKind Linkage);
+  void emitStaticVarDecl(const VarDecl &D, cir::GlobalLinkageKind Linkage);
   /// Perform the usual unary conversions on the specified
   /// expression and compare the result against zero, returning an Int1Ty value.
   mlir::Value evaluateExprAsBool(const clang::Expr *E);
-  void buildCtorPrologue(const clang::CXXConstructorDecl *CD,
-                         clang::CXXCtorType Type, FunctionArgList &Args);
-  void buildConstructorBody(FunctionArgList &Args);
-  void buildDestructorBody(FunctionArgList &Args);
-  void buildCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
-                              bool ForVirtualBase, bool Delegating,
-                              Address This, QualType ThisTy);
-  RValue buildCXXDestructorCall(GlobalDecl Dtor, const CIRGenCallee &Callee,
-                                mlir::Value This, QualType ThisTy,
-                                mlir::Value ImplicitParam,
-                                QualType ImplicitParamTy, const CallExpr *E);
+  void emitCtorPrologue(const clang::CXXConstructorDecl *CD,
+                        clang::CXXCtorType Type, FunctionArgList &Args);
+  void emitConstructorBody(FunctionArgList &Args);
+  void emitDestructorBody(FunctionArgList &Args);
+  void emitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+                             bool ForVirtualBase, bool Delegating, Address This,
+                             QualType ThisTy);
+  RValue emitCXXDestructorCall(GlobalDecl Dtor, const CIRGenCallee &Callee,
+                               mlir::Value This, QualType ThisTy,
+                               mlir::Value ImplicitParam,
+                               QualType ImplicitParamTy, const CallExpr *E);
   /// Enter the cleanups necessary to complete the given phase of destruction
   /// for a destructor. The end result should call destructors on members and
@@ -1521,8 +1513,8 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// If whole-program virtual table optimization is enabled, emit an assumption
   /// that VTable is a member of RD's type identifier. Or, if vptr CFI is
   /// enabled, emit a check that VTable is a member of RD's type identifier.
-  void buildTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
-                                     mlir::Value VTable, SourceLocation Loc);
+  void emitTypeMetadataCodeForVCall(const CXXRecordDecl *RD, mlir::Value VTable,
+                                    SourceLocation Loc);
   /// Return the VTT parameter that should be passed to a base
   /// constructor/destructor with virtual bases.
@@ -1647,15 +1639,14 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Emit a conversion from the specified type to the specified destination
   /// type, both of which are CIR scalar types.
-  mlir::Value buildScalarConversion(mlir::Value Src, clang::QualType SrcTy,
-                                    clang::QualType DstTy,
-                                    clang::SourceLocation Loc);
+  mlir::Value emitScalarConversion(mlir::Value Src, clang::QualType SrcTy,
+                                   clang::QualType DstTy,
+                                   clang::SourceLocation Loc);
   /// Emit a conversion from the specified complex type to the specified
   /// destination type, where the destination type is an LLVM scalar type.
-  mlir::Value buildComplexToScalarConversion(mlir::Value Src, QualType SrcTy,
-                                             QualType DstTy,
-                                             SourceLocation Loc);
+  mlir::Value emitComplexToScalarConversion(mlir::Value Src, QualType SrcTy,
+                                            QualType DstTy, SourceLocation Loc);
   LValue makeAddrLValue(Address addr, clang::QualType ty,
                         LValueBaseInfo baseInfo) {
@@ -1674,18 +1665,18 @@ class CIRGenFunction : public CIRGenTypeCache {
   void initializeVTablePointer(mlir::Location loc, const VPtr &Vptr);
   AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD);
-  LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field);
-  LValue buildLValueForBitField(LValue base, const FieldDecl *field);
+  LValue emitLValueForField(LValue Base, const clang::FieldDecl *Field);
+  LValue emitLValueForBitField(LValue base, const FieldDecl *field);
-  /// Like buildLValueForField, except that if the Field is a reference, this
+  /// Like emitLValueForField, except that if the Field is a reference, this
   /// will return the address of the reference and not the address of the value
   /// stored in the reference.
-  LValue buildLValueForFieldInitialization(LValue Base,
-                                           const clang::FieldDecl *Field,
-                                           llvm::StringRef FieldName);
+  LValue emitLValueForFieldInitialization(LValue Base,
+                                          const clang::FieldDecl *Field,
+                                          llvm::StringRef FieldName);
-  void buildInitializerForField(clang::FieldDecl *Field, LValue LHS,
-                                clang::Expr *Init);
+  void emitInitializerForField(clang::FieldDecl *Field, LValue LHS,
+                               clang::Expr *Init);
   /// Determine whether the given initializer is trivial in the sense
   /// that it requires no code to be generated.
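The distinction drawn above for reference fields is easiest to see in plain C++. A minimal illustration (hypothetical types, not taken from this patch): initializing a reference member must write to the slot that implements the reference itself, while ordinary member access loads the reference and then stores through it.

struct Wrapper {
  int &ref;                    // reference member
  Wrapper(int &v) : ref(v) {}  // emitLValueForFieldInitialization-style:
                               // store into the slot holding the reference
};

int main() {
  int x = 0;
  Wrapper w(x);
  w.ref = 42;                  // emitLValueForField-style: load the
                               // reference, then store through it into x
  return x;                    // returns 42
}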
@@ -1694,13 +1685,13 @@ class CIRGenFunction : public CIRGenTypeCache {
   // TODO: this can also be abstracted into common AST helpers
   bool hasBooleanRepresentation(clang::QualType Ty);
-  void buildCXXThrowExpr(const CXXThrowExpr *E);
+  void emitCXXThrowExpr(const CXXThrowExpr *E);
-  RValue buildAtomicExpr(AtomicExpr *E);
-  void buildAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
-  void buildAtomicStore(RValue rvalue, LValue lvalue, cir::MemOrder MO,
-                        bool IsVolatile, bool isInit);
-  void buildAtomicInit(Expr *init, LValue dest);
+  RValue emitAtomicExpr(AtomicExpr *E);
+  void emitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
+  void emitAtomicStore(RValue rvalue, LValue lvalue, cir::MemOrder MO,
+                       bool IsVolatile, bool isInit);
+  void emitAtomicInit(Expr *init, LValue dest);
   /// Return the address of a local variable.
   Address GetAddrOfLocalVar(const clang::VarDecl *VD) {
@@ -1724,23 +1715,23 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Check if \p E is a C++ "this" pointer wrapped in value-preserving casts.
   static bool isWrappedCXXThis(const clang::Expr *E);
-  void buildDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor,
-                                       clang::CXXCtorType CtorType,
-                                       const FunctionArgList &Args,
-                                       clang::SourceLocation Loc);
+  void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *Ctor,
+                                      clang::CXXCtorType CtorType,
+                                      const FunctionArgList &Args,
+                                      clang::SourceLocation Loc);
   // It's important not to confuse this and the previous function. Delegating
   // constructors are a C++11 feature. The constructor delegate optimization
   // is used to reduce duplication in the base and complete constructors where
   // they are substantially the same.
-  void buildDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
-                                         const FunctionArgList &Args);
+  void emitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+                                        const FunctionArgList &Args);
   /// We are performing a delegate call; that is, the current function is
   /// delegating to another one. Produce an r-value suitable for passing the
   /// given parameter.
-  void buildDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
-                            clang::SourceLocation loc);
+  void emitDelegateCallArg(CallArgList &args, const clang::VarDecl *param,
+                           clang::SourceLocation loc);
   /// Return true if the current function should not be instrumented with
   /// sanitizers.
@@ -1752,18 +1743,18 @@ class CIRGenFunction : public CIRGenTypeCache {
   bool ShouldInstrumentFunction();
   /// TODO(cir): add TBAAAccessInfo
-  Address buildArrayToPointerDecay(const Expr *Array,
-                                   LValueBaseInfo *BaseInfo = nullptr);
+  Address emitArrayToPointerDecay(const Expr *Array,
+                                  LValueBaseInfo *BaseInfo = nullptr);
   /// Emits the code necessary to evaluate an arbitrary expression into the
   /// given memory location.
-  void buildAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
-                         bool IsInitializer);
-  void buildAnyExprToExn(const Expr *E, Address Addr);
+  void emitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals,
+                        bool IsInitializer);
+  void emitAnyExprToExn(const Expr *E, Address Addr);
-  LValue buildCheckedLValue(const Expr *E, TypeCheckKind TCK);
-  LValue buildMemberExpr(const MemberExpr *E);
-  LValue buildCompoundLiteralLValue(const CompoundLiteralExpr *E);
+  LValue emitCheckedLValue(const Expr *E, TypeCheckKind TCK);
+  LValue emitMemberExpr(const MemberExpr *E);
+  LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *E);
   /// Specifies which type of sanitizer check to apply when handling a
   /// particular builtin.
@@ -1774,7 +1765,7 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Emits an argument for a call to a builtin. If the builtin sanitizer is
   /// enabled, a runtime check specified by \p Kind is also emitted.
-  mlir::Value buildCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
+  mlir::Value emitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind);
   /// returns true if aggregate type has a volatile member.
   /// TODO(cir): this could be a common AST helper between LLVM / CIR.
@@ -1787,12 +1778,12 @@ class CIRGenFunction : public CIRGenTypeCache {
   }
   /// Emit an aggregate assignment.
-  void buildAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
+  void emitAggregateAssign(LValue Dest, LValue Src, QualType EltTy) {
     bool IsVolatile = hasVolatileMember(EltTy);
-    buildAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
+    emitAggregateCopy(Dest, Src, EltTy, AggValueSlot::MayOverlap, IsVolatile);
   }
-  LValue buildAggExprToLValue(const Expr *E);
+  LValue emitAggExprToLValue(const Expr *E);
   /// Emit an aggregate copy.
   ///
@@ -1801,13 +1792,13 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// \param MayOverlap Whether the tail padding of the destination might be
   /// occupied by some other object. More efficient code can often be
   /// generated if not.
-  void buildAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
-                          AggValueSlot::Overlap_t MayOverlap,
-                          bool isVolatile = false);
+  void emitAggregateCopy(LValue Dest, LValue Src, QualType EltTy,
+                         AggValueSlot::Overlap_t MayOverlap,
+                         bool isVolatile = false);
   /// Emit a reached-unreachable diagnostic if \p Loc is valid and runtime
   /// checking is enabled. Otherwise, just emit an unreachable instruction.
-  void buildUnreachable(SourceLocation Loc);
+  void emitUnreachable(SourceLocation Loc);
   ///
   /// Cleanups
@@ -1829,9 +1820,9 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// Emits try/catch information for the current EH stack.
   cir::CallOp callWithExceptionCtx = nullptr;
-  mlir::Operation *buildLandingPad(cir::TryOp tryOp);
-  void buildEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock,
-                          mlir::Location loc);
+  mlir::Operation *emitLandingPad(cir::TryOp tryOp);
+  void emitEHResumeBlock(bool isCleanup, mlir::Block *ehResumeBlock,
+                         mlir::Location loc);
   mlir::Block *getEHResumeBlock(bool isCleanup, cir::TryOp tryOp);
   mlir::Block *getEHDispatchBlock(EHScopeStack::stable_iterator scope,
                                   cir::TryOp tryOp);
@@ -1928,8 +1919,8 @@
   };
   template <typename FuncTy>
-  ConditionalInfo buildConditionalBlocks(const AbstractConditionalOperator *E,
-                                         const FuncTy &BranchGenFunc);
+  ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *E,
+                                        const FuncTy &BranchGenFunc);
   // Return true if we're currently emitting one branch or the other of a
   // conditional expression.
@@ -1964,10 +1955,10 @@ class CIRGenFunction : public CIRGenTypeCache {
   void pushDestroyAndDeferDeactivation(CleanupKind cleanupKind, Address addr,
                                        QualType type, Destroyer *destroyer,
                                        bool useEHCleanupForArray);
-  void buildArrayDestroy(mlir::Value begin, mlir::Value end,
-                         QualType elementType, CharUnits elementAlign,
-                         Destroyer *destroyer, bool checkZeroLength,
-                         bool useEHCleanup);
+  void emitArrayDestroy(mlir::Value begin, mlir::Value end,
+                        QualType elementType, CharUnits elementAlign,
+                        Destroyer *destroyer, bool checkZeroLength,
+                        bool useEHCleanup);
   /// The values of function arguments to use when evaluating
   /// CXXInheritedCtorInitExprs within this context.
@@ -2239,8 +2230,8 @@ class CIRGenFunction : public CIRGenTypeCache {
     return b;
   }
-  cir::ReturnOp buildReturn(mlir::Location loc);
-  void buildImplicitReturn();
+  cir::ReturnOp emitReturn(mlir::Location loc);
+  void emitImplicitReturn();
 public:
   llvm::ArrayRef<mlir::Block *> getRetBlocks() { return RetBlocks; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 4c73215432db..5f00189ef90c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -167,8 +167,8 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
     return false;
   }
-  void buildInstanceFunctionProlog(SourceLocation Loc,
-                                   CIRGenFunction &CGF) override;
+  void emitInstanceFunctionProlog(SourceLocation Loc,
+                                  CIRGenFunction &CGF) override;
   void addImplicitStructorParams(CIRGenFunction &CGF, QualType &ResTy,
                                  FunctionArgList &Params) override;
@@ -178,17 +178,17 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
                                         CXXDtorType Type, bool ForVirtualBase,
                                         bool Delegating) override;
-  void buildCXXConstructors(const clang::CXXConstructorDecl *D) override;
-  void buildCXXDestructors(const clang::CXXDestructorDecl *D) override;
-  void buildCXXStructor(clang::GlobalDecl GD) override;
-  void buildDestructorCall(CIRGenFunction &CGF, const CXXDestructorDecl *DD,
-                           CXXDtorType Type, bool ForVirtualBase,
-                           bool Delegating, Address This,
-                           QualType ThisTy) override;
+  void emitCXXConstructors(const clang::CXXConstructorDecl *D) override;
+  void emitCXXDestructors(const clang::CXXDestructorDecl *D) override;
+  void emitCXXStructor(clang::GlobalDecl GD) override;
+  void emitDestructorCall(CIRGenFunction &CGF, const CXXDestructorDecl *DD,
+                          CXXDtorType Type, bool ForVirtualBase,
+                          bool Delegating, Address This,
+                          QualType ThisTy) override;
   void registerGlobalDtor(CIRGenFunction &CGF, const VarDecl *D,
                           cir::FuncOp dtor, mlir::Value Addr) override;
-  virtual void buildRethrow(CIRGenFunction &CGF, bool isNoReturn) override;
-  virtual void buildThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override;
+  virtual void emitRethrow(CIRGenFunction &CGF, bool isNoReturn) override;
+  virtual void emitThrow(CIRGenFunction &CGF, const CXXThrowExpr *E) override;
   CatchTypeInfo getAddrOfCXXCatchHandlerType(mlir::Location loc, QualType Ty,
                                              QualType CatchHandlerType) override {
@@ -304,7 +304,7 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
     return Args.size() - 1;
   }
-  void buildBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override;
+  void emitBadCastCall(CIRGenFunction &CGF, mlir::Location loc) override;
   mlir::Value getVirtualBaseClassOffset(mlir::Location loc,
                                         CIRGenFunction &CGF,
@@ -316,10 +316,10 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
   // expressions are lowered to `cir.dyn_cast` ops instead of calls to runtime
   // functions. So during CIRGen we don't need the `emitDynamicCastCall`
   // function that clang CodeGen has.
-  mlir::Value buildDynamicCast(CIRGenFunction &CGF, mlir::Location Loc,
-                               QualType SrcRecordTy, QualType DestRecordTy,
-                               cir::PointerType DestCIRTy, bool isRefCast,
-                               Address Src) override;
+  mlir::Value emitDynamicCast(CIRGenFunction &CGF, mlir::Location Loc,
+                              QualType SrcRecordTy, QualType DestRecordTy,
+                              cir::PointerType DestCIRTy, bool isRefCast,
+                              Address Src) override;
   cir::MethodAttr buildVirtualMethodAttr(cir::MethodType MethodTy,
                                          const CXXMethodDecl *MD) override;
@@ -499,10 +499,10 @@ static void emitConstructorDestructorAlias(CIRGenModule &CGM,
   assert(Aliasee && "expected cir.func");
   // Populate actual alias.
-  CGM.buildAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage);
+  CGM.emitAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage);
 }
-void CIRGenItaniumCXXABI::buildCXXStructor(GlobalDecl GD) {
+void CIRGenItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
   auto *MD = cast<CXXMethodDecl>(GD.getDecl());
   auto *CD = dyn_cast<CXXConstructorDecl>(MD);
   const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);
@@ -594,8 +594,8 @@ void CIRGenCXXABI::setCXXABIThisValue(CIRGenFunction &CGF,
   CGF.CXXABIThisValue = ThisPtr;
 }
-void CIRGenItaniumCXXABI::buildInstanceFunctionProlog(SourceLocation Loc,
-                                                      CIRGenFunction &CGF) {
+void CIRGenItaniumCXXABI::emitInstanceFunctionProlog(SourceLocation Loc,
+                                                     CIRGenFunction &CGF) {
   // Naked functions have no prolog.
   if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
     llvm_unreachable("NYI");
@@ -624,36 +624,36 @@
   llvm_unreachable("NYI");
 }
-void CIRGenItaniumCXXABI::buildCXXConstructors(const CXXConstructorDecl *D) {
+void CIRGenItaniumCXXABI::emitCXXConstructors(const CXXConstructorDecl *D) {
   // Just make sure we're in sync with TargetCXXABI.
   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
   // The constructor used for constructing this as a base class;
   // ignores virtual bases.
-  CGM.buildGlobal(GlobalDecl(D, Ctor_Base));
+  CGM.emitGlobal(GlobalDecl(D, Ctor_Base));
   // The constructor used for constructing this as a complete class;
   // constructs the virtual bases, then calls the base constructor.
   if (!D->getParent()->isAbstract()) {
     // We don't need to emit the complete ctor if the class is abstract.
-    CGM.buildGlobal(GlobalDecl(D, Ctor_Complete));
+    CGM.emitGlobal(GlobalDecl(D, Ctor_Complete));
   }
 }
-void CIRGenItaniumCXXABI::buildCXXDestructors(const CXXDestructorDecl *D) {
+void CIRGenItaniumCXXABI::emitCXXDestructors(const CXXDestructorDecl *D) {
   // The destructor used for destructing this as a base class; ignores
   // virtual bases.
-  CGM.buildGlobal(GlobalDecl(D, Dtor_Base));
+  CGM.emitGlobal(GlobalDecl(D, Dtor_Base));
   // The destructor used for destructing this as a most-derived class;
   // calls the base destructor and then destructs any virtual bases.
-  CGM.buildGlobal(GlobalDecl(D, Dtor_Complete));
+  CGM.emitGlobal(GlobalDecl(D, Dtor_Complete));
   // The destructor in a virtual table is always a 'deleting'
   // destructor, which calls the complete destructor and then uses the
   // appropriate operator delete.
   if (D->isVirtual())
-    CGM.buildGlobal(GlobalDecl(D, Dtor_Deleting));
+    CGM.emitGlobal(GlobalDecl(D, Dtor_Deleting));
 }
 namespace {
@@ -769,8 +769,8 @@ static void InitCatchParam(CIRGenFunction &CGF, const VarDecl &CatchParam,
     llvm_unreachable("NYI");
     return;
   case cir::TEK_Scalar: {
-    auto exnLoad = CGF.buildLoadOfScalar(srcLV, catchParam.getLoc());
-    CGF.buildStoreOfScalar(exnLoad, destLV, /*init*/ true);
+    auto exnLoad = CGF.emitLoadOfScalar(srcLV, catchParam.getLoc());
+    CGF.emitStoreOfScalar(exnLoad, destLV, /*init*/ true);
     return;
   }
   case cir::TEK_Aggregate:
@@ -839,10 +839,10 @@ void CIRGenItaniumCXXABI::emitBeginCatch(CIRGenFunction &CGF,
   // Emit the local. Make sure the allocas supersede the current scope, since
   // these are going to be consumed by `cir.catch`, which is not within the
   // current scope.
-  auto var = CGF.buildAutoVarAlloca(*CatchParam, getCatchParamAllocaIP());
+  auto var = CGF.emitAutoVarAlloca(*CatchParam, getCatchParamAllocaIP());
   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
   // FIXME(cir): double check cleanups here are happening in the right blocks.
-  CGF.buildAutoVarCleanups(var);
+  CGF.emitAutoVarCleanups(var);
 }
 cir::GlobalOp CIRGenItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
@@ -905,7 +905,7 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer(
   if (CGF.shouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
     llvm_unreachable("NYI");
   } else {
-    CGF.buildTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
+    CGF.emitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
     mlir::Value VFuncLoad;
     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
@@ -2135,7 +2135,7 @@ void CIRGenItaniumCXXABI::emitVTableDefinitions(CIRGenVTables &CGVT,
   // defined in headers but with a strong definition only in a shared
   // library.
   if (!isDeclarationForLinker || CGM.getCodeGenOpts().WholeProgramVTables) {
-    CGM.buildVTableTypeMetadata(RD, VTable, VTLayout);
+    CGM.emitVTableTypeMetadata(RD, VTable, VTLayout);
     // For available_externally definitions, add the vtable to
     // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
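The Dtor_Base/Dtor_Complete/Dtor_Deleting GlobalDecls above correspond one-to-one to the three destructor variants the Itanium C++ ABI defines. A short C++ sketch of what one virtual destructor declaration expands to (mangled names per the Itanium ABI):

struct V { ~V() {} };
struct A : virtual V {
  virtual ~A() {}  // three symbols are emitted for this one definition:
                   //   _ZN1AD2Ev (base, Dtor_Base): destroys members and
                   //     non-virtual bases only, skipping virtual bases
                   //   _ZN1AD1Ev (complete, Dtor_Complete): D2 plus the
                   //     virtual bases (here, V)
                   //   _ZN1AD0Ev (deleting, Dtor_Deleting): D1 followed by
                   //     operator delete; this is the variant that goes in
                   //     the vtable
};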
@@ -2154,7 +2154,7 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables(
     const CXXRecordDecl *RD) {
   CIRGenVTables &VTables = CGM.getVTables();
   auto VTT = VTables.getAddrOfVTT(RD);
-  VTables.buildVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
+  VTables.emitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
 }
 /// What sort of uniqueness rules should we use for the RTTI for the
@@ -2185,7 +2185,7 @@ CIRGenItaniumCXXABI::classifyRTTIUniqueness(
   return RUK_NonUniqueVisible;
 }
-void CIRGenItaniumCXXABI::buildDestructorCall(
+void CIRGenItaniumCXXABI::emitDestructorCall(
     CIRGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
     bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy) {
   GlobalDecl GD(DD, Type);
@@ -2199,8 +2199,8 @@
   else
     Callee = CIRGenCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
-  CGF.buildCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
-                             nullptr);
+  CGF.emitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
+                            nullptr);
 }
 void CIRGenItaniumCXXABI::registerGlobalDtor(CIRGenFunction &CGF,
@@ -2227,13 +2227,13 @@ mlir::Value CIRGenItaniumCXXABI::getCXXDestructorImplicitParam(
   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
 }
-void CIRGenItaniumCXXABI::buildRethrow(CIRGenFunction &CGF, bool isNoReturn) {
+void CIRGenItaniumCXXABI::emitRethrow(CIRGenFunction &CGF, bool isNoReturn) {
   // void __cxa_rethrow();
   llvm_unreachable("NYI");
 }
-void CIRGenItaniumCXXABI::buildThrow(CIRGenFunction &CGF,
-                                     const CXXThrowExpr *E) {
+void CIRGenItaniumCXXABI::emitThrow(CIRGenFunction &CGF,
+                                    const CXXThrowExpr *E) {
   // This differs a bit from LLVM codegen: CIR has native operations for some
   // cxa functions, defers allocation size computation, always passes the dtor
   // symbol, etc. CIRGen also does not use getAllocateExceptionFn / getThrowFn.
@@ -2254,7 +2254,7 @@
   // Build expression and store its result into exceptionPtr.
   CharUnits exnAlign = CGF.getContext().getExnObjectAlignment();
-  CGF.buildAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign));
+  CGF.emitAnyExprToExn(E->getSubExpr(), Address(exceptionPtr, exnAlign));
   // Get the RTTI symbol address.
   auto typeInfo = mlir::dyn_cast_if_present<cir::GlobalViewAttr>(
@@ -2333,18 +2333,18 @@ static cir::FuncOp getBadCastFn(CIRGenFunction &CGF) {
   return CGF.CGM.createRuntimeFunction(FTy, "__cxa_bad_cast");
 }
-static void buildCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) {
+static void emitCallToBadCast(CIRGenFunction &CGF, mlir::Location loc) {
   // TODO(cir): set the calling convention to the runtime function.
   assert(!cir::MissingFeatures::setCallingConv());
-  CGF.buildRuntimeCall(loc, getBadCastFn(CGF));
+  CGF.emitRuntimeCall(loc, getBadCastFn(CGF));
   CGF.getBuilder().create<cir::UnreachableOp>(loc);
   CGF.getBuilder().clearInsertionPoint();
 }
-void CIRGenItaniumCXXABI::buildBadCastCall(CIRGenFunction &CGF,
-                                           mlir::Location loc) {
-  buildCallToBadCast(CGF, loc);
+void CIRGenItaniumCXXABI::emitBadCastCall(CIRGenFunction &CGF,
+                                          mlir::Location loc) {
+  emitCallToBadCast(CGF, loc);
 }
 static CharUnits computeOffsetHint(ASTContext &Context,
@@ -2418,8 +2418,8 @@ static cir::FuncOp getItaniumDynamicCastFn(CIRGenFunction &CGF) {
   return CGF.CGM.createRuntimeFunction(FTy, "__dynamic_cast");
 }
-static Address buildDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc,
-                                      QualType SrcRecordTy, Address Src) {
+static Address emitDynamicCastToVoid(CIRGenFunction &CGF, mlir::Location Loc,
+                                     QualType SrcRecordTy, Address Src) {
   auto vtableUsesRelativeLayout =
       CGF.CGM.getItaniumVTableContext().isRelativeLayout();
   auto ptr = CGF.getBuilder().createDynCastToVoid(Loc, Src.getPointer(),
@@ -2427,11 +2427,12 @@
   return Address{ptr, Src.getAlignment()};
 }
-static mlir::Value
-buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF,
-                      mlir::Location Loc, QualType SrcRecordTy,
-                      QualType DestRecordTy, cir::PointerType DestCIRTy,
-                      bool IsRefCast, Address Src) {
+static mlir::Value emitExactDynamicCast(CIRGenItaniumCXXABI &ABI,
+                                        CIRGenFunction &CGF, mlir::Location Loc,
+                                        QualType SrcRecordTy,
+                                        QualType DestRecordTy,
+                                        cir::PointerType DestCIRTy,
+                                        bool IsRefCast, Address Src) {
   // Find all the inheritance paths from SrcRecordTy to DestRecordTy.
   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
@@ -2472,7 +2473,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF,
       // object and see if it's a DestDecl. Note that the most-derived object
       // must be at least as aligned as this base class subobject, and must
       // have a vptr at offset 0.
-      Src = buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src);
+      Src = emitDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src);
       SrcDecl = DestDecl;
       Offset = CharUnits::Zero();
       break;
@@ -2484,7 +2485,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF,
   mlir::Value NullPtrValue = CGF.getBuilder().getNullPtr(DestCIRTy, Loc);
   if (IsRefCast) {
     auto *CurrentRegion = CGF.getBuilder().getBlock()->getParent();
-    buildCallToBadCast(CGF, Loc);
+    emitCallToBadCast(CGF, Loc);
     // The call to bad_cast will terminate the block. Create a new block to
     // hold any follow-up code.
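emitExactDynamicCast implements the "exact" fast path: when the destination class cannot be further derived from, a successful cast implies the dynamic type is exactly the destination, so the runtime __dynamic_cast call can collapse into a vptr comparison. A source-level illustration (hypothetical example, not from this patch):

struct Base { virtual ~Base() = default; };
struct Derived final : Base { int payload = 7; };

int probe(Base *b) {
  // Because Derived is final, this cast can only succeed when b's dynamic
  // type is exactly Derived, so optimized builds may lower it to comparing
  // b's vptr against Derived's expected vtable address instead of calling
  // __dynamic_cast.
  if (auto *d = dynamic_cast<Derived *>(b))
    return d->payload;
  return -1;
}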
@@ -2517,7 +2518,7 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF,
   mlir::Value Success = CGF.getBuilder().createCompare(Loc, cir::CmpOpKind::eq,
                                                        SrcVPtr, ExpectedVPtr);
-  auto buildCastResult = [&] {
+  auto emitCastResult = [&] {
     if (Offset->isZero())
       return CGF.getBuilder().createBitcast(Src.getPointer(), DestCIRTy);
@@ -2539,16 +2540,16 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF,
     mlir::Value Failed = CGF.getBuilder().createNot(Success);
     CGF.getBuilder().create<cir::IfOp>(Loc, Failed, /*withElseRegion=*/false,
                                        [&](mlir::OpBuilder &, mlir::Location) {
-                                         buildCallToBadCast(CGF, Loc);
+                                         emitCallToBadCast(CGF, Loc);
                                        });
-    return buildCastResult();
+    return emitCastResult();
   }
   return CGF.getBuilder()
       .create<cir::TernaryOp>(
           Loc, Success,
           [&](mlir::OpBuilder &, mlir::Location) {
-            auto Result = buildCastResult();
+            auto Result = emitCastResult();
             CGF.getBuilder().createYield(Loc, Result);
           },
           [&](mlir::OpBuilder &, mlir::Location) {
@@ -2559,10 +2560,10 @@ buildExactDynamicCast(CIRGenItaniumCXXABI &ABI, CIRGenFunction &CGF,
       .getResult();
 }
-static cir::DynamicCastInfoAttr buildDynamicCastInfo(CIRGenFunction &CGF,
-                                                     mlir::Location Loc,
-                                                     QualType SrcRecordTy,
-                                                     QualType DestRecordTy) {
+static cir::DynamicCastInfoAttr emitDynamicCastInfo(CIRGenFunction &CGF,
+                                                    mlir::Location Loc,
+                                                    QualType SrcRecordTy,
+                                                    QualType DestRecordTy) {
   auto srcRtti = mlir::cast<cir::GlobalViewAttr>(
       CGF.CGM.getAddrOfRTTIDescriptor(Loc, SrcRecordTy));
   auto destRtti = mlir::cast<cir::GlobalViewAttr>(
       CGF.CGM.getAddrOfRTTIDescriptor(Loc, DestRecordTy));
@@ -2584,26 +2585,26 @@
                                        badCastFuncRef, offsetHintAttr);
 }
-mlir::Value CIRGenItaniumCXXABI::buildDynamicCast(CIRGenFunction &CGF,
-                                                  mlir::Location Loc,
-                                                  QualType SrcRecordTy,
-                                                  QualType DestRecordTy,
-                                                  cir::PointerType DestCIRTy,
-                                                  bool isRefCast, Address Src) {
+mlir::Value CIRGenItaniumCXXABI::emitDynamicCast(CIRGenFunction &CGF,
+                                                 mlir::Location Loc,
+                                                 QualType SrcRecordTy,
+                                                 QualType DestRecordTy,
+                                                 cir::PointerType DestCIRTy,
+                                                 bool isRefCast, Address Src) {
   bool isCastToVoid = DestRecordTy.isNull();
   assert((!isCastToVoid || !isRefCast) && "cannot cast to void reference");
   if (isCastToVoid)
-    return buildDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src).getPointer();
+    return emitDynamicCastToVoid(CGF, Loc, SrcRecordTy, Src).getPointer();
   // If the destination is effectively final, the cast succeeds if and only
   // if the dynamic type of the pointer is exactly the destination type.
   if (DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
       CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
-    return buildExactDynamicCast(*this, CGF, Loc, SrcRecordTy, DestRecordTy,
-                                 DestCIRTy, isRefCast, Src);
+    return emitExactDynamicCast(*this, CGF, Loc, SrcRecordTy, DestRecordTy,
+                                DestCIRTy, isRefCast, Src);
-  auto castInfo = buildDynamicCastInfo(CGF, Loc, SrcRecordTy, DestRecordTy);
+  auto castInfo = emitDynamicCastInfo(CGF, Loc, SrcRecordTy, DestRecordTy);
   return CGF.getBuilder().createDynCast(Loc, Src.getPointer(), DestCIRTy,
                                         isRefCast, castInfo);
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index c8fecd3f20ee..a1b1e9293c48 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -474,7 +474,7 @@ void CIRGenModule::setDSOLocal(CIRGlobalValueInterface GV) const {
   GV.setDSOLocal(shouldAssumeDSOLocal(*this, GV));
 }
-void CIRGenModule::buildGlobal(GlobalDecl GD) {
+void CIRGenModule::emitGlobal(GlobalDecl GD) {
   llvm::TimeTraceScope scope("build CIR Global", [&]() -> std::string {
     auto *ND = dyn_cast<NamedDecl>(GD.getDecl());
     if (!ND)
@@ -560,7 +560,7 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) {
   // to benefit from cache locality.
   if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
     // Emit the definition if it can't be deferred.
-    buildGlobalDefinition(GD);
+    emitGlobalDefinition(GD);
     return;
   }
@@ -587,8 +587,8 @@ void CIRGenModule::buildGlobal(GlobalDecl GD) {
   }
 }
-void CIRGenModule::buildGlobalFunctionDefinition(GlobalDecl GD,
-                                                 mlir::Operation *Op) {
+void CIRGenModule::emitGlobalFunctionDefinition(GlobalDecl GD,
+                                                mlir::Operation *Op) {
   auto const *D = cast<FunctionDecl>(GD.getDecl());
   // Compute the function info and CIR type.
@@ -1113,8 +1113,8 @@ void CIRGenModule::maybeHandleStaticInExternC(const SomeDecl *D,
   assert(0 && "not implemented");
 }
-void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D,
-                                            bool IsTentative) {
+void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *D,
+                                           bool IsTentative) {
   // TODO(cir):
   // OpenCL global variables of sampler type are translated to function calls,
   // therefore no need to be translated.
@@ -1356,7 +1356,7 @@ void CIRGenModule::buildGlobalVarDefinition(const clang::VarDecl *D,
   // Emit the initializer function if necessary.
   if (NeedsGlobalCtor || NeedsGlobalDtor) {
     globalOpContext = GV;
-    buildCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
+    emitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
     globalOpContext = nullptr;
   }
@@ -1366,7 +1366,7 @@
   assert(!cir::MissingFeatures::generateDebugInfo());
 }
-void CIRGenModule::buildGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) {
+void CIRGenModule::emitGlobalDefinition(GlobalDecl GD, mlir::Operation *Op) {
   const auto *D = cast<ValueDecl>(GD.getDecl());
   if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // At -O0, don't generate CIR for functions with available_externally
@@ -1378,29 +1378,29 @@
     // Make sure to emit the definition(s) before we emit the thunks. This is
     // necessary for the generation of certain thunks.
     if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
-      ABI->buildCXXStructor(GD);
+      ABI->emitCXXStructor(GD);
     else if (FD->isMultiVersion())
       llvm_unreachable("NYI");
     else
-      buildGlobalFunctionDefinition(GD, Op);
+      emitGlobalFunctionDefinition(GD, Op);
     if (Method->isVirtual())
-      getVTables().buildThunks(GD);
+      getVTables().emitThunks(GD);
     return;
   }
   if (FD->isMultiVersion())
     llvm_unreachable("NYI");
-  buildGlobalFunctionDefinition(GD, Op);
+  emitGlobalFunctionDefinition(GD, Op);
   return;
 }
   if (const auto *VD = dyn_cast<VarDecl>(D)) {
-    return buildGlobalVarDefinition(VD, !VD->hasDefinition());
+    return emitGlobalVarDefinition(VD, !VD->hasDefinition());
   }
-  llvm_unreachable("Invalid argument to buildGlobalDefinition()");
+  llvm_unreachable("Invalid argument to emitGlobalDefinition()");
 }
 mlir::Attribute
@@ -1564,7 +1564,7 @@ CIRGenModule::getAddrOfConstantStringFromLiteral(const StringLiteral *S,
   return builder.getGlobalViewAttr(PtrTy, GV);
 }
-void CIRGenModule::buildDeclContext(const DeclContext *DC) {
+void CIRGenModule::emitDeclContext(const DeclContext *DC) {
   for (auto *I : DC->decls()) {
     // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
     // are themselves considered "top-level", so EmitTopLevelDecl on an
@@ -1574,17 +1574,17 @@
     if (auto *OID = dyn_cast<ObjCImplDecl>(I))
       llvm_unreachable("NYI");
-    buildTopLevelDecl(I);
+    emitTopLevelDecl(I);
   }
 }
-void CIRGenModule::buildLinkageSpec(const LinkageSpecDecl *LSD) {
+void CIRGenModule::emitLinkageSpec(const LinkageSpecDecl *LSD) {
   if (LSD->getLanguage() != LinkageSpecLanguageIDs::C &&
       LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) {
     llvm_unreachable("unsupported linkage spec");
     return;
   }
-  buildDeclContext(LSD);
+  emitDeclContext(LSD);
 }
 mlir::Operation *
@@ -1704,7 +1704,7 @@ CIRGenModule::getAddrOfGlobalTemporary(const MaterializeTemporaryExpr *expr,
 }
 // Emit code for a single top level declaration.
-void CIRGenModule::buildTopLevelDecl(Decl *decl) {
+void CIRGenModule::emitTopLevelDecl(Decl *decl) {
   // Ignore dependent declarations
   if (decl->isTemplated())
     return;
@@ -1716,7 +1716,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) {
   switch (decl->getKind()) {
   default:
-    llvm::errs() << "buildTopLevelDecl codegen for decl kind '"
+    llvm::errs() << "emitTopLevelDecl codegen for decl kind '"
                  << decl->getDeclKindName() << "' not implemented\n";
     assert(false && "Not yet implemented");
@@ -1727,13 +1727,13 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) {
     for (DeclContext::decl_iterator D = TU->decls_begin(),
                                     DEnd = TU->decls_end();
          D != DEnd; ++D)
-      buildTopLevelDecl(*D);
+      emitTopLevelDecl(*D);
     return;
   }
   case Decl::Var:
   case Decl::Decomposition:
   case Decl::VarTemplateSpecialization:
-    buildGlobal(cast<VarDecl>(decl));
+    emitGlobal(cast<VarDecl>(decl));
     assert(!isa<DecompositionDecl>(decl) && "not implemented");
     // if (auto *DD = dyn_cast<DecompositionDecl>(decl))
     //   for (auto *B : DD->bindings())
@@ -1744,12 +1744,12 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) {
   case Decl::CXXConversion:
   case Decl::CXXMethod:
   case Decl::Function:
-    buildGlobal(cast<FunctionDecl>(decl));
+    emitGlobal(cast<FunctionDecl>(decl));
     assert(!codeGenOpts.CoverageMapping && "Coverage Mapping NYI");
     break;
   // C++ Decls
   case Decl::Namespace:
-    buildDeclContext(cast<NamespaceDecl>(decl));
+    emitDeclContext(cast<NamespaceDecl>(decl));
     break;
   case Decl::ClassTemplateSpecialization: {
     // const auto *Spec = cast<ClassTemplateSpecializationDecl>(decl);
@@ -1761,7 +1761,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) {
     // TODO: Handle debug info as CodeGenModule.cpp does
     for (auto *childDecl : crd->decls())
       if (isa<VarDecl>(childDecl) || isa<CXXRecordDecl>(childDecl))
-        buildTopLevelDecl(childDecl);
+        emitTopLevelDecl(childDecl);
     break;
   }
   // No code generation needed.
@@ -1783,10 +1783,10 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) {
     assert(!cir::MissingFeatures::generateDebugInfo() && "NYI");
     break;
   case Decl::CXXConstructor:
-    getCXXABI().buildCXXConstructors(cast<CXXConstructorDecl>(decl));
+    getCXXABI().emitCXXConstructors(cast<CXXConstructorDecl>(decl));
     break;
   case Decl::CXXDestructor:
-    getCXXABI().buildCXXDestructors(cast<CXXDestructorDecl>(decl));
+    getCXXABI().emitCXXDestructors(cast<CXXDestructorDecl>(decl));
     break;
   case Decl::StaticAssert:
@@ -1794,7 +1794,7 @@ void CIRGenModule::buildTopLevelDecl(Decl *decl) {
     break;
   case Decl::LinkageSpec:
-    buildLinkageSpec(cast<LinkageSpecDecl>(decl));
+    emitLinkageSpec(cast<LinkageSpecDecl>(decl));
     break;
   case Decl::Typedef:
@@ -2119,10 +2119,10 @@ cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl GD) {
   return getCIRLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
 }
-void CIRGenModule::buildAliasForGlobal(StringRef mangledName,
-                                       mlir::Operation *op, GlobalDecl aliasGD,
-                                       cir::FuncOp aliasee,
-                                       cir::GlobalLinkageKind linkage) {
+void CIRGenModule::emitAliasForGlobal(StringRef mangledName,
+                                      mlir::Operation *op, GlobalDecl aliasGD,
+                                      cir::FuncOp aliasee,
+                                      cir::GlobalLinkageKind linkage) {
   auto *aliasFD = dyn_cast<FunctionDecl>(aliasGD.getDecl());
   assert(aliasFD && "expected FunctionDecl");
@@ -2303,7 +2303,7 @@ StringRef CIRGenModule::getMangledName(GlobalDecl GD) {
   return MangledDeclNames[CanonicalGD] = Result.first->first();
 }
-void CIRGenModule::buildTentativeDefinition(const VarDecl *D) {
+void CIRGenModule::emitTentativeDefinition(const VarDecl *D) {
   assert(!D->getInit() && "Cannot emit definite definitions here!");
   StringRef MangledName = getMangledName(D);
@@ -2331,7 +2331,7 @@
   }
   // The tentative definition is the only definition.
- buildGlobalVarDefinition(D); + emitGlobalVarDefinition(D); } void CIRGenModule::setGlobalVisibility(mlir::Operation *GV, @@ -2817,7 +2817,7 @@ mlir::Location CIRGenModule::getLoc(mlir::Location lhs, mlir::Location rhs) { return mlir::FusedLoc::get(locs, metadata, &getMLIRContext()); } -void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { +void CIRGenModule::emitGlobalDecl(clang::GlobalDecl &D) { // We should call GetAddrOfGlobal with IsForDefinition set to true in order // to get a Value with exactly the type we need, not something that might // have been created for another decl with the same mangled name but @@ -2865,10 +2865,10 @@ void CIRGenModule::buildGlobalDecl(clang::GlobalDecl &D) { return; // Otherwise, emit the definition and move on to the next one. - buildGlobalDefinition(D, Op); + emitGlobalDefinition(D, Op); } -void CIRGenModule::buildDeferred(unsigned recursionLimit) { +void CIRGenModule::emitDeferred(unsigned recursionLimit) { // Emit deferred declare target declarations if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd) getOpenMPRuntime().emitDeferredTargetDecls(); @@ -2878,7 +2878,7 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { // static function, iterate until no changes are made. if (!DeferredVTables.empty()) { - buildDeferredVTables(); + emitDeferredVTables(); // Emitting a vtable doesn't directly cause more vtables to // become deferred, although it can cause functions to be @@ -2897,7 +2897,7 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { if (DeferredDeclsToEmit.empty()) return; - // Grab the list of decls to emit. If buildGlobalDefinition schedules more + // Grab the list of decls to emit. If emitGlobalDefinition schedules more // work, it will not interfere with this. std::vector CurDeclsToEmit; CurDeclsToEmit.swap(DeferredDeclsToEmit); @@ -2913,23 +2913,23 @@ void CIRGenModule::buildDeferred(unsigned recursionLimit) { continue; } - buildGlobalDecl(D); + emitGlobalDecl(D); // If we found out that we need to emit more decls, do that recursively. // This has the advantage that the decls are emitted in a DFS and related // ones are close together, which is convenient for testing. if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) { - buildDeferred(recursionLimit); + emitDeferred(recursionLimit); assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty()); } } } -void CIRGenModule::buildDefaultMethods() { +void CIRGenModule::emitDefaultMethods() { // Differently from DeferredDeclsToEmit, there's no recurrent use of // DefaultMethodsToEmit, so use it directly for emission. 
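// Editorial example (not part of the actual patch): DefaultMethodsToEmit
// typically holds implicitly-defined special members whose definitions were
// required, e.g. the implicit copy constructor in
//
//   struct Pair { int a, b; };
//   Pair dup(Pair p) { return p; }  // odr-uses Pair's implicit copy ctor
//
// Per the comment above, emitting one of these does not schedule further
// entries, so a single pass over the list suffices.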
for (auto &D : DefaultMethodsToEmit) - buildGlobalDecl(D); + emitGlobalDecl(D); } mlir::IntegerAttr CIRGenModule::getSize(CharUnits size) { @@ -2964,32 +2964,32 @@ CIRGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) { } void CIRGenModule::Release() { - buildDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold); - // TODO: buildVTablesOpportunistically(); + emitDeferred(getCodeGenOpts().ClangIRBuildDeferredThreshold); + // TODO: emitVTablesOpportunistically(); // TODO: applyGlobalValReplacements(); applyReplacements(); // TODO: checkAliases(); - // TODO: buildMultiVersionFunctions(); - buildCXXGlobalInitFunc(); - // TODO: buildCXXGlobalCleanUpFunc(); + // TODO: emitMultiVersionFunctions(); + emitCXXGlobalInitFunc(); + // TODO: emitCXXGlobalCleanUpFunc(); // TODO: registerGlobalDtorsWithAtExit(); - // TODO: buildCXXThreadLocalInitFunc(); + // TODO: emitCXXThreadLocalInitFunc(); // TODO: ObjCRuntime if (astCtx.getLangOpts().CUDA) { llvm_unreachable("NYI"); } // TODO: OpenMPRuntime // TODO: PGOReader - // TODO: buildCtorList(GlobalCtors); + // TODO: emitCtorList(GlobalCtors); // TODO: builtCtorList(GlobalDtors); - buildGlobalAnnotations(); - // TODO: buildDeferredUnusedCoverageMappings(); + emitGlobalAnnotations(); + // TODO: emitDeferredUnusedCoverageMappings(); // TODO: CIRGenPGO // TODO: CoverageMapping if (getCodeGenOpts().SanitizeCfiCrossDso) { llvm_unreachable("NYI"); } - // TODO: buildAtAvailableLinkGuard(); + // TODO: emitAtAvailableLinkGuard(); if (astCtx.getTargetInfo().getTriple().isWasm() && !astCtx.getTargetInfo().getTriple().isOSEmscripten()) { llvm_unreachable("NYI"); @@ -3001,18 +3001,18 @@ void CIRGenModule::Release() { llvm_unreachable("NYI"); } - // TODO: buildLLVMUsed(); + // TODO: emitLLVMUsed(); // TODO: SanStats if (getCodeGenOpts().Autolink) { - // TODO: buildModuleLinkOptions + // TODO: emitModuleLinkOptions } // Emit OpenCL specific module metadata: OpenCL/SPIR version. if (langOpts.CUDAIsDevice && getTriple().isSPIRV()) llvm_unreachable("CUDA SPIR-V NYI"); if (langOpts.OpenCL) { - buildOpenCLMetadata(); + emitOpenCLMetadata(); // Emit SPIR version. if (getTriple().isSPIR()) llvm_unreachable("SPIR target NYI"); @@ -3205,8 +3205,8 @@ void CIRGenModule::applyReplacements() { } } -void CIRGenModule::buildExplicitCastExprType(const ExplicitCastExpr *E, - CIRGenFunction *CGF) { +void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF) { // Bind VLAs in the cast type. 
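// Editorial example (not part of the actual patch) of the variably-modified
// case guarded below, which is still NYI in CIR:
//
//   void f(int n, void *p) {
//     int (*arr)[n] = (int (*)[n])p;  // cast type int(*)[n] binds the VLA size n
//   }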
if (CGF && E->getType()->isVariablyModifiedType()) llvm_unreachable("NYI"); @@ -3226,7 +3226,7 @@ void CIRGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) { llvm_unreachable("NYI"); } - buildTopLevelDecl(VD); + emitTopLevelDecl(VD); } cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( @@ -3283,9 +3283,9 @@ bool CIRGenModule::shouldOpportunisticallyEmitVTables() { return codeGenOpts.OptimizationLevel > 0; } -void CIRGenModule::buildVTableTypeMetadata(const CXXRecordDecl *RD, - cir::GlobalOp VTable, - const VTableLayout &VTLayout) { +void CIRGenModule::emitVTableTypeMetadata(const CXXRecordDecl *RD, + cir::GlobalOp VTable, + const VTableLayout &VTLayout) { if (!getCodeGenOpts().LTOUnit) return; llvm_unreachable("NYI"); @@ -3440,7 +3440,7 @@ LangAS CIRGenModule::getGlobalVarAddressSpace(const VarDecl *D) { return getTargetCIRGenInfo().getGlobalVarAddressSpace(*this, D); } -mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { +mlir::ArrayAttr CIRGenModule::emitAnnotationArgs(const AnnotateAttr *attr) { ArrayRef exprs = {attr->args_begin(), attr->args_size()}; if (exprs.empty()) { return mlir::ArrayAttr::get(&getMLIRContext(), {}); @@ -3482,9 +3482,9 @@ mlir::ArrayAttr CIRGenModule::buildAnnotationArgs(const AnnotateAttr *attr) { } cir::AnnotationAttr -CIRGenModule::buildAnnotateAttr(const clang::AnnotateAttr *aa) { +CIRGenModule::emitAnnotateAttr(const clang::AnnotateAttr *aa) { mlir::StringAttr annoGV = builder.getStringAttr(aa->getAnnotation()); - mlir::ArrayAttr args = buildAnnotationArgs(aa); + mlir::ArrayAttr args = emitAnnotationArgs(aa); return cir::AnnotationAttr::get(&getMLIRContext(), annoGV, args); } @@ -3495,14 +3495,14 @@ void CIRGenModule::addGlobalAnnotations(const ValueDecl *d, "annotation only on globals"); llvm::SmallVector annotations; for (auto *i : d->specific_attrs()) - annotations.push_back(buildAnnotateAttr(i)); + annotations.push_back(emitAnnotateAttr(i)); if (auto global = dyn_cast(gv)) global.setAnnotationsAttr(builder.getArrayAttr(annotations)); else if (auto func = dyn_cast(gv)) func.setAnnotationsAttr(builder.getArrayAttr(annotations)); } -void CIRGenModule::buildGlobalAnnotations() { +void CIRGenModule::emitGlobalAnnotations() { for (const auto &[mangledName, vd] : deferredAnnotations) { mlir::Operation *gv = getGlobalValue(mangledName); if (gv) diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 77abb80bbc77..961a999990b6 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -112,7 +112,7 @@ class CIRGenModule : public CIRGenTypeCache { /// Holds the OpenMP runtime std::unique_ptr openMPRuntime; - /// Per-function codegen information. Updated everytime buildCIR is called + /// Per-function codegen information. Updated every time emitCIR is called /// for FunctionDecls. CIRGenFunction *CurCGF = nullptr; @@ -182,7 +182,7 @@ class CIRGenModule : public CIRGenTypeCache { std::vector CXXGlobalInits; /// Emit the function that initializes C++ globals. - void buildCXXGlobalInitFunc(); + void emitCXXGlobalInitFunc(); /// Track whether the CIRGenModule is currently building an initializer /// for a global (e.g. as opposed to a regular cir.func). @@ -340,10 +340,10 @@ class CIRGenModule : public CIRGenTypeCache { cir::GlobalLinkageKind Linkage, clang::CharUnits Alignment); /// Emit any vtables which we deferred and still have a use for.
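/// Editorial example (not part of the actual patch): a vtable typically ends
/// up in DeferredVTables when a use is emitted for a class whose vtable
/// emission is optional in this TU, e.g. a class with only inline virtuals:
///
///   struct B { virtual void f() {} };
///   B *make() { return new B(); }  // references B's vtable
///
/// At the end of the TU this hook decides whether the (weak) vtable must be
/// emitted here or can be left to another TU.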
- void buildDeferredVTables(); + void emitDeferredVTables(); bool shouldOpportunisticallyEmitVTables(); - void buildVTable(CXXRecordDecl *rd); + void emitVTable(CXXRecordDecl *rd); void setDSOLocal(cir::CIRGlobalValueInterface GV) const; @@ -352,8 +352,8 @@ class CIRGenModule : public CIRGenTypeCache { cir::GlobalLinkageKind getVTableLinkage(const CXXRecordDecl *RD); /// Emit type metadata for the given vtable using the given layout. - void buildVTableTypeMetadata(const CXXRecordDecl *RD, cir::GlobalOp VTable, - const VTableLayout &VTLayout); + void emitVTableTypeMetadata(const CXXRecordDecl *RD, cir::GlobalOp VTable, + const VTableLayout &VTLayout); /// Get the address of the RTTI descriptor for the given type. mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType Ty, @@ -536,18 +536,18 @@ class CIRGenModule : public CIRGenTypeCache { cir::FuncType FnType = nullptr, bool Dontdefer = false, ForDefinition_t IsForDefinition = NotForDefinition); - void buildTopLevelDecl(clang::Decl *decl); - void buildLinkageSpec(const LinkageSpecDecl *D); + void emitTopLevelDecl(clang::Decl *decl); + void emitLinkageSpec(const LinkageSpecDecl *D); /// Emit code for a single global function or var decl. Forward declarations /// are emitted lazily. - void buildGlobal(clang::GlobalDecl D); + void emitGlobal(clang::GlobalDecl D); bool tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D); - void buildAliasForGlobal(llvm::StringRef mangledName, mlir::Operation *op, - GlobalDecl aliasGD, cir::FuncOp aliasee, - cir::GlobalLinkageKind linkage); + void emitAliasForGlobal(llvm::StringRef mangledName, mlir::Operation *op, + GlobalDecl aliasGD, cir::FuncOp aliasee, + cir::GlobalLinkageKind linkage); mlir::Type getCIRType(const clang::QualType &type); @@ -614,22 +614,22 @@ class CIRGenModule : public CIRGenTypeCache { } // C++ related functions. - void buildDeclContext(const DeclContext *DC); + void emitDeclContext(const DeclContext *DC); /// Return the result of value-initializing the given type, i.e. a null /// expression of the given type. This is usually, but not always, an LLVM /// null constant. - mlir::Value buildNullConstant(QualType T, mlir::Location loc); + mlir::Value emitNullConstant(QualType T, mlir::Location loc); /// Return a null constant appropriate for zero-initializing a base class with /// the given type. This is usually, but not always, an LLVM null constant. - mlir::TypedAttr buildNullConstantForBase(const CXXRecordDecl *Record); + mlir::TypedAttr emitNullConstantForBase(const CXXRecordDecl *Record); - mlir::Value buildMemberPointerConstant(const UnaryOperator *E); + mlir::Value emitMemberPointerConstant(const UnaryOperator *E); llvm::StringRef getMangledName(clang::GlobalDecl GD); - void buildTentativeDefinition(const VarDecl *D); + void emitTentativeDefinition(const VarDecl *D); // Make sure that this type is translated. 
void UpdateCompletedType(const clang::TagDecl *TD); @@ -647,18 +647,17 @@ class CIRGenModule : public CIRGenTypeCache { void setCIRFunctionAttributesForDefinition(const Decl *decl, cir::FuncOp func); - void buildGlobalDefinition(clang::GlobalDecl D, - mlir::Operation *Op = nullptr); - void buildGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); - void buildGlobalVarDefinition(const clang::VarDecl *D, - bool IsTentative = false); + void emitGlobalDefinition(clang::GlobalDecl D, mlir::Operation *Op = nullptr); + void emitGlobalFunctionDefinition(clang::GlobalDecl D, mlir::Operation *Op); + void emitGlobalVarDefinition(const clang::VarDecl *D, + bool IsTentative = false); /// Emit the function that initializes the specified global - void buildCXXGlobalVarDeclInit(const VarDecl *varDecl, cir::GlobalOp addr, - bool performInit); + void emitCXXGlobalVarDeclInit(const VarDecl *varDecl, cir::GlobalOp addr, + bool performInit); - void buildCXXGlobalVarDeclInitFunc(const VarDecl *D, cir::GlobalOp Addr, - bool PerformInit); + void emitCXXGlobalVarDeclInitFunc(const VarDecl *D, cir::GlobalOp Addr, + bool PerformInit); void addDeferredVTable(const CXXRecordDecl *RD) { DeferredVTables.push_back(RD); @@ -671,13 +670,13 @@ class CIRGenModule : public CIRGenTypeCache { std::nullptr_t getModuleDebugInfo() { return nullptr; } /// Emit any needed decls for which code generation was deferred. - void buildDeferred(unsigned recursionLimit); + void emitDeferred(unsigned recursionLimit); - /// Helper for `buildDeferred` to apply actual codegen. - void buildGlobalDecl(clang::GlobalDecl &D); + /// Helper for `emitDeferred` to apply actual codegen. + void emitGlobalDecl(clang::GlobalDecl &D); /// Build default methods not emitted before this point. - void buildDefaultMethods(); + void emitDefaultMethods(); const llvm::Triple &getTriple() const { return target.getTriple(); } @@ -759,8 +758,8 @@ class CIRGenModule : public CIRGenTypeCache { /// Emit type info if type of an expression is a variably modified /// type. Also emit proper debug info for cast types. - void buildExplicitCastExprType(const ExplicitCastExpr *E, - CIRGenFunction *CGF = nullptr); + void emitExplicitCastExprType(const ExplicitCastExpr *E, + CIRGenFunction *CGF = nullptr); static constexpr const char *builtinCoroId = "__builtin_coro_id"; static constexpr const char *builtinCoroAlloc = "__builtin_coro_alloc"; @@ -810,13 +809,13 @@ class CIRGenModule : public CIRGenTypeCache { CIRGenFunction *CGF = nullptr); /// Emits OpenCL specific Metadata e.g. OpenCL version. - void buildOpenCLMetadata(); + void emitOpenCLMetadata(); /// Create cir::AnnotationAttr which contains the annotation /// information for a given GlobalValue. Notice that a GlobalValue could /// have multiple annotations, and this function creates attribute for /// one of them. - cir::AnnotationAttr buildAnnotateAttr(const clang::AnnotateAttr *aa); + cir::AnnotationAttr emitAnnotateAttr(const clang::AnnotateAttr *aa); private: // An ordered map of canonical GlobalDecls to their mangled names. @@ -836,10 +835,10 @@ class CIRGenModule : public CIRGenTypeCache { /// Emit all the global annotations. /// This actually only emits annotations for deferred declarations of /// functions, because global variables need no deferred emission. - void buildGlobalAnnotations(); + void emitGlobalAnnotations(); /// Emit additional args of the annotation.
- mlir::ArrayAttr buildAnnotationArgs(const clang::AnnotateAttr *attr); + mlir::ArrayAttr emitAnnotationArgs(const clang::AnnotateAttr *attr); /// Add global annotations for a global value. /// Those annotations are emitted during lowering to the LLVM code. diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp index 6247cf6b5c2a..d11126940935 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCL.cpp @@ -186,8 +186,8 @@ void CIRGenModule::genKernelArgMetadata(cir::FuncOp Fn, const FunctionDecl *FD, } } -void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, - cir::FuncOp Fn) { +void CIRGenFunction::emitKernelMetadata(const FunctionDecl *FD, + cir::FuncOp Fn) { if (!FD->hasAttr() && !FD->hasAttr()) return; @@ -249,7 +249,7 @@ void CIRGenFunction::buildKernelMetadata(const FunctionDecl *FD, &getMLIRContext(), attrs.getDictionary(&getMLIRContext()))); } -void CIRGenModule::buildOpenCLMetadata() { +void CIRGenModule::emitOpenCLMetadata() { // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the // opencl.ocl.version named metadata node. // C++ for OpenCL has a distinct mapping for versions compatible with OpenCL. diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp index 34207f74089b..4f4433b49bea 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.cpp @@ -22,7 +22,7 @@ using namespace clang::CIRGen; CIRGenOpenCLRuntime::~CIRGenOpenCLRuntime() {} -void CIRGenOpenCLRuntime::buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, - const VarDecl &D) { - return CGF.buildStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); +void CIRGenOpenCLRuntime::emitWorkGroupLocalVarDecl(CIRGenFunction &CGF, - const VarDecl &D) { + return CGF.emitStaticVarDecl(D, cir::GlobalLinkageKind::InternalLinkage); } diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h index f08ed0bf31e8..252a810f2061 100644 --- a/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h +++ b/clang/lib/CIR/CodeGen/CIRGenOpenCLRuntime.h @@ -37,8 +37,8 @@ class CIRGenOpenCLRuntime { /// Emit the IR required for a work-group-local variable declaration, and add /// an entry to CGF's LocalDeclMap for D. The base class does this using /// CIRGenFunction::EmitStaticVarDecl to emit an internal global for D.
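/// Editorial example (not part of the actual patch): in OpenCL C,
///
///   kernel void k(void) {
///     local int tile[64];  // work-group-local variable
///   }
///
/// `tile` is not a per-work-item alloca; it is lowered as an internal global
/// in the local address space, which is what the hook below arranges.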
- virtual void buildWorkGroupLocalVarDecl(CIRGenFunction &CGF, - const clang::VarDecl &D); + virtual void emitWorkGroupLocalVarDecl(CIRGenFunction &CGF, + const clang::VarDecl &D); }; } // namespace clang::CIRGen diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp index 1a29affa0df3..8dff466cecd8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp @@ -24,9 +24,9 @@ using namespace clang; using namespace clang::CIRGen; using namespace cir; -Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, - bool getLast, - AggValueSlot slot) { +Address CIRGenFunction::emitCompoundStmtWithoutScope(const CompoundStmt &S, + bool getLast, + AggValueSlot slot) { const Stmt *ExprResult = S.getStmtExprResult(); assert((!getLast || (getLast && ExprResult)) && "If getLast is true then the CompoundStmt must have a StmtExprResult"); @@ -47,17 +47,17 @@ Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, const Expr *E = cast(ExprResult); QualType exprTy = E->getType(); if (hasAggregateEvaluationKind(exprTy)) { - buildAggExpr(E, slot); + emitAggExpr(E, slot); } else { // We can't return an RValue here because there might be cleanups at // the end of the StmtExpr. Because of that, we have to emit the result // here into a temporary alloca. retAlloca = CreateMemTemp(exprTy, getLoc(E->getSourceRange())); - buildAnyExprToMem(E, retAlloca, Qualifiers(), - /*IsInit*/ false); + emitAnyExprToMem(E, retAlloca, Qualifiers(), + /*IsInit*/ false); } } else { - if (buildStmt(CurStmt, /*useCurrentScope=*/false).failed()) + if (emitStmt(CurStmt, /*useCurrentScope=*/false).failed()) llvm_unreachable("failed to build statement"); } } @@ -65,8 +65,8 @@ Address CIRGenFunction::buildCompoundStmtWithoutScope(const CompoundStmt &S, return retAlloca; } -Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, - AggValueSlot slot) { +Address CIRGenFunction::emitCompoundStmt(const CompoundStmt &S, bool getLast, + AggValueSlot slot) { Address retAlloca = Address::invalid(); // Add local scope to track new declared variables. @@ -76,22 +76,22 @@ Address CIRGenFunction::buildCompoundStmt(const CompoundStmt &S, bool getLast, scopeLoc, /*scopeBuilder=*/ [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) { LexicalScope lexScope{*this, loc, builder.getInsertionBlock()}; - retAlloca = buildCompoundStmtWithoutScope(S, getLast, slot); + retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot); }); return retAlloca; } -void CIRGenFunction::buildStopPoint(const Stmt *S) { +void CIRGenFunction::emitStopPoint(const Stmt *S) { assert(!cir::MissingFeatures::generateDebugInfo()); } // Build CIR for a statement. useCurrentScope should be true if no // new scopes need be created when finding a compound statement. 
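// Editorial example (not part of the actual patch) of the scoping
// distinction described above: for
//
//   if (c) { int x = 0; use(x); }
//
// the braced CompoundStmt is emitted with useCurrentScope=false, wrapping it
// in its own cir.scope so x's storage and cleanups stay confined to that
// region; useCurrentScope=true instead reuses a scope the caller already
// opened (e.g. a loop body whose op provides the region).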
-mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, - bool useCurrentScope, - ArrayRef Attrs) { - if (mlir::succeeded(buildSimpleStmt(S, useCurrentScope))) +mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *S, + bool useCurrentScope, + ArrayRef Attrs) { + if (mlir::succeeded(emitSimpleStmt(S, useCurrentScope))) return mlir::success(); if (getContext().getLangOpts().OpenMP && @@ -132,7 +132,7 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, mlir::Block *incoming = builder.getInsertionBlock(); assert(incoming && "expression emission must have an insertion point"); - buildIgnoredExpr(cast(S)); + emitIgnoredExpr(cast(S)); mlir::Block *outgoing = builder.getInsertionBlock(); assert(outgoing && "expression emission cleared block!"); @@ -141,52 +141,52 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, } case Stmt::IfStmtClass: - if (buildIfStmt(cast(*S)).failed()) + if (emitIfStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::SwitchStmtClass: - if (buildSwitchStmt(cast(*S)).failed()) + if (emitSwitchStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::ForStmtClass: - if (buildForStmt(cast(*S)).failed()) + if (emitForStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::WhileStmtClass: - if (buildWhileStmt(cast(*S)).failed()) + if (emitWhileStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::DoStmtClass: - if (buildDoStmt(cast(*S)).failed()) + if (emitDoStmt(cast(*S)).failed()) return mlir::failure(); break; case Stmt::CoroutineBodyStmtClass: - return buildCoroutineBody(cast(*S)); + return emitCoroutineBody(cast(*S)); case Stmt::CoreturnStmtClass: - return buildCoreturnStmt(cast(*S)); + return emitCoreturnStmt(cast(*S)); case Stmt::CXXTryStmtClass: - return buildCXXTryStmt(cast(*S)); + return emitCXXTryStmt(cast(*S)); case Stmt::CXXForRangeStmtClass: - return buildCXXForRangeStmt(cast(*S), Attrs); + return emitCXXForRangeStmt(cast(*S), Attrs); case Stmt::IndirectGotoStmtClass: case Stmt::ReturnStmtClass: // When implemented, GCCAsmStmtClass should fall-through to MSAsmStmtClass. case Stmt::GCCAsmStmtClass: case Stmt::MSAsmStmtClass: - return buildAsmStmt(cast(*S)); + return emitAsmStmt(cast(*S)); // OMP directives: case Stmt::OMPParallelDirectiveClass: - return buildOMPParallelDirective(cast(*S)); + return emitOMPParallelDirective(cast(*S)); case Stmt::OMPTaskwaitDirectiveClass: - return buildOMPTaskwaitDirective(cast(*S)); + return emitOMPTaskwaitDirective(cast(*S)); case Stmt::OMPTaskyieldDirectiveClass: - return buildOMPTaskyieldDirective(cast(*S)); + return emitOMPTaskyieldDirective(cast(*S)); case Stmt::OMPBarrierDirectiveClass: - return buildOMPBarrierDirective(cast(*S)); + return emitOMPBarrierDirective(cast(*S)); // Unsupported AST nodes: case Stmt::CapturedStmtClass: case Stmt::ObjCAtTryStmtClass: @@ -281,41 +281,41 @@ mlir::LogicalResult CIRGenFunction::buildStmt(const Stmt *S, return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, - bool useCurrentScope) { +mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *S, + bool useCurrentScope) { switch (S->getStmtClass()) { default: return mlir::failure(); case Stmt::DeclStmtClass: - return buildDeclStmt(cast(*S)); + return emitDeclStmt(cast(*S)); case Stmt::CompoundStmtClass: - useCurrentScope ? buildCompoundStmtWithoutScope(cast(*S)) - : buildCompoundStmt(cast(*S)); + useCurrentScope ? 
emitCompoundStmtWithoutScope(cast(*S)) + : emitCompoundStmt(cast(*S)); break; case Stmt::ReturnStmtClass: - return buildReturnStmt(cast(*S)); + return emitReturnStmt(cast(*S)); case Stmt::GotoStmtClass: - return buildGotoStmt(cast(*S)); + return emitGotoStmt(cast(*S)); case Stmt::ContinueStmtClass: - return buildContinueStmt(cast(*S)); + return emitContinueStmt(cast(*S)); case Stmt::NullStmtClass: break; case Stmt::LabelStmtClass: - return buildLabelStmt(cast(*S)); + return emitLabelStmt(cast(*S)); case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: // If we reached here, we must not be handling a switch case at the top level. - return buildSwitchCase(cast(*S), - /*buildingTopLevelCase=*/false); + return emitSwitchCase(cast(*S), + /*buildingTopLevelCase=*/false); break; case Stmt::BreakStmtClass: - return buildBreakStmt(cast(*S)); + return emitBreakStmt(cast(*S)); case Stmt::AttributedStmtClass: - return buildAttributedStmt(cast(*S)); + return emitAttributedStmt(cast(*S)); case Stmt::SEHLeaveStmtClass: llvm::errs() << "CIR codegen for '" << S->getStmtClassName() @@ -326,18 +326,18 @@ mlir::LogicalResult CIRGenFunction::buildSimpleStmt(const Stmt *S, return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildLabelStmt(const clang::LabelStmt &S) { - if (buildLabel(S.getDecl()).failed()) +mlir::LogicalResult CIRGenFunction::emitLabelStmt(const clang::LabelStmt &S) { + if (emitLabel(S.getDecl()).failed()) return mlir::failure(); // IsEHa: not implemented. assert(!(getContext().getLangOpts().EHAsynch && S.isSideEntry())); - return buildStmt(S.getSubStmt(), /* useCurrentScope */ true); + return emitStmt(S.getSubStmt(), /* useCurrentScope */ true); } mlir::LogicalResult -CIRGenFunction::buildAttributedStmt(const AttributedStmt &S) { +CIRGenFunction::emitAttributedStmt(const AttributedStmt &S) { for (const auto *A : S.getAttrs()) { switch (A->getKind()) { case attr::NoMerge: @@ -350,7 +350,7 @@ CIRGenFunction::buildAttributedStmt(const AttributedStmt &S) { } } - return buildStmt(S.getSubStmt(), true, S.getAttrs()); + return emitStmt(S.getSubStmt(), true, S.getAttrs()); } // Add terminating yield on body regions (loops, ...) in case there are @@ -382,7 +382,7 @@ static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r, b->erase(); } -mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { +mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &S) { mlir::LogicalResult res = mlir::success(); // The else branch of a consteval if statement is always the only branch // that can be runtime evaluated. @@ -398,14 +398,14 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // compares unequal to 0. The condition must be a scalar type. auto ifStmtBuilder = [&]() -> mlir::LogicalResult { if (S.isConsteval()) - return buildStmt(ConstevalExecuted, /*useCurrentScope=*/true); + return emitStmt(ConstevalExecuted, /*useCurrentScope=*/true); if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); // During LLVM codegen, if the condition constant folds and can be elided, // it tries to avoid emitting the condition and the dead arm of the if/else. @@ -420,7 +420,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { // in this lambda like in Clang but postponed to other MLIR // passes. if (const Stmt *Executed = CondConstant ?
S.getThen() : S.getElse()) - return buildStmt(Executed, /*useCurrentScope=*/true); + return emitStmt(Executed, /*useCurrentScope=*/true); // There is nothing to execute at runtime. // TODO(cir): there is still an empty cir.scope generated by the caller. return mlir::success(); @@ -430,7 +430,7 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic()); assert(!cir::MissingFeatures::incrementProfileCounter()); - return buildIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); + return emitIfOnBoolExpr(S.getCond(), S.getThen(), S.getElse()); }; // TODO: Add a new scoped symbol table. @@ -447,20 +447,20 @@ mlir::LogicalResult CIRGenFunction::buildIfStmt(const IfStmt &S) { return res; } -mlir::LogicalResult CIRGenFunction::buildDeclStmt(const DeclStmt &S) { +mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &S) { if (!builder.getInsertionBlock()) { CGM.emitError("Seems like this is unreachable code, what should we do?"); return mlir::failure(); } for (const auto *I : S.decls()) { - buildDecl(*I); + emitDecl(*I); } return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { +mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) { assert(!cir::MissingFeatures::requiresReturnValueCheck()); auto loc = getLoc(S.getSourceRange()); @@ -492,29 +492,29 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { // Make sure not to return anything, but evaluate the expression // for side effects. if (RV) { - buildAnyExpr(RV); + emitAnyExpr(RV); } } else if (!RV) { // Do nothing (return value is left uninitialized) } else if (FnRetTy->isReferenceType()) { // If this function returns a reference, take the address of the // expression rather than the value. - RValue Result = buildReferenceBindingToExpr(RV); + RValue Result = emitReferenceBindingToExpr(RV); builder.createStore(loc, Result.getScalarVal(), ReturnValue); } else { mlir::Value V = nullptr; switch (CIRGenFunction::getEvaluationKind(RV->getType())) { case cir::TEK_Scalar: - V = buildScalarExpr(RV); + V = emitScalarExpr(RV); builder.CIRBaseBuilderTy::createStore(loc, V, *FnRetAlloca); break; case cir::TEK_Complex: - buildComplexExprIntoLValue(RV, - makeAddrLValue(ReturnValue, RV->getType()), - /*isInit*/ true); + emitComplexExprIntoLValue(RV, + makeAddrLValue(ReturnValue, RV->getType()), + /*isInit*/ true); break; case cir::TEK_Aggregate: - buildAggExpr( + emitAggExpr( RV, AggValueSlot::forAddr( ReturnValue, Qualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, @@ -559,7 +559,7 @@ mlir::LogicalResult CIRGenFunction::buildReturnStmt(const ReturnStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { +mlir::LogicalResult CIRGenFunction::emitGotoStmt(const GotoStmt &S) { // FIXME: LLVM codegen inserts emit stop point here for debug info // sake when the insertion point is available, but doesn't do // anything special when there isn't. We haven't implemented debug @@ -570,7 +570,7 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { S.getLabel()->getName()); // A goto marks the end of a block, create a new one for codegen after - // buildGotoStmt can resume building in that block. + // emitGotoStmt can resume building in that block. // Insert the new block to continue codegen after goto. 
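// Editorial sketch (not part of the actual patch; exact CIR syntax may
// differ) of the block structure produced here:
//
//   cir.goto "out"   // terminates the current block
// ^bb1:              // fresh block created below
//   ...              // any statements after the goto land here
//
// If nothing ever branches to the new block, later cleanup passes are free
// to erase it.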
builder.createBlock(builder.getBlock()->getParent()); @@ -578,7 +578,7 @@ mlir::LogicalResult CIRGenFunction::buildGotoStmt(const GotoStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildLabel(const LabelDecl *D) { +mlir::LogicalResult CIRGenFunction::emitLabel(const LabelDecl *D) { // Create a new block to tag with a label and add a branch from // the current one to it. If the block is empty, just attach it // to this label. @@ -601,7 +601,7 @@ } mlir::LogicalResult -CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { +CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &S) { builder.createContinue(getLoc(S.getContinueLoc())); // Insert the new block to continue codegen after the continue statement. @@ -610,7 +610,7 @@ CIRGenFunction::buildContinueStmt(const clang::ContinueStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildBreakStmt(const clang::BreakStmt &S) { +mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &S) { builder.createBreak(getLoc(S.getBreakLoc())); // Insert the new block to continue codegen after the break statement. @@ -667,9 +667,9 @@ const CaseStmt *CIRGenFunction::foldCaseStmt(const clang::CaseStmt &S, template mlir::LogicalResult -CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, - mlir::ArrayAttr value, CaseOpKind kind, - bool buildingTopLevelCase) { +CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType, + mlir::ArrayAttr value, CaseOpKind kind, + bool buildingTopLevelCase) { assert((isa(stmt)) && "only case or default stmt go here"); @@ -696,7 +696,7 @@ CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, subStmtKind = SubStmtKind::Case; builder.createYield(loc); } else - result = buildStmt(sub, /*useCurrentScope=*/!isa(sub)); + result = emitStmt(sub, /*useCurrentScope=*/!isa(sub)); insertPoint = builder.saveInsertionPoint(); } @@ -734,11 +734,10 @@ CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, // We don't need to revert this if we find the current switch can't be in // simple form later since the conversion itself should be harmless.
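// Editorial example (not part of the actual patch) of the cascade handled
// below: in
//
//   switch (v) {
//   case 0:
//   default:
//   case 1:
//     body();
//   }
//
// case 0's substatement is the default stmt, whose substatement is case 1;
// the recursion walks that chain so each label gets its own case region and
// only the innermost one holds body().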
if (subStmtKind == SubStmtKind::Case) - result = - buildCaseStmt(*cast(sub), condType, buildingTopLevelCase); + result = emitCaseStmt(*cast(sub), condType, buildingTopLevelCase); else if (subStmtKind == SubStmtKind::Default) - result = buildDefaultStmt(*cast(sub), condType, - buildingTopLevelCase); + result = emitDefaultStmt(*cast(sub), condType, + buildingTopLevelCase); else if (buildingTopLevelCase) // If we're building a top level case, try to restore the insert point to // the case we're building, then we can attach more random stmts to the @@ -748,43 +747,42 @@ CIRGenFunction::buildCaseDefaultCascade(const T *stmt, mlir::Type condType, return result; } -mlir::LogicalResult CIRGenFunction::buildCaseStmt(const CaseStmt &S, - mlir::Type condType, - bool buildingTopLevelCase) { +mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &S, + mlir::Type condType, + bool buildingTopLevelCase) { mlir::ArrayAttr value; CaseOpKind kind; auto *caseStmt = foldCaseStmt(S, condType, value, kind); - return buildCaseDefaultCascade(caseStmt, condType, value, kind, - buildingTopLevelCase); + return emitCaseDefaultCascade(caseStmt, condType, value, kind, + buildingTopLevelCase); } -mlir::LogicalResult -CIRGenFunction::buildDefaultStmt(const DefaultStmt &S, mlir::Type condType, - bool buildingTopLevelCase) { - return buildCaseDefaultCascade(&S, condType, builder.getArrayAttr({}), - cir::CaseOpKind::Default, - buildingTopLevelCase); +mlir::LogicalResult CIRGenFunction::emitDefaultStmt(const DefaultStmt &S, + mlir::Type condType, + bool buildingTopLevelCase) { + return emitCaseDefaultCascade(&S, condType, builder.getArrayAttr({}), + cir::CaseOpKind::Default, buildingTopLevelCase); } -mlir::LogicalResult CIRGenFunction::buildSwitchCase(const SwitchCase &S, - bool buildingTopLevelCase) { +mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &S, + bool buildingTopLevelCase) { assert(!condTypeStack.empty() && "build switch case without specifying the type of the condition"); if (S.getStmtClass() == Stmt::CaseStmtClass) - return buildCaseStmt(cast(S), condTypeStack.back(), - buildingTopLevelCase); + return emitCaseStmt(cast(S), condTypeStack.back(), + buildingTopLevelCase); if (S.getStmtClass() == Stmt::DefaultStmtClass) - return buildDefaultStmt(cast(S), condTypeStack.back(), - buildingTopLevelCase); + return emitDefaultStmt(cast(S), condTypeStack.back(), + buildingTopLevelCase); llvm_unreachable("expect case or default stmt"); } mlir::LogicalResult -CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, - ArrayRef ForAttrs) { +CIRGenFunction::emitCXXForRangeStmt(const CXXForRangeStmt &S, + ArrayRef ForAttrs) { cir::ForOp forOp; // TODO(cir): pass in array of attributes. @@ -792,13 +790,13 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, auto loopRes = mlir::success(); // Evaluate the first pieces before the loop. 
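// Editorial reminder (not part of the actual patch): Sema desugars
//
//   for (auto x : cont) body(x);
//
// roughly into
//
//   auto &&__range = cont;        // range statement
//   auto __begin = begin-expr;    // begin statement (member or ADL begin)
//   auto __end = end-expr;        // end statement
//   for (; __begin != __end; ++__begin) {
//     auto x = *__begin;          // loop variable statement
//     body(x);
//   }
//
// which is why the range/begin/end statements are emitted before the loop op.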
if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - if (buildStmt(S.getRangeStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getRangeStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - if (buildStmt(S.getBeginStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getBeginStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); - if (buildStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getEndStmt(), /*useCurrentScope=*/true).failed()) return mlir::failure(); assert(!cir::MissingFeatures::loopInfoStack()); @@ -823,16 +821,16 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, // In C++ the scope of the init-statement and the scope of // statement are one and the same. bool useCurrentScope = true; - if (buildStmt(S.getLoopVarStmt(), useCurrentScope).failed()) + if (emitStmt(S.getLoopVarStmt(), useCurrentScope).failed()) loopRes = mlir::failure(); - if (buildStmt(S.getBody(), useCurrentScope).failed()) + if (emitStmt(S.getBody(), useCurrentScope).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (S.getInc()) - if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInc(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); builder.createYield(loc); }); @@ -859,7 +857,7 @@ CIRGenFunction::buildCXXForRangeStmt(const CXXForRangeStmt &S, return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { +mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &S) { cir::ForOp forOp; // TODO: pass in array of attributes. @@ -867,7 +865,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { auto loopRes = mlir::success(); // Evaluate the first part before the loop. if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); assert(!cir::MissingFeatures::loopInfoStack()); // From LLVM: if there are any cleanups between here and the loop-exit @@ -887,7 +885,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // If the for statement has a condition scope, // emit the local variable declaration. if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -907,14 +905,14 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { // nested within the scope of init-statement. bool useCurrentScope = CGM.getASTContext().getLangOpts().CPlusPlus ? 
true : false; - if (buildStmt(S.getBody(), useCurrentScope).failed()) + if (emitStmt(S.getBody(), useCurrentScope).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }, /*stepBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { if (S.getInc()) - if (buildStmt(S.getInc(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInc(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); builder.createYield(loc); }); @@ -937,7 +935,7 @@ mlir::LogicalResult CIRGenFunction::buildForStmt(const ForStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { +mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &S) { cir::DoWhileOp doWhileOp; // TODO: pass in array of attributes. @@ -964,9 +962,9 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }); return loopRes; }; @@ -987,7 +985,7 @@ mlir::LogicalResult CIRGenFunction::buildDoStmt(const DoStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { +mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &S) { cir::WhileOp whileOp; // TODO: pass in array of attributes. @@ -1010,7 +1008,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { // If the for statement has a condition scope, // emit the local variable declaration. if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); // C99 6.8.5p2/p4: The first substatement is executed if the // expression compares unequal to 0. The condition must be a // scalar type. @@ -1019,9 +1017,9 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { }, /*bodyBuilder=*/ [&](mlir::OpBuilder &b, mlir::Location loc) { - if (buildStmt(S.getBody(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getBody(), /*useCurrentScope=*/true).failed()) loopRes = mlir::failure(); - buildStopPoint(&S); + emitStopPoint(&S); }); return loopRes; }; @@ -1042,7 +1040,7 @@ mlir::LogicalResult CIRGenFunction::buildWhileStmt(const WhileStmt &S) { return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { +mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *S) { // It is rare but legal if the switch body is not a compound stmt. e.g., // // switch(a) @@ -1053,7 +1051,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { // ... // } if (!isa(S)) - return buildStmt(S, /*useCurrentScope=*/!false); + return emitStmt(S, /*useCurrentScope=*/!false); auto *compoundStmt = cast(S); @@ -1065,21 +1063,21 @@ mlir::LogicalResult CIRGenFunction::buildSwitchBody(const Stmt *S) { // random stmt to the region of previous built case op to try to make // the being generated `cir.switch` to be in simple form. if (mlir::failed( - buildSwitchCase(*switchCase, /*buildingTopLevelCase=*/true))) + emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true))) return mlir::failure(); continue; } // Otherwise, just build the statements in the nearest case region. 
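// Editorial example (not part of the actual patch): a non-SwitchCase
// statement can legally appear between labels, e.g.
//
//   switch (v) {
//   case 0:
//     a();
//     b();   // plain statement: appended to case 0's region
//   case 1:
//     c();
//   }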
- if (mlir::failed(buildStmt(c, /*useCurrentScope=*/!isa(c)))) + if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa(c)))) return mlir::failure(); } return mlir::success(); } -mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { +mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const SwitchStmt &S) { // TODO: LLVM codegen does some early optimization to fold the condition and // only emit live cases. CIR should use MLIR to achieve similar things, // nothing to be done here. @@ -1088,13 +1086,13 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { SwitchOp swop; auto switchStmtBuilder = [&]() -> mlir::LogicalResult { if (S.getInit()) - if (buildStmt(S.getInit(), /*useCurrentScope=*/true).failed()) + if (emitStmt(S.getInit(), /*useCurrentScope=*/true).failed()) return mlir::failure(); if (S.getConditionVariable()) - buildDecl(*S.getConditionVariable()); + emitDecl(*S.getConditionVariable()); - mlir::Value condV = buildScalarExpr(S.getCond()); + mlir::Value condV = emitScalarExpr(S.getCond()); // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts()) // TODO: if the switch has a condition wrapped by __builtin_unpredictable? @@ -1108,7 +1106,7 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { condTypeStack.push_back(condV.getType()); - res = buildSwitchBody(S.getBody()); + res = emitSwitchBody(S.getBody()); condTypeStack.pop_back(); }); @@ -1135,16 +1133,16 @@ mlir::LogicalResult CIRGenFunction::buildSwitchStmt(const SwitchStmt &S) { return res; } -void CIRGenFunction::buildReturnOfRValue(mlir::Location loc, RValue RV, - QualType Ty) { +void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue RV, + QualType Ty) { if (RV.isScalar()) { builder.createStore(loc, RV.getScalarVal(), ReturnValue); } else if (RV.isAggregate()) { LValue Dest = makeAddrLValue(ReturnValue, Ty); LValue Src = makeAddrLValue(RV.getAggregateAddress(), Ty); - buildAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue()); + emitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue()); } else { llvm_unreachable("NYI"); } - buildBranchThroughCleanup(loc, ReturnBlock()); + emitBranchThroughCleanup(loc, ReturnBlock()); } diff --git a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp index b865046828c9..5494268e9606 100644 --- a/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenStmtOpenMP.cpp @@ -67,7 +67,7 @@ static void buildDependences(const OMPExecutableDirective &S, } mlir::LogicalResult -CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { +CIRGenFunction::emitOMPParallelDirective(const OMPParallelDirective &S) { mlir::LogicalResult res = mlir::success(); auto scopeLoc = getLoc(S.getSourceRange()); // Create a `omp.parallel` op. @@ -81,9 +81,9 @@ CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { [&](mlir::OpBuilder &b, mlir::Location loc) { LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()}; // Emit the body of the region. 
- if (buildStmt(S.getCapturedStmt(OpenMPDirectiveKind::OMPD_parallel) - ->getCapturedStmt(), - /*useCurrentScope=*/true) + if (emitStmt(S.getCapturedStmt(OpenMPDirectiveKind::OMPD_parallel) + ->getCapturedStmt(), + /*useCurrentScope=*/true) .failed()) res = mlir::failure(); }); @@ -93,7 +93,7 @@ CIRGenFunction::buildOMPParallelDirective(const OMPParallelDirective &S) { } mlir::LogicalResult -CIRGenFunction::buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { +CIRGenFunction::emitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { mlir::LogicalResult res = mlir::success(); OMPTaskDataTy Data; buildDependences(S, Data); @@ -103,7 +103,7 @@ CIRGenFunction::buildOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { return res; } mlir::LogicalResult -CIRGenFunction::buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { +CIRGenFunction::emitOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { mlir::LogicalResult res = mlir::success(); // Creation of an omp.taskyield operation CGM.getOpenMPRuntime().emitTaskyieldCall(builder, *this, @@ -112,7 +112,7 @@ CIRGenFunction::buildOMPTaskyieldDirective(const OMPTaskyieldDirective &S) { } mlir::LogicalResult -CIRGenFunction::buildOMPBarrierDirective(const OMPBarrierDirective &S) { +CIRGenFunction::emitOMPBarrierDirective(const OMPBarrierDirective &S) { mlir::LogicalResult res = mlir::success(); // Creation of an omp.barrier operation CGM.getOpenMPRuntime().emitBarrierCall(builder, *this, diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index cec319e41046..932dd4bebeed 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -131,7 +131,7 @@ static bool shouldEmitVTableAtEndOfTranslationUnit(CIRGenModule &CGM, /// Given that at some point we emitted a reference to one or more /// vtables, and that we are now at the end of the translation unit, /// decide whether we should emit them. -void CIRGenModule::buildDeferredVTables() { +void CIRGenModule::emitDeferredVTables() { #ifndef NDEBUG // Remember the size of DeferredVTables, because we're going to assume // that this entire operation doesn't modify it. @@ -156,7 +156,7 @@ void CIRGenModule::buildDeferredVTables() { /// This is only called for vtables that _must_ be emitted (mainly due to key /// functions). For weak vtables, CodeGen tracks when they are needed and /// emits them as-needed. -void CIRGenModule::buildVTable(CXXRecordDecl *rd) { +void CIRGenModule::emitVTable(CXXRecordDecl *rd) { VTables.GenerateClassData(rd); } @@ -387,7 +387,7 @@ cir::GlobalOp CIRGenVTables::generateConstructionVTable( assert(!VTable.isDeclaration() && "Shouldn't set properties on declaration"); CGM.setGVProperties(VTable, RD); - CGM.buildVTableTypeMetadata(RD, VTable, *VTLayout.get()); + CGM.emitVTableTypeMetadata(RD, VTable, *VTLayout.get()); if (UsingRelativeLayout) { llvm_unreachable("NYI"); @@ -585,9 +585,9 @@ uint64_t CIRGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, } /// Emit the definition of the given vtable. 
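/// Editorial background (not part of the actual patch): a VTT ("vtable
/// table") is required when constructors must locate construction vtables
/// for virtual bases, e.g.
///
///   struct A { virtual ~A(); };
///   struct B : virtual A {};
///   struct C : virtual A {};
///   struct D : B, C {};  // constructing D needs a VTT
///
/// so that B's and C's constructors, when run as bases of D, use the right
/// construction vtables.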
-void CIRGenVTables::buildVTTDefinition(cir::GlobalOp VTT, - cir::GlobalLinkageKind Linkage, - const CXXRecordDecl *RD) { +void CIRGenVTables::emitVTTDefinition(cir::GlobalOp VTT, + cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD) { VTTBuilder Builder(CGM.getASTContext(), RD, /*GenerateDefinition=*/true); auto ArrayType = cir::ArrayType::get(CGM.getBuilder().getContext(), @@ -651,7 +651,7 @@ void CIRGenVTables::buildVTTDefinition(cir::GlobalOp VTT, } } -void CIRGenVTables::buildThunks(GlobalDecl GD) { +void CIRGenVTables::emitThunks(GlobalDecl GD) { const CXXMethodDecl *MD = cast(GD.getDecl())->getCanonicalDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 4b2247dc9fc8..639eb370ca0e 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -102,11 +102,11 @@ class CIRGenVTables { cir::GlobalOp getAddrOfVTT(const CXXRecordDecl *RD); /// Emit the definition of the given vtable. - void buildVTTDefinition(cir::GlobalOp VTT, cir::GlobalLinkageKind Linkage, - const CXXRecordDecl *RD); + void emitVTTDefinition(cir::GlobalOp VTT, cir::GlobalLinkageKind Linkage, + const CXXRecordDecl *RD); /// Emit the associated thunks for the given global decl. - void buildThunks(GlobalDecl GD); + void emitThunks(GlobalDecl GD); /// Generate all the class data required to be generated upon definition of a /// KeyFunction. This includes the vtable, the RTTI data structure (if RTTI diff --git a/clang/lib/CIR/CodeGen/CIRGenerator.cpp b/clang/lib/CIR/CodeGen/CIRGenerator.cpp index 24143185691e..0266e893909a 100644 --- a/clang/lib/CIR/CodeGen/CIRGenerator.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenerator.cpp @@ -82,7 +82,7 @@ bool CIRGenerator::HandleTopLevelDecl(DeclGroupRef D) { HandlingTopLevelDeclRAII HandlingDecl(*this); for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - CGM->buildTopLevelDecl(*I); + CGM->emitTopLevelDecl(*I); } return true; @@ -125,9 +125,9 @@ void CIRGenerator::HandleInlineFunctionDefinition(FunctionDecl *D) { CGM->AddDeferredUnusedCoverageMapping(D); } -void CIRGenerator::buildDefaultMethods() { CGM->buildDefaultMethods(); } +void CIRGenerator::emitDefaultMethods() { CGM->emitDefaultMethods(); } -void CIRGenerator::buildDeferredDecls() { +void CIRGenerator::emitDeferredDecls() { if (DeferredInlineMemberFuncDefs.empty()) return; @@ -136,7 +136,7 @@ void CIRGenerator::buildDeferredDecls() { // invoked if AST inspection results in declarations being added. 
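// Editorial note (not part of the actual patch): the index-based loop below
// is deliberate. Emitting one deferred inline member function can trigger
// ASTConsumer callbacks that append further entries to
// DeferredInlineMemberFuncDefs, so a range-based loop (whose iterators the
// push_back would invalidate) would not be safe here.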
HandlingTopLevelDeclRAII HandlingDecls(*this); for (unsigned I = 0; I != DeferredInlineMemberFuncDefs.size(); ++I) - CGM->buildTopLevelDecl(DeferredInlineMemberFuncDefs[I]); + CGM->emitTopLevelDecl(DeferredInlineMemberFuncDefs[I]); DeferredInlineMemberFuncDefs.clear(); } @@ -188,12 +188,12 @@ void CIRGenerator::CompleteTentativeDefinition(VarDecl *D) { if (Diags.hasErrorOccurred()) return; - CGM->buildTentativeDefinition(D); + CGM->emitTentativeDefinition(D); } void CIRGenerator::HandleVTable(CXXRecordDecl *rd) { if (Diags.hasErrorOccurred()) return; - CGM->buildVTable(rd); + CGM->emitVTable(rd); } diff --git a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp index f94553c58112..bba759494e3b 100644 --- a/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp +++ b/clang/lib/CIR/Dialect/Transforms/TargetLowering/Targets/LoweringPrepareItaniumCXXABI.cpp @@ -141,7 +141,7 @@ LoweringPrepareItaniumCXXABI::lowerDynamicCast(CIRBaseBuilderTy &builder, auto loc = op->getLoc(); auto srcValue = op.getSrc(); - cir_cconv_assert(!MissingFeatures::buildTypeCheck()); + cir_cconv_assert(!MissingFeatures::emitTypeCheck()); if (op.isRefcast()) return buildDynamicCastAfterNullCheck(builder, op); diff --git a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp index c9031964ea3a..6b7ef5ae90e3 100644 --- a/clang/lib/CIR/FrontendAction/CIRGenAction.cpp +++ b/clang/lib/CIR/FrontendAction/CIRGenAction.cpp @@ -262,7 +262,7 @@ class CIRGenConsumer : public clang::ASTConsumer { if (outputStream && mlirMod) { // Emit remaining defaulted C++ methods if (!feOptions.ClangIRDisableEmitCXXDefault) - gen->buildDefaultMethods(); + gen->emitDefaultMethods(); // FIXME: we cannot roundtrip prettyForm=true right now. mlir::OpPrintingFlags flags;