Merge tag 'llvmorg-19.1.0' of https://github.com/llvm/llvm-project into llvm-19.x

LLVM Release 19.1.0
KomiMoe committed Sep 18, 2024
2 parents 765043f + a4bf6cd commit 4adca9a
Showing 90 changed files with 3,440 additions and 1,567 deletions.
15 changes: 13 additions & 2 deletions .github/workflows/release-binaries.yml
@@ -450,11 +450,22 @@ jobs:
           name: ${{ needs.prepare.outputs.release-binary-filename }}-attestation
           path: ${{ needs.prepare.outputs.release-binary-filename }}.jsonl
 
+      - name: Checkout Release Scripts
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        with:
+          sparse-checkout: |
+            llvm/utils/release/github-upload-release.py
+            llvm/utils/git/requirements.txt
+          sparse-checkout-cone-mode: false
+
+      - name: Install Python Requirements
+        run: |
+          pip install --require-hashes -r ./llvm/utils/git/requirements.txt
+
       - name: Upload Release
         shell: bash
         run: |
-          sudo apt install python3-github
-          ./llvm-project/llvm/utils/release/github-upload-release.py \
+          ./llvm/utils/release/github-upload-release.py \
            --token ${{ github.token }} \
            --release ${{ needs.prepare.outputs.release-version }} \
            upload \
2 changes: 1 addition & 1 deletion .github/workflows/release-sources.yml
@@ -51,7 +51,7 @@ jobs:
     steps:
       - id: inputs
         run: |
-          ref=${{ inputs.release-version || github.sha }}
+          ref=${{ (inputs.release-version && format('llvmorg-{0}', inputs.release-version)) || github.sha }}
           if [ -n "${{ inputs.release-version }}" ]; then
             export_args="-release ${{ inputs.release-version }} -final"
           else
3 changes: 3 additions & 0 deletions clang/docs/ReleaseNotes.rst
@@ -1121,6 +1121,9 @@ Bug Fixes to C++ Support
   Fixes (#GH85992).
 - Fixed a crash-on-invalid bug involving extraneous template parameter with concept substitution. (#GH73885)
 - Fixed assertion failure by skipping the analysis of an invalid field declaration. (#GH99868)
+- Fix an issue with dependent source location expressions (#GH106428), (#GH81155), (#GH80210), (#GH85373)
+- Fix handling of ``_`` as the name of a lambda's init capture variable. (#GH107024)
+
 
 Bug Fixes to AST Handling
 ^^^^^^^^^^^^^^^^^^^^^^^^^
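To illustrate the second entry, a minimal hand-written example (not taken from the commit's test suite) that exercises `_` as an init-capture name:

#include <cassert>

int main() {
  // `_` is an ordinary identifier when used as the name of an init capture;
  // Clang previously mishandled this case (GH107024).
  auto counter = [_ = 0]() mutable { return _++; };
  assert(counter() == 0);
  assert(counter() == 1);
}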
4 changes: 4 additions & 0 deletions clang/lib/Basic/Targets/X86.cpp
@@ -723,6 +723,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
   case CK_ZNVER4:
     defineCPUMacros(Builder, "znver4");
     break;
+  case CK_ZNVER5:
+    defineCPUMacros(Builder, "znver5");
+    break;
   case CK_Geode:
     defineCPUMacros(Builder, "geode");
     break;
@@ -1613,6 +1616,7 @@ std::optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
   case CK_ZNVER2:
   case CK_ZNVER3:
   case CK_ZNVER4:
+  case CK_ZNVER5:
   // Deprecated
   case CK_x86_64:
   case CK_x86_64_v2:
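Judging from the znver4 case directly above, defineCPUMacros(Builder, "znver5") should predefine __znver5 and __znver5__ (plus a __tune_znver5__ tuning macro); the macro names below are inferred from that pattern rather than taken from the commit:

// Sketch: user code can now detect -march=znver5 via the predefined macros.
#if defined(__znver5__)
constexpr bool kTargetsZen5 = true;
#else
constexpr bool kTargetsZen5 = false;
#endif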
146 changes: 61 additions & 85 deletions clang/lib/CodeGen/CGCall.cpp
@@ -1336,75 +1336,50 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
   return CGF.Builder.CreateLoad(Tmp);
 }
 
-// Function to store a first-class aggregate into memory. We prefer to
-// store the elements rather than the aggregate to be more friendly to
-// fast-isel.
-// FIXME: Do we need to recurse here?
-void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
-                                         bool DestIsVolatile) {
-  // Prefer scalar stores to first-class aggregate stores.
-  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
-    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-      Address EltPtr = Builder.CreateStructGEP(Dest, i);
-      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
-      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
-    }
-  } else {
-    Builder.CreateStore(Val, Dest, DestIsVolatile);
-  }
-}
-
-/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
-/// where the source and destination may have different types. The
-/// destination is known to be aligned to \arg DstAlign bytes.
-///
-/// This safely handles the case when the src type is larger than the
-/// destination type; the upper bits of the src will be lost.
-static void CreateCoercedStore(llvm::Value *Src,
-                               Address Dst,
-                               bool DstIsVolatile,
-                               CodeGenFunction &CGF) {
-  llvm::Type *SrcTy = Src->getType();
-  llvm::Type *DstTy = Dst.getElementType();
-  if (SrcTy == DstTy) {
-    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
-    return;
-  }
-
-  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
-
-  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
-    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
-                                             SrcSize.getFixedValue(), CGF);
-    DstTy = Dst.getElementType();
-  }
-
-  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
-  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
-  if (SrcPtrTy && DstPtrTy &&
-      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
-    Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy);
-    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
+void CodeGenFunction::CreateCoercedStore(llvm::Value *Src, Address Dst,
+                                         llvm::TypeSize DstSize,
+                                         bool DstIsVolatile) {
+  if (!DstSize)
     return;
-  }
-
-  // If the source and destination are integer or pointer types, just do an
-  // extension or truncation to the desired type.
-  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
-      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
-    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
-    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
-    return;
+  llvm::Type *SrcTy = Src->getType();
+  llvm::TypeSize SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+
+  // GEP into structs to try to make types match.
+  // FIXME: This isn't really that useful with opaque types, but it impacts a
+  // lot of regression tests.
+  if (SrcTy != Dst.getElementType()) {
+    if (llvm::StructType *DstSTy =
+            dyn_cast<llvm::StructType>(Dst.getElementType())) {
+      assert(!SrcSize.isScalable());
+      Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
+                                               SrcSize.getFixedValue(), *this);
+    }
   }
 
-  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
-
-  // If store is legal, just bitcast the src pointer.
-  if (isa<llvm::ScalableVectorType>(SrcTy) ||
-      isa<llvm::ScalableVectorType>(DstTy) ||
-      SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
-    Dst = Dst.withElementType(SrcTy);
-    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
+  if (SrcSize.isScalable() || SrcSize <= DstSize) {
+    if (SrcTy->isIntegerTy() && Dst.getElementType()->isPointerTy() &&
+        SrcSize == CGM.getDataLayout().getTypeAllocSize(Dst.getElementType())) {
+      // If the value is supposed to be a pointer, convert it before storing it.
+      Src = CoerceIntOrPtrToIntOrPtr(Src, Dst.getElementType(), *this);
+      Builder.CreateStore(Src, Dst, DstIsVolatile);
+    } else if (llvm::StructType *STy =
+                   dyn_cast<llvm::StructType>(Src->getType())) {
+      // Prefer scalar stores to first-class aggregate stores.
+      Dst = Dst.withElementType(SrcTy);
+      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+        Address EltPtr = Builder.CreateStructGEP(Dst, i);
+        llvm::Value *Elt = Builder.CreateExtractValue(Src, i);
+        Builder.CreateStore(Elt, EltPtr, DstIsVolatile);
+      }
+    } else {
+      Builder.CreateStore(Src, Dst.withElementType(SrcTy), DstIsVolatile);
+    }
+  } else if (SrcTy->isIntegerTy()) {
+    // If the source is a simple integer, coerce it directly.
+    llvm::Type *DstIntTy = Builder.getIntNTy(DstSize.getFixedValue() * 8);
+    Src = CoerceIntOrPtrToIntOrPtr(Src, DstIntTy, *this);
+    Builder.CreateStore(Src, Dst.withElementType(DstIntTy), DstIsVolatile);
   } else {
     // Otherwise do coercion through memory. This is stupid, but
     // simple.
@@ -1416,12 +1391,12 @@ static void CreateCoercedStore(llvm::Value *Src,
   // FIXME: Assert that we aren't truncating non-padding bits when have access
   // to that information.
   RawAddress Tmp =
-      CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
-  CGF.Builder.CreateStore(Src, Tmp);
-  CGF.Builder.CreateMemCpy(
-      Dst.emitRawPointer(CGF), Dst.getAlignment().getAsAlign(),
-      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
-      llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
+      CreateTempAllocaForCoercion(*this, SrcTy, Dst.getAlignment());
+  Builder.CreateStore(Src, Tmp);
+  Builder.CreateMemCpy(Dst.emitRawPointer(*this),
+                       Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
+                       Tmp.getAlignment().getAsAlign(),
+                       Builder.CreateTypeSize(IntPtrTy, DstSize));
 }
 }
@@ -3309,7 +3284,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
         assert(NumIRArgs == 1);
         auto AI = Fn->getArg(FirstIRArg);
         AI->setName(Arg->getName() + ".coerce");
-        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
+        CreateCoercedStore(
+            AI, Ptr,
+            llvm::TypeSize::getFixed(
+                getContext().getTypeSizeInChars(Ty).getQuantity() -
+                ArgI.getDirectOffset()),
+            /*DstIsVolatile=*/false);
       }
 
       // Match to what EmitParmDecl is expecting for this type.
@@ -5939,17 +5919,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
         return RValue::getComplex(std::make_pair(Real, Imag));
       }
-      case TEK_Aggregate: {
-        Address DestPtr = ReturnValue.getAddress();
-        bool DestIsVolatile = ReturnValue.isVolatile();
-
-        if (!DestPtr.isValid()) {
-          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
-          DestIsVolatile = false;
-        }
-        EmitAggregateStore(CI, DestPtr, DestIsVolatile);
-        return RValue::getAggregate(DestPtr);
-      }
+      case TEK_Aggregate:
+        break;
       case TEK_Scalar: {
         // If the argument doesn't match, perform a bitcast to coerce it.
         // This can happen due to trivial type mismatches.
@@ -5959,7 +5930,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
         return RValue::get(V);
       }
       }
-      llvm_unreachable("bad evaluation kind");
     }
 
     // If coercing a fixed vector from a scalable vector for ABI
@@ -5981,10 +5951,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
 
     Address DestPtr = ReturnValue.getValue();
     bool DestIsVolatile = ReturnValue.isVolatile();
+    uint64_t DestSize =
+        getContext().getTypeInfoDataSizeInChars(RetTy).Width.getQuantity();
 
     if (!DestPtr.isValid()) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
+      DestSize = getContext().getTypeSizeInChars(RetTy).getQuantity();
     }
 
     // An empty record can overlap other data (if declared with
@@ -5993,7 +5966,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
     if (!isEmptyRecord(getContext(), RetTy, true)) {
       // If the value is offset in memory, apply the offset now.
       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
-      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+      CreateCoercedStore(
+          CI, StorePtr,
+          llvm::TypeSize::getFixed(DestSize - RetAI.getDirectOffset()),
+          DestIsVolatile);
     }
 
     return convertTempToRValue(DestPtr, RetTy, SourceLocation());
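As a rough sketch of the kind of mismatch CreateCoercedStore bridges (illustrative only; the struct and function below are invented, not the commit's own example): on x86-64, a small aggregate is returned in a register, so the call yields a value whose IR type differs from the destination's in-memory type, and the explicit DstSize bounds how many bytes the store may write:

// Returned as a single i64 under the x86-64 ABI, then stored back into
// memory typed { i32, i32 }; the destination size keeps the coerced store
// from writing past the end of a smaller destination.
struct Pair {
  int a, b;
};

Pair makePair() { return {1, 2}; }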
23 changes: 14 additions & 9 deletions clang/lib/CodeGen/CGExprAgg.cpp
@@ -131,15 +131,12 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     EnsureDest(E->getType());
 
     if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
-      Address StoreDest = Dest.getAddress();
-      // The emitted value is guaranteed to have the same size as the
-      // destination but can have a different type. Just do a bitcast in this
-      // case to avoid incorrect GEPs.
-      if (Result->getType() != StoreDest.getType())
-        StoreDest = StoreDest.withElementType(Result->getType());
-
-      CGF.EmitAggregateStore(Result, StoreDest,
-                             E->getType().isVolatileQualified());
+      CGF.CreateCoercedStore(
+          Result, Dest.getAddress(),
+          llvm::TypeSize::getFixed(
+              Dest.getPreferredSize(CGF.getContext(), E->getType())
+                  .getQuantity()),
+          E->getType().isVolatileQualified());
       return;
     }
     return Visit(E->getSubExpr());
@@ -2050,6 +2047,10 @@ CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) {
   if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
     return AggValueSlot::DoesNotOverlap;
 
+  // Empty fields can overlap earlier fields.
+  if (FD->getType()->getAsCXXRecordDecl()->isEmpty())
+    return AggValueSlot::MayOverlap;
+
   // If the field lies entirely within the enclosing class's nvsize, its tail
   // padding cannot overlap any already-initialized object. (The only subobjects
   // with greater addresses that might already be initialized are vbases.)
@@ -2072,6 +2073,10 @@ AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit(
   if (IsVirtual)
     return AggValueSlot::MayOverlap;
 
+  // Empty bases can overlap earlier bases.
+  if (BaseRD->isEmpty())
+    return AggValueSlot::MayOverlap;
+
   // If the base class is laid out entirely within the nvsize of the derived
   // class, its tail padding cannot yet be initialized, so we can issue
   // stores at the full width of the base class.
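The new MayOverlap cases matter for layouts like this sketch (the static_assert holds under the Itanium C++ ABI, where [[no_unique_address]] actually takes effect):

struct Empty {};

struct S {
  [[no_unique_address]] Empty e; // may share storage with `x`
  int x;
};

// Because `e` can overlap `x`, initializing `e` must not be widened into
// stores that clobber the neighbouring field.
static_assert(sizeof(S) == sizeof(int));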
2 changes: 1 addition & 1 deletion clang/lib/CodeGen/CGStmt.cpp
@@ -745,7 +745,7 @@ void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
     } break;
     case attr::CXXAssume: {
       const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
-      if (getLangOpts().CXXAssumptions &&
+      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
           !Assumption->HasSideEffects(getContext())) {
         llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
         Builder.CreateAssumption(AssumptionVal);
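A plausible reproducer for the new Builder.GetInsertBlock() guard (hand-written, so an assumption about the exact failure mode): after a terminator, codegen has no current insertion block, and lowering the assumption there would have crashed:

int f(int x) {
  return x;
  // Unreachable: there is no insertion block here, so the attribute is now
  // skipped instead of being lowered to llvm.assume.
  [[assume(x > 0)]];
}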
7 changes: 4 additions & 3 deletions clang/lib/CodeGen/CodeGenFunction.h
@@ -4838,9 +4838,10 @@ class CodeGenFunction : public CodeGenTypeCache {
   void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src,
                             ExprValueKind SrcKind);
 
-  /// Build all the stores needed to initialize an aggregate at Dest with the
-  /// value Val.
-  void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile);
+  /// Create a store to \arg DstPtr from \arg Src, truncating the stored value
+  /// to at most \arg DstSize bytes.
+  void CreateCoercedStore(llvm::Value *Src, Address Dst, llvm::TypeSize DstSize,
+                          bool DstIsVolatile);
 
   /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
   /// make sure it survives garbage collection until this point.
6 changes: 4 additions & 2 deletions clang/lib/Format/UnwrappedLineParser.cpp
@@ -570,7 +570,8 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
                                   NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
                                                    Keywords.kw_as));
           ProbablyBracedList =
-              ProbablyBracedList || (IsCpp && NextTok->is(tok::l_paren));
+              ProbablyBracedList || (IsCpp && (PrevTok->Tok.isLiteral() ||
+                                               NextTok->is(tok::l_paren)));
 
           // If there is a comma, semicolon or right paren after the closing
           // brace, we assume this is a braced initializer list.
@@ -609,8 +610,9 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
             ProbablyBracedList = NextTok->isNot(tok::l_square);
           }
 
-          // Cpp macro definition body containing nonempty braced list or block:
+          // Cpp macro definition body that is a nonempty braced list or block:
           if (IsCpp && Line->InMacroBody && PrevTok != FormatTok &&
+              !FormatTok->Previous && NextTok->is(tok::eof) &&
              // A statement can end with only `;` (simple statement), a block
              // closing brace (compound statement), or `:` (label statement).
              // If PrevTok is a block opening brace, Tok ends an empty block.
21 changes: 17 additions & 4 deletions clang/lib/Sema/SemaExpr.cpp
@@ -5430,11 +5430,24 @@ struct EnsureImmediateInvocationInDefaultArgs
 
   // Rewrite to source location to refer to the context in which they are used.
   ExprResult TransformSourceLocExpr(SourceLocExpr *E) {
-    if (E->getParentContext() == SemaRef.CurContext)
+    DeclContext *DC = E->getParentContext();
+    if (DC == SemaRef.CurContext)
       return E;
-    return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getType(),
-                                             E->getBeginLoc(), E->getEndLoc(),
-                                             SemaRef.CurContext);
+
+    // FIXME: During instantiation, because the rebuild of defaults arguments
+    // is not always done in the context of the template instantiator,
+    // we run the risk of producing a dependent source location
+    // that would never be rebuilt.
+    // This usually happens during overload resolution, or in contexts
+    // where the value of the source location does not matter.
+    // However, we should find a better way to deal with source location
+    // of function templates.
+    if (!SemaRef.CurrentInstantiationScope ||
+        !SemaRef.CurContext->isDependentContext() || DC->isDependentContext())
+      DC = SemaRef.CurContext;
+
+    return getDerived().RebuildSourceLocExpr(
+        E->getIdentKind(), E->getType(), E->getBeginLoc(), E->getEndLoc(), DC);
   }
 };
 
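A hand-written illustration of the class of bugs this targets (see the release-note entry for #GH106428 above): a source-location default argument in a template must be rebuilt in the caller's context during instantiation rather than left dependent:

#include <source_location>

template <typename T>
T tagged(T v, std::source_location loc = std::source_location::current()) {
  return v + static_cast<T>(loc.line());
}

int use() { return tagged(0); } // default argument rebuilt at this call site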
3 changes: 2 additions & 1 deletion clang/lib/Sema/SemaExprCXX.cpp
@@ -5140,7 +5140,8 @@ static bool HasNonDeletedDefaultedEqualityComparison(Sema &S,
 
   // const ClassT& obj;
   OpaqueValueExpr Operand(
-      {}, Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(),
+      KeyLoc,
+      Decl->getTypeForDecl()->getCanonicalTypeUnqualified().withConst(),
       ExprValueKind::VK_LValue);
   UnresolvedSet<16> Functions;
   // obj == obj;
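This helper backs Clang's __is_trivially_equality_comparable trait, so the synthesized obj == obj probe now carries the trait's location (KeyLoc) instead of an empty one. A hand-written use of the trait:

struct P {
  int x;
  bool operator==(const P &) const = default;
};

// Internally the trait builds an `obj == obj` comparison much like the
// OpaqueValueExpr in the diff above.
static_assert(__is_trivially_equality_comparable(P));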
1 change: 0 additions & 1 deletion clang/lib/Sema/SemaLambda.cpp
@@ -1318,7 +1318,6 @@ void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
 
     if (C->Init.isUsable()) {
       addInitCapture(LSI, cast<VarDecl>(Var), C->Kind == LCK_ByRef);
-      PushOnScopeChains(Var, CurScope, false);
     } else {
       TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef
                                                  : TryCapture_ExplicitByVal;
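A sketch of the situation the deleted call got wrong (my reading of the change, not the commit's own reproducer): the init-capture's variable belongs to the lambda's scope, and pushing it onto the enclosing scope as well could make it collide with a same-named declaration there, notably `_`:

void demo() {
  int _ = 0; // enclosing-scope placeholder variable
  // The capture variable below must not be re-registered in demo()'s scope.
  auto get = [_ = 42] { return _; };
  (void)_;
  (void)get();
}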