 #include "clang/AST/GlobalDecl.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E,
     if (PtrTy->getPointeeType()->isVoidType())
       break;
     assert(!UnimplementedFeature::tbaa());
+
     LValueBaseInfo InnerBaseInfo;
     Address Addr = CGF.buildPointerWithAlignment(
         CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull);
@@ -211,13 +213,79 @@ static Address buildPointerWithAlignment(const Expr *E,
   return Address(CGF.buildScalarExpr(E), Align);
 }
 
+/// Helper method to check if the underlying ABI is AAPCS.
+static bool isAAPCS(const TargetInfo &TargetInfo) {
+  return TargetInfo.getABI().starts_with("aapcs");
+}
+
+Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
+                                                 const FieldDecl *field,
+                                                 unsigned index,
+                                                 unsigned size) {
+  if (index == 0)
+    return base.getAddress();
+
+  auto loc = getLoc(field->getLocation());
+  auto fieldType = builder.getUIntNTy(size);
+
+  auto fieldPtr =
+      mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
+  auto sea = getBuilder().createGetMember(loc, fieldPtr, base.getPointer(),
+                                          field->getName(), index);
+
+  return Address(sea, CharUnits::One());
+}
+
+static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
+                                   const CIRGenBitFieldInfo &info,
+                                   const FieldDecl *field) {
+  return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
+         info.VolatileStorageSize != 0 &&
+         field->getType()
+             .withCVRQualifiers(base.getVRQualifiers())
+             .isVolatileQualified();
+}
+
+LValue CIRGenFunction::buildLValueForBitField(LValue base,
+                                              const FieldDecl *field) {
+  LValueBaseInfo BaseInfo = base.getBaseInfo();
+  const RecordDecl *rec = field->getParent();
+  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
+  auto &info = layout.getBitFieldInfo(field);
+  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
+  unsigned Idx = layout.getCIRFieldNo(field);
+
+  if (useVolatile ||
+      (IsInPreservedAIRegion ||
+       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
+    llvm_unreachable("NYI");
+  }
+
+  const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;
+  Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS);
+
+  // Get the access type.
+  mlir::Type FieldIntTy = builder.getUIntNTy(SS);
+
+  auto loc = getLoc(field->getLocation());
+  if (Addr.getElementType() != FieldIntTy)
+    Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);
+
+  QualType fieldType =
+      field->getType().withCVRQualifiers(base.getVRQualifiers());
+
+  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
+  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
+  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
+}
+
 LValue CIRGenFunction::buildLValueForField(LValue base,
                                            const FieldDecl *field) {
   LValueBaseInfo BaseInfo = base.getBaseInfo();
 
-  if (field->isBitField()) {
-    llvm_unreachable("NYI");
-  }
+  if (field->isBitField())
+    return buildLValueForBitField(base, field);
 
   // Fields of may-alias structures are may-alias themselves.
   // FIXME: this should get propagated down through anonymous structs and unions.
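
Note for readers: each bit-field is described by a CIRGenBitFieldInfo record (Offset, Size, StorageSize, IsSigned, plus the Volatile* variants consulted on AAPCS targets), and getAddrOfBitFieldStorage materializes a pointer to the storage unit that holds the field. A hypothetical layout, with values assumed for illustration rather than taken from this patch:

    // Three bit-fields packed into one 32-bit storage unit (assumed layout).
    struct S {
      int a : 4;       // Offset = 0,  Size = 4,  StorageSize = 32, IsSigned = 1
      int b : 11;      // Offset = 4,  Size = 11, StorageSize = 32, IsSigned = 1
      unsigned c : 17; // Offset = 15, Size = 17, StorageSize = 32, IsSigned = 0
    };
    // buildLValueForBitField(base, b) then yields an LValue addressing the
    // whole 32-bit unit, with the Offset/Size metadata above attached.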
@@ -518,12 +586,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
 /// method emits the address of the lvalue, then loads the result as an rvalue,
 /// returning the rvalue.
 RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
-  assert(LV.isSimple() && "not implemented");
   assert(!LV.getType()->isFunctionType());
   assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");
 
-  // Everything needs a load.
-  return RValue::get(buildLoadOfScalar(LV, Loc));
+  if (LV.isBitField())
+    return buildLoadOfBitfieldLValue(LV, Loc);
+
+  if (LV.isSimple())
+    return RValue::get(buildLoadOfScalar(LV, Loc));
+  llvm_unreachable("NYI");
+}
+
+RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
+                                                 SourceLocation Loc) {
+  const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();
+
+  // Get the output type.
+  mlir::Type ResLTy = convertType(LV.getType());
+  Address Ptr = LV.getBitFieldAddress();
+  mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
+  auto ValWidth = Val.getType().cast<IntType>().getWidth();
+
+  bool UseVolatile = LV.isVolatileQualified() &&
+                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+  const unsigned StorageSize =
+      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+
+  if (Info.IsSigned) {
+    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
+
+    mlir::Type typ = builder.getSIntNTy(ValWidth);
+    Val = builder.createIntCast(Val, typ);
+
+    unsigned HighBits = StorageSize - Offset - Info.Size;
+    if (HighBits)
+      Val = builder.createShiftLeft(Val, HighBits);
+    if (Offset + HighBits)
+      Val = builder.createShiftRight(Val, Offset + HighBits);
+  } else {
+    if (Offset)
+      Val = builder.createShiftRight(Val, Offset);
+
+    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
+      Val = builder.createAnd(Val,
+                              llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
+  }
+  Val = builder.createIntCast(Val, ResLTy);
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
+  return RValue::get(Val);
 }
 
 void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
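
The signed branch of buildLoadOfBitfieldLValue is the classic shift-left/arithmetic-shift-right extraction. A scalar sketch of the same arithmetic, assuming the illustrative field from above (11 bits at offset 4 of a 32-bit unit) and two's-complement shift semantics:

    #include <cstdint>

    // Models the emitted load sequence; Offset/Size/StorageSize are assumed.
    int32_t loadSignedBitfield(uint32_t storage) {
      const unsigned Offset = 4, Size = 11, StorageSize = 32;
      unsigned highBits = StorageSize - Offset - Size; // bits above the field
      // Shift left so the field's top bit lands in the sign bit...
      int32_t val = static_cast<int32_t>(storage << highBits);
      // ...then an arithmetic right shift sign-extends and aligns to bit 0.
      return val >> (Offset + highBits);
    }

The unsigned branch needs only the right shift by Offset plus the getLowBitsSet mask.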
@@ -546,6 +657,83 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
   buildStoreOfScalar(Src.getScalarVal(), Dst);
 }
 
+void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                                     mlir::Value &Result) {
+  const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo();
+  mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType());
+  Address Ptr = Dst.getBitFieldAddress();
+
+  // Get the source value, truncated to the width of the bit-field.
+  mlir::Value SrcVal = Src.getScalarVal();
+
+  // Cast the source to the storage type and shift it into place.
+  SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType());
+  auto SrcWidth = SrcVal.getType().cast<IntType>().getWidth();
+  mlir::Value MaskedVal = SrcVal;
+
+  const bool UseVolatile =
+      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
+      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+  const unsigned StorageSize =
+      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+  // See if there are other bits in the bit-field's storage we'll need to load
+  // and mask together with the source before storing.
+  if (StorageSize != Info.Size) {
+    assert(StorageSize > Info.Size && "Invalid bitfield size.");
+
+    mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc());
+
+    // Mask the source value as needed.
+    if (!hasBooleanRepresentation(Dst.getType()))
+      SrcVal = builder.createAnd(
+          SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size));
+
+    MaskedVal = SrcVal;
+    if (Offset)
+      SrcVal = builder.createShiftLeft(SrcVal, Offset);
+
+    // Mask out the original value.
+    Val = builder.createAnd(
+        Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size));
+
+    // Or together the unchanged values and the source value.
+    SrcVal = builder.createOr(Val, SrcVal);
+  } else {
+    // According to the AAPCS:
+    // When a volatile bit-field is written, and its container does not overlap
+    // with any non-bit-field member, its container must be read exactly once
+    // and written exactly once using the access width appropriate to the type
+    // of the container. The two accesses are not atomic.
+    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
+        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
+      llvm_unreachable("volatile bit-field is not implemented for the AAPCS");
+  }
+
+  // Write the new value back out.
+  // TODO: constant matrix type, volatile, no init, non temporal, TBAA
+  buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(),
+                     Dst.getBaseInfo(), false, false);
+
+  // Return the new value of the bit-field.
+  mlir::Value ResultVal = MaskedVal;
+  ResultVal = builder.createIntCast(ResultVal, ResLTy);
+
+  // Sign extend the value if needed.
+  if (Info.IsSigned) {
+    assert(Info.Size <= StorageSize);
+    unsigned HighBits = StorageSize - Info.Size;
+
+    if (HighBits) {
+      ResultVal = builder.createShiftLeft(ResultVal, HighBits);
+      ResultVal = builder.createShiftRight(ResultVal, HighBits);
+    }
+  }
+
+  Result = buildFromMemory(ResultVal, Dst.getType());
+}
+
 static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
                                        const VarDecl *VD) {
   QualType T = E->getType();
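
When StorageSize != Info.Size, the store is a read-modify-write over the whole storage unit: truncate the source, shift it into place, clear the field's bits in the loaded value, and OR the two together. A scalar sketch with the same assumed field parameters as above:

    #include <cstdint>

    // Models the emitted mask-and-merge; Offset/Size are assumed values.
    uint32_t storeBitfield(uint32_t storage, int32_t v) {
      const unsigned Offset = 4, Size = 11;
      uint32_t low = (1u << Size) - 1;                   // APInt::getLowBitsSet
      uint32_t src = (static_cast<uint32_t>(v) & low) << Offset;
      uint32_t mask = low << Offset;                     // APInt::getBitsSet
      return (storage & ~mask) | src; // keep neighboring bits, merge the field
    }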
@@ -769,7 +957,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
     LValue LV = buildLValue(E->getLHS());
 
     SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())};
-    buildStoreThroughLValue(RV, LV);
+    if (LV.isBitField()) {
+      mlir::Value result;
+      buildStoreThroughBitfieldLValue(RV, LV, result);
+    } else {
+      buildStoreThroughLValue(RV, LV);
+    }
+
     assert(!getContext().getLangOpts().OpenMP &&
            "last priv cond not implemented");
     return LV;
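
Threading result out of buildStoreThroughBitfieldLValue matters because in C and C++ an assignment expression evaluates to the bit-field's new value after truncation (and sign extension for signed fields), not to the raw right-hand side. A small illustration, assuming a typical two's-complement target:

    struct S { int f : 3; };
    S s{};
    // 5 is 0b101; stored into a signed 3-bit field it reads back as -3,
    // and -3 is also the value of the assignment expression itself.
    int r = (s.f = 5); // r == -3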
@@ -2205,6 +2399,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
 
 mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
+  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                           lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(),
+                           lvalue.isNontemporal());
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
+                                              mlir::Location Loc) {
   return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                            lvalue.getType(), Loc, lvalue.getBaseInfo(),
                            lvalue.isNontemporal());
@@ -2222,6 +2423,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty, SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               bool isNontemporal) {
+  return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo,
+                           isNontemporal);
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
+                                              QualType Ty, mlir::Location Loc,
+                                              LValueBaseInfo BaseInfo,
+                                              bool isNontemporal) {
   // TODO(CIR): this has fallen out of sync with codegen
 
   // Atomic operations have to be done on integral types
@@ -2231,15 +2440,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
   }
 
   mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
-      getLoc(Loc), Addr.getElementType(), Addr.getPointer());
+      Loc, Addr.getElementType(), Addr.getPointer());
 
   if (isNontemporal) {
     llvm_unreachable("NYI");
   }
-
-  // TODO: TBAA
-
-  // TODO: buildScalarRangeCheck
+
+  assert(!UnimplementedFeature::tbaa() && "NYI");
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
 
   return buildFromMemory(Load, Ty);
 }