[MC/DC] Rework tvbitmap.update to get rid of the inlined function #110792

Merged (1 commit) on Oct 3, 2024
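This change drops the private [[alwaysinline]] rmw_or(ptr, i8) helper (createRMWOrFunc / getRMWOrCall) and instead emits the bitmap-byte update inline at each tvbitmap.update lowering site in lowerMCDCTestVectorBitmapUpdate, using SplitBlockAndInsertIfThen for the atomic path. As rough orientation only, a minimal sketch of the lowered sequence in the default (non-atomic) mode is shown below; %byteaddr and %bit are placeholder names for the computed bitmap byte address and the shifted bit value, not names the pass actually emits.

  ; Non-atomic mode: plain load / or / store on the profile bitmap byte.
  %mcdc.bits = load i8, ptr %byteaddr, align 1
  %updated = or i8 %mcdc.bits, %bit
  store i8 %updated, ptr %byteaddr, align 1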
110 changes: 33 additions & 77 deletions llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -267,10 +267,6 @@ class InstrLowerer final {
GlobalVariable *NamesVar = nullptr;
size_t NamesSize = 0;

/// The instance of [[alwaysinline]] rmw_or(ptr, i8).
/// This is name-insensitive.
Function *RMWOrFunc = nullptr;

// vector of counter load/store pairs to be register promoted.
std::vector<LoadStorePair> PromotionCandidates;

@@ -337,14 +333,6 @@ class InstrLowerer final {
StringRef Name,
GlobalValue::LinkageTypes Linkage);

/// Create [[alwaysinline]] rmw_or(ptr, i8).
/// This doesn't update `RMWOrFunc`.
Function *createRMWOrFunc();

/// Get the call to `rmw_or`.
/// Create the instance if it is unknown.
CallInst *getRMWOrCall(Value *Addr, Value *Val);

/// Compute the address of the test vector bitmap that this profiling
/// instruction acts on.
Value *getBitmapAddress(InstrProfMCDCTVBitmapUpdate *I);
@@ -1137,68 +1125,6 @@ Value *InstrLowerer::getCounterAddress(InstrProfCntrInstBase *I) {
return Builder.CreateIntToPtr(Add, Addr->getType());
}

/// Create `void [[alwaysinline]] rmw_or(uint8_t *ArgAddr, uint8_t ArgVal)`
/// "Basic" sequence is `*ArgAddr |= ArgVal`
Function *InstrLowerer::createRMWOrFunc() {
auto &Ctx = M.getContext();
auto *Int8Ty = Type::getInt8Ty(Ctx);
Function *Fn = Function::Create(
FunctionType::get(Type::getVoidTy(Ctx),
{PointerType::getUnqual(Ctx), Int8Ty}, false),
Function::LinkageTypes::PrivateLinkage, "rmw_or", M);
Fn->addFnAttr(Attribute::AlwaysInline);
auto *ArgAddr = Fn->getArg(0);
auto *ArgVal = Fn->getArg(1);
IRBuilder<> Builder(BasicBlock::Create(Ctx, "", Fn));

// Load profile bitmap byte.
// %mcdc.bits = load i8, ptr %4, align 1
auto *Bitmap = Builder.CreateLoad(Int8Ty, ArgAddr, "mcdc.bits");

if (Options.Atomic || AtomicCounterUpdateAll) {
// If ((Bitmap & Val) != Val), then execute atomic (Bitmap |= Val).
// Note, just-loaded Bitmap might not be up-to-date. Use it just for
// early testing.
auto *Masked = Builder.CreateAnd(Bitmap, ArgVal);
auto *ShouldStore = Builder.CreateICmpNE(Masked, ArgVal);
auto *ThenTerm = BasicBlock::Create(Ctx, "", Fn);
auto *ElseTerm = BasicBlock::Create(Ctx, "", Fn);
// Assume updating will be rare.
auto *Unlikely = MDBuilder(Ctx).createUnlikelyBranchWeights();
Builder.CreateCondBr(ShouldStore, ThenTerm, ElseTerm, Unlikely);

IRBuilder<> ThenBuilder(ThenTerm);
ThenBuilder.CreateAtomicRMW(AtomicRMWInst::Or, ArgAddr, ArgVal,
MaybeAlign(), AtomicOrdering::Monotonic);
ThenBuilder.CreateRetVoid();

IRBuilder<> ElseBuilder(ElseTerm);
ElseBuilder.CreateRetVoid();

return Fn;
}

// Perform logical OR of profile bitmap byte and shifted bit offset.
// %8 = or i8 %mcdc.bits, %7
auto *Result = Builder.CreateOr(Bitmap, ArgVal);

// Store the updated profile bitmap byte.
// store i8 %8, ptr %3, align 1
Builder.CreateStore(Result, ArgAddr);

// Terminator
Builder.CreateRetVoid();

return Fn;
}

CallInst *InstrLowerer::getRMWOrCall(Value *Addr, Value *Val) {
if (!RMWOrFunc)
RMWOrFunc = createRMWOrFunc();

return CallInst::Create(RMWOrFunc, {Addr, Val});
}

Value *InstrLowerer::getBitmapAddress(InstrProfMCDCTVBitmapUpdate *I) {
auto *Bitmaps = getOrCreateRegionBitmaps(I);
if (!isRuntimeCounterRelocationEnabled())
@@ -1291,9 +1217,10 @@ void InstrLowerer::lowerCoverageData(GlobalVariable *CoverageNamesVar) {

void InstrLowerer::lowerMCDCTestVectorBitmapUpdate(
InstrProfMCDCTVBitmapUpdate *Update) {
auto &Ctx = M.getContext();
IRBuilder<> Builder(Update);
auto *Int8Ty = Type::getInt8Ty(M.getContext());
auto *Int32Ty = Type::getInt32Ty(M.getContext());
auto *Int8Ty = Type::getInt8Ty(Ctx);
auto *Int32Ty = Type::getInt32Ty(Ctx);
auto *MCDCCondBitmapAddr = Update->getMCDCCondBitmapAddr();
auto *BitmapAddr = getBitmapAddress(Update);

@@ -1321,7 +1248,36 @@ void InstrLowerer::lowerMCDCTestVectorBitmapUpdate(
// %7 = shl i8 1, %6
auto *ShiftedVal = Builder.CreateShl(Builder.getInt8(0x1), BitToSet);

Builder.Insert(getRMWOrCall(BitmapByteAddr, ShiftedVal));
// Load profile bitmap byte.
// %mcdc.bits = load i8, ptr %4, align 1
auto *Bitmap = Builder.CreateLoad(Int8Ty, BitmapByteAddr, "mcdc.bits");

if (Options.Atomic || AtomicCounterUpdateAll) {
// If ((Bitmap & Val) != Val), then execute atomic (Bitmap |= Val).
// Note, just-loaded Bitmap might not be up-to-date. Use it just for
// early testing.
auto *Masked = Builder.CreateAnd(Bitmap, ShiftedVal);
auto *ShouldStore = Builder.CreateICmpNE(Masked, ShiftedVal);

// Assume updating will be rare.
auto *Unlikely = MDBuilder(Ctx).createUnlikelyBranchWeights();
Instruction *ThenBranch =
SplitBlockAndInsertIfThen(ShouldStore, Update, false, Unlikely);

// Execute if (unlikely(ShouldStore)).
Builder.SetInsertPoint(ThenBranch);
Builder.CreateAtomicRMW(AtomicRMWInst::Or, BitmapByteAddr, ShiftedVal,
MaybeAlign(), AtomicOrdering::Monotonic);
} else {
// Perform logical OR of profile bitmap byte and shifted bit offset.
// %8 = or i8 %mcdc.bits, %7
auto *Result = Builder.CreateOr(Bitmap, ShiftedVal);

// Store the updated profile bitmap byte.
// store i8 %8, ptr %3, align 1
Builder.CreateStore(Result, BitmapByteAddr);
}

Update->eraseFromParent();
}

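When atomic updates are requested (Options.Atomic or AtomicCounterUpdateAll), the inlined sequence first tests the just-loaded byte and only performs an atomicrmw when some bit is actually missing, under unlikely branch weights. An approximate IR sketch of that shape follows, again with placeholder value and label names; the FileCheck lines in the updated test below pin down the exact output.

  ; Atomic mode: test the loaded byte first, then atomicrmw only if a bit is missing.
  %mcdc.bits = load i8, ptr %byteaddr, align 1
  %masked = and i8 %mcdc.bits, %bit
  %shouldstore = icmp ne i8 %masked, %bit
  br i1 %shouldstore, label %update, label %cont, !prof !unlikely
  update:
    %old = atomicrmw or ptr %byteaddr, i8 %bit monotonic, align 1
    br label %cont
  cont:
    ; rest of the function continues here; !unlikely stands for the
    ; branch_weights metadata from createUnlikelyBranchWeights().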
28 changes: 12 additions & 16 deletions llvm/test/Instrumentation/InstrProfiling/mcdc.ll
@@ -36,25 +36,21 @@ entry:
; CHECK-NEXT: %[[LAB8:[0-9]+]] = and i32 %[[TEMP]], 7
; CHECK-NEXT: %[[LAB9:[0-9]+]] = trunc i32 %[[LAB8]] to i8
; CHECK-NEXT: %[[LAB10:[0-9]+]] = shl i8 1, %[[LAB9]]
; CHECK-NEXT: call void @[[RMW_OR:.+]](ptr %[[LAB7]], i8 %[[LAB10]])
; CHECK-NEXT: %[[BITS:.+]] = load i8, ptr %[[LAB7]], align 1
; ATOMIC-NEXT: %[[MASKED:.+]] = and i8 %[[BITS]], %[[LAB10]]
; ATOMIC-NEXT: %[[SHOULDWRITE:.+]] = icmp ne i8 %[[MASKED]], %[[LAB10]]
; ATOMIC-NEXT: br i1 %[[SHOULDWRITE]], label %[[WRITE:.+]], label %[[SKIP:.+]], !prof ![[MDPROF:[0-9]+]]
; ATOMIC: [[WRITE]]:
; BASIC-NEXT: %[[LAB11:[0-9]+]] = or i8 %[[BITS]], %[[LAB10]]
; RELOC-NEXT: %[[LAB11:[0-9]+]] = or i8 %[[BITS]], %[[LAB10]]
; BASIC-NEXT: store i8 %[[LAB11]], ptr %[[LAB7]], align 1
; RELOC-NEXT: store i8 %[[LAB11]], ptr %[[LAB7]], align 1
; ATOMIC-NEXT: %{{.+}} = atomicrmw or ptr %[[LAB7]], i8 %[[LAB10]] monotonic, align 1
; ATOMIC: [[SKIP]]:
ret void
; CHECK-NEXT: ret void
}

; CHECK: define private void @[[RMW_OR]](ptr %[[ARGPTR:.+]], i8 %[[ARGVAL:.+]])
; CHECK: %[[BITS:.+]] = load i8, ptr %[[ARGPTR]], align 1
; BASIC-NEXT: %[[LAB11:[0-9]+]] = or i8 %[[BITS]], %[[ARGVAL]]
; RELOC-NEXT: %[[LAB11:[0-9]+]] = or i8 %[[BITS]], %[[ARGVAL]]
; BASIC-NEXT: store i8 %[[LAB11]], ptr %[[ARGPTR]], align 1
; RELOC-NEXT: store i8 %[[LAB11]], ptr %[[ARGPTR]], align 1
; ATOMIC-NEXT: %[[MASKED:.+]] = and i8 %[[BITS]], %[[ARGVAL]]
; ATOMIC-NEXT: %[[SHOULDWRITE:.+]] = icmp ne i8 %[[MASKED]], %[[ARGVAL]]
; ATOMIC-NEXT: br i1 %[[SHOULDWRITE]], label %[[WRITE:.+]], label %[[SKIP:.+]], !prof ![[MDPROF:[0-9]+]]
; ATOMIC: [[WRITE]]:
; ATOMIC-NEXT: %{{.+}} = atomicrmw or ptr %[[ARGPTR]], i8 %[[ARGVAL]] monotonic, align 1
; ATOMIC-NEXT: ret void
; ATOMIC: [[SKIP]]:
; CHECK-NEXT: ret void

; ATOMIC: ![[MDPROF]] = !{!"branch_weights", i32 1, i32 1048575}

declare void @llvm.instrprof.cover(ptr, i64, i32, i32)