[AMDGPU][Backend] Fix use-after-free in AMDGPUReleaseVGPRs::isLastVGPRUseVMEMStore

Reviewed By: jpages, arsenm

Differential Revision: https://reviews.llvm.org/D134641
jmmartinez committed Oct 19, 2022
1 parent dbe60ca commit bb24b2c
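
The use-after-free is visible in the removed lines below: isLastVGPRUseVMEMStore captured a reference into its DenseMap cache (bool &CacheEntry = It->second;) and then recursed into predecessor blocks, where each recursive call could insert into the same map. llvm::DenseMap may grow and rehash on insertion, which invalidates references into its storage, so the trailing `return CacheEntry = ...` could write through a dangling reference. A minimal sketch of that hazard, not taken from the commit, with `visit` as a hypothetical stand-in for the recursive query:

#include "llvm/ADT/DenseMap.h"

// Distilled hazard pattern (hypothetical, not the commit's code): a reference
// into a DenseMap is kept alive across a recursive call that inserts into the
// same map.
static bool visit(int Node, llvm::DenseMap<int, bool> &Cache) {
  auto [It, Inserted] = Cache.insert({Node, false});
  bool &CacheEntry = It->second; // points into the map's bucket storage
  if (!Inserted)
    return CacheEntry;
  // The recursive call may insert more entries. If the map grows and
  // rehashes, CacheEntry now dangles...
  bool Result = Node > 0 && visit(Node - 1, Cache);
  // ...and this write becomes a use-after-free.
  return CacheEntry = Result;
}

The fix removes the recursion altogether: the per-block answers are precomputed once into a BitVector indexed by block number, so later queries never mutate shared state.
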
Showing 1 changed file with 60 additions and 46 deletions.

llvm/lib/Target/AMDGPU/AMDGPUReleaseVGPRs.cpp
@@ -16,7 +16,7 @@
 #include "GCNSubtarget.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "SIDefines.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineOperand.h"
 using namespace llvm;
@@ -29,60 +29,76 @@ class AMDGPUReleaseVGPRs : public MachineFunctionPass {
 public:
   static char ID;
 
-  const SIInstrInfo *SII;
-  const SIRegisterInfo *TRI;
-
   AMDGPUReleaseVGPRs() : MachineFunctionPass(ID) {}
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesAll();
     MachineFunctionPass::getAnalysisUsage(AU);
   }
 
-  // Used to cache the result of isLastInstructionVMEMStore for each block
-  using BlockVMEMStoreType = DenseMap<MachineBasicBlock *, bool>;
-  BlockVMEMStoreType BlockVMEMStore;
-
-  // Return true if the last instruction referencing a vgpr in this MBB
-  // is a VMEM store, otherwise return false.
-  // Visit previous basic blocks to find this last instruction if needed.
-  // Because this pass is late in the pipeline, it is expected that the
+  // Track if the last instruction referencing a vgpr in a MBB is a VMEM
+  // store. Because this pass is late in the pipeline, it is expected that the
   // last vgpr use will likely be one of vmem store, ds, exp.
   // Loads and others vgpr operations would have been
   // deleted by this point, except for complex control flow involving loops.
   // This is why we are just testing the type of instructions rather
   // than the operands.
-  bool isLastVGPRUseVMEMStore(MachineBasicBlock &MBB) {
-    // Use the cache to break infinite loop and save some time. Initialize to
-    // false in case we have a cycle.
-    BlockVMEMStoreType::iterator It;
-    bool Inserted;
-    std::tie(It, Inserted) = BlockVMEMStore.insert({&MBB, false});
-    bool &CacheEntry = It->second;
-    if (!Inserted)
-      return CacheEntry;
-
-    for (auto &MI : reverse(MBB.instrs())) {
-      // If it's a VMEM store, a vgpr will be used, return true.
-      if ((SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI)) && MI.mayStore())
-        return CacheEntry = true;
-
-      // If it's referencing a VGPR but is not a VMEM store, return false.
-      if (SIInstrInfo::isDS(MI) || SIInstrInfo::isEXP(MI) ||
-          SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI) ||
-          SIInstrInfo::isVALU(MI))
-        return CacheEntry = false;
+  class LastVGPRUseIsVMEMStore {
+    BitVector BlockVMEMStore;
+
+    static Optional<bool> lastVGPRUseIsStore(const MachineBasicBlock &MBB) {
+      for (auto &MI : reverse(MBB.instrs())) {
+        // If it's a VMEM store, a VGPR will be used, return true.
+        if ((SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI)) &&
+            MI.mayStore())
+          return true;
+
+        // If it's referencing a VGPR but is not a VMEM store, return false.
+        if (SIInstrInfo::isDS(MI) || SIInstrInfo::isEXP(MI) ||
+            SIInstrInfo::isVMEM(MI) || SIInstrInfo::isFLAT(MI) ||
+            SIInstrInfo::isVALU(MI))
+          return false;
+      }
+      // Wait until the values are propagated from the predecessors
+      return None;
     }
 
-    // Recursive call into parent blocks. Look into predecessors if there is no
-    // vgpr used in this block.
-    return CacheEntry = llvm::any_of(MBB.predecessors(),
-                                     [this](MachineBasicBlock *Parent) {
-                                       return isLastVGPRUseVMEMStore(*Parent);
-                                     });
-  }
+  public:
+    LastVGPRUseIsVMEMStore(const MachineFunction &MF)
+        : BlockVMEMStore(MF.getNumBlockIDs()) {
+
+      df_iterator_default_set<const MachineBasicBlock *> Visited;
+      SmallVector<const MachineBasicBlock *> EndWithVMEMStoreBlocks;
+
+      for (const auto &MBB : MF) {
+        auto LastUseIsStore = lastVGPRUseIsStore(MBB);
+        if (!LastUseIsStore.has_value())
+          continue;
+
+        if (*LastUseIsStore) {
+          EndWithVMEMStoreBlocks.push_back(&MBB);
+        } else {
+          Visited.insert(&MBB);
+        }
+      }
+
+      for (const auto *MBB : EndWithVMEMStoreBlocks) {
+        for (const auto *Succ : depth_first_ext(MBB, Visited)) {
+          BlockVMEMStore[Succ->getNumber()] = true;
+        }
+      }
+    }
+
+    // Return true if the last instruction referencing a vgpr in this MBB
+    // is a VMEM store, otherwise return false.
+    bool isLastVGPRUseVMEMStore(const MachineBasicBlock &MBB) const {
+      return BlockVMEMStore[MBB.getNumber()];
+    }
+  };
 
-  bool runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+  static bool
+  runOnMachineBasicBlock(MachineBasicBlock &MBB, const SIInstrInfo *SII,
+                         const LastVGPRUseIsVMEMStore &BlockVMEMStore) {
 
     bool Changed = false;
 
@@ -93,7 +109,7 @@ class AMDGPUReleaseVGPRs : public MachineFunctionPass {
         // If the last instruction using a VGPR in the block is a VMEM store,
         // release VGPRs. The VGPRs release will be placed just before ending
         // the program
-        if (isLastVGPRUseVMEMStore(MBB)) {
+        if (BlockVMEMStore.isLastVGPRUseVMEMStore(MBB)) {
           BuildMI(MBB, MI, DebugLoc(), SII->get(AMDGPU::S_SENDMSG))
               .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
           Changed = true;
@@ -117,16 +133,14 @@ class AMDGPUReleaseVGPRs : public MachineFunctionPass {
     LLVM_DEBUG(dbgs() << "AMDGPUReleaseVGPRs running on " << MF.getName()
                       << "\n");
 
-    SII = ST.getInstrInfo();
-    TRI = ST.getRegisterInfo();
+    const SIInstrInfo *SII = ST.getInstrInfo();
+    LastVGPRUseIsVMEMStore BlockVMEMStore(MF);
 
     bool Changed = false;
     for (auto &MBB : MF) {
-      Changed |= runOnMachineBasicBlock(MBB);
+      Changed |= runOnMachineBasicBlock(MBB, SII, BlockVMEMStore);
     }
 
-    BlockVMEMStore.clear();
-
     return Changed;
   }
 };
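
The replacement computes all the answers in one forward pass over the CFG: each block is classified once (definitely ends with a VMEM store, definitely does not, or has no VGPR use and defers to its predecessors), the definite-"no" blocks are pre-inserted into the DFS visited set so they act as barriers, and a depth-first walk from each store-ending block marks everything it can reach. Below is a simplified, self-contained rendering of that propagation step, using std containers in place of LLVM's BitVector, SmallVector, and depth_first_ext; Block, Succs, propagate, and Classify are illustrative names, not LLVM API:

#include <optional>
#include <vector>

// Illustrative stand-in for MachineBasicBlock and its successor list.
struct Block {
  int Number;
  std::vector<Block *> Succs;
};

// Returns, for each block number, whether the last VGPR use reaching the end
// of that block is a VMEM store. Classify mirrors lastVGPRUseIsStore:
// true/false if the block decides on its own, nullopt if it has no VGPR use
// and inherits the answer from its predecessors.
std::vector<bool> propagate(const std::vector<Block *> &Blocks,
                            std::optional<bool> (*Classify)(const Block &)) {
  std::vector<bool> LastUseIsStore(Blocks.size(), false);
  std::vector<bool> Visited(Blocks.size(), false);
  std::vector<Block *> Seeds;

  for (Block *B : Blocks) {
    std::optional<bool> C = Classify(*B);
    if (!C)
      continue; // undecided: settled by whatever propagates in
    if (*C)
      Seeds.push_back(B); // ends in a VMEM store: start propagation here
    else
      Visited[B->Number] = true; // definite "no": acts as a barrier
  }

  // Depth-first propagation from each store-ending block, mirroring
  // depth_first_ext with its pre-filled external visited set: a block is
  // marked true when some path from a store-ending block reaches it without
  // crossing a block whose own last VGPR use is not a store.
  for (Block *Seed : Seeds) {
    std::vector<Block *> Stack{Seed};
    while (!Stack.empty()) {
      Block *B = Stack.back();
      Stack.pop_back();
      if (Visited[B->Number])
        continue;
      Visited[B->Number] = true;
      LastUseIsStore[B->Number] = true;
      for (Block *S : B->Succs)
        Stack.push_back(S);
    }
  }
  return LastUseIsStore;
}

Doing the propagation once up front is what lets isLastVGPRUseVMEMStore collapse to a single BitVector test, with no recursion and no mutation during queries.
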
