-
Notifications
You must be signed in to change notification settings - Fork 12.3k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[RISCV] Implement RISCVInstrInfo::getMemOperandsWithOffsetWidth #73681
Conversation
@llvm/pr-subscribers-backend-risc-v Author: Alex Bradbury (asb). Changes: This hook is called by the default implementation of getMemOperandWithOffset and by the load/store clustering code in the MachineScheduler, though this isn't enabled by default and is not yet enabled for RISC-V. Only return true for queries on scalar loads/stores for now (this is a conservative starting point, and vector load/store can be handled in a follow-on patch). Full diff: https://github.com/llvm/llvm-project/pull/73681.diff — 3 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index c76a330b08aa8fd..6c8c5db37e0b2a1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2193,11 +2193,50 @@ MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
.setMIFlags(MemI.getFlags());
}
+// Returns true if LdSt is a scalar load/store whose base operand, byte Offset
+// and access Width can be determined, appending the base operand to BaseOps.
+// Vector loads/stores are conservatively rejected for now. Called by the
+// default getMemOperandWithOffset implementation and by the MachineScheduler
+// load/store clustering code.
+bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
+ const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
+ int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
+ const TargetRegisterInfo *TRI) const {
+ if (!LdSt.mayLoadOrStore())
+ return false;
+
+ // Conservatively, only handle scalar loads/stores for now.
+ switch (LdSt.getOpcode()) {
+ case RISCV::LB:
+ case RISCV::LBU:
+ case RISCV::SB:
+ case RISCV::LH:
+ case RISCV::LHU:
+ case RISCV::FLH:
+ case RISCV::SH:
+ case RISCV::FSH:
+ case RISCV::LW:
+ case RISCV::LWU:
+ case RISCV::FLW:
+ case RISCV::SW:
+ case RISCV::FSW:
+ case RISCV::LD:
+ case RISCV::FLD:
+ case RISCV::SD:
+ case RISCV::FSD:
+ break;
+ default: return false;
+ }
+ // Scalar accesses always have a fixed (non-scalable) offset; delegate the
+ // base/offset/width extraction to the single-operand helper below.
+ const MachineOperand *BaseOp;
+ OffsetIsScalable = false;
+ if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
+ return false;
+ BaseOps.push_back(BaseOp);
+ return true;
+}
+
// Set BaseReg (the base register operand), Offset (the byte offset being
// accessed) and the access Width of the passed instruction that reads/writes
// memory. Returns false if the instruction does not read/write memory or the
// BaseReg/Offset/Width can't be determined. Is not guaranteed to always
// recognise base operands and offsets in all cases.
+// TODO: Add an IsScalable bool ref argument (like the equivalent AArch64
+// function) and set it as appropriate.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
unsigned &Width, const TargetRegisterInfo *TRI) const {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 0eac8d1e1b1af2c..8f860077c303170 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -152,6 +152,11 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
MachineInstr *emitLdStWithAddr(MachineInstr &MemI,
const ExtAddrMode &AM) const override;
+ bool getMemOperandsWithOffsetWidth(
+ const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
+ int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
+ const TargetRegisterInfo *TRI) const override;
+
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
const MachineOperand *&BaseOp,
int64_t &Offset, unsigned &Width,
diff --git a/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp b/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
index fd5f17e0185e4ab..43e98d703d7a33a 100644
--- a/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
+++ b/llvm/unittests/Target/RISCV/RISCVInstrInfoTest.cpp
@@ -92,6 +92,69 @@ TEST_P(RISCVInstrInfoTest, IsAddImmediate) {
}
}
+// Checks that the hook recovers base register, offset and width for scalar
+// loads/stores, and rejects vector memory accesses and non-memory
+// instructions.
+TEST_P(RISCVInstrInfoTest, GetMemOperandsWithOffsetWidth) {
+ const RISCVInstrInfo *TII = ST->getInstrInfo();
+ const TargetRegisterInfo *TRI = ST->getRegisterInfo();
+ DebugLoc DL;
+
+ SmallVector<const MachineOperand *> BaseOps;
+ unsigned Width;
+ int64_t Offset;
+ bool OffsetIsScalable;
+
+ // Scalar integer load (LB): base, offset and width must be recovered.
+ auto MMO = MF->getMachineMemOperand(MachinePointerInfo(),
+ MachineMemOperand::MOLoad, 1, Align(1));
+ MachineInstr *MI = BuildMI(*MF, DL, TII->get(RISCV::LB), RISCV::X1)
+ .addReg(RISCV::X2)
+ .addImm(-128)
+ .addMemOperand(MMO)
+ .getInstr();
+ bool Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+ OffsetIsScalable, Width, TRI);
+ ASSERT_TRUE(Res);
+ ASSERT_EQ(BaseOps.size(), 1u);
+ ASSERT_TRUE(BaseOps.front()->isReg());
+ EXPECT_EQ(BaseOps.front()->getReg(), RISCV::X2);
+ EXPECT_EQ(Offset, -128);
+ EXPECT_FALSE(OffsetIsScalable);
+ EXPECT_EQ(Width, 1u);
+
+ // Scalar FP store (FSW): likewise handled.
+ BaseOps.clear();
+ MMO = MF->getMachineMemOperand(MachinePointerInfo(),
+ MachineMemOperand::MOStore, 4, Align(4));
+ MI = BuildMI(*MF, DL, TII->get(RISCV::FSW))
+ .addReg(RISCV::F3_F)
+ .addReg(RISCV::X3)
+ .addImm(36)
+ .addMemOperand(MMO);
+ Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+ OffsetIsScalable, Width, TRI);
+ ASSERT_TRUE(Res);
+ ASSERT_EQ(BaseOps.size(), 1u);
+ ASSERT_TRUE(BaseOps.front()->isReg());
+ EXPECT_EQ(BaseOps.front()->getReg(), RISCV::X3);
+ EXPECT_EQ(Offset, 36);
+ EXPECT_FALSE(OffsetIsScalable);
+ EXPECT_EQ(Width, 4u);
+
+ BaseOps.clear();
+ MMO = MF->getMachineMemOperand(MachinePointerInfo(),
+ MachineMemOperand::MOStore, 16, Align(16));
+ MI = BuildMI(*MF, DL, TII->get(RISCV::PseudoVLE32_V_M1), RISCV::V8)
+ .addReg(RISCV::X3)
+ .addMemOperand(MMO);
+ Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+ OffsetIsScalable, Width, TRI);
+ ASSERT_FALSE(Res); // Vector loads/stores are not handled for now.
+
+ // ADDI is not a memory access, so the hook must reject it.
+ BaseOps.clear();
+ MI = BuildMI(*MF, DL, TII->get(RISCV::ADDI), RISCV::X4)
+ .addReg(RISCV::X5)
+ .addImm(16);
+ Res = TII->getMemOperandsWithOffsetWidth(*MI, BaseOps, Offset,
+ OffsetIsScalable, Width, TRI);
+ EXPECT_FALSE(Res);
+}
+
} // namespace
INSTANTIATE_TEST_SUITE_P(RV32And64, RISCVInstrInfoTest,
|
✅ With the latest revision this PR passed the C/C++ code formatter.
This hook is called by the default implementation of getMemOperandWithOffset and by the load/store clustering code in the MachineScheduler though this isn't enabled by default and is not yet enabled for RISC-V. Only return true for queries on scalar loads/stores for now (this is a conservative starting point, and vector load/store can be handled in a follow-on patch).
bcf7114
to
fc2f2e3
Compare
// Set BaseReg (the base register operand), Offset (the byte offset being
// accessed) and the access Width of the passed instruction that reads/writes
// memory. Returns false if the instruction does not read/write memory or the
// BaseReg/Offset/Width can't be determined. Is not guaranteed to always
// recognise base operands and offsets in all cases.
// TODO: Add an IsScalable bool ref argument (like the equivalent AArch64
// function) and set it as appropriate.
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Should we fold this into the new function and get rid of this signature? The default implementation of this signature calls the SmallVector version.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Nevermind this isn't an override. There are too many functions with this name.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM
Local branch amd-gfx 606d2d4 Merged main:81e3e7e5d455 into amd-gfx:207601c46e4e Remote branch main 9c5003c [RISCV] Implement RISCVInstrInfo::getMemOperandsWithOffsetWidth (llvm#73681)
This hook is called by the default implementation of getMemOperandWithOffset and by the load/store clustering code in the MachineScheduler though this isn't enabled by default and is not yet enabled for RISC-V. Only return true for queries on scalar loads/stores for now (this is a conservative starting point, and vector load/store can be handled in a follow-on patch).