[NFC][AMDGPU] clang-format AMDGPUBaseInfo.[h,cpp]
#133559
Merged
+153
−164
Conversation
@llvm/pr-subscribers-backend-amdgpu

Author: Shilei Tian (shiltian)

Changes

Patch is 27.82 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/133559.diff

2 Files Affected:
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 1c777e235fb60..c097166e9a205 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -247,7 +247,6 @@ unsigned getMultigridSyncArgImplicitArgPosition(unsigned CodeObjectVersion) {
}
}
-
// FIXME: All such magic numbers about the ABI should be in a
// central TD file.
unsigned getHostcallImplicitArgPosition(unsigned CodeObjectVersion) {
@@ -296,8 +295,8 @@ unsigned getCompletionActionImplicitArgPosition(unsigned CodeObjectVersion) {
int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
unsigned VDataDwords, unsigned VAddrDwords) {
- const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
- VDataDwords, VAddrDwords);
+ const MIMGInfo *Info =
+ getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding, VDataDwords, VAddrDwords);
return Info ? Info->Opcode : -1;
}
@@ -460,7 +459,8 @@ int getMTBUFBaseOpcode(unsigned Opc) {
}
int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
- const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
+ const MTBUFInfo *Info =
+ getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
return Info ? Info->Opcode : -1;
}
@@ -490,7 +490,8 @@ int getMUBUFBaseOpcode(unsigned Opc) {
}
int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
- const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
+ const MUBUFInfo *Info =
+ getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
return Info ? Info->Opcode : -1;
}
@@ -924,9 +925,8 @@ std::string AMDGPUTargetID::toString() const {
auto TargetTriple = STI.getTargetTriple();
auto Version = getIsaVersion(STI.getCPU());
- StreamRep << TargetTriple.getArchName() << '-'
- << TargetTriple.getVendorName() << '-'
- << TargetTriple.getOSName() << '-'
+ StreamRep << TargetTriple.getArchName() << '-' << TargetTriple.getVendorName()
+ << '-' << TargetTriple.getOSName() << '-'
<< TargetTriple.getEnvironmentName() << '-';
std::string Processor;
@@ -1020,9 +1020,7 @@ unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
return std::min(MaxWaves / N, MaxBarriers);
}
-unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
- return 1;
-}
+unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) { return 1; }
unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
// FIXME: Need to take scratch memory into account.
@@ -1039,9 +1037,7 @@ unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
getEUsPerCU(STI));
}
-unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
- return 1;
-}
+unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) { return 1; }
unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
// Some subtargets allow encoding 2048, but this isn't tested or supported.
@@ -1062,9 +1058,7 @@ unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
return 8;
}
-unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
- return 8;
-}
+unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) { return 8; }
unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
IsaVersion Version = getIsaVersion(STI->getCPU());
@@ -1169,9 +1163,9 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
if (STI->getFeatureBits().test(FeatureDynamicVGPR))
return STI->getFeatureBits().test(FeatureDynamicVGPRBlockSize32) ? 32 : 16;
- bool IsWave32 = EnableWavefrontSize32 ?
- *EnableWavefrontSize32 :
- STI->getFeatureBits().test(FeatureWavefrontSize32);
+ bool IsWave32 = EnableWavefrontSize32
+ ? *EnableWavefrontSize32
+ : STI->getFeatureBits().test(FeatureWavefrontSize32);
if (STI->getFeatureBits().test(Feature1_5xVGPRs))
return IsWave32 ? 24 : 12;
@@ -1187,9 +1181,9 @@ unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
if (STI->getFeatureBits().test(FeatureGFX90AInsts))
return 8;
- bool IsWave32 = EnableWavefrontSize32 ?
- *EnableWavefrontSize32 :
- STI->getFeatureBits().test(FeatureWavefrontSize32);
+ bool IsWave32 = EnableWavefrontSize32
+ ? *EnableWavefrontSize32
+ : STI->getFeatureBits().test(FeatureWavefrontSize32);
return IsWave32 ? 8 : 4;
}
@@ -1286,8 +1280,8 @@ unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
assert(WavesPerEU != 0);
- unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
- getVGPRAllocGranule(STI));
+ unsigned MaxNumVGPRs =
+ alignDown(getTotalNumVGPRs(STI) / WavesPerEU, getVGPRAllocGranule(STI));
unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}
@@ -1509,8 +1503,8 @@ unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
getLgkmcntBitWidth(Version.Major));
}
-void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
- unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
+void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt, unsigned &Vmcnt,
+ unsigned &Expcnt, unsigned &Lgkmcnt) {
Vmcnt = decodeVmcnt(Version, Waitcnt);
Expcnt = decodeExpcnt(Version, Waitcnt);
Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
@@ -1545,8 +1539,8 @@ unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
getLgkmcntBitWidth(Version.Major));
}
-unsigned encodeWaitcnt(const IsaVersion &Version,
- unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
+unsigned encodeWaitcnt(const IsaVersion &Version, unsigned Vmcnt,
+ unsigned Expcnt, unsigned Lgkmcnt) {
unsigned Waitcnt = getWaitcntBitMask(Version);
Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
@@ -1846,13 +1840,13 @@ struct ExpTgt {
};
static constexpr ExpTgt ExpTgtInfo[] = {
- {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
- {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
- {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
- {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
- {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
- {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
- {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
+ {{"null"}, ET_NULL, ET_NULL_MAX_IDX},
+ {{"mrtz"}, ET_MRTZ, ET_MRTZ_MAX_IDX},
+ {{"prim"}, ET_PRIM, ET_PRIM_MAX_IDX},
+ {{"mrt"}, ET_MRT0, ET_MRT_MAX_IDX},
+ {{"pos"}, ET_POS0, ET_POS_MAX_IDX},
+ {{"dual_src_blend"}, ET_DUAL_SRC_BLEND0, ET_DUAL_SRC_BLEND_MAX_IDX},
+ {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
};
bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
@@ -1985,7 +1979,7 @@ int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
}
StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
- if(isValidUnifiedFormat(Id, STI))
+ if (isValidUnifiedFormat(Id, STI))
return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
return "";
}
@@ -2066,9 +2060,9 @@ bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
case ID_GS_PreGFX11:
return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
case ID_GS_DONE_PreGFX11:
- return (OpId == OP_GS_NOP) ?
- (StreamId == STREAM_ID_NONE_) :
- (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
+ return (OpId == OP_GS_NOP)
+ ? (StreamId == STREAM_ID_NONE_)
+ : (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
}
}
return StreamId == STREAM_ID_NONE_;
@@ -2076,15 +2070,15 @@ bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
return MsgId == ID_SYSMSG ||
- (!isGFX11Plus(STI) &&
- (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
+ (!isGFX11Plus(STI) &&
+ (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
}
bool msgSupportsStream(int64_t MsgId, int64_t OpId,
const MCSubtargetInfo &STI) {
return !isGFX11Plus(STI) &&
- (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
- OpId != OP_GS_NOP;
+ (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
+ OpId != OP_GS_NOP;
}
void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
@@ -2099,9 +2093,7 @@ void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
}
}
-uint64_t encodeMsg(uint64_t MsgId,
- uint64_t OpId,
- uint64_t StreamId) {
+uint64_t encodeMsg(uint64_t MsgId, uint64_t OpId, uint64_t StreamId) {
return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
}
@@ -2127,19 +2119,19 @@ bool getHasDepthExport(const Function &F) {
}
bool isShader(CallingConv::ID cc) {
- switch(cc) {
- case CallingConv::AMDGPU_VS:
- case CallingConv::AMDGPU_LS:
- case CallingConv::AMDGPU_HS:
- case CallingConv::AMDGPU_ES:
- case CallingConv::AMDGPU_GS:
- case CallingConv::AMDGPU_PS:
- case CallingConv::AMDGPU_CS_Chain:
- case CallingConv::AMDGPU_CS_ChainPreserve:
- case CallingConv::AMDGPU_CS:
- return true;
- default:
- return false;
+ switch (cc) {
+ case CallingConv::AMDGPU_VS:
+ case CallingConv::AMDGPU_LS:
+ case CallingConv::AMDGPU_HS:
+ case CallingConv::AMDGPU_ES:
+ case CallingConv::AMDGPU_GS:
+ case CallingConv::AMDGPU_PS:
+ case CallingConv::AMDGPU_CS_Chain:
+ case CallingConv::AMDGPU_CS_ChainPreserve:
+ case CallingConv::AMDGPU_CS:
+ return true;
+ default:
+ return false;
}
}
@@ -2200,7 +2192,8 @@ bool hasSRAMECC(const MCSubtargetInfo &STI) {
}
bool hasMIMG_R128(const MCSubtargetInfo &STI) {
- return STI.hasFeature(AMDGPU::FeatureMIMG_R128) && !STI.hasFeature(AMDGPU::FeatureR128A16);
+ return STI.hasFeature(AMDGPU::FeatureMIMG_R128) &&
+ !STI.hasFeature(AMDGPU::FeatureR128A16);
}
bool hasA16(const MCSubtargetInfo &STI) {
@@ -2299,9 +2292,7 @@ bool isGFX12Plus(const MCSubtargetInfo &STI) { return isGFX12(STI); }
bool isNotGFX12Plus(const MCSubtargetInfo &STI) { return !isGFX12Plus(STI); }
-bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
- return !isGFX11Plus(STI);
-}
+bool isNotGFX11Plus(const MCSubtargetInfo &STI) { return !isGFX11Plus(STI); }
bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
@@ -2370,69 +2361,75 @@ bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI) {
const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
const MCRegister FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
- Reg == AMDGPU::SCC;
+ Reg == AMDGPU::SCC;
}
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI) {
return MRI.getEncodingValue(Reg) & AMDGPU::HWEncoding::IS_HI16;
}
-#define MAP_REG2REG \
- using namespace AMDGPU; \
- switch(Reg.id()) { \
- default: return Reg; \
- CASE_CI_VI(FLAT_SCR) \
- CASE_CI_VI(FLAT_SCR_LO) \
- CASE_CI_VI(FLAT_SCR_HI) \
- CASE_VI_GFX9PLUS(TTMP0) \
- CASE_VI_GFX9PLUS(TTMP1) \
- CASE_VI_GFX9PLUS(TTMP2) \
- CASE_VI_GFX9PLUS(TTMP3) \
- CASE_VI_GFX9PLUS(TTMP4) \
- CASE_VI_GFX9PLUS(TTMP5) \
- CASE_VI_GFX9PLUS(TTMP6) \
- CASE_VI_GFX9PLUS(TTMP7) \
- CASE_VI_GFX9PLUS(TTMP8) \
- CASE_VI_GFX9PLUS(TTMP9) \
- CASE_VI_GFX9PLUS(TTMP10) \
- CASE_VI_GFX9PLUS(TTMP11) \
- CASE_VI_GFX9PLUS(TTMP12) \
- CASE_VI_GFX9PLUS(TTMP13) \
- CASE_VI_GFX9PLUS(TTMP14) \
- CASE_VI_GFX9PLUS(TTMP15) \
- CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
- CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
- CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
- CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
- CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
- CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
- CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
- CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
- CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
- CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
- CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
- CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
- CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
- CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
- CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
- CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
- CASE_GFXPRE11_GFX11PLUS(M0) \
- CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
- CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
+#define MAP_REG2REG \
+ using namespace AMDGPU; \
+ switch (Reg.id()) { \
+ default: \
+ return Reg; \
+ CASE_CI_VI(FLAT_SCR) \
+ CASE_CI_VI(FLAT_SCR_LO) \
+ CASE_CI_VI(FLAT_SCR_HI) \
+ CASE_VI_GFX9PLUS(TTMP0) \
+ CASE_VI_GFX9PLUS(TTMP1) \
+ CASE_VI_GFX9PLUS(TTMP2) \
+ CASE_VI_GFX9PLUS(TTMP3) \
+ CASE_VI_GFX9PLUS(TTMP4) \
+ CASE_VI_GFX9PLUS(TTMP5) \
+ CASE_VI_GFX9PLUS(TTMP6) \
+ CASE_VI_GFX9PLUS(TTMP7) \
+ CASE_VI_GFX9PLUS(TTMP8) \
+ CASE_VI_GFX9PLUS(TTMP9) \
+ CASE_VI_GFX9PLUS(TTMP10) \
+ CASE_VI_GFX9PLUS(TTMP11) \
+ CASE_VI_GFX9PLUS(TTMP12) \
+ CASE_VI_GFX9PLUS(TTMP13) \
+ CASE_VI_GFX9PLUS(TTMP14) \
+ CASE_VI_GFX9PLUS(TTMP15) \
+ CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
+ CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
+ CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
+ CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
+ CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
+ CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
+ CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
+ CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
+ CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
+ CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
+ CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
+ CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
+ CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
+ CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
+ CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
+ CASE_VI_GFX9PLUS( \
+ TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
+ CASE_GFXPRE11_GFX11PLUS(M0) \
+ CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
+ CASE_GFXPRE11_GFX11PLUS_TO(SGPR_NULL64, SGPR_NULL) \
}
-#define CASE_CI_VI(node) \
- assert(!isSI(STI)); \
- case node: return isCI(STI) ? node##_ci : node##_vi;
+#define CASE_CI_VI(node) \
+ assert(!isSI(STI)); \
+ case node: \
+ return isCI(STI) ? node##_ci : node##_vi;
-#define CASE_VI_GFX9PLUS(node) \
- case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
+#define CASE_VI_GFX9PLUS(node) \
+ case node: \
+ return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
-#define CASE_GFXPRE11_GFX11PLUS(node) \
- case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
+#define CASE_GFXPRE11_GFX11PLUS(node) \
+ case node: \
+ return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;
-#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
- case node: return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
+#define CASE_GFXPRE11_GFX11PLUS_TO(node, result) \
+ case node: \
+ return isGFX11Plus(STI) ? result##_gfx11plus : result##_gfxpre11;
MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
if (STI.getTargetTriple().getArch() == Triple::r600)
@@ -2445,9 +2442,18 @@ MCRegister getMCReg(MCRegister Reg, const MCSubtargetInfo &STI) {
#undef CASE_GFXPRE11_GFX11PLUS
#undef CASE_GFXPRE11_GFX11PLUS_TO
-#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
-#define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
-#define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;
+#define CASE_CI_VI(node) \
+ case node##_ci: \
+ case node##_vi: \
+ return node;
+#define CASE_VI_GFX9PLUS(node) \
+ case node##_vi: ...
[truncated]
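For readers skimming the truncated diff above, the changes fall into a handful of mechanical patterns rather than anything semantic: short single-statement bodies collapse onto one line, wrapped conditional expressions carry the ? and : at the start of continuation lines, long initializers break after the = with packed call arguments, and macro continuation backslashes are right-aligned. The sketch below uses hypothetical demo names (not code from the patch, and hand-written rather than verbatim clang-format output) to show the first three patterns in isolation.

// Self-contained C++ sketch of the main reflows seen in the diff.
#include <algorithm>
#include <cassert>
#include <iostream>

// Round Value down to a multiple of Granule; stand-in for llvm::alignDown.
static unsigned alignDownTo(unsigned Value, unsigned Granule) {
  return Value - (Value % Granule);
}

// Pattern 1: trivially short bodies end up on one line, as getMinWavesPerEU
// and getSGPREncodingGranule do in the patch.
static unsigned getMinWavesPerEUDemo() { return 1; }

// Pattern 2: when a conditional expression wraps, the '?' and ':' start the
// continuation lines instead of trailing the previous line.
static unsigned getAllocGranuleDemo(bool HasWavefrontSize32Override,
                                    bool WavefrontSize32OverrideValue,
                                    bool WavefrontSize32DefaultValue) {
  bool IsWave32 = HasWavefrontSize32Override
                      ? WavefrontSize32OverrideValue
                      : WavefrontSize32DefaultValue;
  return IsWave32 ? 8 : 4;
}

// Pattern 3: an initializer that no longer fits breaks after '=' with the
// call arguments packed, as in getMaxNumVGPRs; the short names here keep the
// sketch compact.
static unsigned getMaxNumRegsDemo(unsigned TotalRegs, unsigned WavesPerEU,
                                  unsigned Granule) {
  assert(WavesPerEU != 0);
  unsigned MaxNumRegs =
      alignDownTo(TotalRegs / WavesPerEU, Granule);
  return std::min(MaxNumRegs, TotalRegs);
}

int main() {
  std::cout << getMinWavesPerEUDemo() << ' '
            << getAllocGranuleDemo(true, true, false) << ' '
            << getMaxNumRegsDemo(256, 8, 4) << '\n';
}

A change like this is typically produced by running clang-format -i (or git clang-format) on the affected files with the repository's LLVM style; nothing changes behavior, hence the NFC tag in the title.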
arsenm approved these changes on Mar 29, 2025.
c48ce19 to 48e202c
No description provided.