diff --git a/llvm/lib/Target/AMDGPU/AMDGPULaneMaskUtils.h b/llvm/lib/Target/AMDGPU/AMDGPULaneMaskUtils.h index 50c23a156d6fd..d134a7f1ebeee 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPULaneMaskUtils.h +++ b/llvm/lib/Target/AMDGPU/AMDGPULaneMaskUtils.h @@ -70,7 +70,8 @@ class LaneMaskConstants { XorOpc(IsWave32 ? AMDGPU::S_XOR_B32 : AMDGPU::S_XOR_B64), XorTermOpc(IsWave32 ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term), WQMOpc(IsWave32 ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64), - LaneMaskRC(IsWave32 ? &AMDGPU::SReg_32RegClass : &AMDGPU::SReg_64RegClass) {} + LaneMaskRC(IsWave32 ? &AMDGPU::SReg_32RegClass + : &AMDGPU::SReg_64RegClass) {} static inline const LaneMaskConstants &get(const GCNSubtarget &ST); }; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUWaveTransform.cpp b/llvm/lib/Target/AMDGPU/AMDGPUWaveTransform.cpp index b6f53ec2bd427..ceda928f202f5 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUWaveTransform.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUWaveTransform.cpp @@ -1841,8 +1841,8 @@ void ControlFlowRewriter::rewrite() { Register CondReg = Info.OrigCondition; if (!LMA.isSubsetOfExec(CondReg, *Node->Block)) { CondReg = LMU.createLaneMaskReg(); - BuildMI(*Node->Block, Node->Block->end(), {}, - TII.get(LMC.AndOpc), CondReg) + BuildMI(*Node->Block, Node->Block->end(), {}, TII.get(LMC.AndOpc), + CondReg) .addReg(LMC.ExecReg) .addReg(Info.OrigCondition); } diff --git a/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.cpp b/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.cpp index 737c5da32436d..d7b19cbe745a8 100644 --- a/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.cpp +++ b/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.cpp @@ -151,10 +151,9 @@ void GCNLaneMaskUtils::buildMergeLaneMasks(MachineBasicBlock &MBB, CurMaskedReg = CurReg; } else { CurMaskedReg = createLaneMaskReg(); - CurMaskedBuilt = - BuildMI(MBB, I, DL, TII->get(LMC.AndOpc), CurMaskedReg) - .addReg(CurReg) - .addReg(LMC.ExecReg); + CurMaskedBuilt = BuildMI(MBB, I, DL, TII->get(LMC.AndOpc), CurMaskedReg) + .addReg(CurReg) + .addReg(LMC.ExecReg); } } @@ -268,7 +267,7 @@ bool GCNLaneMaskAnalysis::isSubsetOfExec(Register Reg, void GCNLaneMaskUpdater::init(Register Reg) { Processed = false; Blocks.clear(); - //SSAUpdater.Initialize(LMU.getLaneMaskConsts().LaneMaskRC); + // SSAUpdater.Initialize(LMU.getLaneMaskConsts().LaneMaskRC); SSAUpdater.Initialize(Reg); } @@ -418,8 +417,8 @@ void GCNLaneMaskUpdater::process() { // Prepare an all-zero value for the default and reset in accumulating mode. if (Accumulating && !ZeroReg) { ZeroReg = LMU.createLaneMaskReg(); - BuildMI(Entry, Entry.getFirstTerminator(), {}, TII->get(LMU.getLaneMaskConsts().MovOpc), ZeroReg) + BuildMI(Entry, Entry.getFirstTerminator(), {}, + TII->get(LMU.getLaneMaskConsts().MovOpc), ZeroReg) .addImm(0); } diff --git a/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.h b/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.h index 2c0bd9b09cf6b..f4419f139d92c 100644 --- a/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.h +++ b/llvm/lib/Target/AMDGPU/GCNLaneMaskUtils.h @@ -35,13 +35,12 @@ class GCNLaneMaskUtils { public: GCNLaneMaskUtils() = delete; - explicit GCNLaneMaskUtils(MachineFunction &MF) : MF(MF), - LMC(AMDGPU::LaneMaskConstants::get(MF.getSubtarget())) {} + explicit GCNLaneMaskUtils(MachineFunction &MF) + : MF(MF), + LMC(AMDGPU::LaneMaskConstants::get(MF.getSubtarget())) {} MachineFunction *function() const { return &MF; } - const AMDGPU::LaneMaskConstants &getLaneMaskConsts() const { - return LMC; - } + const AMDGPU::LaneMaskConstants &getLaneMaskConsts() const { return LMC; } bool maybeLaneMask(Register Reg) const; bool isConstantLaneMask(Register Reg, bool &Val) const;