-
Notifications
You must be signed in to change notification settings - Fork 15.5k
[AMDGPU] Schedule independent instructions between s_barrier_signal and s_barrier_wait #172057
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -6,24 +6,34 @@ | |
| // | ||
| //===----------------------------------------------------------------------===// | ||
| // | ||
| /// \file This file contains a DAG scheduling mutation to add latency to | ||
| /// barrier edges between ATOMIC_FENCE instructions and preceding | ||
| /// memory accesses potentially affected by the fence. | ||
| /// This encourages the scheduling of more instructions before | ||
| /// ATOMIC_FENCE instructions. ATOMIC_FENCE instructions may | ||
| /// introduce wait counting or indicate an impending S_BARRIER | ||
| /// wait. Having more instructions in-flight across these | ||
| /// constructs improves latency hiding. | ||
| /// \file This file contains a DAG scheduling mutation to add latency to: | ||
| /// 1. Barrier edges between ATOMIC_FENCE instructions and preceding | ||
| /// memory accesses potentially affected by the fence. | ||
| /// This encourages the scheduling of more instructions before | ||
| /// ATOMIC_FENCE instructions. ATOMIC_FENCE instructions may | ||
| /// introduce wait counting or indicate an impending S_BARRIER | ||
| /// wait. Having more instructions in-flight across these | ||
| /// constructs improves latency hiding. | ||
| /// 2. Barrier edges from S_BARRIER_SIGNAL to S_BARRIER_WAIT. | ||
| /// This encourages independent work to be scheduled between | ||
| /// signal and wait, hiding barrier synchronization latency. | ||
| // | ||
| //===----------------------------------------------------------------------===// | ||
|
|
||
| #include "AMDGPUBarrierLatency.h" | ||
| #include "MCTargetDesc/AMDGPUMCTargetDesc.h" | ||
| #include "SIInstrInfo.h" | ||
| #include "llvm/CodeGen/ScheduleDAGInstrs.h" | ||
| #include "llvm/Support/CommandLine.h" | ||
|
|
||
| using namespace llvm; | ||
|
|
||
| static cl::opt<unsigned> BarrierSignalWaitLatencyOpt( | ||
| "amdgpu-barrier-signal-wait-latency", | ||
| cl::desc("Synthetic latency between S_BARRIER_SIGNAL and S_BARRIER_WAIT " | ||
| "to encourage scheduling independent work between them"), | ||
| cl::init(16), cl::Hidden); | ||
|
|
||
| namespace { | ||
|
|
||
| class BarrierLatency : public ScheduleDAGMutation { | ||
|
|
@@ -41,38 +51,56 @@ class BarrierLatency : public ScheduleDAGMutation { | |
| void apply(ScheduleDAGInstrs *DAG) override; | ||
| }; | ||
|
|
||
| void addLatencyToEdge(SDep &PredDep, SUnit &SU, unsigned Latency) { | ||
| SUnit *PredSU = PredDep.getSUnit(); | ||
| SDep ForwardD = PredDep; | ||
| ForwardD.setSUnit(&SU); | ||
| for (SDep &SuccDep : PredSU->Succs) { | ||
| if (SuccDep == ForwardD) { | ||
| SuccDep.setLatency(SuccDep.getLatency() + Latency); | ||
| break; | ||
| } | ||
| } | ||
| PredDep.setLatency(PredDep.getLatency() + Latency); | ||
| PredSU->setDepthDirty(); | ||
| SU.setDepthDirty(); | ||
| } | ||
|
|
||
| void BarrierLatency::apply(ScheduleDAGInstrs *DAG) { | ||
| constexpr unsigned SyntheticLatency = 2000; | ||
| const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII); | ||
| constexpr unsigned FenceLatency = 2000; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should this come from the sched model for the barrier? I don't think this number agrees with what's there
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
This is the latency that is being added for the edges to memory fences. Most artificial edges that are added for correctness have latency 0 initially, The 2000 or whatever is in the SchedModel for barriers doesn't actually do anything because it is the latency for the instruction and is not applied to any edges. |
||
| const unsigned BarrierSignalWaitLatency = BarrierSignalWaitLatencyOpt; | ||
|
|
||
| for (SUnit &SU : DAG->SUnits) { | ||
| const MachineInstr *MI = SU.getInstr(); | ||
| if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE) | ||
| continue; | ||
|
|
||
| // Update latency on barrier edges of ATOMIC_FENCE. | ||
| // Ignore scopes not expected to have any latency. | ||
| SyncScope::ID SSID = static_cast<SyncScope::ID>(MI->getOperand(1).getImm()); | ||
| if (IgnoredScopes.contains(SSID)) | ||
| continue; | ||
| unsigned Op = MI->getOpcode(); | ||
|
|
||
| for (SDep &PredDep : SU.Preds) { | ||
| if (!PredDep.isBarrier()) | ||
| continue; | ||
| SUnit *PredSU = PredDep.getSUnit(); | ||
| MachineInstr *MI = PredSU->getInstr(); | ||
| // Only consider memory loads | ||
| if (!MI->mayLoad() || MI->mayStore()) | ||
| if (Op == AMDGPU::ATOMIC_FENCE) { | ||
| // Update latency on barrier edges of ATOMIC_FENCE. | ||
| // Ignore scopes not expected to have any latency. | ||
| SyncScope::ID SSID = | ||
| static_cast<SyncScope::ID>(MI->getOperand(1).getImm()); | ||
| if (IgnoredScopes.contains(SSID)) | ||
| continue; | ||
| SDep ForwardD = PredDep; | ||
| ForwardD.setSUnit(&SU); | ||
| for (SDep &SuccDep : PredSU->Succs) { | ||
| if (SuccDep == ForwardD) { | ||
| SuccDep.setLatency(SuccDep.getLatency() + SyntheticLatency); | ||
| break; | ||
|
|
||
| for (SDep &PredDep : SU.Preds) { | ||
| if (!PredDep.isBarrier()) | ||
| continue; | ||
| SUnit *PredSU = PredDep.getSUnit(); | ||
| MachineInstr *MI = PredSU->getInstr(); | ||
| // Only consider memory loads | ||
| if (!MI->mayLoad() || MI->mayStore()) | ||
| continue; | ||
| addLatencyToEdge(PredDep, SU, FenceLatency); | ||
| } | ||
| } else if (Op == AMDGPU::S_BARRIER_WAIT) { | ||
| for (SDep &PredDep : SU.Preds) { | ||
| SUnit *PredSU = PredDep.getSUnit(); | ||
| const MachineInstr *PredMI = PredSU->getInstr(); | ||
| if (TII->isBarrierStart(PredMI->getOpcode())) { | ||
| addLatencyToEdge(PredDep, SU, BarrierSignalWaitLatency); | ||
| } | ||
| } | ||
| PredDep.setLatency(PredDep.getLatency() + SyntheticLatency); | ||
| PredSU->setDepthDirty(); | ||
| SU.setDepthDirty(); | ||
| } | ||
| } | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,197 @@ | ||
| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | ||
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck --check-prefix=OPT %s | ||
| ; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-barrier-signal-wait-latency=0 < %s | FileCheck --check-prefix=NOOPT %s | ||
|
|
||
| ; Tests for scheduling independent work between s_barrier_signal and s_barrier_wait | ||
| ; for latency hiding. | ||
|
|
||
| ; Independent work should be scheduled between signal/wait | ||
| define amdgpu_kernel void @test_barrier_independent_valu(ptr addrspace(1) %out, i32 %size) #0 { | ||
| ; OPT-LABEL: test_barrier_independent_valu: | ||
| ; OPT: ; %bb.0: ; %entry | ||
| ; OPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 | ||
| ; OPT-NEXT: v_and_b32_e32 v1, 0x3ff, v0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; OPT-NEXT: v_lshlrev_b32_e32 v2, 2, v1 | ||
| ; OPT-NEXT: s_wait_kmcnt 0x0 | ||
| ; OPT-NEXT: v_xad_u32 v0, v1, -1, s2 | ||
| ; OPT-NEXT: global_store_b32 v2, v1, s[0:1] | ||
| ; OPT-NEXT: s_barrier_signal -1 | ||
| ; OPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) | ||
| ; OPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1] | ||
| ; OPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; OPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo | ||
| ; OPT-NEXT: s_barrier_wait -1 | ||
| ; OPT-NEXT: global_load_b32 v0, v[0:1], off | ||
| ; OPT-NEXT: s_wait_loadcnt 0x0 | ||
| ; OPT-NEXT: global_store_b32 v2, v0, s[0:1] | ||
| ; OPT-NEXT: s_endpgm | ||
| ; | ||
| ; NOOPT-LABEL: test_barrier_independent_valu: | ||
| ; NOOPT: ; %bb.0: ; %entry | ||
| ; NOOPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 | ||
| ; NOOPT-NEXT: v_and_b32_e32 v2, 0x3ff, v0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_lshlrev_b32_e32 v3, 2, v2 | ||
| ; NOOPT-NEXT: s_wait_kmcnt 0x0 | ||
| ; NOOPT-NEXT: v_xad_u32 v0, v2, -1, s2 | ||
| ; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1] | ||
| ; NOOPT-NEXT: s_barrier_signal -1 | ||
| ; NOOPT-NEXT: s_barrier_wait -1 | ||
| ; NOOPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1] | ||
| ; NOOPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo | ||
| ; NOOPT-NEXT: global_load_b32 v0, v[0:1], off | ||
| ; NOOPT-NEXT: s_wait_loadcnt 0x0 | ||
| ; NOOPT-NEXT: global_store_b32 v3, v0, s[0:1] | ||
| ; NOOPT-NEXT: s_endpgm | ||
| entry: | ||
| %tid = call i32 @llvm.amdgcn.workitem.id.x() | ||
| %addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid | ||
| store i32 %tid, ptr addrspace(1) %addr | ||
| call void @llvm.amdgcn.s.barrier.signal(i32 -1) | ||
| call void @llvm.amdgcn.s.barrier.wait(i16 -1) | ||
| %idx_base = sub i32 %size, 1 | ||
| %idx = sub i32 %idx_base, %tid | ||
| %read_addr = getelementptr i32, ptr addrspace(1) %out, i32 %idx | ||
| %val = load i32, ptr addrspace(1) %read_addr | ||
| store i32 %val, ptr addrspace(1) %addr | ||
| ret void | ||
| } | ||
|
|
||
| ; No independent work - signal/wait should stay adjacent | ||
| define amdgpu_kernel void @test_barrier_no_independent_work(ptr addrspace(3) %lds) #0 { | ||
| ; OPT-LABEL: test_barrier_no_independent_work: | ||
| ; OPT: ; %bb.0: ; %entry | ||
| ; OPT-NEXT: s_load_b32 s0, s[4:5], 0x24 | ||
| ; OPT-NEXT: v_and_b32_e32 v0, 0x3ff, v0 | ||
| ; OPT-NEXT: s_wait_kmcnt 0x0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; OPT-NEXT: v_lshl_add_u32 v1, v0, 2, s0 | ||
| ; OPT-NEXT: ds_store_b32 v1, v0 | ||
| ; OPT-NEXT: s_barrier_signal -1 | ||
| ; OPT-NEXT: s_barrier_wait -1 | ||
| ; OPT-NEXT: ds_load_b32 v0, v1 | ||
| ; OPT-NEXT: s_wait_dscnt 0x0 | ||
| ; OPT-NEXT: ds_store_b32 v1, v0 offset:4 | ||
| ; OPT-NEXT: s_endpgm | ||
| ; | ||
| ; NOOPT-LABEL: test_barrier_no_independent_work: | ||
| ; NOOPT: ; %bb.0: ; %entry | ||
| ; NOOPT-NEXT: s_load_b32 s0, s[4:5], 0x24 | ||
| ; NOOPT-NEXT: v_and_b32_e32 v0, 0x3ff, v0 | ||
| ; NOOPT-NEXT: s_wait_kmcnt 0x0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_lshl_add_u32 v1, v0, 2, s0 | ||
| ; NOOPT-NEXT: ds_store_b32 v1, v0 | ||
| ; NOOPT-NEXT: s_barrier_signal -1 | ||
| ; NOOPT-NEXT: s_barrier_wait -1 | ||
| ; NOOPT-NEXT: ds_load_b32 v0, v1 | ||
| ; NOOPT-NEXT: s_wait_dscnt 0x0 | ||
| ; NOOPT-NEXT: ds_store_b32 v1, v0 offset:4 | ||
| ; NOOPT-NEXT: s_endpgm | ||
| entry: | ||
| %tid = call i32 @llvm.amdgcn.workitem.id.x() | ||
| %addr = getelementptr i32, ptr addrspace(3) %lds, i32 %tid | ||
| store i32 %tid, ptr addrspace(3) %addr | ||
| call void @llvm.amdgcn.s.barrier.signal(i32 -1) | ||
| call void @llvm.amdgcn.s.barrier.wait(i16 -1) | ||
| %val = load i32, ptr addrspace(3) %addr | ||
| %next = add i32 %tid, 1 | ||
| %next_addr = getelementptr i32, ptr addrspace(3) %lds, i32 %next | ||
| store i32 %val, ptr addrspace(3) %next_addr | ||
| ret void | ||
| } | ||
|
|
||
| ; Multiple barriers | ||
| define amdgpu_kernel void @test_barrier_multiple(ptr addrspace(1) %out, i32 %size) #0 { | ||
| ; OPT-LABEL: test_barrier_multiple: | ||
| ; OPT: ; %bb.0: ; %entry | ||
| ; OPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 | ||
| ; OPT-NEXT: v_and_b32_e32 v1, 0x3ff, v0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; OPT-NEXT: v_lshlrev_b32_e32 v2, 2, v1 | ||
| ; OPT-NEXT: s_wait_kmcnt 0x0 | ||
| ; OPT-NEXT: v_xad_u32 v0, v1, -1, s2 | ||
| ; OPT-NEXT: global_store_b32 v2, v1, s[0:1] | ||
| ; OPT-NEXT: s_barrier_signal -1 | ||
| ; OPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) | ||
| ; OPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1] | ||
| ; OPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0 | ||
| ; OPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; OPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo | ||
| ; OPT-NEXT: s_barrier_wait -1 | ||
| ; OPT-NEXT: global_load_b32 v3, v[0:1], off | ||
| ; OPT-NEXT: s_wait_loadcnt 0x0 | ||
| ; OPT-NEXT: global_store_b32 v2, v3, s[0:1] | ||
| ; OPT-NEXT: s_barrier_signal -1 | ||
| ; OPT-NEXT: s_barrier_wait -1 | ||
| ; OPT-NEXT: global_load_b32 v0, v[0:1], off offset:-4 | ||
| ; OPT-NEXT: s_wait_loadcnt 0x0 | ||
| ; OPT-NEXT: global_store_b32 v2, v0, s[0:1] | ||
| ; OPT-NEXT: s_endpgm | ||
| ; | ||
| ; NOOPT-LABEL: test_barrier_multiple: | ||
| ; NOOPT: ; %bb.0: ; %entry | ||
| ; NOOPT-NEXT: s_load_b96 s[0:2], s[4:5], 0x24 | ||
| ; NOOPT-NEXT: v_and_b32_e32 v2, 0x3ff, v0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_lshlrev_b32_e32 v3, 2, v2 | ||
| ; NOOPT-NEXT: s_wait_kmcnt 0x0 | ||
| ; NOOPT-NEXT: v_xad_u32 v0, v2, -1, s2 | ||
| ; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1] | ||
| ; NOOPT-NEXT: s_barrier_signal -1 | ||
| ; NOOPT-NEXT: s_barrier_wait -1 | ||
| ; NOOPT-NEXT: v_ashrrev_i32_e32 v1, 31, v0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_lshlrev_b64_e32 v[0:1], 2, v[0:1] | ||
| ; NOOPT-NEXT: v_add_co_u32 v0, vcc_lo, s0, v0 | ||
| ; NOOPT-NEXT: s_delay_alu instid0(VALU_DEP_1) | ||
| ; NOOPT-NEXT: v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo | ||
| ; NOOPT-NEXT: global_load_b32 v2, v[0:1], off | ||
| ; NOOPT-NEXT: s_wait_loadcnt 0x0 | ||
| ; NOOPT-NEXT: global_store_b32 v3, v2, s[0:1] | ||
| ; NOOPT-NEXT: s_barrier_signal -1 | ||
| ; NOOPT-NEXT: s_barrier_wait -1 | ||
| ; NOOPT-NEXT: global_load_b32 v0, v[0:1], off offset:-4 | ||
| ; NOOPT-NEXT: s_wait_loadcnt 0x0 | ||
| ; NOOPT-NEXT: global_store_b32 v3, v0, s[0:1] | ||
| ; NOOPT-NEXT: s_endpgm | ||
| entry: | ||
| %tid = call i32 @llvm.amdgcn.workitem.id.x() | ||
| %addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid | ||
| store i32 %tid, ptr addrspace(1) %addr | ||
|
|
||
| call void @llvm.amdgcn.s.barrier.signal(i32 -1) | ||
| call void @llvm.amdgcn.s.barrier.wait(i16 -1) | ||
|
|
||
| %idx1_base = sub i32 %size, 1 | ||
| %idx1 = sub i32 %idx1_base, %tid | ||
| %read_addr1 = getelementptr i32, ptr addrspace(1) %out, i32 %idx1 | ||
| %val1 = load i32, ptr addrspace(1) %read_addr1 | ||
| store i32 %val1, ptr addrspace(1) %addr | ||
|
|
||
| call void @llvm.amdgcn.s.barrier.signal(i32 -1) | ||
| call void @llvm.amdgcn.s.barrier.wait(i16 -1) | ||
|
|
||
| %idx2_base = sub i32 %size, 2 | ||
| %idx2 = sub i32 %idx2_base, %tid | ||
| %read_addr2 = getelementptr i32, ptr addrspace(1) %out, i32 %idx2 | ||
| %val2 = load i32, ptr addrspace(1) %read_addr2 | ||
| store i32 %val2, ptr addrspace(1) %addr | ||
| ret void | ||
| } | ||
|
|
||
| declare void @llvm.amdgcn.s.barrier.signal(i32) #1 | ||
| declare void @llvm.amdgcn.s.barrier.wait(i16) #1 | ||
| declare i32 @llvm.amdgcn.workitem.id.x() #2 | ||
|
|
||
| attributes #0 = { nounwind } | ||
| attributes #1 = { convergent nounwind } | ||
| attributes #2 = { nounwind readnone } |
Uh oh!
There was an error while loading. Please reload this page.