Skip to content

Conversation

@PrasoonMishra
Copy link
Contributor

@PrasoonMishra PrasoonMishra commented Dec 12, 2025

On gfx12+, the unified s_barrier is lowered to split s_barrier_signal/s_barrier_wait pairs. By default, the dependency edge between signal and wait has zero latency, causing the scheduler to emit them adjacent to each other. This misses the opportunity to hide barrier latency.

This patch adds synthetic latency to the signal-wait barrier edge to encourage latency hiding. Independent instructions are scheduled in the gap between split barrier signal and wait.

The latency is tunable via -amdgpu-barrier-signal-wait-latency.

Fixes: SWDEV-567090

…_wait

Add synthetic latency to the signal-wait barrier edge to encourage latency
hiding. Independent instructions are scheduled in the gap between split
barrier signal and wait.

The latency is tunable via -amdgpu-barrier-signal-wait-latency.
@llvmbot
Copy link
Member

llvmbot commented Dec 12, 2025

@llvm/pr-subscribers-backend-amdgpu

Author: Dark Steve (PrasoonMishra)

Changes

Add synthetic latency to the signal-wait barrier edge to encourage latency hiding. Independent instructions are scheduled in the gap between split barrier signal and wait.

The latency is tunable via -amdgpu-barrier-signal-wait-latency.


Full diff: https://github.com/llvm/llvm-project/pull/172057.diff

4 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp (+63-33)
  • (added) llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll (+197)
  • (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll (+21-21)
  • (modified) llvm/test/CodeGen/AMDGPU/s-barrier.ll (+1-1)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp b/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
index 2e586ea207af5..39f6f409c8ebe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUBarrierLatency.cpp
@@ -6,14 +6,17 @@
 //
 //===----------------------------------------------------------------------===//
 //
-/// \file This file contains a DAG scheduling mutation to add latency to
-///       barrier edges between ATOMIC_FENCE instructions and preceding
-///       memory accesses potentially affected by the fence.
-///       This encourages the scheduling of more instructions before
-///       ATOMIC_FENCE instructions.  ATOMIC_FENCE instructions may
-///       introduce wait counting or indicate an impending S_BARRIER
-///       wait.  Having more instructions in-flight across these
-///       constructs improves latency hiding.
+/// \file This file contains a DAG scheduling mutation to add latency to:
+///       1. Barrier edges between ATOMIC_FENCE instructions and preceding
+///          memory accesses potentially affected by the fence.
+///          This encourages the scheduling of more instructions before
+///          ATOMIC_FENCE instructions.  ATOMIC_FENCE instructions may
+///          introduce wait counting or indicate an impending S_BARRIER
+///          wait.  Having more instructions in-flight across these
+///          constructs improves latency hiding.
+///       2. Barrier edges from S_BARRIER_SIGNAL to S_BARRIER_WAIT.
+///          This encourages independent work to be scheduled between
+///          signal and wait, hiding barrier synchronization latency.
 //
 //===----------------------------------------------------------------------===//
 
@@ -21,9 +24,16 @@
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "SIInstrInfo.h"
 #include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/Support/CommandLine.h"
 
 using namespace llvm;
 
+static cl::opt<unsigned> BarrierSignalWaitLatencyOpt(
+    "amdgpu-barrier-signal-wait-latency",
+    cl::desc("Synthetic latency between S_BARRIER_SIGNAL and S_BARRIER_WAIT "
+             "to encourage scheduling independent work between them"),
+    cl::init(16), cl::Hidden);
+
 namespace {
 
 class BarrierLatency : public ScheduleDAGMutation {
@@ -41,38 +51,58 @@ class BarrierLatency : public ScheduleDAGMutation {
   void apply(ScheduleDAGInstrs *DAG) override;
 };
 
+void addLatencyToEdge(SDep &PredDep, SUnit &SU, unsigned Latency) {
+  SUnit *PredSU = PredDep.getSUnit();
+  SDep ForwardD = PredDep;
+  ForwardD.setSUnit(&SU);
+  for (SDep &SuccDep : PredSU->Succs) {
+    if (SuccDep == ForwardD) {
+      SuccDep.setLatency(SuccDep.getLatency() + Latency);
+      break;
+    }
+  }
+  PredDep.setLatency(PredDep.getLatency() + Latency);
+  PredSU->setDepthDirty();
+  SU.setDepthDirty();
+}
+
 void BarrierLatency::apply(ScheduleDAGInstrs *DAG) {
-  constexpr unsigned SyntheticLatency = 2000;
+  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(DAG->TII);
+  constexpr unsigned FenceLatency = 2000;
+  const unsigned BarrierSignalWaitLatency = BarrierSignalWaitLatencyOpt;
+
   for (SUnit &SU : DAG->SUnits) {
     const MachineInstr *MI = SU.getInstr();
-    if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
-      continue;
+    unsigned Op = MI->getOpcode();
 
-    // Update latency on barrier edges of ATOMIC_FENCE.
-    // Ignore scopes not expected to have any latency.
-    SyncScope::ID SSID = static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
-    if (IgnoredScopes.contains(SSID))
-      continue;
-
-    for (SDep &PredDep : SU.Preds) {
-      if (!PredDep.isBarrier())
-        continue;
-      SUnit *PredSU = PredDep.getSUnit();
-      MachineInstr *MI = PredSU->getInstr();
-      // Only consider memory loads
-      if (!MI->mayLoad() || MI->mayStore())
+    if (Op == AMDGPU::ATOMIC_FENCE) {
+      // Update latency on barrier edges of ATOMIC_FENCE.
+      // Ignore scopes not expected to have any latency.
+      SyncScope::ID SSID =
+          static_cast<SyncScope::ID>(MI->getOperand(1).getImm());
+      if (IgnoredScopes.contains(SSID))
         continue;
-      SDep ForwardD = PredDep;
-      ForwardD.setSUnit(&SU);
-      for (SDep &SuccDep : PredSU->Succs) {
-        if (SuccDep == ForwardD) {
-          SuccDep.setLatency(SuccDep.getLatency() + SyntheticLatency);
-          break;
+
+      for (SDep &PredDep : SU.Preds) {
+        if (!PredDep.isBarrier())
+          continue;
+        SUnit *PredSU = PredDep.getSUnit();
+        MachineInstr *MI = PredSU->getInstr();
+        // Only consider memory loads
+        if (!MI->mayLoad() || MI->mayStore())
+          continue;
+        addLatencyToEdge(PredDep, SU, FenceLatency);
+      }
+    }
+
+    else if (Op == AMDGPU::S_BARRIER_WAIT) {
+      for (SDep &PredDep : SU.Preds) {
+        SUnit *PredSU = PredDep.getSUnit();
+        const MachineInstr *PredMI = PredSU->getInstr();
+        if (TII->isBarrierStart(PredMI->getOpcode())) {
+          addLatencyToEdge(PredDep, SU, BarrierSignalWaitLatency);
         }
       }
-      PredDep.setLatency(PredDep.getLatency() + SyntheticLatency);
-      PredSU->setDepthDirty();
-      SU.setDepthDirty();
     }
   }
 }
diff --git a/llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll b/llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll
new file mode 100644
index 0000000000000..0b512b00c3fbc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/barrier-signal-wait-latency.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck --check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -amdgpu-barrier-signal-wait-latency=0 < %s | FileCheck --check-prefix=NOOPT %s
+
+; Tests for scheduling independent work between s_barrier_signal and s_barrier_wait
+; for latency hiding.
+
+; Independent work should be scheduled between signal/wait
+define amdgpu_kernel void @test_barrier_independent_valu(ptr addrspace(1) %out, i32 %size) #0 {
+; OPT-LABEL: test_barrier_independent_valu:
+; OPT:       ; %bb.0: ; %entry
+; OPT-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
+; OPT-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT:    v_lshlrev_b32_e32 v2, 2, v1
+; OPT-NEXT:    s_wait_kmcnt 0x0
+; OPT-NEXT:    v_xad_u32 v0, v1, -1, s2
+; OPT-NEXT:    global_store_b32 v2, v1, s[0:1]
+; OPT-NEXT:    s_barrier_signal -1
+; OPT-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; OPT-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; OPT-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT:    v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; OPT-NEXT:    s_barrier_wait -1
+; OPT-NEXT:    global_load_b32 v0, v[0:1], off
+; OPT-NEXT:    s_wait_loadcnt 0x0
+; OPT-NEXT:    global_store_b32 v2, v0, s[0:1]
+; OPT-NEXT:    s_endpgm
+;
+; NOOPT-LABEL: test_barrier_independent_valu:
+; NOOPT:       ; %bb.0: ; %entry
+; NOOPT-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
+; NOOPT-NEXT:    v_and_b32_e32 v2, 0x3ff, v0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT:    v_lshlrev_b32_e32 v3, 2, v2
+; NOOPT-NEXT:    s_wait_kmcnt 0x0
+; NOOPT-NEXT:    v_xad_u32 v0, v2, -1, s2
+; NOOPT-NEXT:    global_store_b32 v3, v2, s[0:1]
+; NOOPT-NEXT:    s_barrier_signal -1
+; NOOPT-NEXT:    s_barrier_wait -1
+; NOOPT-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; NOOPT-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; NOOPT-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT:    v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; NOOPT-NEXT:    global_load_b32 v0, v[0:1], off
+; NOOPT-NEXT:    s_wait_loadcnt 0x0
+; NOOPT-NEXT:    global_store_b32 v3, v0, s[0:1]
+; NOOPT-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid
+  store i32 %tid, ptr addrspace(1) %addr
+  call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+  call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+  %idx_base = sub i32 %size, 1
+  %idx = sub i32 %idx_base, %tid
+  %read_addr = getelementptr i32, ptr addrspace(1) %out, i32 %idx
+  %val = load i32, ptr addrspace(1) %read_addr
+  store i32 %val, ptr addrspace(1) %addr
+  ret void
+}
+
+; No independent work - signal/wait should stay adjacent
+define amdgpu_kernel void @test_barrier_no_independent_work(ptr addrspace(3) %lds) #0 {
+; OPT-LABEL: test_barrier_no_independent_work:
+; OPT:       ; %bb.0: ; %entry
+; OPT-NEXT:    s_load_b32 s0, s[4:5], 0x24
+; OPT-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; OPT-NEXT:    s_wait_kmcnt 0x0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT:    v_lshl_add_u32 v1, v0, 2, s0
+; OPT-NEXT:    ds_store_b32 v1, v0
+; OPT-NEXT:    s_barrier_signal -1
+; OPT-NEXT:    s_barrier_wait -1
+; OPT-NEXT:    ds_load_b32 v0, v1
+; OPT-NEXT:    s_wait_dscnt 0x0
+; OPT-NEXT:    ds_store_b32 v1, v0 offset:4
+; OPT-NEXT:    s_endpgm
+;
+; NOOPT-LABEL: test_barrier_no_independent_work:
+; NOOPT:       ; %bb.0: ; %entry
+; NOOPT-NEXT:    s_load_b32 s0, s[4:5], 0x24
+; NOOPT-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; NOOPT-NEXT:    s_wait_kmcnt 0x0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT:    v_lshl_add_u32 v1, v0, 2, s0
+; NOOPT-NEXT:    ds_store_b32 v1, v0
+; NOOPT-NEXT:    s_barrier_signal -1
+; NOOPT-NEXT:    s_barrier_wait -1
+; NOOPT-NEXT:    ds_load_b32 v0, v1
+; NOOPT-NEXT:    s_wait_dscnt 0x0
+; NOOPT-NEXT:    ds_store_b32 v1, v0 offset:4
+; NOOPT-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %addr = getelementptr i32, ptr addrspace(3) %lds, i32 %tid
+  store i32 %tid, ptr addrspace(3) %addr
+  call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+  call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+  %val = load i32, ptr addrspace(3) %addr
+  %next = add i32 %tid, 1
+  %next_addr = getelementptr i32, ptr addrspace(3) %lds, i32 %next
+  store i32 %val, ptr addrspace(3) %next_addr
+  ret void
+}
+
+; Multiple barriers
+define amdgpu_kernel void @test_barrier_multiple(ptr addrspace(1) %out, i32 %size) #0 {
+; OPT-LABEL: test_barrier_multiple:
+; OPT:       ; %bb.0: ; %entry
+; OPT-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
+; OPT-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT:    v_lshlrev_b32_e32 v2, 2, v1
+; OPT-NEXT:    s_wait_kmcnt 0x0
+; OPT-NEXT:    v_xad_u32 v0, v1, -1, s2
+; OPT-NEXT:    global_store_b32 v2, v1, s[0:1]
+; OPT-NEXT:    s_barrier_signal -1
+; OPT-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; OPT-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; OPT-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
+; OPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; OPT-NEXT:    v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; OPT-NEXT:    s_barrier_wait -1
+; OPT-NEXT:    global_load_b32 v3, v[0:1], off
+; OPT-NEXT:    s_wait_loadcnt 0x0
+; OPT-NEXT:    global_store_b32 v2, v3, s[0:1]
+; OPT-NEXT:    s_barrier_signal -1
+; OPT-NEXT:    s_barrier_wait -1
+; OPT-NEXT:    global_load_b32 v0, v[0:1], off offset:-4
+; OPT-NEXT:    s_wait_loadcnt 0x0
+; OPT-NEXT:    global_store_b32 v2, v0, s[0:1]
+; OPT-NEXT:    s_endpgm
+;
+; NOOPT-LABEL: test_barrier_multiple:
+; NOOPT:       ; %bb.0: ; %entry
+; NOOPT-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
+; NOOPT-NEXT:    v_and_b32_e32 v2, 0x3ff, v0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT:    v_lshlrev_b32_e32 v3, 2, v2
+; NOOPT-NEXT:    s_wait_kmcnt 0x0
+; NOOPT-NEXT:    v_xad_u32 v0, v2, -1, s2
+; NOOPT-NEXT:    global_store_b32 v3, v2, s[0:1]
+; NOOPT-NEXT:    s_barrier_signal -1
+; NOOPT-NEXT:    s_barrier_wait -1
+; NOOPT-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; NOOPT-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; NOOPT-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
+; NOOPT-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; NOOPT-NEXT:    v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; NOOPT-NEXT:    global_load_b32 v2, v[0:1], off
+; NOOPT-NEXT:    s_wait_loadcnt 0x0
+; NOOPT-NEXT:    global_store_b32 v3, v2, s[0:1]
+; NOOPT-NEXT:    s_barrier_signal -1
+; NOOPT-NEXT:    s_barrier_wait -1
+; NOOPT-NEXT:    global_load_b32 v0, v[0:1], off offset:-4
+; NOOPT-NEXT:    s_wait_loadcnt 0x0
+; NOOPT-NEXT:    global_store_b32 v3, v0, s[0:1]
+; NOOPT-NEXT:    s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %addr = getelementptr i32, ptr addrspace(1) %out, i32 %tid
+  store i32 %tid, ptr addrspace(1) %addr
+
+  call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+  call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+
+  %idx1_base = sub i32 %size, 1
+  %idx1 = sub i32 %idx1_base, %tid
+  %read_addr1 = getelementptr i32, ptr addrspace(1) %out, i32 %idx1
+  %val1 = load i32, ptr addrspace(1) %read_addr1
+  store i32 %val1, ptr addrspace(1) %addr
+
+  call void @llvm.amdgcn.s.barrier.signal(i32 -1)
+  call void @llvm.amdgcn.s.barrier.wait(i16 -1)
+
+  %idx2_base = sub i32 %size, 2
+  %idx2 = sub i32 %idx2_base, %tid
+  %read_addr2 = getelementptr i32, ptr addrspace(1) %out, i32 %idx2
+  %val2 = load i32, ptr addrspace(1) %read_addr2
+  store i32 %val2, ptr addrspace(1) %addr
+  ret void
+}
+
+declare void @llvm.amdgcn.s.barrier.signal(i32) #1
+declare void @llvm.amdgcn.s.barrier.wait(i16) #1
+declare i32 @llvm.amdgcn.workitem.id.x() #2
+
+attributes #0 = { nounwind }
+attributes #1 = { convergent nounwind }
+attributes #2 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
index 9003251253740..139c698873d67 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.s.barrier.ll
@@ -92,68 +92,68 @@ define amdgpu_kernel void @test_barrier(ptr addrspace(1) %out, i32 %size) #0 {
 ; VARIANT4-LABEL: test_barrier:
 ; VARIANT4:       ; %bb.0: ; %entry
 ; VARIANT4-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
-; VARIANT4-NEXT:    v_and_b32_e32 v2, 0x3ff, v0
+; VARIANT4-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
 ; VARIANT4-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; VARIANT4-NEXT:    v_lshlrev_b32_e32 v3, 2, v2
+; VARIANT4-NEXT:    v_lshlrev_b32_e32 v2, 2, v1
 ; VARIANT4-NEXT:    s_wait_kmcnt 0x0
-; VARIANT4-NEXT:    v_xad_u32 v0, v2, -1, s2
-; VARIANT4-NEXT:    global_store_b32 v3, v2, s[0:1]
+; VARIANT4-NEXT:    v_xad_u32 v0, v1, -1, s2
+; VARIANT4-NEXT:    global_store_b32 v2, v1, s[0:1]
 ; VARIANT4-NEXT:    s_barrier_signal -1
-; VARIANT4-NEXT:    s_barrier_wait -1
 ; VARIANT4-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; VARIANT4-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; VARIANT4-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
 ; VARIANT4-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
 ; VARIANT4-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; VARIANT4-NEXT:    v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; VARIANT4-NEXT:    s_barrier_wait -1
 ; VARIANT4-NEXT:    global_load_b32 v0, v[0:1], off
 ; VARIANT4-NEXT:    s_wait_loadcnt 0x0
-; VARIANT4-NEXT:    global_store_b32 v3, v0, s[0:1]
+; VARIANT4-NEXT:    global_store_b32 v2, v0, s[0:1]
 ; VARIANT4-NEXT:    s_endpgm
 ;
 ; VARIANT5-LABEL: test_barrier:
 ; VARIANT5:       ; %bb.0: ; %entry
 ; VARIANT5-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
-; VARIANT5-NEXT:    v_and_b32_e32 v2, 0x3ff, v0
+; VARIANT5-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
 ; VARIANT5-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; VARIANT5-NEXT:    v_lshlrev_b32_e32 v3, 2, v2
+; VARIANT5-NEXT:    v_lshlrev_b32_e32 v2, 2, v1
 ; VARIANT5-NEXT:    s_wait_kmcnt 0x0
-; VARIANT5-NEXT:    v_xad_u32 v0, v2, -1, s2
-; VARIANT5-NEXT:    global_store_b32 v3, v2, s[0:1]
+; VARIANT5-NEXT:    v_xad_u32 v0, v1, -1, s2
+; VARIANT5-NEXT:    global_store_b32 v2, v1, s[0:1]
 ; VARIANT5-NEXT:    s_barrier_signal -1
-; VARIANT5-NEXT:    s_barrier_wait -1
 ; VARIANT5-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; VARIANT5-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; VARIANT5-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
 ; VARIANT5-NEXT:    v_add_co_u32 v0, vcc_lo, s0, v0
 ; VARIANT5-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; VARIANT5-NEXT:    v_add_co_ci_u32_e64 v1, null, s1, v1, vcc_lo
+; VARIANT5-NEXT:    s_barrier_wait -1
 ; VARIANT5-NEXT:    global_load_b32 v0, v[0:1], off
 ; VARIANT5-NEXT:    s_wait_loadcnt 0x0
-; VARIANT5-NEXT:    global_store_b32 v3, v0, s[0:1]
+; VARIANT5-NEXT:    global_store_b32 v2, v0, s[0:1]
 ; VARIANT5-NEXT:    s_endpgm
 ;
 ; VARIANT6-LABEL: test_barrier:
 ; VARIANT6:       ; %bb.0: ; %entry
 ; VARIANT6-NEXT:    s_load_b96 s[0:2], s[4:5], 0x24
+; VARIANT6-NEXT:    v_and_b32_e32 v1, 0x3ff, v0
 ; VARIANT6-NEXT:    s_wait_kmcnt 0x0
-; VARIANT6-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_and_b32 v4, 0x3ff, v0
 ; VARIANT6-NEXT:    s_add_co_i32 s2, s2, -1
-; VARIANT6-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; VARIANT6-NEXT:    v_dual_mov_b32 v2, s0 :: v_dual_lshlrev_b32 v5, 2, v4
-; VARIANT6-NEXT:    v_sub_nc_u32_e32 v0, s2, v4
-; VARIANT6-NEXT:    global_store_b32 v5, v4, s[0:1]
+; VARIANT6-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; VARIANT6-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
+; VARIANT6-NEXT:    v_lshlrev_b32_e32 v4, 2, v1
+; VARIANT6-NEXT:    v_sub_nc_u32_e32 v0, s2, v1
+; VARIANT6-NEXT:    global_store_b32 v4, v1, s[0:1]
 ; VARIANT6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; VARIANT6-NEXT:    s_barrier_signal -1
-; VARIANT6-NEXT:    s_barrier_wait -1
-; VARIANT6-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; VARIANT6-NEXT:    v_lshlrev_b64_e32 v[0:1], 2, v[0:1]
+; VARIANT6-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; VARIANT6-NEXT:    v_add_co_u32 v0, vcc_lo, v2, v0
-; VARIANT6-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; VARIANT6-NEXT:    v_add_co_ci_u32_e64 v1, null, v3, v1, vcc_lo
+; VARIANT6-NEXT:    s_barrier_wait -1
 ; VARIANT6-NEXT:    global_load_b32 v0, v[0:1], off
 ; VARIANT6-NEXT:    s_wait_loadcnt 0x0
-; VARIANT6-NEXT:    global_store_b32 v5, v0, s[0:1]
+; VARIANT6-NEXT:    global_store_b32 v4, v0, s[0:1]
 ; VARIANT6-NEXT:    s_endpgm
 entry:
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/s-barrier.ll b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
index 6e51c870cd487..35b86998c9cac 100644
--- a/llvm/test/CodeGen/AMDGPU/s-barrier.ll
+++ b/llvm/test/CodeGen/AMDGPU/s-barrier.ll
@@ -99,8 +99,8 @@ define amdgpu_kernel void @kernel1(ptr addrspace(1) %out, ptr addrspace(3) %in)
 ; GFX12-SDAG-NEXT:    s_mov_b32 m0, s2
 ; GFX12-SDAG-NEXT:    s_barrier_signal -1
 ; GFX12-SDAG-NEXT:    s_barrier_join m0
-; GFX12-SDAG-NEXT:    s_mov_b32 m0, 2
 ; GFX12-SDAG-NEXT:    s_barrier_signal_isfirst -1
+; GFX12-SDAG-NEXT:    s_mov_b32 m0, 2
 ; GFX12-SDAG-NEXT:    s_barrier_wait 1
 ; GFX12-SDAG-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-SDAG-NEXT:    s_barrier_leave

@github-actions
Copy link

github-actions bot commented Dec 12, 2025

✅ With the latest revision this PR passed the C/C++ code formatter.

@PrasoonMishra PrasoonMishra changed the title [AMDGPU] Schedule independent instruction between s_barrier_signal and s_barrier_wait [AMDGPU] Schedule independent instructions between s_barrier_signal and s_barrier_wait Dec 12, 2025
void BarrierLatency::apply(ScheduleDAGInstrs *DAG) {
constexpr unsigned SyntheticLatency = 2000;
const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII);
constexpr unsigned FenceLatency = 2000;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this come from the sched model for the barrier? I don't think this number agrees with what's there

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The FenceLatency = 2000 value is pre-existing code; I just renamed SyntheticLatency to FenceLatency to distinguish it from the new BarrierSignalWaitLatency. The 2000 was already in the pass before my changes.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this come from the sched model for the barrier? I don't think this number agrees with what's there

This is the latency that is being added for the edges to memory fences. Most artificial edges that are added for correctness have latency 0 initially. The 2000 (or whatever is in the SchedModel for barriers) doesn't actually do anything because it is the latency for the instruction and is not applied to any edges.

@PrasoonMishra
Copy link
Contributor Author

Tuning the synthetic latency value:
Initially started with latency=2000 but this was too aggressive. For example, in CodeGen/AMDGPU/ds_read2-gfx1250.ll, it moved a load-dependent v_add_f32 into the signal-wait gap, forcing an extra s_wait_dscnt and breaking the v_dual_add_f32 dual-issue optimization.

; Original:
ds_load_2addr_b32 v[0:1], v4 offset1:8
s_barrier_signal -1
s_barrier_wait -1
ds_load_2addr_b32 v[2:3], v4 offset0:11 offset1:27
s_wait_dscnt 0x0
v_dual_add_f32 v0, v0, v1 :: v_dual_add_f32 v1, v2, v3 ;Single dual-issue instruction
v_add_f32_e32 v0, v0, v1

; With latency=2000:
ds_load_2addr_b32 v[0:1], v4 offset1:8
s_barrier_signal -1
s_wait_dscnt 0x0               ; extra s_wait_dscnt
v_add_f32_e32 v0, v0, v1
s_barrier_wait -1
ds_load_2addr_b32 v[2:3], v4 offset0:11 offset1:27
s_wait_dscnt 0x0
v_add_f32_e32 v1, v2, v3       ;Lost dual-issue

Through binary search, found that 16 is the sweet spot: high enough to schedule independent address calculations, but low enough to avoid pulling in memory-dependent instructions that would force extra waits and break dual-issue optimizations.

Copy link
Collaborator

@nhaehnle nhaehnle left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice. 16 is a good number, it sounds in the right ballpark for actual barrier latency cycles.

For future reference, it's welcome practice to add new tests first in a separate commit without code changes, so that the effect of your code change is more obvious in code review.

@PrasoonMishra
Copy link
Contributor Author

Nice. 16 is a good number, it sounds in the right ballpark for actual barrier latency cycles.

For future reference, it's welcome practice to add new tests first in a separate commit without code changes, so that the effect of your code change is more obvious in code review.

Thanks for the approval. Going forward, I’ll add lit tests first.

Copy link
Contributor

@perlfu perlfu left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LGTM - thanks for looking at this. I had also wondered about filling these gaps.

Wait a day or so before merging in case there are any further comments.

Copy link
Member

@kerbowa kerbowa left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LGTM. There may be edge cases where ATOMIC_FENCEs and BARRIER* are separated when stalls or a different heuristic takes priority over latency. It would also be nice if we could dynamically adjust how much latency hiding we have around the split barrier vs. between it, based on the characteristics of the scheduling region.

void BarrierLatency::apply(ScheduleDAGInstrs *DAG) {
constexpr unsigned SyntheticLatency = 2000;
const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(DAG->TII);
constexpr unsigned FenceLatency = 2000;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should this come from the sched model for the barrier? I don't think this number agrees with what's there

This is the latency that is being added for the edges to memory fences. Most artificial edges that are added for correctness have latency 0 initially. The 2000 (or whatever is in the SchedModel for barriers) doesn't actually do anything because it is the latency for the instruction and is not applied to any edges.

@ssahasra
Copy link
Collaborator

Please add the ticket number to the commit description like this:

Fixes: SWDEV-567090

An example can be seen in #171212

@PrasoonMishra
Copy link
Contributor Author

LGTM. There may be edge cases where ATOMIC_FENCEs and BARRIER* are separated when stalls or a different heuristic takes priority over latency. It would also be nice if we could dynamically adjust how much latency hiding we have around the split barrier vs. between it, based on the characteristics of the scheduling region.

Thanks for the LGTM. I’ll look into dynamic latency hiding around ATOMIC_FENCES and split barriers, as this seems promising for further performance gains.

@PrasoonMishra PrasoonMishra merged commit 7d381f2 into llvm:main Dec 16, 2025
10 checks passed
@llvm-ci
Copy link
Collaborator

llvm-ci commented Dec 16, 2025

LLVM Buildbot has detected a new failure on builder sanitizer-aarch64-linux-bootstrap-asan running on sanitizer-buildbot8 while building llvm at step 2 "annotate".

Full details are available at: https://lab.llvm.org/buildbot/#/builders/24/builds/15706

Here is the relevant piece of the build log for the reference
Step 2 (annotate) failure: 'python ../sanitizer_buildbot/sanitizers/zorg/buildbot/builders/sanitizers/buildbot_selector.py' (failure)
...
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using lld-link: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/lld-link
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using ld64.lld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/ld64.lld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using wasm-ld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/wasm-ld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using ld.lld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/ld.lld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using lld-link: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/lld-link
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using ld64.lld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/ld64.lld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using wasm-ld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/wasm-ld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/main.py:74: note: The test suite configuration requested an individual test timeout of 0 seconds but a timeout of 900 seconds was requested on the command line. Forcing timeout to be 900 seconds.
-- Testing: 93323 tests, 72 workers --
Testing:  0.. 10.. 20.. 30.. 40.. 50.. 
FAIL: LLVM :: ExecutionEngine/JITLink/x86-64/MachO_weak_references.s (56842 of 93323)
******************** TEST 'LLVM :: ExecutionEngine/JITLink/x86-64/MachO_weak_references.s' FAILED ********************
Exit Code: 1

Command Output (stdout):
--
# RUN: at line 1
rm -rf /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp && mkdir -p /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp
# executed command: rm -rf /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp
# note: command had no output on stdout or stderr
# executed command: mkdir -p /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp
# note: command had no output on stdout or stderr
# RUN: at line 2
/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s
# executed command: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s
# note: command had no output on stdout or stderr
# RUN: at line 3
/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-present -abs bar=0x1 -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# executed command: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-present -abs bar=0x1 -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# note: command had no output on stdout or stderr
# RUN: at line 4
/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-absent -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# executed command: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-absent -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# note: command had no output on stdout or stderr
# error: command failed with exit status: 1

--

********************
Testing:  0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. 
Slowest Tests:
--------------------------------------------------------------------------
133.72s: Clang :: Preprocessor/riscv-target-features.c
128.40s: Clang :: Driver/arm-cortex-cpus-1.c
126.37s: Clang :: Driver/arm-cortex-cpus-2.c
120.95s: Clang :: OpenMP/target_defaultmap_codegen_01.cpp
117.39s: Clang :: OpenMP/target_update_codegen.cpp
103.31s: Clang :: Preprocessor/arm-target-features.c
102.69s: Clang :: Preprocessor/aarch64-target-features.c
Step 11 (stage2/asan check) failure: stage2/asan check (failure)
...
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using lld-link: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/lld-link
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using ld64.lld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/ld64.lld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using wasm-ld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/wasm-ld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using ld.lld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/ld.lld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using lld-link: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/lld-link
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using ld64.lld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/ld64.lld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/llvm/config.py:564: note: using wasm-ld: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/wasm-ld
llvm-lit: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/utils/lit/lit/main.py:74: note: The test suite configuration requested an individual test timeout of 0 seconds but a timeout of 900 seconds was requested on the command line. Forcing timeout to be 900 seconds.
-- Testing: 93323 tests, 72 workers --
Testing:  0.. 10.. 20.. 30.. 40.. 50.. 
FAIL: LLVM :: ExecutionEngine/JITLink/x86-64/MachO_weak_references.s (56842 of 93323)
******************** TEST 'LLVM :: ExecutionEngine/JITLink/x86-64/MachO_weak_references.s' FAILED ********************
Exit Code: 1

Command Output (stdout):
--
# RUN: at line 1
rm -rf /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp && mkdir -p /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp
# executed command: rm -rf /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp
# note: command had no output on stdout or stderr
# executed command: mkdir -p /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp
# note: command had no output on stdout or stderr
# RUN: at line 2
/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s
# executed command: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s
# note: command had no output on stdout or stderr
# RUN: at line 3
/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-present -abs bar=0x1 -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# executed command: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-present -abs bar=0x1 -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# note: command had no output on stdout or stderr
# RUN: at line 4
/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-absent -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# executed command: /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/bin/llvm-jitlink -noexec -check-name=jitlink-check-bar-absent -check=/home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm-project/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_weak_references.s /home/b/sanitizer-aarch64-linux-bootstrap-asan/build/llvm_build_asan/test/ExecutionEngine/JITLink/x86-64/Output/MachO_weak_references.s.tmp/macho_weak_refs.o
# note: command had no output on stdout or stderr
# error: command failed with exit status: 1

--

********************
Testing:  0.. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90.. 
Slowest Tests:
--------------------------------------------------------------------------
133.72s: Clang :: Preprocessor/riscv-target-features.c
128.40s: Clang :: Driver/arm-cortex-cpus-1.c
126.37s: Clang :: Driver/arm-cortex-cpus-2.c
120.95s: Clang :: OpenMP/target_defaultmap_codegen_01.cpp
117.39s: Clang :: OpenMP/target_update_codegen.cpp
103.31s: Clang :: Preprocessor/arm-target-features.c
102.69s: Clang :: Preprocessor/aarch64-target-features.c

@PrasoonMishra PrasoonMishra deleted the hide-barrier-latency branch December 17, 2025 08:52
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Projects

None yet

Development

Successfully merging this pull request may close these issues.

8 participants