Skip to content

Commit cbe22c7

Browse files
committed
[RISCV] Testcase to show wrong register allocation result of subreg liveness
This testcase shows that the live range isn't constructed correctly when subreg liveness is enabled. In the testcase `early-clobber-tied-def-subreg-liveness.ll`, the first operand of `vsext.vf2 v8, v16, v0.t` is both a def and a use; the use comes from the memory location of `.L__const._Z3foov.var_49`, which is loaded and spilled onto the stack, and then...v8 is overwritten by other instructions. ``` lui a0, %hi(.L__const._Z3foov.var_49) addi a0, a0, %lo(.L__const._Z3foov.var_49) ... vle16.v v8, (a0) # Load value from var_49 ... addi a0, sp, 16 ... vs2r.v v8, (a0) # Spill ... vl2r.v v8, (a1) # Reload ... lui a0, %hi(.L__const._Z3foov.var_40) addi a0, a0, %lo(.L__const._Z3foov.var_40) vle16.v v8, (a0) # Load value...into v8??? vmsbc.vx v0, v8, a0 # And use that. ... vsext.vf2 v8, v16, v0.t # But v8 is used here...and is expected to hold the value from the reload ``` The `early-clobber-tied-def-subreg-liveness.mir` has more detailed information about this: `%25.sub_vrm2_0` is defined at 64, used at 464, and defined again at 464, and we have used an inline asm that clobbers all vector registers to trigger the splitter. 
``` 0B bb.0.entry: 16B %0:gpr = LUI target-flags(riscv-hi) @__const._Z3foov.var_49 32B %1:gpr = ADDI %0:gpr, target-flags(riscv-lo) @__const._Z3foov.var_49 48B dead $x0 = PseudoVSETIVLI 2, 73, implicit-def $vl, implicit-def $vtype 64B undef %25.sub_vrm2_0:vrn4m2nov0 = PseudoVLE16_V_M2 %1:gpr, 2, 4, implicit $vl, implicit $vtype 80B %3:gpr = LUI target-flags(riscv-hi) @__const._Z3foov.var_48 96B %4:gpr = ADDI %3:gpr, target-flags(riscv-lo) @__const._Z3foov.var_48 112B %5:vr = PseudoVLE8_V_M1 %4:gpr, 2, 3, implicit $vl, implicit $vtype 128B %6:gpr = LUI target-flags(riscv-hi) @__const._Z3foov.var_46 144B %7:gpr = ADDI %6:gpr, target-flags(riscv-lo) @__const._Z3foov.var_46 160B %25.sub_vrm2_1:vrn4m2nov0 = PseudoVLE16_V_M2 %7:gpr, 2, 4, implicit $vl, implicit $vtype 176B %9:gpr = LUI target-flags(riscv-hi) @__const._Z3foov.var_45 192B %10:gpr = ADDI %9:gpr, target-flags(riscv-lo) @__const._Z3foov.var_45 208B %25.sub_vrm2_2:vrn4m2nov0 = PseudoVLE16_V_M2 %10:gpr, 2, 4, implicit $vl, implicit $vtype 224B INLINEASM &"" [sideeffect] [attdialect], $0:[clobber], ... 
240B %12:gpr = LUI target-flags(riscv-hi) @__const._Z3foov.var_44 256B %13:gpr = ADDI %12:gpr, target-flags(riscv-lo) @__const._Z3foov.var_44 272B dead $x0 = PseudoVSETIVLI 2, 73, implicit-def $vl, implicit-def $vtype 288B %25.sub_vrm2_3:vrn4m2nov0 = PseudoVLE16_V_M2 %13:gpr, 2, 4, implicit $vl, implicit $vtype 304B $x0 = PseudoVSETIVLI 2, 73, implicit-def $vl, implicit-def $vtype 320B %16:gpr = LUI target-flags(riscv-hi) @__const._Z3foov.var_40 336B %17:gpr = ADDI %16:gpr, target-flags(riscv-lo) @__const._Z3foov.var_40 352B %18:vrm2 = PseudoVLE16_V_M2 %17:gpr, 2, 4, implicit $vl, implicit $vtype 368B $x0 = PseudoVSETIVLI 2, 73, implicit-def $vl, implicit-def $vtype 384B %20:gpr = LUI 1048572 400B %21:gpr = ADDIW %20:gpr, 928 416B early-clobber %22:vr = PseudoVMSBC_VX_M2 %18:vrm2, %21:gpr, 2, 4, implicit $vl, implicit $vtype 432B $x0 = PseudoVSETIVLI 2, 9, implicit-def $vl, implicit-def $vtype 448B $v0 = COPY %22:vr 464B early-clobber %25.sub_vrm2_0:vrn4m2nov0 = PseudoVSEXT_VF2_M2_MASK %25.sub_vrm2_0:vrn4m2nov0(tied-def 0), %5:vr, killed $v0, 2, 4, 0, implicit $vl, implicit $vtype 480B %26:gpr = LUI target-flags(riscv-hi) @var_47 496B %27:gpr = ADDI %26:gpr, target-flags(riscv-lo) @var_47 512B PseudoVSSEG4E16_V_M2 %25:vrn4m2nov0, %27:gpr, 2, 4, implicit $vl, implicit $vtype 528B PseudoRET ``` When spliter will try to split %25: ``` selectOrSplit VRN4M2NoV0:%25 [64r,160r:4)[160r,208r:0)[208r,288r:1)[288r,464e:2)[464e,512r:3) 0@160r 1@208r 2@288r 3@464e 4@64r L0000000000000030 [160r,512r:0) 0@160r L00000000000000C0 [208r,512r:0) 0@208r L0000000000000300 [288r,512r:0) 0@288r L000000000000000C [64r,464e:1)[464e,512r:0) 0@464e 1@64r weight:1.179245e-02 w=1.179245e-02 ``` ``` Best local split range: 64r-208r, 6.999861e-03, 3 instrs enterIntvBefore 64r: not live leaveIntvAfter 208r: valno 1 useIntv [64B;216r): [64B;216r):1 blit [64r,160r:4): [64r;160r)=1(%29)(recalc) blit [160r,208r:0): [160r;208r)=1(%29)(recalc) blit [208r,288r:1): [208r;216r)=1(%29)(recalc) 
[216r;288r)=0(%28)(recalc) blit [288r,464e:2): [288r;464e)=0(%28)(recalc) blit [464e,512r:3): [464e;512r)=0(%28)(recalc) rewr %bb.0 464e:0 early-clobber %28.sub_vrm2_0:vrn4m2nov0 = PseudoVSEXT_VF2_M2_MASK %25.sub_vrm2_0:vrn4m2nov0(tied-def 0), %5:vr, $v0, 2, 4, 0, implicit $vl, implicit $vtype rewr %bb.0 288r:0 %28.sub_vrm2_3:vrn4m2nov0 = PseudoVLE16_V_M2 %13:gpr, 2, 4, implicit $vl, implicit $vtype rewr %bb.0 208r:1 %29.sub_vrm2_2:vrn4m2nov0 = PseudoVLE16_V_M2 %10:gpr, 2, 4, implicit $vl, implicit $vtype rewr %bb.0 160r:1 %29.sub_vrm2_1:vrn4m2nov0 = PseudoVLE16_V_M2 %7:gpr, 2, 4, implicit $vl, implicit $vtype rewr %bb.0 64r:1 undef %29.sub_vrm2_0:vrn4m2nov0 = PseudoVLE16_V_M2 %1:gpr, 2, 4, implicit $vl, implicit $vtype rewr %bb.0 464B:0 early-clobber %28.sub_vrm2_0:vrn4m2nov0 = PseudoVSEXT_VF2_M2_MASK %28.sub_vrm2_0:vrn4m2nov0(tied-def 0), %5:vr, $v0, 2, 4, 0, implicit $vl, implicit $vtype rewr %bb.0 512B:0 PseudoVSSEG4E16_V_M2 %28:vrn4m2nov0, %27:gpr, 2, 4, implicit $vl, implicit $vtype rewr %bb.0 216B:1 undef %28.sub_vrm1_0_sub_vrm1_1_sub_vrm1_2_sub_vrm1_3_sub_vrm1_4_sub_vrm1_5:vrn4m2nov0 = COPY %29.sub_vrm1_0_sub_vrm1_1_sub_vrm1_2_sub_vrm1_3_sub_vrm1_4_sub_vrm1_5:vrn4m2nov0 queuing new interval: %28 [216r,288r:0)[288r,464e:1)[464e,512r:2) 0@216r 1@288r 2@464e L000000000000000C [216r,216d:0)[464e,512r:1) 0@216r 1@464e L0000000000000300 [288r,512r:0) 0@288r L00000000000000C0 [216r,512r:0) 0@216r L0000000000000030 [216r,512r:0) 0@216r weight:8.706897e-03 Enqueuing %28 queuing new interval: %29 [64r,160r:0)[160r,208r:1)[208r,216r:2) 0@64r 1@160r 2@208r L000000000000000C [64r,216r:0) 0@64r L00000000000000C0 [208r,216r:0) 0@208r L0000000000000030 [160r,216r:0) 0@160r weight:1.097826e-02 Enqueuing %29 ``` The live range of the first subreg part of %25 becomes [216r,216d:0)[464e,512r:1); however, the first live range should live until 464e rather than just dying at [216r,216d:0). The register allocator then allocates a wrong result according to this live range info. 
Reviewed By: craig.topper Differential Revision: https://reviews.llvm.org/D126047
1 parent f46ce03 commit cbe22c7

File tree

2 files changed

+342
-0
lines changed

2 files changed

+342
-0
lines changed
Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2+
; RUN: llc -O2 -mtriple riscv64 -mattr=+v,+m,+zbb -riscv-enable-subreg-liveness < %s \
3+
; RUN: | FileCheck %s
4+
5+
@var_47 = dso_local global [2 x i16] [i16 -32732, i16 19439], align 2
6+
@__const._Z3foov.var_49 = private unnamed_addr constant [2 x i16] [i16 157, i16 24062], align 2
7+
@__const._Z3foov.var_48 = private unnamed_addr constant [2 x i8] c"\AEN", align 1
8+
@__const._Z3foov.var_46 = private unnamed_addr constant [2 x i16] [i16 729, i16 -32215], align 2
9+
@__const._Z3foov.var_45 = private unnamed_addr constant [2 x i16] [i16 -27462, i16 -1435], align 2
10+
@__const._Z3foov.var_44 = private unnamed_addr constant [2 x i16] [i16 22611, i16 -18435], align 2
11+
@__const._Z3foov.var_40 = private unnamed_addr constant [2 x i16] [i16 -19932, i16 -26252], align 2
12+
13+
; Regression test for subreg liveness: the first operand of the masked
; vsext.vf2 is a tied early-clobber def whose use must hold the var_49
; value reloaded from the stack (see the commit message for the full
; MIR-level analysis of %25.sub_vrm2_0).
; NOTE(review): the CHECK lines were autogenerated (see the NOTE at the
; top of the file) and, per the commit message, capture the currently
; WRONG allocation — they will need regeneration once the live-range
; construction bug is fixed.
define void @_Z3foov() {
14+
; CHECK-LABEL: _Z3foov:
15+
; CHECK: # %bb.0: # %entry
16+
; CHECK-NEXT: addi sp, sp, -16
17+
; CHECK-NEXT: .cfi_def_cfa_offset 16
18+
; CHECK-NEXT: csrr a0, vlenb
19+
; CHECK-NEXT: li a1, 10
20+
; CHECK-NEXT: mul a0, a0, a1
21+
; CHECK-NEXT: sub sp, sp, a0
22+
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_49)
23+
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_49)
24+
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, mu
25+
; CHECK-NEXT: vle16.v v8, (a0)
26+
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_48)
27+
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_48)
28+
; CHECK-NEXT: vle8.v v10, (a0)
29+
; CHECK-NEXT: csrr a0, vlenb
30+
; CHECK-NEXT: slli a0, a0, 3
31+
; CHECK-NEXT: add a0, sp, a0
32+
; CHECK-NEXT: addi a0, a0, 16
33+
; CHECK-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
34+
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_46)
35+
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_46)
36+
; CHECK-NEXT: vle16.v v10, (a0)
37+
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_45)
38+
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_45)
39+
; CHECK-NEXT: vle16.v v12, (a0)
40+
; CHECK-NEXT: addi a0, sp, 16
41+
; CHECK-NEXT: csrr a1, vlenb
42+
; CHECK-NEXT: slli a1, a1, 1
43+
; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
44+
; CHECK-NEXT: add a0, a0, a1
45+
; CHECK-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
46+
; CHECK-NEXT: add a0, a0, a1
47+
; CHECK-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
48+
; CHECK-NEXT: add a0, a0, a1
49+
; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
50+
; CHECK-NEXT: #APP
51+
; CHECK-NEXT: #NO_APP
52+
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44)
53+
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44)
54+
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, mu
55+
; CHECK-NEXT: addi a1, sp, 16
56+
; CHECK-NEXT: csrr a2, vlenb
57+
; CHECK-NEXT: slli a2, a2, 1
58+
; CHECK-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
59+
; CHECK-NEXT: add a1, a1, a2
60+
; CHECK-NEXT: vl2r.v v10, (a1) # Unknown-size Folded Reload
61+
; CHECK-NEXT: add a1, a1, a2
62+
; CHECK-NEXT: vl2r.v v12, (a1) # Unknown-size Folded Reload
63+
; CHECK-NEXT: add a1, a1, a2
64+
; CHECK-NEXT: vl2r.v v14, (a1) # Unknown-size Folded Reload
65+
; CHECK-NEXT: vle16.v v14, (a0)
66+
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, mu
67+
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
68+
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
69+
; CHECK-NEXT: vle16.v v8, (a0)
70+
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, mu
71+
; CHECK-NEXT: lui a0, 1048572
72+
; CHECK-NEXT: addiw a0, a0, 928
73+
; CHECK-NEXT: vmsbc.vx v0, v8, a0
74+
; CHECK-NEXT: vsetivli zero, 2, e16, m2, tu, mu
75+
; CHECK-NEXT: csrr a0, vlenb
76+
; CHECK-NEXT: slli a0, a0, 3
77+
; CHECK-NEXT: add a0, sp, a0
78+
; CHECK-NEXT: addi a0, a0, 16
79+
; CHECK-NEXT: vl1r.v v16, (a0) # Unknown-size Folded Reload
80+
; CHECK-NEXT: vsext.vf2 v8, v16, v0.t
81+
; CHECK-NEXT: lui a0, %hi(var_47)
82+
; CHECK-NEXT: addi a0, a0, %lo(var_47)
83+
; CHECK-NEXT: vsseg4e16.v v8, (a0)
84+
; CHECK-NEXT: csrr a0, vlenb
85+
; CHECK-NEXT: li a1, 10
86+
; CHECK-NEXT: mul a0, a0, a1
87+
; CHECK-NEXT: add sp, sp, a0
88+
; CHECK-NEXT: addi sp, sp, 16
89+
; CHECK-NEXT: ret
90+
; The inline asm clobbers every vector register (v0-v31), forcing the
; live vector values to be spilled before it and reloaded after it,
; which is what drives the live-range splitter.
entry:
91+
%0 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_49, i64 2)
92+
%1 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> undef, ptr nonnull @__const._Z3foov.var_48, i64 2)
93+
%2 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_46, i64 2)
94+
%3 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_45, i64 2)
95+
tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"() #2
96+
%4 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_44, i64 2)
97+
%5 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
98+
%6 = tail call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16> undef, ptr nonnull @__const._Z3foov.var_40, i64 2)
99+
%7 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
100+
%8 = tail call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> %6, i16 -15456, i64 2)
101+
%9 = tail call i64 @llvm.riscv.vsetvli.i64(i64 2, i64 1, i64 1)
102+
; %0 (loaded from var_49, spilled across the asm) is the tied
; passthru/def operand of this masked vsext — the instruction at the
; heart of the mis-allocation described in the commit message.
%10 = tail call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %8, i64 2, i64 0)
103+
tail call void @llvm.riscv.vsseg4.nxv8i16.i64(<vscale x 8 x i16> %10, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, <vscale x 8 x i16> %4, ptr nonnull @var_47, i64 2)
104+
ret void
105+
}
106+
107+
declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16.i64(<vscale x 8 x i16>, ptr nocapture, i64)
108+
109+
declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>, ptr nocapture, i64)
110+
111+
declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
112+
113+
declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16>, i16, i64)
114+
115+
declare <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i1>, i64, i64 immarg)
116+
117+
declare void @llvm.riscv.vsseg4.nxv8i16.i64(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, ptr nocapture, i64)

0 commit comments

Comments
 (0)