From f31a18ded405bdbc7b44a011d1434c83e7c39347 Mon Sep 17 00:00:00 2001
From: Michael Munday
Date: Wed, 11 Apr 2018 22:47:24 +0100
Subject: [PATCH] cmd/compile: add some generic composite type optimizations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Propagate values through some wide Zero/Move operations. Among other
things this allows us to optimize some kinds of array initialization.
For example, the following code no longer requires a temporary to be
allocated on the stack. Instead it writes the values directly into the
return value.

  func f(i uint32) [4]uint32 {
  	return [4]uint32{i, i+1, i+2, i+3}
  }

The return value is unnecessarily cleared, but removing that is
probably a task for dead store analysis (I think it needs to be able
to match multiple Store ops to wide Zero ops).

In order to reliably remove stack variables that are rendered
unnecessary by these new rules I've added a new generic version of the
unread autos elimination pass.

These rules are triggered more than 5000 times when building and
testing the standard library.

Updates #15925 (fixes for arrays of up to 4 elements).
Updates #24386 (fixes for up to 4 kept elements).
Updates #24416.

compilebench results:

name       old time/op       new time/op       delta
Template          353ms ± 5%        359ms ± 3%    ~     (p=0.143 n=10+10)
Unicode           219ms ± 1%        217ms ± 4%    ~     (p=0.740 n=7+10)
GoTypes           1.26s ± 1%        1.26s ± 2%    ~     (p=0.549 n=9+10)
Compiler          6.00s ± 1%        6.08s ± 1%  +1.42%  (p=0.000 n=9+8)
SSA               15.3s ± 2%        15.6s ± 1%  +2.43%  (p=0.000 n=10+10)
Flate             237ms ± 2%        240ms ± 2%  +1.31%  (p=0.015 n=10+10)
GoParser          285ms ± 1%        285ms ± 1%    ~     (p=0.878 n=8+8)
Reflect           797ms ± 3%        807ms ± 2%    ~     (p=0.065 n=9+10)
Tar               334ms ± 0%        335ms ± 4%    ~     (p=0.460 n=8+10)
XML               419ms ± 0%        423ms ± 1%  +0.91%  (p=0.001 n=7+9)
StdCmd            46.0s ± 0%        46.4s ± 0%  +0.85%  (p=0.000 n=9+9)

name       old user-time/op  new user-time/op  delta
Template          337ms ± 3%        346ms ± 5%    ~     (p=0.053 n=9+10)
Unicode           205ms ±10%        205ms ± 8%    ~     (p=1.000 n=10+10)
GoTypes           1.22s ± 2%        1.21s ± 3%    ~     (p=0.436 n=10+10)
Compiler          5.85s ± 1%        5.93s ± 0%  +1.46%  (p=0.000 n=10+8)
SSA               14.9s ± 1%        15.3s ± 1%  +2.62%  (p=0.000 n=10+10)
Flate             229ms ± 4%        228ms ± 6%    ~     (p=0.796 n=10+10)
GoParser          271ms ± 3%        275ms ± 4%    ~     (p=0.165 n=10+10)
Reflect           779ms ± 5%        775ms ± 2%    ~     (p=0.971 n=10+10)
Tar               317ms ± 4%        319ms ± 5%    ~     (p=0.853 n=10+10)
XML               404ms ± 4%        409ms ± 5%    ~     (p=0.436 n=10+10)

name       old alloc/op      new alloc/op      delta
Template         34.9MB ± 0%       35.0MB ± 0%  +0.26%  (p=0.000 n=10+10)
Unicode          29.3MB ± 0%       29.3MB ± 0%  +0.02%  (p=0.000 n=10+10)
GoTypes           115MB ± 0%        115MB ± 0%  +0.30%  (p=0.000 n=10+10)
Compiler          519MB ± 0%        521MB ± 0%  +0.30%  (p=0.000 n=10+10)
SSA              1.55GB ± 0%       1.57GB ± 0%  +1.34%  (p=0.000 n=10+9)
Flate            24.1MB ± 0%       24.2MB ± 0%  +0.10%  (p=0.000 n=10+10)
GoParser         28.1MB ± 0%       28.1MB ± 0%  +0.07%  (p=0.000 n=10+10)
Reflect          78.7MB ± 0%       78.7MB ± 0%  +0.03%  (p=0.000 n=8+10)
Tar              34.4MB ± 0%       34.5MB ± 0%  +0.12%  (p=0.000 n=10+10)
XML              43.2MB ± 0%       43.2MB ± 0%  +0.13%  (p=0.000 n=10+10)

name       old allocs/op     new allocs/op     delta
Template           330k ± 0%         330k ± 0%  -0.01%  (p=0.017 n=10+10)
Unicode            337k ± 0%         337k ± 0%  +0.01%  (p=0.000 n=9+10)
GoTypes           1.15M ± 0%        1.15M ± 0%  +0.03%  (p=0.000 n=10+10)
Compiler          4.77M ± 0%        4.77M ± 0%  +0.03%  (p=0.000 n=9+10)
SSA               12.5M ± 0%        12.6M ± 0%  +1.16%  (p=0.000 n=10+10)
Flate              221k ± 0%         221k ± 0%  +0.05%  (p=0.000 n=9+10)
GoParser           275k ± 0%         275k ± 0%  +0.01%  (p=0.014 n=10+9)
Reflect            944k ± 0%         944k ± 0%  -0.02%  (p=0.000 n=10+10)
Tar                324k ± 0%         323k ± 0%  -0.12%  (p=0.000 n=10+10)
XML                384k ± 0%         384k ± 0%  -0.01%  (p=0.001 n=10+10)

name       old object-bytes  new object-bytes  delta
Template          476kB ± 0%        476kB ± 0%  -0.04%  (p=0.000 n=10+10)
Unicode           218kB ± 0%        218kB ± 0%    ~     (all equal)
GoTypes          1.58MB ± 0%       1.58MB ± 0%  -0.04%  (p=0.000 n=10+10)
Compiler         6.25MB ± 0%       6.24MB ± 0%  -0.09%  (p=0.000 n=10+10)
SSA              15.9MB ± 0%       16.1MB ± 0%  +1.22%  (p=0.000 n=10+10)
Flate             304kB ± 0%        304kB ± 0%  -0.13%  (p=0.000 n=10+10)
GoParser          370kB ± 0%        370kB ± 0%  -0.00%  (p=0.000 n=10+10)
Reflect          1.27MB ± 0%       1.27MB ± 0%  -0.12%  (p=0.000 n=10+10)
Tar               421kB ± 0%        419kB ± 0%  -0.64%  (p=0.000 n=10+10)
XML               518kB ± 0%        517kB ± 0%  -0.12%  (p=0.000 n=10+10)

name       old export-bytes  new export-bytes  delta
Template         16.7kB ± 0%       16.7kB ± 0%    ~     (all equal)
Unicode          6.52kB ± 0%       6.52kB ± 0%    ~     (all equal)
GoTypes          29.2kB ± 0%       29.2kB ± 0%    ~     (all equal)
Compiler         88.0kB ± 0%       88.0kB ± 0%    ~     (all equal)
SSA               109kB ± 0%        109kB ± 0%    ~     (all equal)
Flate            4.49kB ± 0%       4.49kB ± 0%    ~     (all equal)
GoParser         8.10kB ± 0%       8.10kB ± 0%    ~     (all equal)
Reflect          7.71kB ± 0%       7.71kB ± 0%    ~     (all equal)
Tar              9.15kB ± 0%       9.15kB ± 0%    ~     (all equal)
XML              12.3kB ± 0%       12.3kB ± 0%    ~     (all equal)

name       old text-bytes    new text-bytes    delta
HelloSize         676kB ± 0%        672kB ± 0%  -0.59%  (p=0.000 n=10+10)
CmdGoSize        7.26MB ± 0%       7.24MB ± 0%  -0.18%  (p=0.000 n=10+10)

name       old data-bytes    new data-bytes    delta
HelloSize        10.2kB ± 0%       10.2kB ± 0%    ~     (all equal)
CmdGoSize         248kB ± 0%        248kB ± 0%    ~     (all equal)

name       old bss-bytes     new bss-bytes     delta
HelloSize         125kB ± 0%        125kB ± 0%    ~     (all equal)
CmdGoSize         145kB ± 0%        145kB ± 0%    ~     (all equal)

name       old exe-bytes     new exe-bytes     delta
HelloSize        1.46MB ± 0%       1.45MB ± 0%  -0.31%  (p=0.000 n=10+10)
CmdGoSize        14.7MB ± 0%       14.7MB ± 0%  -0.17%  (p=0.000 n=10+10)

Change-Id: Ic72b0c189dd542f391e1c9ab88a76e9148dc4285
Reviewed-on: https://go-review.googlesource.com/106495
Run-TryBot: Michael Munday
TryBot-Result: Gobot Gobot
Reviewed-by: Keith Randall
---
 src/cmd/compile/internal/ssa/compile.go       |    1 +
 src/cmd/compile/internal/ssa/deadstore.go     |  147 +
 .../compile/internal/ssa/gen/generic.rules    |  565 ++-
 src/cmd/compile/internal/ssa/rewrite.go       |   71 +
 .../compile/internal/ssa/rewritegeneric.go    | 4470 +++++++++++++----
 src/runtime/internal/atomic/atomic_test.go    |    6 +-
 test/codegen/stack.go                         |   79 +-
 test/fixedbugs/issue20529.go                  |    5 +-
 8 files changed, 4330 insertions(+), 1014 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 2f15cfd1e7aa98..69bb35655af575 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -371,6 +371,7 @@ var passes = [...]pass{
 	{name: "decompose builtin", fn: decomposeBuiltIn, required: true},
 	{name: "softfloat", fn: softfloat, required: true},
 	{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
+	{name: "dead auto elim", fn: elimDeadAutosGeneric},
 	{name: "generic deadcode", fn: deadcode},
 	{name: "check bce", fn: checkbce},
 	{name: "branchelim", fn: branchelim},
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index 0b98f4104b0b2b..4b2f57dcd94caa 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -133,6 +133,153 @@ func dse(f *Func) {
 	}
 }
 
+// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
+// we track the operations that the address of each auto reaches and if it only
+// reaches stores then we delete all the stores. The other operations will then
+// be eliminated by the dead code elimination pass.
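A minimal sketch (my own illustrative example, not code from this CL) of the
kind of function the new pass is aimed at: once the new generic.rules forward
the element values straight into the return slot, the temporary's address only
reaches Store/VarDef/VarKill ops, so the pass rewrites those stores into plain
copies of their memory argument and the stack slot can later be dropped.

    func mkPair(a, b uint64) [2]uint64 {
    	t := [2]uint64{a, b} // temporary auto; its address only reaches stores
    	return t             // element values are written directly into the result
    }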
+func elimDeadAutosGeneric(f *Func) {
+	addr := make(map[*Value]GCNode) // values that the address of the auto reaches
+	elim := make(map[*Value]GCNode) // values that could be eliminated if the auto is unused
+	used := make(map[GCNode]bool)   // used autos that must be kept
+
+	// visit the value and report whether any of the maps are updated
+	visit := func(v *Value) (changed bool) {
+		args := v.Args
+		switch v.Op {
+		case OpAddr:
+			// Propagate the address if it points to an auto.
+			n, ok := v.Aux.(GCNode)
+			if !ok || n.StorageClass() != ClassAuto {
+				return
+			}
+			if addr[v] == nil {
+				addr[v] = n
+				changed = true
+			}
+			return
+		case OpVarDef, OpVarKill:
+			// v should be eliminated if we eliminate the auto.
+			n, ok := v.Aux.(GCNode)
+			if !ok || n.StorageClass() != ClassAuto {
+				return
+			}
+			if elim[v] == nil {
+				elim[v] = n
+				changed = true
+			}
+			return
+		case OpVarLive:
+			// Don't delete the auto if it needs to be kept alive.
+			n, ok := v.Aux.(GCNode)
+			if !ok || n.StorageClass() != ClassAuto {
+				return
+			}
+			if !used[n] {
+				used[n] = true
+				changed = true
+			}
+			return
+		case OpStore, OpMove, OpZero:
+			// v should be eliminated if we eliminate the auto.
+			n, ok := addr[args[0]]
+			if ok && elim[v] == nil {
+				elim[v] = n
+				changed = true
+			}
+			// Other args might hold pointers to autos.
+			args = args[1:]
+		}
+
+		// The code below assumes that we have handled all the ops
+		// with sym effects already. Sanity check that here.
+		// Ignore Args since they can't be autos.
+		if v.Op.SymEffect() != SymNone && v.Op != OpArg {
+			panic("unhandled op with sym effect")
+		}
+
+		if v.Uses == 0 || len(args) == 0 {
+			return
+		}
+
+		// If the address of the auto reaches a memory or control
+		// operation not covered above then we probably need to keep it.
+		if v.Type.IsMemory() || v.Type.IsFlags() || (v.Op != OpPhi && v.MemoryArg() != nil) {
+			for _, a := range args {
+				if n, ok := addr[a]; ok {
+					if !used[n] {
+						used[n] = true
+						changed = true
+					}
+				}
+			}
+			return
+		}
+
+		// Propagate any auto addresses through v.
+		node := GCNode(nil)
+		for _, a := range args {
+			if n, ok := addr[a]; ok && !used[n] {
+				if node == nil {
+					node = n
+				} else if node != n {
+					// Most of the time we only see one pointer
+					// reaching an op, but some ops can take
+					// multiple pointers (e.g. NeqPtr, Phi etc.).
+					// This is rare, so just propagate the first
+					// value to keep things simple.
+					used[n] = true
+					changed = true
+				}
+			}
+		}
+		if node == nil {
+			return
+		}
+		if addr[v] == nil {
+			// The address of an auto reaches this op.
+			addr[v] = node
+			changed = true
+			return
+		}
+		if addr[v] != node {
+			// This doesn't happen in practice, but catch it just in case.
+			used[node] = true
+			changed = true
+		}
+		return
+	}
+
+	iterations := 0
+	for {
+		if iterations == 4 {
+			// give up
+			return
+		}
+		iterations++
+		changed := false
+		for _, b := range f.Blocks {
+			for _, v := range b.Values {
+				changed = visit(v) || changed
+			}
+		}
+		if !changed {
+			break
+		}
+	}
+
+	// Eliminate stores to unread autos.
+	for v, n := range elim {
+		if used[n] {
+			continue
+		}
+		// replace with OpCopy
+		v.SetArgs1(v.MemoryArg())
+		v.Aux = nil
+		v.AuxInt = 0
+		v.Op = OpCopy
+	}
+}
+
 // elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill)
 // to autos that are never read from.
func elimUnreadAutos(f *Func) { diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index f6a5f857e4bd0d..a61ce96286a598 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -537,40 +537,162 @@ (NeqSlice x y) -> (NeqPtr (SlicePtr x) (SlicePtr y)) // Load of store of same address, with compatibly typed value and same size -(Load p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size() -> x +(Load p1 (Store {t2} p2 x _)) + && isSamePtr(p1, p2) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == sizeof(t2) + -> x +(Load p1 (Store {t2} p2 _ (Store {t3} p3 x _))) + && isSamePtr(p1, p3) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == sizeof(t2) + && disjoint(p3, sizeof(t3), p2, sizeof(t2)) + -> x +(Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _)))) + && isSamePtr(p1, p4) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == sizeof(t2) + && disjoint(p4, sizeof(t4), p2, sizeof(t2)) + && disjoint(p4, sizeof(t4), p3, sizeof(t3)) + -> x +(Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _))))) + && isSamePtr(p1, p5) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == sizeof(t2) + && disjoint(p5, sizeof(t5), p2, sizeof(t2)) + && disjoint(p5, sizeof(t5), p3, sizeof(t3)) + && disjoint(p5, sizeof(t5), p4, sizeof(t4)) + -> x // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits -(Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1) -> (Const64F [x]) -(Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1) -> (Const32F [f2i(float64(math.Float32frombits(uint32(x))))]) -(Load p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1) -> (Const64 [x]) -(Load p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))]) +(Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x]) +(Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [f2i(float64(math.Float32frombits(uint32(x))))]) +(Load p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x]) +(Load p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))]) + +// Float Loads up to Zeros so they can be constant folded. 
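As a concrete illustration of the comment above (an example of mine, assuming
the array has more than one element and therefore stays in memory rather than
being SSA'd), the final load can be floated up past the disjoint store to the
Zero op and then folded to a constant by the Zero-to-Load rules that follow:

    func zeroed() float64 {
    	var a [4]float64 // lowered to a Zero of a's backing store
    	a[1] = 1.5       // a store that is disjoint from a[3]
    	return a[3]      // this load can float up to the Zero and fold to 0
    }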
+(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + mem:(Zero [n] p3 _))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) + && disjoint(op, t1.Size(), p2, sizeof(t2)) + -> @mem.Block (Load op mem) +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + (Store {t3} p3 _ + mem:(Zero [n] p4 _)))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) + && disjoint(op, t1.Size(), p2, sizeof(t2)) + && disjoint(op, t1.Size(), p3, sizeof(t3)) + -> @mem.Block (Load op mem) +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + (Store {t3} p3 _ + (Store {t4} p4 _ + mem:(Zero [n] p5 _))))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) + && disjoint(op, t1.Size(), p2, sizeof(t2)) + && disjoint(op, t1.Size(), p3, sizeof(t3)) + && disjoint(op, t1.Size(), p4, sizeof(t4)) + -> @mem.Block (Load op mem) +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + (Store {t3} p3 _ + (Store {t4} p4 _ + (Store {t5} p5 _ + mem:(Zero [n] p6 _)))))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) + && disjoint(op, t1.Size(), p2, sizeof(t2)) + && disjoint(op, t1.Size(), p3, sizeof(t3)) + && disjoint(op, t1.Size(), p4, sizeof(t4)) + && disjoint(op, t1.Size(), p5, sizeof(t5)) + -> @mem.Block (Load op mem) + +// Zero to Load forwarding. +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && t1.IsBoolean() + && isSamePtr(p1, p2) + && n >= o + 1 + -> (ConstBool [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is8BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 1 + -> (Const8 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is16BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 2 + -> (Const16 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is32BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 4 + -> (Const32 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is64BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 8 + -> (Const64 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is32BitFloat(t1) + && isSamePtr(p1, p2) + && n >= o + 4 + -> (Const32F [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is64BitFloat(t1) + && isSamePtr(p1, p2) + && n >= o + 8 + -> (Const64F [0]) // Eliminate stores of values that have just been loaded from the same location. -// We also handle the common case where there are some intermediate stores to non-overlapping struct fields. -(Store {t1} p1 (Load p2 mem) mem) && - isSamePtr(p1, p2) && - t2.Size() == t1.(*types.Type).Size() -> mem -(Store {t1} (OffPtr [o1] p1) (Load (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ oldmem)) && - isSamePtr(p1, p2) && - isSamePtr(p1, p3) && - t2.Size() == t1.(*types.Type).Size() && - !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) -> mem -(Store {t1} (OffPtr [o1] p1) (Load (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ (Store {t4} (OffPtr [o4] p4) _ oldmem))) && - isSamePtr(p1, p2) && - isSamePtr(p1, p3) && - isSamePtr(p1, p4) && - t2.Size() == t1.(*types.Type).Size() && - !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) && - !overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) -> mem -(Store {t1} (OffPtr [o1] p1) (Load (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ (Store {t4} (OffPtr [o4] p4) _ (Store {t5} (OffPtr [o5] p5) _ oldmem)))) && - isSamePtr(p1, p2) && - isSamePtr(p1, p3) && - isSamePtr(p1, p4) && - isSamePtr(p1, p5) && - t2.Size() == t1.(*types.Type).Size() && - !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) && - !overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) && - !overlap(o1, t2.Size(), o5, t5.(*types.Type).Size()) -> mem +// We also handle the common case where there are some intermediate stores. 
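A Go-level sketch of the store-of-loaded-value pattern handled next, with one
intermediate store to a disjoint field (illustrative example of mine; the two
fields sit at offsets 0 and 8, so the intermediate store is provably disjoint):

    type T struct{ a, b int64 }

    func restore(p *T) {
    	x := p.a // Load from &p.a
    	p.b = 0  // intermediate Store, disjoint from &p.a
    	p.a = x  // stores back the value just loaded, so this Store can be dropped
    }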
+(Store {t1} p1 (Load p2 mem) mem) + && isSamePtr(p1, p2) + && t2.Size() == sizeof(t1) + -> mem +(Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ oldmem)) + && isSamePtr(p1, p2) + && t2.Size() == sizeof(t1) + && disjoint(p1, sizeof(t1), p3, sizeof(t3)) + -> mem +(Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem))) + && isSamePtr(p1, p2) + && t2.Size() == sizeof(t1) + && disjoint(p1, sizeof(t1), p3, sizeof(t3)) + && disjoint(p1, sizeof(t1), p4, sizeof(t4)) + -> mem +(Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem)))) + && isSamePtr(p1, p2) + && t2.Size() == sizeof(t1) + && disjoint(p1, sizeof(t1), p3, sizeof(t3)) + && disjoint(p1, sizeof(t1), p4, sizeof(t4)) + && disjoint(p1, sizeof(t1), p5, sizeof(t5)) + -> mem + +// Don't Store zeros to cleared variables. +(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _)) + && isConstZero(x) + && o >= 0 && sizeof(t) + o <= n && isSamePtr(p1, p2) + -> mem +(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _))) + && isConstZero(x) + && o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p3) + && disjoint(op, sizeof(t1), p2, sizeof(t2)) + -> mem +(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _)))) + && isConstZero(x) + && o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p4) + && disjoint(op, sizeof(t1), p2, sizeof(t2)) + && disjoint(op, sizeof(t1), p3, sizeof(t3)) + -> mem +(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _))))) + && isConstZero(x) + && o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p5) + && disjoint(op, sizeof(t1), p2, sizeof(t2)) + && disjoint(op, sizeof(t1), p3, sizeof(t3)) + && disjoint(op, sizeof(t1), p4, sizeof(t4)) + -> mem // Collapse OffPtr (OffPtr (OffPtr p [b]) [a]) -> (OffPtr p [a+b]) @@ -657,9 +779,9 @@ // un-SSAable values use mem->mem copies (Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(*types.Type)) -> - (Move {t} [t.(*types.Type).Size()] dst src mem) + (Move {t} [sizeof(t)] dst src mem) (Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(*types.Type)) -> - (Move {t} [t.(*types.Type).Size()] dst src (VarDef {x} mem)) + (Move {t} [sizeof(t)] dst src (VarDef {x} mem)) // array ops (ArraySelect (ArrayMake1 x)) -> x @@ -1278,3 +1400,382 @@ // so this rule should trigger reliably. (InterCall [argsize] (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, itab, off) != nil -> (StaticCall [argsize] {devirt(v, itab, off)} mem) + +// Move and Zero optimizations. +// Move source and destination may overlap. + +// Convert Moves into Zeros when the source is known to be zeros. +(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2) + -> (Zero {t} [n] dst1 mem) +(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0) + -> (Zero {t} [n] dst1 mem) + +// Don't Store to variables that are about to be overwritten by Move/Zero. +(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) + && isSamePtr(p1, p2) && store.Uses == 1 + && n >= o2 + sizeof(t2) + && clobber(store) + -> (Zero {t1} [n] p1 mem) +(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) + && isSamePtr(dst1, dst2) && store.Uses == 1 + && n >= o2 + sizeof(t2) + && disjoint(src1, n, op, sizeof(t2)) + && clobber(store) + -> (Move {t1} [n] dst1 src1 mem) + +// Don't Move to variables that are immediately completely overwritten. 
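A sketch of the dead-overwrite pattern the next group of rules targets (my
example; whether the first assignment lowers to a Move or to individual Stores
depends on earlier passes, but either way it is removable because r is
completely overwritten before it is ever read):

    func overwrite() [4]int64 {
    	r := [4]int64{1, 2, 3, 4} // dead: r is never read before the next assignment
    	r = [4]int64{5, 6, 7, 8}
    	return r
    }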
+(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) + && move.Uses == 1 + && isSamePtr(dst1, dst2) + && clobber(move) + -> (Zero {t} [n] dst1 mem) +(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) + && move.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(move) + -> (Move {t} [n] dst1 src1 mem) +(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + && move.Uses == 1 && vardef.Uses == 1 + && isSamePtr(dst1, dst2) + && clobber(move) && clobber(vardef) + -> (Zero {t} [n] dst1 (VarDef {x} mem)) +(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + && move.Uses == 1 && vardef.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(move) && clobber(vardef) + -> (Move {t} [n] dst1 src1 (VarDef {x} mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [0] p2) d2 + m3:(Move [n] p3 _ mem))) + && m2.Uses == 1 && m3.Uses == 1 + && o1 == sizeof(t2) + && n == sizeof(t2) + sizeof(t1) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && clobber(m2) && clobber(m3) + -> (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [0] p3) d3 + m4:(Move [n] p4 _ mem)))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 + && o2 == sizeof(t3) + && o1-o2 == sizeof(t2) + && n == sizeof(t3) + sizeof(t2) + sizeof(t1) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && clobber(m2) && clobber(m3) && clobber(m4) + -> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [o3] p3) d3 + m4:(Store {t4} op4:(OffPtr [0] p4) d4 + m5:(Move [n] p5 _ mem))))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 + && o3 == sizeof(t4) + && o2-o3 == sizeof(t3) + && o1-o2 == sizeof(t2) + && n == sizeof(t4) + sizeof(t3) + sizeof(t2) + sizeof(t1) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5) + -> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + +// Don't Zero variables that are immediately completely overwritten +// before being accessed. 
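The same idea applies to zero-initialization (illustrative example of mine):
the implicit Zero of a can be removed because the Move from *src completely
overwrites a before anything reads it.

    func copyIn(src *[8]byte) [8]byte {
    	var a [8]byte // implicit Zero, immediately overwritten below
    	a = *src      // Move that covers all of a
    	return a
    }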
+(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) + && zero.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(zero) + -> (Move {t} [n] dst1 src1 mem) +(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) + && zero.Uses == 1 && vardef.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(zero) && clobber(vardef) + -> (Move {t} [n] dst1 src1 (VarDef {x} mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [0] p2) d2 + m3:(Zero [n] p3 mem))) + && m2.Uses == 1 && m3.Uses == 1 + && o1 == sizeof(t2) + && n == sizeof(t2) + sizeof(t1) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && clobber(m2) && clobber(m3) + -> (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [0] p3) d3 + m4:(Zero [n] p4 mem)))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 + && o2 == sizeof(t3) + && o1-o2 == sizeof(t2) + && n == sizeof(t3) + sizeof(t2) + sizeof(t1) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && clobber(m2) && clobber(m3) && clobber(m4) + -> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [o3] p3) d3 + m4:(Store {t4} op4:(OffPtr [0] p4) d4 + m5:(Zero [n] p5 mem))))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 + && o3 == sizeof(t4) + && o2-o3 == sizeof(t3) + && o1-o2 == sizeof(t2) + && n == sizeof(t4) + sizeof(t3) + sizeof(t2) + sizeof(t1) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5) + -> (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + +// Don't Move from memory if the values are likely to already be +// in registers. 
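The rules that follow target composites that were just assembled from values
already sitting in registers, for example (a sketch of mine; whether it
actually triggers depends on the size, alignment and registerizable checks in
the conditions below):

    func setPair(dst *[2]uint64, x uint64) {
    	// The literal is built from register values and then copied to *dst;
    	// these rules replace the whole-array Move with two direct Stores.
    	*dst = [2]uint64{x, x + 1}
    }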
+(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [0] p3) d2 _))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && o2 == sizeof(t3) + && n == sizeof(t2) + sizeof(t3) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [0] dst) d2 mem)) +(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [0] p4) d3 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && o3 == sizeof(t4) + && o2-o3 == sizeof(t3) + && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [0] dst) d3 mem))) +(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [o4] p4) d3 + (Store {t5} op5:(OffPtr [0] p5) d4 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && alignof(t5) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && o4 == sizeof(t5) + && o3-o4 == sizeof(t4) + && o2-o3 == sizeof(t3) + && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [0] dst) d4 mem)))) + +// Same thing but with VarDef in the middle. 
+(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [0] p3) d2 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && o2 == sizeof(t3) + && n == sizeof(t2) + sizeof(t3) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [0] dst) d2 mem)) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [0] p4) d3 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && o3 == sizeof(t4) + && o2-o3 == sizeof(t3) + && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [0] dst) d3 mem))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [o4] p4) d3 + (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && alignof(t5) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && o4 == sizeof(t5) + && o3-o4 == sizeof(t4) + && o2-o3 == sizeof(t3) + && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [0] dst) d4 mem)))) + +// Prefer to Zero and Store than to Move. 
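A sketch of the mostly-zero case the next rules prefer to handle with a Zero
plus individual Stores instead of a Move (my example; the temporary is Zeroed,
one element is Stored, and the copy to the result becomes a Zero of the result
plus a single Store of x):

    func mostlyZero(x int64) [8]int64 {
    	return [8]int64{3: x}
    }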
+(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Zero {t3} [n] p3 _))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && registerizable(b, t2) + && n >= o2 + sizeof(t2) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Zero {t1} [n] dst mem)) +(Move {t1} [n] dst p1 + mem:(Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Zero {t4} [n] p4 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && n >= o2 + sizeof(t2) + && n >= o3 + sizeof(t3) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Zero {t1} [n] dst mem))) +(Move {t1} [n] dst p1 + mem:(Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Zero {t5} [n] p5 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && alignof(t5) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && n >= o2 + sizeof(t2) + && n >= o3 + sizeof(t3) + && n >= o4 + sizeof(t4) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Zero {t1} [n] dst mem)))) +(Move {t1} [n] dst p1 + mem:(Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Store {t5} (OffPtr [o5] p5) d4 + (Zero {t6} [n] p6 _)))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && alignof(t5) <= alignof(t1) + && alignof(t6) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && n >= o2 + sizeof(t2) + && n >= o3 + sizeof(t3) + && n >= o4 + sizeof(t4) + && n >= o5 + sizeof(t5) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [o5] dst) d4 + (Zero {t1} [n] dst mem))))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Zero {t3} [n] p3 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && registerizable(b, t2) + && n >= o2 + sizeof(t2) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Zero {t1} [n] dst mem)) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Zero {t4} [n] p4 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && n >= o2 + sizeof(t2) + && n >= o3 + sizeof(t3) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Zero {t1} [n] dst mem))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Zero {t5} [n] p5 _)))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && alignof(t5) <= alignof(t1) + && registerizable(b, t2) + && 
registerizable(b, t3) + && registerizable(b, t4) + && n >= o2 + sizeof(t2) + && n >= o3 + sizeof(t3) + && n >= o4 + sizeof(t4) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Zero {t1} [n] dst mem)))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Store {t5} (OffPtr [o5] p5) d4 + (Zero {t6} [n] p6 _))))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) + && alignof(t2) <= alignof(t1) + && alignof(t3) <= alignof(t1) + && alignof(t4) <= alignof(t1) + && alignof(t5) <= alignof(t1) + && alignof(t6) <= alignof(t1) + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && n >= o2 + sizeof(t2) + && n >= o3 + sizeof(t3) + && n >= o4 + sizeof(t4) + && n >= o5 + sizeof(t5) + -> (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [o5] dst) d4 + (Zero {t1} [n] dst mem))))) diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index c4daa7474f6eb7..4ccebca27c01e3 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -456,6 +456,50 @@ func isSamePtr(p1, p2 *Value) bool { return false } +// disjoint reports whether the memory region specified by [p1:p1+n1) +// does not overlap with [p2:p2+n2). +// A return value of false does not imply the regions overlap. +func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool { + if n1 == 0 || n2 == 0 { + return true + } + if p1 == p2 { + return false + } + baseAndOffset := func(ptr *Value) (base *Value, offset int64) { + base, offset = ptr, 0 + if base.Op == OpOffPtr { + offset += base.AuxInt + base = base.Args[0] + } + return base, offset + } + p1, off1 := baseAndOffset(p1) + p2, off2 := baseAndOffset(p2) + if isSamePtr(p1, p2) { + return !overlap(off1, n1, off2, n2) + } + // p1 and p2 are not the same, so if they are both OpAddrs then + // they point to different variables. + // If one pointer is on the stack and the other is an argument + // then they can't overlap. + switch p1.Op { + case OpAddr: + if p2.Op == OpAddr || p2.Op == OpSP { + return true + } + return p2.Op == OpArg && p1.Args[0].Op == OpSP + case OpArg: + if p2.Op == OpSP { + return true + } + return p2.Op == OpAddr && p2.Args[0].Op == OpSP + case OpSP: + return p2.Op == OpAddr || p2.Op == OpArg || p2.Op == OpSP + } + return false +} + // moveSize returns the number of bytes an aligned MOV instruction moves func moveSize(align int64, c *Config) int64 { switch { @@ -879,3 +923,30 @@ func arm64BFWidth(mask, rshift int64) int64 { } return nto(shiftedMask) } + +// sizeof returns the size of t in bytes. +// It will panic if t is not a *types.Type. +func sizeof(t interface{}) int64 { + return t.(*types.Type).Size() +} + +// alignof returns the alignment of t in bytes. +// It will panic if t is not a *types.Type. +func alignof(t interface{}) int64 { + return t.(*types.Type).Alignment() +} + +// registerizable reports whether t is a primitive type that fits in +// a register. It assumes float64 values will always fit into registers +// even if that isn't strictly true. +// It will panic if t is not a *types.Type. 
+func registerizable(b *Block, t interface{}) bool { + typ := t.(*types.Type) + if typ.IsPtrShaped() || typ.IsFloat() { + return true + } + if typ.IsInteger() { + return typ.Size() <= b.Func.Config.RegSize + } + return false +} diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index d590a357d140cb..2748c0846c0b85 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -208,7 +208,7 @@ func rewriteValuegeneric(v *Value) bool { case OpLess8U: return rewriteValuegeneric_OpLess8U_0(v) case OpLoad: - return rewriteValuegeneric_OpLoad_0(v) || rewriteValuegeneric_OpLoad_10(v) + return rewriteValuegeneric_OpLoad_0(v) || rewriteValuegeneric_OpLoad_10(v) || rewriteValuegeneric_OpLoad_20(v) case OpLsh16x16: return rewriteValuegeneric_OpLsh16x16_0(v) case OpLsh16x32: @@ -257,6 +257,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpMod8_0(v) case OpMod8u: return rewriteValuegeneric_OpMod8u_0(v) + case OpMove: + return rewriteValuegeneric_OpMove_0(v) || rewriteValuegeneric_OpMove_10(v) || rewriteValuegeneric_OpMove_20(v) case OpMul16: return rewriteValuegeneric_OpMul16_0(v) || rewriteValuegeneric_OpMul16_10(v) case OpMul32: @@ -412,7 +414,7 @@ func rewriteValuegeneric(v *Value) bool { case OpStaticCall: return rewriteValuegeneric_OpStaticCall_0(v) case OpStore: - return rewriteValuegeneric_OpStore_0(v) || rewriteValuegeneric_OpStore_10(v) + return rewriteValuegeneric_OpStore_0(v) || rewriteValuegeneric_OpStore_10(v) || rewriteValuegeneric_OpStore_20(v) case OpStringLen: return rewriteValuegeneric_OpStringLen_0(v) case OpStringPtr: @@ -13131,10 +13133,8 @@ func rewriteValuegeneric_OpLess8U_0(v *Value) bool { func rewriteValuegeneric_OpLoad_0(v *Value) bool { b := v.Block _ = b - fe := b.Func.fe - _ = fe // match: (Load p1 (Store {t2} p2 x _)) - // cond: isSamePtr(p1,p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size() + // cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) // result: x for { t1 := v.Type @@ -13148,7 +13148,118 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { _ = v_1.Args[2] p2 := v_1.Args[0] x := v_1.Args[1] - if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size()) { + if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 x _))) + // cond: isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p3, sizeof(t3), p2, sizeof(t2)) + // result: x + for { + t1 := v.Type + _ = v.Args[1] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := v_1_2.Aux + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + x := v_1_2.Args[1] + if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p3, sizeof(t3), p2, sizeof(t2))) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _)))) + // cond: isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p4, sizeof(t4), p2, sizeof(t2)) && disjoint(p4, sizeof(t4), p3, sizeof(t3)) + // result: x + 
for { + t1 := v.Type + _ = v.Args[1] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := v_1_2.Aux + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := v_1_2_2.Aux + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + x := v_1_2_2.Args[1] + if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p4, sizeof(t4), p2, sizeof(t2)) && disjoint(p4, sizeof(t4), p3, sizeof(t3))) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _))))) + // cond: isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p5, sizeof(t5), p2, sizeof(t2)) && disjoint(p5, sizeof(t5), p3, sizeof(t3)) && disjoint(p5, sizeof(t5), p4, sizeof(t4)) + // result: x + for { + t1 := v.Type + _ = v.Args[1] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := v_1_2.Aux + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := v_1_2_2.Aux + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + v_1_2_2_2 := v_1_2_2.Args[2] + if v_1_2_2_2.Op != OpStore { + break + } + t5 := v_1_2_2_2.Aux + _ = v_1_2_2_2.Args[2] + p5 := v_1_2_2_2.Args[0] + x := v_1_2_2_2.Args[1] + if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == sizeof(t2) && disjoint(p5, sizeof(t5), p2, sizeof(t2)) && disjoint(p5, sizeof(t5), p3, sizeof(t3)) && disjoint(p5, sizeof(t5), p4, sizeof(t4))) { break } v.reset(OpCopy) @@ -13157,7 +13268,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const64 [x]) _)) - // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) // result: (Const64F [x]) for { t1 := v.Type @@ -13175,7 +13286,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) { break } v.reset(OpConst64F) @@ -13183,7 +13294,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const32 [x]) _)) - // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) // result: (Const32F [f2i(float64(math.Float32frombits(uint32(x))))]) for { t1 := v.Type @@ -13201,7 +13312,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) { break } v.reset(OpConst32F) @@ -13209,7 +13320,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const64F [x]) _)) - // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) // result: (Const64 [x]) for { t1 := v.Type @@ -13227,7 +13338,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { break 
} x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitInt(t1)) { break } v.reset(OpConst64) @@ -13235,7 +13346,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { return true } // match: (Load p1 (Store {t2} p2 (Const32F [x]) _)) - // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1) + // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) // result: (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))]) for { t1 := v.Type @@ -13253,149 +13364,92 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { break } x := v_1_1.AuxInt - if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1)) { + if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitInt(t1)) { break } v.reset(OpConst32) v.AuxInt = int64(int32(math.Float32bits(float32(i2f(x))))) return true } - // match: (Load _ _) - // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) - // result: (StructMake0) + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, sizeof(t2)) + // result: @mem.Block (Load op mem) for { - t := v.Type + t1 := v.Type _ = v.Args[1] - if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { + op := v.Args[0] + if op.Op != OpOffPtr { break } - v.reset(OpStructMake0) - return true - } - // match: (Load ptr mem) - // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) - // result: (StructMake1 (Load (OffPtr [0] ptr) mem)) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { + o1 := op.AuxInt + p1 := op.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { break } - v.reset(OpStructMake1) - v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) - v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) - v1.AuxInt = 0 - v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) - return true - } - // match: (Load ptr mem) - // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) - // result: (StructMake2 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem)) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) { + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + mem := v_1.Args[2] + if mem.Op != OpZero { break } - v.reset(OpStructMake2) - v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) - v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) - v1.AuxInt = 0 - v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) - v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) - v3.AuxInt = t.FieldOff(1) - v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) - return true - } - // match: (Load ptr mem) - // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) - // result: (StructMake3 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem)) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) { + n := mem.AuxInt + _ = mem.Args[1] + p3 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, sizeof(t2))) { break } - v.reset(OpStructMake3) - v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) - v1 := b.NewValue0(v.Pos, 
OpOffPtr, t.FieldType(0).PtrTo()) - v1.AuxInt = 0 - v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.reset(OpCopy) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) - v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) - v3.AuxInt = t.FieldOff(1) - v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) - v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) - v5.AuxInt = t.FieldOff(2) - v5.AddArg(ptr) - v4.AddArg(v5) - v4.AddArg(mem) - v.AddArg(v4) + v0.AddArg(op) + v0.AddArg(mem) return true } - // match: (Load ptr mem) - // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) - // result: (StructMake4 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem) (Load (OffPtr [t.FieldOff(3)] ptr) mem)) + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _)))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) + // result: @mem.Block (Load op mem) for { - t := v.Type + t1 := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) { + op := v.Args[0] + if op.Op != OpOffPtr { break } - v.reset(OpStructMake4) - v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) - v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) - v1.AuxInt = 0 - v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) + o1 := op.AuxInt + p1 := op.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := v_1_2.Aux + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + mem := v_1_2.Args[2] + if mem.Op != OpZero { + break + } + n := mem.AuxInt + _ = mem.Args[1] + p4 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3))) { + break + } + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.reset(OpCopy) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) - v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) - v3.AuxInt = t.FieldOff(1) - v3.AddArg(ptr) - v2.AddArg(v3) - v2.AddArg(mem) - v.AddArg(v2) - v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) - v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) - v5.AuxInt = t.FieldOff(2) - v5.AddArg(ptr) - v4.AddArg(v5) - v4.AddArg(mem) - v.AddArg(v4) - v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3)) - v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) - v7.AuxInt = t.FieldOff(3) - v7.AddArg(ptr) - v6.AddArg(v7) - v6.AddArg(mem) - v.AddArg(v6) + v0.AddArg(op) + v0.AddArg(mem) return true } return false @@ -13405,314 +13459,480 @@ func rewriteValuegeneric_OpLoad_10(v *Value) bool { _ = b fe := b.Func.fe _ = fe - // match: (Load _ _) - // cond: t.IsArray() && t.NumElem() == 0 - // result: (ArrayMake0) + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _))))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4)) + // result: @mem.Block (Load op mem) for { - t := v.Type + t1 := v.Type _ = v.Args[1] - if !(t.IsArray() && t.NumElem() == 0) { + op := v.Args[0] + if op.Op 
!= OpOffPtr { break } - v.reset(OpArrayMake0) + o1 := op.AuxInt + p1 := op.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { + break + } + t3 := v_1_2.Aux + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { + break + } + t4 := v_1_2_2.Aux + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + mem := v_1_2_2.Args[2] + if mem.Op != OpZero { + break + } + n := mem.AuxInt + _ = mem.Args[1] + p5 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4))) { + break + } + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(op) + v0.AddArg(mem) return true } - // match: (Load ptr mem) - // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) - // result: (ArrayMake1 (Load ptr mem)) + // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _)))))) + // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4)) && disjoint(op, t1.Size(), p5, sizeof(t5)) + // result: @mem.Block (Load op mem) for { - t := v.Type + t1 := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) { + op := v.Args[0] + if op.Op != OpOffPtr { break } - v.reset(OpArrayMake1) - v0 := b.NewValue0(v.Pos, OpLoad, t.Elem()) - v0.AddArg(ptr) - v0.AddArg(mem) - v.AddArg(v0) - return true - } - return false -} -func rewriteValuegeneric_OpLsh16x16_0(v *Value) bool { - b := v.Block - _ = b - // match: (Lsh16x16 x (Const16 [c])) - // cond: - // result: (Lsh16x64 x (Const64 [int64(uint16(c))])) - for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] + o1 := op.AuxInt + p1 := op.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpStore { break } - c := v_1.AuxInt - v.reset(OpLsh16x64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = int64(uint16(c)) - v.AddArg(v0) - return true - } - // match: (Lsh16x16 (Const16 [0]) _) - // cond: - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_2 := v_1.Args[2] + if v_1_2.Op != OpStore { break } - if v_0.AuxInt != 0 { + t3 := v_1_2.Aux + _ = v_1_2.Args[2] + p3 := v_1_2.Args[0] + v_1_2_2 := v_1_2.Args[2] + if v_1_2_2.Op != OpStore { break } - v.reset(OpConst16) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValuegeneric_OpLsh16x32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Lsh16x32 x (Const32 [c])) - // cond: - // result: (Lsh16x64 x (Const64 [int64(uint32(c))])) - for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + t4 := v_1_2_2.Aux + _ = v_1_2_2.Args[2] + p4 := v_1_2_2.Args[0] + v_1_2_2_2 := v_1_2_2.Args[2] + if v_1_2_2_2.Op != OpStore { break } - c := v_1.AuxInt - v.reset(OpLsh16x64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = int64(uint32(c)) - v.AddArg(v0) - return true - } - // match: (Lsh16x32 (Const16 [0]) _) - // cond: - // result: (Const16 [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { + t5 := v_1_2_2_2.Aux + _ = v_1_2_2_2.Args[2] + p5 := 
v_1_2_2_2.Args[0] + mem := v_1_2_2_2.Args[2] + if mem.Op != OpZero { break } - if v_0.AuxInt != 0 { + n := mem.AuxInt + _ = mem.Args[1] + p6 := mem.Args[0] + if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && disjoint(op, t1.Size(), p2, sizeof(t2)) && disjoint(op, t1.Size(), p3, sizeof(t3)) && disjoint(op, t1.Size(), p4, sizeof(t4)) && disjoint(op, t1.Size(), p5, sizeof(t5))) { break } - v.reset(OpConst16) - v.AuxInt = 0 + b = mem.Block + v0 := b.NewValue0(v.Pos, OpLoad, t1) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(op) + v0.AddArg(mem) return true } - return false -} -func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh16x64 (Const16 [c]) (Const64 [d])) - // cond: - // result: (Const16 [int64(int16(c) << uint64(d))]) + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: t1.IsBoolean() && isSamePtr(p1, p2) && n >= o + 1 + // result: (ConstBool [0]) for { + t1 := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst16 { + if v_0.Op != OpOffPtr { break } - c := v_0.AuxInt + o := v_0.AuxInt + p1 := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpZero { break } - d := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c) << uint64(d)) + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) { + break + } + v.reset(OpConstBool) + v.AuxInt = 0 return true } - // match: (Lsh16x64 x (Const64 [0])) - // cond: - // result: x + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is8BitInt(t1) && isSamePtr(p1, p2) && n >= o + 1 + // result: (Const8 [0]) for { + t1 := v.Type _ = v.Args[1] - x := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpOffPtr { + break + } + o := v_0.AuxInt + p1 := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpZero { break } - if v_1.AuxInt != 0 { + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpConst8) + v.AuxInt = 0 return true } - // match: (Lsh16x64 (Const16 [0]) _) - // cond: + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is16BitInt(t1) && isSamePtr(p1, p2) && n >= o + 2 // result: (Const16 [0]) for { + t1 := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst16 { + if v_0.Op != OpOffPtr { break } - if v_0.AuxInt != 0 { + o := v_0.AuxInt + p1 := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpZero { + break + } + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) { break } v.reset(OpConst16) v.AuxInt = 0 return true } - // match: (Lsh16x64 _ (Const64 [c])) - // cond: uint64(c) >= 16 - // result: (Const16 [0]) + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is32BitInt(t1) && isSamePtr(p1, p2) && n >= o + 4 + // result: (Const32 [0]) for { + t1 := v.Type _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpOffPtr { + break + } + o := v_0.AuxInt + p1 := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpZero { break } - c := v_1.AuxInt - if !(uint64(c) >= 16) { + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) { break } - v.reset(OpConst16) + v.reset(OpConst32) v.AuxInt = 0 return true } - // match: (Lsh16x64 (Lsh16x64 x (Const64 [c])) (Const64 [d])) - // cond: !uaddOvf(c,d) - // result: (Lsh16x64 x (Const64 [c+d])) + // match: (Load (OffPtr [o] p1) 
(Zero [n] p2 _)) + // cond: is64BitInt(t1) && isSamePtr(p1, p2) && n >= o + 8 + // result: (Const64 [0]) for { - t := v.Type + t1 := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpLsh16x64 { - break - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { + if v_0.Op != OpOffPtr { break } - c := v_0_1.AuxInt + o := v_0.AuxInt + p1 := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpZero { break } - d := v_1.AuxInt - if !(!uaddOvf(c, d)) { + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) { break } - v.reset(OpLsh16x64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) + v.reset(OpConst64) + v.AuxInt = 0 return true } - // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) - // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) - // result: (Lsh16x64 x (Const64 [c1-c2+c3])) + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 4 + // result: (Const32F [0]) for { + t1 := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpRsh16Ux64 { + if v_0.Op != OpOffPtr { break } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLsh16x64 { + o := v_0.AuxInt + p1 := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpZero { break } - _ = v_0_0.Args[1] - x := v_0_0.Args[0] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst64 { + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) { break } - c1 := v_0_0_1.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { + v.reset(OpConst32F) + v.AuxInt = 0 + return true + } + // match: (Load (OffPtr [o] p1) (Zero [n] p2 _)) + // cond: is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 8 + // result: (Const64F [0]) + for { + t1 := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpOffPtr { break } - c2 := v_0_1.AuxInt + o := v_0.AuxInt + p1 := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpZero { break } - c3 := v_1.AuxInt - if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { + n := v_1.AuxInt + _ = v_1.Args[1] + p2 := v_1.Args[0] + if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) { break } - v.reset(OpLsh16x64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) - v0.AuxInt = c1 - c2 + c3 - v.AddArg(v0) + v.reset(OpConst64F) + v.AuxInt = 0 + return true + } + // match: (Load _ _) + // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) + // result: (StructMake0) + for { + t := v.Type + _ = v.Args[1] + if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake0) return true } return false } -func rewriteValuegeneric_OpLsh16x8_0(v *Value) bool { +func rewriteValuegeneric_OpLoad_20(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x8 x (Const8 [c])) - // cond: - // result: (Lsh16x64 x (Const64 [int64(uint8(c))])) + fe := b.Func.fe + _ = fe + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) + // result: (StructMake1 (Load (OffPtr [0] ptr) mem)) for { t := v.Type _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { break } - c := v_1.AuxInt - v.reset(OpLsh16x64) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpConst64, t) - 
v0.AuxInt = int64(uint8(c)) + v.reset(OpStructMake1) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = 0 + v1.AddArg(ptr) + v0.AddArg(v1) + v0.AddArg(mem) v.AddArg(v0) return true } - // match: (Lsh16x8 (Const16 [0]) _) - // cond: - // result: (Const16 [0]) + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) + // result: (StructMake2 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem)) for { + t := v.Type _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) { break } - if v_0.AuxInt != 0 { + v.reset(OpStructMake2) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = 0 + v1.AddArg(ptr) + v0.AddArg(v1) + v0.AddArg(mem) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) + v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v3.AuxInt = t.FieldOff(1) + v3.AddArg(ptr) + v2.AddArg(v3) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) + // result: (StructMake3 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem)) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) { break } - v.reset(OpConst16) - v.AuxInt = 0 + v.reset(OpStructMake3) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = 0 + v1.AddArg(ptr) + v0.AddArg(v1) + v0.AddArg(mem) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) + v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v3.AuxInt = t.FieldOff(1) + v3.AddArg(ptr) + v2.AddArg(v3) + v2.AddArg(mem) + v.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) + v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v5.AuxInt = t.FieldOff(2) + v5.AddArg(ptr) + v4.AddArg(v5) + v4.AddArg(mem) + v.AddArg(v4) + return true + } + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) + // result: (StructMake4 (Load (OffPtr [0] ptr) mem) (Load (OffPtr [t.FieldOff(1)] ptr) mem) (Load (OffPtr [t.FieldOff(2)] ptr) mem) (Load (OffPtr [t.FieldOff(3)] ptr) mem)) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake4) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = 0 + v1.AddArg(ptr) + v0.AddArg(v1) + v0.AddArg(mem) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1)) + v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo()) + v3.AuxInt = t.FieldOff(1) + v3.AddArg(ptr) + v2.AddArg(v3) + v2.AddArg(mem) + v.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2)) + v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo()) + v5.AuxInt = t.FieldOff(2) + v5.AddArg(ptr) + v4.AddArg(v5) + v4.AddArg(mem) + v.AddArg(v4) + v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3)) + v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo()) + v7.AuxInt = t.FieldOff(3) + v7.AddArg(ptr) + v6.AddArg(v7) + v6.AddArg(mem) + v.AddArg(v6) + return true + } + // match: (Load _ _) + // cond: t.IsArray() && t.NumElem() == 0 + // 
result: (ArrayMake0) + for { + t := v.Type + _ = v.Args[1] + if !(t.IsArray() && t.NumElem() == 0) { + break + } + v.reset(OpArrayMake0) + return true + } + // match: (Load ptr mem) + // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) + // result: (ArrayMake1 (Load ptr mem)) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) { + break + } + v.reset(OpArrayMake1) + v0 := b.NewValue0(v.Pos, OpLoad, t.Elem()) + v0.AddArg(ptr) + v0.AddArg(mem) + v.AddArg(v0) return true } return false } -func rewriteValuegeneric_OpLsh32x16_0(v *Value) bool { +func rewriteValuegeneric_OpLsh16x16_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x16 x (Const16 [c])) + // match: (Lsh16x16 x (Const16 [c])) // cond: - // result: (Lsh32x64 x (Const64 [int64(uint16(c))])) + // result: (Lsh16x64 x (Const64 [int64(uint16(c))])) for { t := v.Type _ = v.Args[1] @@ -13722,37 +13942,37 @@ func rewriteValuegeneric_OpLsh32x16_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh32x64) + v.reset(OpLsh16x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) v.AddArg(v0) return true } - // match: (Lsh32x16 (Const32 [0]) _) + // match: (Lsh16x16 (Const16 [0]) _) // cond: - // result: (Const32 [0]) + // result: (Const16 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst32) + v.reset(OpConst16) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool { +func rewriteValuegeneric_OpLsh16x32_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x32 x (Const32 [c])) + // match: (Lsh16x32 x (Const32 [c])) // cond: - // result: (Lsh32x64 x (Const64 [int64(uint32(c))])) + // result: (Lsh16x64 x (Const64 [int64(uint32(c))])) for { t := v.Type _ = v.Args[1] @@ -13762,43 +13982,43 @@ func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh32x64) + v.reset(OpLsh16x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) v.AddArg(v0) return true } - // match: (Lsh32x32 (Const32 [0]) _) + // match: (Lsh16x32 (Const16 [0]) _) // cond: - // result: (Const32 [0]) + // result: (Const16 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst32) + v.reset(OpConst16) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { +func rewriteValuegeneric_OpLsh16x64_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh32x64 (Const32 [c]) (Const64 [d])) + // match: (Lsh16x64 (Const16 [c]) (Const64 [d])) // cond: - // result: (Const32 [int64(int32(c) << uint64(d))]) + // result: (Const16 [int64(int16(c) << uint64(d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } c := v_0.AuxInt @@ -13807,11 +14027,11 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { break } d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c) << uint64(d)) + v.reset(OpConst16) + v.AuxInt = int64(int16(c) << uint64(d)) return true } - // match: (Lsh32x64 x (Const64 [0])) + // match: (Lsh16x64 x (Const64 [0])) // cond: // result: x for { @@ -13829,25 +14049,25 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh32x64 (Const32 [0]) _) + // match: (Lsh16x64 (Const16 [0]) _) // 
cond: - // result: (Const32 [0]) + // result: (Const16 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst32) + v.reset(OpConst16) v.AuxInt = 0 return true } - // match: (Lsh32x64 _ (Const64 [c])) - // cond: uint64(c) >= 32 - // result: (Const32 [0]) + // match: (Lsh16x64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) for { _ = v.Args[1] v_1 := v.Args[1] @@ -13855,21 +14075,21 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { break } c := v_1.AuxInt - if !(uint64(c) >= 32) { + if !(uint64(c) >= 16) { break } - v.reset(OpConst32) + v.reset(OpConst16) v.AuxInt = 0 return true } - // match: (Lsh32x64 (Lsh32x64 x (Const64 [c])) (Const64 [d])) + // match: (Lsh16x64 (Lsh16x64 x (Const64 [c])) (Const64 [d])) // cond: !uaddOvf(c,d) - // result: (Lsh32x64 x (Const64 [c+d])) + // result: (Lsh16x64 x (Const64 [c+d])) for { t := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpLsh32x64 { + if v_0.Op != OpLsh16x64 { break } _ = v_0.Args[1] @@ -13887,25 +14107,25 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { if !(!uaddOvf(c, d)) { break } - v.reset(OpLsh32x64) + v.reset(OpLsh16x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d v.AddArg(v0) return true } - // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) - // result: (Lsh32x64 x (Const64 [c1-c2+c3])) + // result: (Lsh16x64 x (Const64 [c1-c2+c3])) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpRsh32Ux64 { + if v_0.Op != OpRsh16Ux64 { break } _ = v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLsh32x64 { + if v_0_0.Op != OpLsh16x64 { break } _ = v_0_0.Args[1] @@ -13928,7 +14148,7 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { break } - v.reset(OpLsh32x64) + v.reset(OpLsh16x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 @@ -13937,12 +14157,12 @@ func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { } return false } -func rewriteValuegeneric_OpLsh32x8_0(v *Value) bool { +func rewriteValuegeneric_OpLsh16x8_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x8 x (Const8 [c])) + // match: (Lsh16x8 x (Const8 [c])) // cond: - // result: (Lsh32x64 x (Const64 [int64(uint8(c))])) + // result: (Lsh16x64 x (Const64 [int64(uint8(c))])) for { t := v.Type _ = v.Args[1] @@ -13952,37 +14172,37 @@ func rewriteValuegeneric_OpLsh32x8_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh32x64) + v.reset(OpLsh16x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) v.AddArg(v0) return true } - // match: (Lsh32x8 (Const32 [0]) _) + // match: (Lsh16x8 (Const16 [0]) _) // cond: - // result: (Const32 [0]) + // result: (Const16 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst32) + v.reset(OpConst16) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh64x16_0(v *Value) bool { +func rewriteValuegeneric_OpLsh32x16_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x16 x (Const16 [c])) + // match: (Lsh32x16 x (Const16 [c])) // cond: - // result: (Lsh64x64 x (Const64 [int64(uint16(c))])) + // 
result: (Lsh32x64 x (Const64 [int64(uint16(c))])) for { t := v.Type _ = v.Args[1] @@ -13992,37 +14212,37 @@ func rewriteValuegeneric_OpLsh64x16_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh64x64) + v.reset(OpLsh32x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) v.AddArg(v0) return true } - // match: (Lsh64x16 (Const64 [0]) _) + // match: (Lsh32x16 (Const32 [0]) _) // cond: - // result: (Const64 [0]) + // result: (Const32 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst64) + v.reset(OpConst32) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool { +func rewriteValuegeneric_OpLsh32x32_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x32 x (Const32 [c])) + // match: (Lsh32x32 x (Const32 [c])) // cond: - // result: (Lsh64x64 x (Const64 [int64(uint32(c))])) + // result: (Lsh32x64 x (Const64 [int64(uint32(c))])) for { t := v.Type _ = v.Args[1] @@ -14032,43 +14252,43 @@ func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh64x64) + v.reset(OpLsh32x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) v.AddArg(v0) return true } - // match: (Lsh64x32 (Const64 [0]) _) + // match: (Lsh32x32 (Const32 [0]) _) // cond: - // result: (Const64 [0]) + // result: (Const32 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst64) + v.reset(OpConst32) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { +func rewriteValuegeneric_OpLsh32x64_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh64x64 (Const64 [c]) (Const64 [d])) + // match: (Lsh32x64 (Const32 [c]) (Const64 [d])) // cond: - // result: (Const64 [c << uint64(d)]) + // result: (Const32 [int64(int32(c) << uint64(d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt @@ -14077,11 +14297,11 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { break } d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c << uint64(d) + v.reset(OpConst32) + v.AuxInt = int64(int32(c) << uint64(d)) return true } - // match: (Lsh64x64 x (Const64 [0])) + // match: (Lsh32x64 x (Const64 [0])) // cond: // result: x for { @@ -14099,25 +14319,25 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh64x64 (Const64 [0]) _) + // match: (Lsh32x64 (Const32 [0]) _) // cond: - // result: (Const64 [0]) + // result: (Const32 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst64) + v.reset(OpConst32) v.AuxInt = 0 return true } - // match: (Lsh64x64 _ (Const64 [c])) - // cond: uint64(c) >= 64 - // result: (Const64 [0]) + // match: (Lsh32x64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) for { _ = v.Args[1] v_1 := v.Args[1] @@ -14125,21 +14345,21 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { break } c := v_1.AuxInt - if !(uint64(c) >= 64) { + if !(uint64(c) >= 32) { break } - v.reset(OpConst64) + v.reset(OpConst32) v.AuxInt = 0 return true } - // match: (Lsh64x64 (Lsh64x64 x (Const64 [c])) (Const64 [d])) + // match: (Lsh32x64 (Lsh32x64 x (Const64 [c])) (Const64 [d])) // cond: !uaddOvf(c,d) - // result: 
(Lsh64x64 x (Const64 [c+d])) + // result: (Lsh32x64 x (Const64 [c+d])) for { t := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpLsh64x64 { + if v_0.Op != OpLsh32x64 { break } _ = v_0.Args[1] @@ -14157,25 +14377,25 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { if !(!uaddOvf(c, d)) { break } - v.reset(OpLsh64x64) + v.reset(OpLsh32x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d v.AddArg(v0) return true } - // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) - // result: (Lsh64x64 x (Const64 [c1-c2+c3])) + // result: (Lsh32x64 x (Const64 [c1-c2+c3])) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpRsh64Ux64 { + if v_0.Op != OpRsh32Ux64 { break } _ = v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLsh64x64 { + if v_0_0.Op != OpLsh32x64 { break } _ = v_0_0.Args[1] @@ -14198,7 +14418,7 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { break } - v.reset(OpLsh64x64) + v.reset(OpLsh32x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 @@ -14207,12 +14427,12 @@ func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { } return false } -func rewriteValuegeneric_OpLsh64x8_0(v *Value) bool { +func rewriteValuegeneric_OpLsh32x8_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x8 x (Const8 [c])) + // match: (Lsh32x8 x (Const8 [c])) // cond: - // result: (Lsh64x64 x (Const64 [int64(uint8(c))])) + // result: (Lsh32x64 x (Const64 [int64(uint8(c))])) for { t := v.Type _ = v.Args[1] @@ -14222,37 +14442,37 @@ func rewriteValuegeneric_OpLsh64x8_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh64x64) + v.reset(OpLsh32x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) v.AddArg(v0) return true } - // match: (Lsh64x8 (Const64 [0]) _) + // match: (Lsh32x8 (Const32 [0]) _) // cond: - // result: (Const64 [0]) + // result: (Const32 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst64) + v.reset(OpConst32) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh8x16_0(v *Value) bool { +func rewriteValuegeneric_OpLsh64x16_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x16 x (Const16 [c])) + // match: (Lsh64x16 x (Const16 [c])) // cond: - // result: (Lsh8x64 x (Const64 [int64(uint16(c))])) + // result: (Lsh64x64 x (Const64 [int64(uint16(c))])) for { t := v.Type _ = v.Args[1] @@ -14262,37 +14482,37 @@ func rewriteValuegeneric_OpLsh8x16_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh8x64) + v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint16(c)) v.AddArg(v0) return true } - // match: (Lsh8x16 (Const8 [0]) _) + // match: (Lsh64x16 (Const64 [0]) _) // cond: - // result: (Const8 [0]) + // result: (Const64 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst8) + v.reset(OpConst64) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool { +func rewriteValuegeneric_OpLsh64x32_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x32 x (Const32 [c])) + // match: (Lsh64x32 x 
(Const32 [c])) // cond: - // result: (Lsh8x64 x (Const64 [int64(uint32(c))])) + // result: (Lsh64x64 x (Const64 [int64(uint32(c))])) for { t := v.Type _ = v.Args[1] @@ -14302,43 +14522,43 @@ func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh8x64) + v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint32(c)) v.AddArg(v0) return true } - // match: (Lsh8x32 (Const8 [0]) _) + // match: (Lsh64x32 (Const64 [0]) _) // cond: - // result: (Const8 [0]) + // result: (Const64 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst8) + v.reset(OpConst64) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { +func rewriteValuegeneric_OpLsh64x64_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh8x64 (Const8 [c]) (Const64 [d])) + // match: (Lsh64x64 (Const64 [c]) (Const64 [d])) // cond: - // result: (Const8 [int64(int8(c) << uint64(d))]) + // result: (Const64 [c << uint64(d)]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } c := v_0.AuxInt @@ -14347,11 +14567,11 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { break } d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c) << uint64(d)) + v.reset(OpConst64) + v.AuxInt = c << uint64(d) return true } - // match: (Lsh8x64 x (Const64 [0])) + // match: (Lsh64x64 x (Const64 [0])) // cond: // result: x for { @@ -14369,25 +14589,25 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh8x64 (Const8 [0]) _) + // match: (Lsh64x64 (Const64 [0]) _) // cond: - // result: (Const8 [0]) + // result: (Const64 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst8) + v.reset(OpConst64) v.AuxInt = 0 return true } - // match: (Lsh8x64 _ (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (Const8 [0]) + // match: (Lsh64x64 _ (Const64 [c])) + // cond: uint64(c) >= 64 + // result: (Const64 [0]) for { _ = v.Args[1] v_1 := v.Args[1] @@ -14395,21 +14615,21 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { break } c := v_1.AuxInt - if !(uint64(c) >= 8) { + if !(uint64(c) >= 64) { break } - v.reset(OpConst8) + v.reset(OpConst64) v.AuxInt = 0 return true } - // match: (Lsh8x64 (Lsh8x64 x (Const64 [c])) (Const64 [d])) + // match: (Lsh64x64 (Lsh64x64 x (Const64 [c])) (Const64 [d])) // cond: !uaddOvf(c,d) - // result: (Lsh8x64 x (Const64 [c+d])) + // result: (Lsh64x64 x (Const64 [c+d])) for { t := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpLsh8x64 { + if v_0.Op != OpLsh64x64 { break } _ = v_0.Args[1] @@ -14427,25 +14647,25 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { if !(!uaddOvf(c, d)) { break } - v.reset(OpLsh8x64) + v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d v.AddArg(v0) return true } - // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) - // result: (Lsh8x64 x (Const64 [c1-c2+c3])) + // result: (Lsh64x64 x (Const64 [c1-c2+c3])) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpRsh8Ux64 { + if v_0.Op != OpRsh64Ux64 { break } _ = 
v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpLsh8x64 { + if v_0_0.Op != OpLsh64x64 { break } _ = v_0_0.Args[1] @@ -14468,7 +14688,7 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { break } - v.reset(OpLsh8x64) + v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) v0.AuxInt = c1 - c2 + c3 @@ -14477,12 +14697,12 @@ func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { } return false } -func rewriteValuegeneric_OpLsh8x8_0(v *Value) bool { +func rewriteValuegeneric_OpLsh64x8_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x8 x (Const8 [c])) + // match: (Lsh64x8 x (Const8 [c])) // cond: - // result: (Lsh8x64 x (Const64 [int64(uint8(c))])) + // result: (Lsh64x64 x (Const64 [int64(uint8(c))])) for { t := v.Type _ = v.Args[1] @@ -14492,386 +14712,396 @@ func rewriteValuegeneric_OpLsh8x8_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpLsh8x64) + v.reset(OpLsh64x64) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = int64(uint8(c)) v.AddArg(v0) return true } - // match: (Lsh8x8 (Const8 [0]) _) + // match: (Lsh64x8 (Const64 [0]) _) // cond: - // result: (Const8 [0]) + // result: (Const64 [0]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } if v_0.AuxInt != 0 { break } - v.reset(OpConst8) + v.reset(OpConst64) v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpMod16_0(v *Value) bool { +func rewriteValuegeneric_OpLsh8x16_0(v *Value) bool { b := v.Block _ = b - // match: (Mod16 (Const16 [c]) (Const16 [d])) - // cond: d != 0 - // result: (Const16 [int64(int16(c % d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - if !(d != 0) { - break - } - v.reset(OpConst16) - v.AuxInt = int64(int16(c % d)) - return true - } - // match: (Mod16 n (Const16 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c&0xffff) - // result: (And16 n (Const16 [(c&0xffff)-1])) + // match: (Lsh8x16 x (Const16 [c])) + // cond: + // result: (Lsh8x64 x (Const64 [int64(uint16(c))])) for { t := v.Type _ = v.Args[1] - n := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } c := v_1.AuxInt - if !(isNonNegative(n) && isPowerOfTwo(c&0xffff)) { - break - } - v.reset(OpAnd16) - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = (c & 0xffff) - 1 + v.reset(OpLsh8x64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64(uint16(c)) v.AddArg(v0) return true } - // match: (Mod16 n (Const16 [c])) - // cond: c < 0 && c != -1<<15 - // result: (Mod16 n (Const16 [-c])) + // match: (Lsh8x16 (Const8 [0]) _) + // cond: + // result: (Const8 [0]) for { - t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + v_0 := v.Args[0] + if v_0.Op != OpConst8 { break } - c := v_1.AuxInt - if !(c < 0 && c != -1<<15) { + if v_0.AuxInt != 0 { break } - v.reset(OpMod16) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = -c - v.AddArg(v0) + v.reset(OpConst8) + v.AuxInt = 0 return true } - // match: (Mod16 x (Const16 [c])) - // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15) - // result: (Sub16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) + return false +} +func rewriteValuegeneric_OpLsh8x32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Lsh8x32 x (Const32 [c])) + // cond: + // result: 
(Lsh8x64 x (Const64 [int64(uint32(c))])) for { t := v.Type _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpConst32 { break } c := v_1.AuxInt - if !(x.Op != OpConst16 && (c > 0 || c == -1<<15)) { - break - } - v.reset(OpSub16) + v.reset(OpLsh8x64) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul16, t) - v1 := b.NewValue0(v.Pos, OpDiv16, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, t) - v3.AuxInt = c - v0.AddArg(v3) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64(uint32(c)) v.AddArg(v0) return true } + // match: (Lsh8x32 (Const8 [0]) _) + // cond: + // result: (Const8 [0]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + if v_0.AuxInt != 0 { + break + } + v.reset(OpConst8) + v.AuxInt = 0 + return true + } return false } -func rewriteValuegeneric_OpMod16u_0(v *Value) bool { +func rewriteValuegeneric_OpLsh8x64_0(v *Value) bool { b := v.Block _ = b - // match: (Mod16u (Const16 [c]) (Const16 [d])) - // cond: d != 0 - // result: (Const16 [int64(uint16(c) % uint16(d))]) + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh8x64 (Const8 [c]) (Const64 [d])) + // cond: + // result: (Const8 [int64(int8(c) << uint64(d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst16 { + if v_0.Op != OpConst8 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt - if !(d != 0) { + v.reset(OpConst8) + v.AuxInt = int64(int8(c) << uint64(d)) + return true + } + // match: (Lsh8x64 x (Const64 [0])) + // cond: + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - v.reset(OpConst16) - v.AuxInt = int64(uint16(c) % uint16(d)) + if v_1.AuxInt != 0 { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Mod16u n (Const16 [c])) - // cond: isPowerOfTwo(c&0xffff) - // result: (And16 n (Const16 [(c&0xffff)-1])) + // match: (Lsh8x64 (Const8 [0]) _) + // cond: + // result: (Const8 [0]) for { - t := v.Type _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + v_0 := v.Args[0] + if v_0.Op != OpConst8 { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c & 0xffff)) { + if v_0.AuxInt != 0 { break } - v.reset(OpAnd16) - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = (c & 0xffff) - 1 - v.AddArg(v0) + v.reset(OpConst8) + v.AuxInt = 0 return true } - // match: (Mod16u x (Const16 [c])) - // cond: x.Op != OpConst16 && c > 0 && umagicOK(16,c) - // result: (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpConst64 { break } c := v_1.AuxInt - if !(x.Op != OpConst16 && c > 0 && umagicOK(16, c)) { + if !(uint64(c) >= 8) { break } - v.reset(OpSub16) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul16, t) - v1 := b.NewValue0(v.Pos, OpDiv16u, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst16, t) - v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst16, t) - v3.AuxInt = c - v0.AddArg(v3) - v.AddArg(v0) + v.reset(OpConst8) + v.AuxInt = 0 return true } - return false -} -func rewriteValuegeneric_OpMod32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Mod32 (Const32 [c]) (Const32 [d])) - // cond: d != 0 - // result: 
(Const32 [int64(int32(c % d))]) + // match: (Lsh8x64 (Lsh8x64 x (Const64 [c])) (Const64 [d])) + // cond: !uaddOvf(c,d) + // result: (Lsh8x64 x (Const64 [c+d])) for { + t := v.Type _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpLsh8x64 { break } - c := v_0.AuxInt + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c := v_0_1.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt - if !(d != 0) { + if !(!uaddOvf(c, d)) { break } - v.reset(OpConst32) - v.AuxInt = int64(int32(c % d)) + v.reset(OpLsh8x64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + d + v.AddArg(v0) return true } - // match: (Mod32 n (Const32 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c&0xffffffff) - // result: (And32 n (Const32 [(c&0xffffffff)-1])) + // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + // result: (Lsh8x64 x (Const64 [c1-c2+c3])) for { - t := v.Type _ = v.Args[1] - n := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpRsh8Ux64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLsh8x64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpConst64 { + break + } + c1 := v_0_0_1.AuxInt + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpConst64 { + break + } + c2 := v_0_1.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst64 { break } - c := v_1.AuxInt - if !(isNonNegative(n) && isPowerOfTwo(c&0xffffffff)) { + c3 := v_1.AuxInt + if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) { break } - v.reset(OpAnd32) - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = (c & 0xffffffff) - 1 + v.reset(OpLsh8x64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = c1 - c2 + c3 v.AddArg(v0) return true } - // match: (Mod32 n (Const32 [c])) - // cond: c < 0 && c != -1<<31 - // result: (Mod32 n (Const32 [-c])) + return false +} +func rewriteValuegeneric_OpLsh8x8_0(v *Value) bool { + b := v.Block + _ = b + // match: (Lsh8x8 x (Const8 [c])) + // cond: + // result: (Lsh8x64 x (Const64 [int64(uint8(c))])) for { t := v.Type _ = v.Args[1] - n := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst8 { break } c := v_1.AuxInt - if !(c < 0 && c != -1<<31) { - break - } - v.reset(OpMod32) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = -c + v.reset(OpLsh8x64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64(uint8(c)) v.AddArg(v0) return true } - // match: (Mod32 x (Const32 [c])) - // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31) - // result: (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) + // match: (Lsh8x8 (Const8 [0]) _) + // cond: + // result: (Const8 [0]) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + v_0 := v.Args[0] + if v_0.Op != OpConst8 { break } - c := v_1.AuxInt - if !(x.Op != OpConst32 && (c > 0 || c == -1<<31)) { + if v_0.AuxInt != 0 { break } - v.reset(OpSub32) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul32, t) - v1 := b.NewValue0(v.Pos, OpDiv32, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, t) - v3.AuxInt = c - v0.AddArg(v3) - 
v.AddArg(v0) + v.reset(OpConst8) + v.AuxInt = 0 return true } return false } -func rewriteValuegeneric_OpMod32u_0(v *Value) bool { +func rewriteValuegeneric_OpMod16_0(v *Value) bool { b := v.Block _ = b - // match: (Mod32u (Const32 [c]) (Const32 [d])) + // match: (Mod16 (Const16 [c]) (Const16 [d])) // cond: d != 0 - // result: (Const32 [int64(uint32(c) % uint32(d))]) + // result: (Const16 [int64(int16(c % d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt if !(d != 0) { break } - v.reset(OpConst32) - v.AuxInt = int64(uint32(c) % uint32(d)) + v.reset(OpConst16) + v.AuxInt = int64(int16(c % d)) return true } - // match: (Mod32u n (Const32 [c])) - // cond: isPowerOfTwo(c&0xffffffff) - // result: (And32 n (Const32 [(c&0xffffffff)-1])) + // match: (Mod16 n (Const16 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xffff) + // result: (And16 n (Const16 [(c&0xffff)-1])) for { t := v.Type _ = v.Args[1] n := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst16 { break } c := v_1.AuxInt - if !(isPowerOfTwo(c & 0xffffffff)) { + if !(isNonNegative(n) && isPowerOfTwo(c&0xffff)) { break } - v.reset(OpAnd32) + v.reset(OpAnd16) v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = (c & 0xffffffff) - 1 + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = (c & 0xffff) - 1 v.AddArg(v0) return true } - // match: (Mod32u x (Const32 [c])) - // cond: x.Op != OpConst32 && c > 0 && umagicOK(32,c) - // result: (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) + // match: (Mod16 n (Const16 [c])) + // cond: c < 0 && c != -1<<15 + // result: (Mod16 n (Const16 [-c])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + c := v_1.AuxInt + if !(c < 0 && c != -1<<15) { + break + } + v.reset(OpMod16) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = -c + v.AddArg(v0) + return true + } + // match: (Mod16 x (Const16 [c])) + // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15) + // result: (Sub16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) for { t := v.Type _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst16 { break } c := v_1.AuxInt - if !(x.Op != OpConst32 && c > 0 && umagicOK(32, c)) { + if !(x.Op != OpConst16 && (c > 0 || c == -1<<15)) { break } - v.reset(OpSub32) + v.reset(OpSub16) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul32, t) - v1 := b.NewValue0(v.Pos, OpDiv32u, t) + v0 := b.NewValue0(v.Pos, OpMul16, t) + v1 := b.NewValue0(v.Pos, OpDiv16, t) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst32, t) + v2 := b.NewValue0(v.Pos, OpConst16, t) v2.AuxInt = c v1.AddArg(v2) v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst32, t) + v3 := b.NewValue0(v.Pos, OpConst16, t) v3.AuxInt = c v0.AddArg(v3) v.AddArg(v0) @@ -14879,122 +15109,78 @@ func rewriteValuegeneric_OpMod32u_0(v *Value) bool { } return false } -func rewriteValuegeneric_OpMod64_0(v *Value) bool { +func rewriteValuegeneric_OpMod16u_0(v *Value) bool { b := v.Block _ = b - // match: (Mod64 (Const64 [c]) (Const64 [d])) + // match: (Mod16u (Const16 [c]) (Const16 [d])) // cond: d != 0 - // result: (Const64 [c % d]) + // result: (Const16 [int64(uint16(c) % uint16(d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst16 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != 
OpConst64 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt if !(d != 0) { break } - v.reset(OpConst64) - v.AuxInt = c % d - return true - } - // match: (Mod64 n (Const64 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c) - // result: (And64 n (Const64 [c-1])) - for { - t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(isNonNegative(n) && isPowerOfTwo(c)) { - break - } - v.reset(OpAnd64) - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - 1 - v.AddArg(v0) - return true - } - // match: (Mod64 n (Const64 [-1<<63])) - // cond: isNonNegative(n) - // result: n - for { - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != -1<<63 { - break - } - if !(isNonNegative(n)) { - break - } - v.reset(OpCopy) - v.Type = n.Type - v.AddArg(n) + v.reset(OpConst16) + v.AuxInt = int64(uint16(c) % uint16(d)) return true } - // match: (Mod64 n (Const64 [c])) - // cond: c < 0 && c != -1<<63 - // result: (Mod64 n (Const64 [-c])) + // match: (Mod16u n (Const16 [c])) + // cond: isPowerOfTwo(c&0xffff) + // result: (And16 n (Const16 [(c&0xffff)-1])) for { t := v.Type _ = v.Args[1] n := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst16 { break } c := v_1.AuxInt - if !(c < 0 && c != -1<<63) { + if !(isPowerOfTwo(c & 0xffff)) { break } - v.reset(OpMod64) - v.Type = t + v.reset(OpAnd16) v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = -c + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = (c & 0xffff) - 1 v.AddArg(v0) return true } - // match: (Mod64 x (Const64 [c])) - // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63) - // result: (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) + // match: (Mod16u x (Const16 [c])) + // cond: x.Op != OpConst16 && c > 0 && umagicOK(16,c) + // result: (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) for { t := v.Type _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst16 { break } c := v_1.AuxInt - if !(x.Op != OpConst64 && (c > 0 || c == -1<<63)) { + if !(x.Op != OpConst16 && c > 0 && umagicOK(16, c)) { break } - v.reset(OpSub64) + v.reset(OpSub16) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul64, t) - v1 := b.NewValue0(v.Pos, OpDiv64, t) + v0 := b.NewValue0(v.Pos, OpMul16, t) + v1 := b.NewValue0(v.Pos, OpDiv16u, t) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpConst16, t) v2.AuxInt = c v1.AddArg(v2) v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpConst16, t) v3.AuxInt = c v0.AddArg(v3) v.AddArg(v0) @@ -15002,99 +15188,101 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { } return false } -func rewriteValuegeneric_OpMod64u_0(v *Value) bool { +func rewriteValuegeneric_OpMod32_0(v *Value) bool { b := v.Block _ = b - // match: (Mod64u (Const64 [c]) (Const64 [d])) + // match: (Mod32 (Const32 [c]) (Const32 [d])) // cond: d != 0 - // result: (Const64 [int64(uint64(c) % uint64(d))]) + // result: (Const32 [int64(int32(c % d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt if !(d != 0) { break } - v.reset(OpConst64) - v.AuxInt = int64(uint64(c) % uint64(d)) + v.reset(OpConst32) + v.AuxInt = int64(int32(c % d)) return true } - // match: (Mod64u n (Const64 [c])) - // cond: isPowerOfTwo(c) - // 
result: (And64 n (Const64 [c-1])) + // match: (Mod32 n (Const32 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xffffffff) + // result: (And32 n (Const32 [(c&0xffffffff)-1])) for { t := v.Type _ = v.Args[1] n := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + if !(isNonNegative(n) && isPowerOfTwo(c&0xffffffff)) { break } - v.reset(OpAnd64) + v.reset(OpAnd32) v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - 1 + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = (c & 0xffffffff) - 1 v.AddArg(v0) return true } - // match: (Mod64u n (Const64 [-1<<63])) - // cond: - // result: (And64 n (Const64 [1<<63-1])) + // match: (Mod32 n (Const32 [c])) + // cond: c < 0 && c != -1<<31 + // result: (Mod32 n (Const32 [-c])) for { t := v.Type _ = v.Args[1] n := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } - if v_1.AuxInt != -1<<63 { + c := v_1.AuxInt + if !(c < 0 && c != -1<<31) { break } - v.reset(OpAnd64) + v.reset(OpMod32) + v.Type = t v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = 1<<63 - 1 + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = -c v.AddArg(v0) return true } - // match: (Mod64u x (Const64 [c])) - // cond: x.Op != OpConst64 && c > 0 && umagicOK(64,c) - // result: (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) + // match: (Mod32 x (Const32 [c])) + // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31) + // result: (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) for { t := v.Type _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } c := v_1.AuxInt - if !(x.Op != OpConst64 && c > 0 && umagicOK(64, c)) { + if !(x.Op != OpConst32 && (c > 0 || c == -1<<31)) { break } - v.reset(OpSub64) + v.reset(OpSub32) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul64, t) - v1 := b.NewValue0(v.Pos, OpDiv64u, t) + v0 := b.NewValue0(v.Pos, OpMul32, t) + v1 := b.NewValue0(v.Pos, OpDiv32, t) v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpConst32, t) v2.AuxInt = c v1.AddArg(v2) v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpConst32, t) v3.AuxInt = c v0.AddArg(v3) v.AddArg(v0) @@ -15102,183 +15290,1921 @@ func rewriteValuegeneric_OpMod64u_0(v *Value) bool { } return false } -func rewriteValuegeneric_OpMod8_0(v *Value) bool { +func rewriteValuegeneric_OpMod32u_0(v *Value) bool { b := v.Block _ = b - // match: (Mod8 (Const8 [c]) (Const8 [d])) + // match: (Mod32u (Const32 [c]) (Const32 [d])) // cond: d != 0 - // result: (Const8 [int64(int8(c % d))]) + // result: (Const32 [int64(uint32(c) % uint32(d))]) for { _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst8 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt if !(d != 0) { break } - v.reset(OpConst8) - v.AuxInt = int64(int8(c % d)) + v.reset(OpConst32) + v.AuxInt = int64(uint32(c) % uint32(d)) return true } - // match: (Mod8 n (Const8 [c])) - // cond: isNonNegative(n) && isPowerOfTwo(c&0xff) - // result: (And8 n (Const8 [(c&0xff)-1])) + // match: (Mod32u n (Const32 [c])) + // cond: isPowerOfTwo(c&0xffffffff) + // result: (And32 n (Const32 [(c&0xffffffff)-1])) for { t := v.Type _ = v.Args[1] n := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst8 { + if v_1.Op != OpConst32 { break } c := v_1.AuxInt - if !(isNonNegative(n) && isPowerOfTwo(c&0xff)) { + if 
!(isPowerOfTwo(c & 0xffffffff)) { break } - v.reset(OpAnd8) + v.reset(OpAnd32) v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = (c & 0xff) - 1 + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = (c & 0xffffffff) - 1 v.AddArg(v0) return true } - // match: (Mod8 n (Const8 [c])) - // cond: c < 0 && c != -1<<7 - // result: (Mod8 n (Const8 [-c])) + // match: (Mod32u x (Const32 [c])) + // cond: x.Op != OpConst32 && c > 0 && umagicOK(32,c) + // result: (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + c := v_1.AuxInt + if !(x.Op != OpConst32 && c > 0 && umagicOK(32, c)) { + break + } + v.reset(OpSub32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpMul32, t) + v1 := b.NewValue0(v.Pos, OpDiv32u, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst32, t) + v2.AuxInt = c + v1.AddArg(v2) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst32, t) + v3.AuxInt = c + v0.AddArg(v3) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Mod64 (Const64 [c]) (Const64 [d])) + // cond: d != 0 + // result: (Const64 [c % d]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + d := v_1.AuxInt + if !(d != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = c % d + return true + } + // match: (Mod64 n (Const64 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c) + // result: (And64 n (Const64 [c-1])) for { t := v.Type _ = v.Args[1] n := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst8 { + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c)) { + break + } + v.reset(OpAnd64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - 1 + v.AddArg(v0) + return true + } + // match: (Mod64 n (Const64 [-1<<63])) + // cond: isNonNegative(n) + // result: n + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.AuxInt != -1<<63 { + break + } + if !(isNonNegative(n)) { + break + } + v.reset(OpCopy) + v.Type = n.Type + v.AddArg(n) + return true + } + // match: (Mod64 n (Const64 [c])) + // cond: c < 0 && c != -1<<63 + // result: (Mod64 n (Const64 [-c])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(c < 0 && c != -1<<63) { + break + } + v.reset(OpMod64) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = -c + v.AddArg(v0) + return true + } + // match: (Mod64 x (Const64 [c])) + // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63) + // result: (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(x.Op != OpConst64 && (c > 0 || c == -1<<63)) { + break + } + v.reset(OpSub64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpMul64, t) + v1 := b.NewValue0(v.Pos, OpDiv64, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = c + v1.AddArg(v2) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = c + v0.AddArg(v3) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod64u_0(v *Value) bool { + b := v.Block + _ = b + // match: (Mod64u (Const64 [c]) (Const64 [d])) + // cond: d != 0 + // 
result: (Const64 [int64(uint64(c) % uint64(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + d := v_1.AuxInt + if !(d != 0) { + break + } + v.reset(OpConst64) + v.AuxInt = int64(uint64(c) % uint64(d)) + return true + } + // match: (Mod64u n (Const64 [c])) + // cond: isPowerOfTwo(c) + // result: (And64 n (Const64 [c-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpAnd64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - 1 + v.AddArg(v0) + return true + } + // match: (Mod64u n (Const64 [-1<<63])) + // cond: + // result: (And64 n (Const64 [1<<63-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.AuxInt != -1<<63 { + break + } + v.reset(OpAnd64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = 1<<63 - 1 + v.AddArg(v0) + return true + } + // match: (Mod64u x (Const64 [c])) + // cond: x.Op != OpConst64 && c > 0 && umagicOK(64,c) + // result: (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(x.Op != OpConst64 && c > 0 && umagicOK(64, c)) { + break + } + v.reset(OpSub64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpMul64, t) + v1 := b.NewValue0(v.Pos, OpDiv64u, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = c + v1.AddArg(v2) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = c + v0.AddArg(v3) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod8_0(v *Value) bool { + b := v.Block + _ = b + // match: (Mod8 (Const8 [c]) (Const8 [d])) + // cond: d != 0 + // result: (Const8 [int64(int8(c % d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + d := v_1.AuxInt + if !(d != 0) { + break + } + v.reset(OpConst8) + v.AuxInt = int64(int8(c % d)) + return true + } + // match: (Mod8 n (Const8 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xff) + // result: (And8 n (Const8 [(c&0xff)-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xff)) { + break + } + v.reset(OpAnd8) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = (c & 0xff) - 1 + v.AddArg(v0) + return true + } + // match: (Mod8 n (Const8 [c])) + // cond: c < 0 && c != -1<<7 + // result: (Mod8 n (Const8 [-c])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(c < 0 && c != -1<<7) { + break + } + v.reset(OpMod8) + v.Type = t + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = -c + v.AddArg(v0) + return true + } + // match: (Mod8 x (Const8 [c])) + // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7) + // result: (Sub8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(x.Op != OpConst8 && (c > 0 || c == -1<<7)) { + break + } + v.reset(OpSub8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpMul8, t) + v1 
:= b.NewValue0(v.Pos, OpDiv8, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = c + v1.AddArg(v2) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst8, t) + v3.AuxInt = c + v0.AddArg(v3) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpMod8u_0(v *Value) bool { + b := v.Block + _ = b + // match: (Mod8u (Const8 [c]) (Const8 [d])) + // cond: d != 0 + // result: (Const8 [int64(uint8(c) % uint8(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + d := v_1.AuxInt + if !(d != 0) { + break + } + v.reset(OpConst8) + v.AuxInt = int64(uint8(c) % uint8(d)) + return true + } + // match: (Mod8u n (Const8 [c])) + // cond: isPowerOfTwo(c&0xff) + // result: (And8 n (Const8 [(c&0xff)-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c & 0xff)) { + break + } + v.reset(OpAnd8) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = (c & 0xff) - 1 + v.AddArg(v0) + return true + } + // match: (Mod8u x (Const8 [c])) + // cond: x.Op != OpConst8 && c > 0 && umagicOK(8 ,c) + // result: (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(x.Op != OpConst8 && c > 0 && umagicOK(8, c)) { + break + } + v.reset(OpSub8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpMul8, t) + v1 := b.NewValue0(v.Pos, OpDiv8u, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpConst8, t) + v2.AuxInt = c + v1.AddArg(v2) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpConst8, t) + v3.AuxInt = c + v0.AddArg(v3) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpMove_0(v *Value) bool { + b := v.Block + _ = b + // match: (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) + // cond: isSamePtr(src, dst2) + // result: (Zero {t} [n] dst1 mem) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if mem.Op != OpZero { + break + } + if mem.AuxInt != n { + break + } + if mem.Aux != t { + break + } + _ = mem.Args[1] + dst2 := mem.Args[0] + if !(isSamePtr(src, dst2)) { + break + } + v.reset(OpZero) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v.AddArg(mem) + return true + } + // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) + // cond: isSamePtr(src, dst0) + // result: (Zero {t} [n] dst1 mem) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpZero { + break + } + if mem_0.AuxInt != n { + break + } + if mem_0.Aux != t { + break + } + _ = mem_0.Args[1] + dst0 := mem_0.Args[0] + if !(isSamePtr(src, dst0)) { + break + } + v.reset(OpZero) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v.AddArg(mem) + return true + } + // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) + // cond: isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2 + sizeof(t2) && disjoint(src1, n, op, sizeof(t2)) && clobber(store) + // result: (Move {t1} [n] dst1 src1 mem) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src1 := v.Args[1] + store := v.Args[2] + if store.Op != OpStore { + break + } + t2 := store.Aux + _ = store.Args[2] + op := store.Args[0] + if op.Op != OpOffPtr { + break + } + o2 
:= op.AuxInt + dst2 := op.Args[0] + mem := store.Args[2] + if !(isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2+sizeof(t2) && disjoint(src1, n, op, sizeof(t2)) && clobber(store)) { + break + } + v.reset(OpMove) + v.AuxInt = n + v.Aux = t1 + v.AddArg(dst1) + v.AddArg(src1) + v.AddArg(mem) + return true + } + // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) + // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move) + // result: (Move {t} [n] dst1 src1 mem) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src1 := v.Args[1] + move := v.Args[2] + if move.Op != OpMove { + break + } + if move.AuxInt != n { + break + } + if move.Aux != t { + break + } + _ = move.Args[2] + dst2 := move.Args[0] + mem := move.Args[2] + if !(move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)) { + break + } + v.reset(OpMove) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v.AddArg(src1) + v.AddArg(mem) + return true + } + // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move) && clobber(vardef) + // result: (Move {t} [n] dst1 src1 (VarDef {x} mem)) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src1 := v.Args[1] + vardef := v.Args[2] + if vardef.Op != OpVarDef { + break + } + x := vardef.Aux + move := vardef.Args[0] + if move.Op != OpMove { + break + } + if move.AuxInt != n { + break + } + if move.Aux != t { + break + } + _ = move.Args[2] + dst2 := move.Args[0] + mem := move.Args[2] + if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move) && clobber(vardef)) { + break + } + v.reset(OpMove) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v.AddArg(src1) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = x + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) + // cond: zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero) + // result: (Move {t} [n] dst1 src1 mem) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src1 := v.Args[1] + zero := v.Args[2] + if zero.Op != OpZero { + break + } + if zero.AuxInt != n { + break + } + if zero.Aux != t { + break + } + _ = zero.Args[1] + dst2 := zero.Args[0] + mem := zero.Args[1] + if !(zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)) { + break + } + v.reset(OpMove) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v.AddArg(src1) + v.AddArg(mem) + return true + } + // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) + // cond: zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero) && clobber(vardef) + // result: (Move {t} [n] dst1 src1 (VarDef {x} mem)) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[2] + dst1 := v.Args[0] + src1 := v.Args[1] + vardef := v.Args[2] + if vardef.Op != OpVarDef { + break + } + x := vardef.Aux + zero := vardef.Args[0] + if zero.Op != OpZero { + break + } + if zero.AuxInt != n { + break + } + if zero.Aux != t { + break + } + _ = zero.Args[1] + dst2 := zero.Args[0] + mem := zero.Args[1] + if !(zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero) && clobber(vardef)) { + break + } + v.reset(OpMove) + v.AuxInt = n 
+ v.Aux = t + v.AddArg(dst1) + v.AddArg(src1) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = x + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + op3 := mem_2.Args[0] + if op3.Op != OpOffPtr { + break + } + if op3.AuxInt != 0 { + break + } + p3 := op3.Args[0] + d2 := mem_2.Args[1] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2.AuxInt = 0 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + op3 := mem_2.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := op3.AuxInt + p3 := op3.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := mem_2_2.Aux + _ = mem_2_2.Args[2] + op4 := mem_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + if op4.AuxInt != 0 { + break + } + p4 := op4.Args[0] + d3 := mem_2_2.Args[1] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + 
v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4.AuxInt = 0 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + op3 := mem_2.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := op3.AuxInt + p3 := op3.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := mem_2_2.Aux + _ = mem_2_2.Args[2] + op4 := mem_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + o4 := op4.AuxInt + p4 := op4.Args[0] + d3 := mem_2_2.Args[1] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpStore { + break + } + t5 := mem_2_2_2.Aux + _ = mem_2_2_2.Args[2] + op5 := mem_2_2_2.Args[0] + if op5.Op != OpOffPtr { + break + } + if op5.AuxInt != 0 { + break + } + p5 := op5.Args[0] + d4 := mem_2_2_2.Args[1] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)+sizeof(t5)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4.AuxInt = o4 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = t5 + v6 := b.NewValue0(v.Pos, OpOffPtr, t5.(*types.Type)) + v6.AuxInt = 0 + v6.AddArg(dst) + v5.AddArg(v6) + v5.AddArg(d4) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + return false +} +func rewriteValuegeneric_OpMove_10(v *Value) bool { + b 
:= v.Block + _ = b + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := mem_0.Aux + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := mem_0_2.Aux + _ = mem_0_2.Args[2] + op3 := mem_0_2.Args[0] + if op3.Op != OpOffPtr { + break + } + if op3.AuxInt != 0 { + break + } + p3 := op3.Args[0] + d2 := mem_0_2.Args[1] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2.AuxInt = 0 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := mem_0.Aux + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := mem_0_2.Aux + _ = mem_0_2.Args[2] + op3 := mem_0_2.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := op3.AuxInt + p3 := op3.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpStore { + break + } + t4 := mem_0_2_2.Aux + _ = mem_0_2_2.Args[2] + op4 := mem_0_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + if op4.AuxInt != 0 { + break + } + p4 := op4.Args[0] + d3 := mem_0_2_2.Args[1] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, 
t2.(*types.Type)) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4.AuxInt = 0 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := mem_0.Aux + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := mem_0_2.Aux + _ = mem_0_2.Args[2] + op3 := mem_0_2.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := op3.AuxInt + p3 := op3.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpStore { + break + } + t4 := mem_0_2_2.Aux + _ = mem_0_2_2.Args[2] + op4 := mem_0_2_2.Args[0] + if op4.Op != OpOffPtr { + break + } + o4 := op4.AuxInt + p4 := op4.Args[0] + d3 := mem_0_2_2.Args[1] + mem_0_2_2_2 := mem_0_2_2.Args[2] + if mem_0_2_2_2.Op != OpStore { + break + } + t5 := mem_0_2_2_2.Aux + _ = mem_0_2_2_2.Args[2] + op5 := mem_0_2_2_2.Args[0] + if op5.Op != OpOffPtr { + break + } + if op5.AuxInt != 0 { + break + } + p5 := op5.Args[0] + d4 := mem_0_2_2_2.Args[1] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2)+sizeof(t3)+sizeof(t4)+sizeof(t5)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4.AuxInt = o4 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = t5 + v6 := b.NewValue0(v.Pos, OpOffPtr, t5.(*types.Type)) + v6.AuxInt = 0 + 
v6.AddArg(dst) + v5.AddArg(v6) + v5.AddArg(d4) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2 + sizeof(t2) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Zero {t1} [n] dst mem)) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + op2 := mem.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpZero { + break + } + if mem_2.AuxInt != n { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[1] + p3 := mem_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2+sizeof(t2)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = n + v1.Aux = t1 + v1.AddArg(dst) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Zero {t1} [n] dst mem))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + mem_0 := mem.Args[0] + if mem_0.Op != OpOffPtr { + break + } + tt2 := mem_0.Type + o2 := mem_0.AuxInt + p2 := mem_0.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + mem_2_0 := mem_2.Args[0] + if mem_2_0.Op != OpOffPtr { + break + } + tt3 := mem_2_0.Type + o3 := mem_2_0.AuxInt + p3 := mem_2_0.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpZero { + break + } + if mem_2_2.AuxInt != n { + break + } + t4 := mem_2_2.Aux + _ = mem_2_2.Args[1] + p4 := mem_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v3.AuxInt = n + v3.Aux = t1 + v3.AddArg(dst) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _))))) + // cond: 
isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) && n >= o4 + sizeof(t4) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Zero {t1} [n] dst mem)))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + mem_0 := mem.Args[0] + if mem_0.Op != OpOffPtr { + break + } + tt2 := mem_0.Type + o2 := mem_0.AuxInt + p2 := mem_0.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + mem_2_0 := mem_2.Args[0] + if mem_2_0.Op != OpOffPtr { + break + } + tt3 := mem_2_0.Type + o3 := mem_2_0.AuxInt + p3 := mem_2_0.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := mem_2_2.Aux + _ = mem_2_2.Args[2] + mem_2_2_0 := mem_2_2.Args[0] + if mem_2_2_0.Op != OpOffPtr { + break + } + tt4 := mem_2_2_0.Type + o4 := mem_2_2_0.AuxInt + p4 := mem_2_2_0.Args[0] + d3 := mem_2_2.Args[1] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpZero { + break + } + if mem_2_2_2.AuxInt != n { + break + } + t5 := mem_2_2_2.Aux + _ = mem_2_2_2.Args[1] + p5 := mem_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = o4 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v5.AuxInt = n + v5.Aux = t1 + v5.AddArg(dst) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _)))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) && n >= o4 + sizeof(t4) && n >= o5 + sizeof(t5) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [o5] dst) d4 (Zero {t1} [n] dst mem))))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := 
mem.Aux + _ = mem.Args[2] + mem_0 := mem.Args[0] + if mem_0.Op != OpOffPtr { + break + } + tt2 := mem_0.Type + o2 := mem_0.AuxInt + p2 := mem_0.Args[0] + d1 := mem.Args[1] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + mem_2_0 := mem_2.Args[0] + if mem_2_0.Op != OpOffPtr { + break + } + tt3 := mem_2_0.Type + o3 := mem_2_0.AuxInt + p3 := mem_2_0.Args[0] + d2 := mem_2.Args[1] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { + break + } + t4 := mem_2_2.Aux + _ = mem_2_2.Args[2] + mem_2_2_0 := mem_2_2.Args[0] + if mem_2_2_0.Op != OpOffPtr { + break + } + tt4 := mem_2_2_0.Type + o4 := mem_2_2_0.AuxInt + p4 := mem_2_2_0.Args[0] + d3 := mem_2_2.Args[1] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpStore { + break + } + t5 := mem_2_2_2.Aux + _ = mem_2_2_2.Args[2] + mem_2_2_2_0 := mem_2_2_2.Args[0] + if mem_2_2_2_0.Op != OpOffPtr { + break + } + tt5 := mem_2_2_2_0.Type + o5 := mem_2_2_2_0.AuxInt + p5 := mem_2_2_2_0.Args[0] + d4 := mem_2_2_2.Args[1] + mem_2_2_2_2 := mem_2_2_2.Args[2] + if mem_2_2_2_2.Op != OpZero { + break + } + if mem_2_2_2_2.AuxInt != n { + break + } + t6 := mem_2_2_2_2.Aux + _ = mem_2_2_2_2.Args[1] + p6 := mem_2_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4) && n >= o5+sizeof(t5)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = o4 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = t5 + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) + v6.AuxInt = o5 + v6.AddArg(dst) + v5.AddArg(v6) + v5.AddArg(d4) + v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v7.AuxInt = n + v7.Aux = t1 + v7.AddArg(dst) + v7.AddArg(mem) + v5.AddArg(v7) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Zero {t3} [n] p3 _)))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && n >= o2 + sizeof(t2) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Zero {t1} [n] dst mem)) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := mem_0.Aux + _ = mem_0.Args[2] + op2 := mem_0.Args[0] + if op2.Op != OpOffPtr { + break + } + tt2 := op2.Type + o2 := op2.AuxInt + p2 := op2.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpZero { + break + } + if mem_0_2.AuxInt != n { + break + } + t3 := mem_0_2.Aux + _ = mem_0_2.Args[1] + p3 := mem_0_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && 
registerizable(b, t2) && n >= o2+sizeof(t2)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = n + v1.Aux = t1 + v1.AddArg(dst) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Zero {t4} [n] p4 _))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Zero {t1} [n] dst mem))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := mem_0.Aux + _ = mem_0.Args[2] + mem_0_0 := mem_0.Args[0] + if mem_0_0.Op != OpOffPtr { + break + } + tt2 := mem_0_0.Type + o2 := mem_0_0.AuxInt + p2 := mem_0_0.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := mem_0_2.Aux + _ = mem_0_2.Args[2] + mem_0_2_0 := mem_0_2.Args[0] + if mem_0_2_0.Op != OpOffPtr { + break + } + tt3 := mem_0_2_0.Type + o3 := mem_0_2_0.AuxInt + p3 := mem_0_2_0.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpZero { + break + } + if mem_0_2_2.AuxInt != n { + break + } + t4 := mem_0_2_2.Aux + _ = mem_0_2_2.Args[1] + p4 := mem_0_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) + v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v3.AuxInt = n + v3.Aux = t1 + v3.AddArg(dst) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Zero {t5} [n] p5 _)))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) && n >= o4 + sizeof(t4) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Zero {t1} [n] dst mem)))) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { + break + } + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { + break + } + t2 := mem_0.Aux + _ = mem_0.Args[2] + mem_0_0 := mem_0.Args[0] + if mem_0_0.Op != OpOffPtr { + break + } + tt2 := mem_0_0.Type + o2 := mem_0_0.AuxInt + p2 := mem_0_0.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := 
mem_0.Args[2] + if mem_0_2.Op != OpStore { + break + } + t3 := mem_0_2.Aux + _ = mem_0_2.Args[2] + mem_0_2_0 := mem_0_2.Args[0] + if mem_0_2_0.Op != OpOffPtr { + break + } + tt3 := mem_0_2_0.Type + o3 := mem_0_2_0.AuxInt + p3 := mem_0_2_0.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpStore { + break + } + t4 := mem_0_2_2.Aux + _ = mem_0_2_2.Args[2] + mem_0_2_2_0 := mem_0_2_2.Args[0] + if mem_0_2_2_0.Op != OpOffPtr { break } - c := v_1.AuxInt - if !(c < 0 && c != -1<<7) { + tt4 := mem_0_2_2_0.Type + o4 := mem_0_2_2_0.AuxInt + p4 := mem_0_2_2_0.Args[0] + d3 := mem_0_2_2.Args[1] + mem_0_2_2_2 := mem_0_2_2.Args[2] + if mem_0_2_2_2.Op != OpZero { break } - v.reset(OpMod8) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = -c - v.AddArg(v0) - return true - } - // match: (Mod8 x (Const8 [c])) - // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7) - // result: (Sub8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) - for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + if mem_0_2_2_2.AuxInt != n { break } - c := v_1.AuxInt - if !(x.Op != OpConst8 && (c > 0 || c == -1<<7)) { + t5 := mem_0_2_2_2.Aux + _ = mem_0_2_2_2.Args[1] + p5 := mem_0_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4)) { break } - v.reset(OpSub8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul8, t) - v1 := b.NewValue0(v.Pos, OpDiv8, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, t) - v3.AuxInt = c - v0.AddArg(v3) + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = o4 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v5.AuxInt = n + v5.Aux = t1 + v5.AddArg(dst) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) return true } return false } -func rewriteValuegeneric_OpMod8u_0(v *Value) bool { +func rewriteValuegeneric_OpMove_20(v *Value) bool { b := v.Block _ = b - // match: (Mod8u (Const8 [c]) (Const8 [d])) - // cond: d != 0 - // result: (Const8 [int64(uint8(c) % uint8(d))]) + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _))))))) + // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) && n >= o4 + sizeof(t4) && n >= o5 + sizeof(t5) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr 
[o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [o5] dst) d4 (Zero {t1} [n] dst mem))))) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst8 { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + p1 := v.Args[1] + mem := v.Args[2] + if mem.Op != OpVarDef { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + mem_0 := mem.Args[0] + if mem_0.Op != OpStore { break } - d := v_1.AuxInt - if !(d != 0) { + t2 := mem_0.Aux + _ = mem_0.Args[2] + mem_0_0 := mem_0.Args[0] + if mem_0_0.Op != OpOffPtr { break } - v.reset(OpConst8) - v.AuxInt = int64(uint8(c) % uint8(d)) - return true - } - // match: (Mod8u n (Const8 [c])) - // cond: isPowerOfTwo(c&0xff) - // result: (And8 n (Const8 [(c&0xff)-1])) - for { - t := v.Type - _ = v.Args[1] - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + tt2 := mem_0_0.Type + o2 := mem_0_0.AuxInt + p2 := mem_0_0.Args[0] + d1 := mem_0.Args[1] + mem_0_2 := mem_0.Args[2] + if mem_0_2.Op != OpStore { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c & 0xff)) { + t3 := mem_0_2.Aux + _ = mem_0_2.Args[2] + mem_0_2_0 := mem_0_2.Args[0] + if mem_0_2_0.Op != OpOffPtr { break } - v.reset(OpAnd8) - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = (c & 0xff) - 1 - v.AddArg(v0) - return true - } - // match: (Mod8u x (Const8 [c])) - // cond: x.Op != OpConst8 && c > 0 && umagicOK(8 ,c) - // result: (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) - for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + tt3 := mem_0_2_0.Type + o3 := mem_0_2_0.AuxInt + p3 := mem_0_2_0.Args[0] + d2 := mem_0_2.Args[1] + mem_0_2_2 := mem_0_2.Args[2] + if mem_0_2_2.Op != OpStore { break } - c := v_1.AuxInt - if !(x.Op != OpConst8 && c > 0 && umagicOK(8, c)) { + t4 := mem_0_2_2.Aux + _ = mem_0_2_2.Args[2] + mem_0_2_2_0 := mem_0_2_2.Args[0] + if mem_0_2_2_0.Op != OpOffPtr { break } - v.reset(OpSub8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpMul8, t) - v1 := b.NewValue0(v.Pos, OpDiv8u, t) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpConst8, t) - v2.AuxInt = c - v1.AddArg(v2) - v0.AddArg(v1) - v3 := b.NewValue0(v.Pos, OpConst8, t) - v3.AuxInt = c - v0.AddArg(v3) + tt4 := mem_0_2_2_0.Type + o4 := mem_0_2_2_0.AuxInt + p4 := mem_0_2_2_0.Args[0] + d3 := mem_0_2_2.Args[1] + mem_0_2_2_2 := mem_0_2_2.Args[2] + if mem_0_2_2_2.Op != OpStore { + break + } + t5 := mem_0_2_2_2.Aux + _ = mem_0_2_2_2.Args[2] + mem_0_2_2_2_0 := mem_0_2_2_2.Args[0] + if mem_0_2_2_2_0.Op != OpOffPtr { + break + } + tt5 := mem_0_2_2_2_0.Type + o5 := mem_0_2_2_2_0.AuxInt + p5 := mem_0_2_2_2_0.Args[0] + d4 := mem_0_2_2_2.Args[1] + mem_0_2_2_2_2 := mem_0_2_2_2.Args[2] + if mem_0_2_2_2_2.Op != OpZero { + break + } + if mem_0_2_2_2_2.AuxInt != n { + break + } + t6 := mem_0_2_2_2_2.Aux + _ = mem_0_2_2_2_2.Args[1] + p6 := mem_0_2_2_2_2.Args[0] + if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+sizeof(t2) && n >= o3+sizeof(t3) && n >= o4+sizeof(t4) && n >= o5+sizeof(t5)) { + break + } + v.reset(OpStore) + v.Aux = t2 + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) + v0.AuxInt = o2 + v0.AddArg(dst) v.AddArg(v0) + v.AddArg(d1) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v2 := 
b.NewValue0(v.Pos, OpOffPtr, tt3) + v2.AuxInt = o3 + v2.AddArg(dst) + v1.AddArg(v2) + v1.AddArg(d2) + v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v3.Aux = t4 + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) + v4.AuxInt = o4 + v4.AddArg(dst) + v3.AddArg(v4) + v3.AddArg(d3) + v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v5.Aux = t5 + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) + v6.AuxInt = o5 + v6.AddArg(dst) + v5.AddArg(v6) + v5.AddArg(d4) + v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v7.AuxInt = n + v7.Aux = t1 + v7.AddArg(dst) + v7.AddArg(mem) + v5.AddArg(v7) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) return true } return false @@ -25357,10 +27283,8 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { func rewriteValuegeneric_OpStore_0(v *Value) bool { b := v.Block _ = b - fe := b.Func.fe - _ = fe // match: (Store {t1} p1 (Load p2 mem) mem) - // cond: isSamePtr(p1, p2) && t2.Size() == t1.(*types.Type).Size() + // cond: isSamePtr(p1, p2) && t2.Size() == sizeof(t1) // result: mem for { t1 := v.Aux @@ -25377,7 +27301,7 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { if mem != v.Args[2] { break } - if !(isSamePtr(p1, p2) && t2.Size() == t1.(*types.Type).Size()) { + if !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1)) { break } v.reset(OpCopy) @@ -25385,32 +27309,53 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (Store {t1} (OffPtr [o1] p1) (Load (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ oldmem)) - // cond: isSamePtr(p1, p2) && isSamePtr(p1, p3) && t2.Size() == t1.(*types.Type).Size() && !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) + // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ oldmem)) + // cond: isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) // result: mem for { t1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o1 := v_0.AuxInt - p1 := v_0.Args[0] + p1 := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpLoad { break } t2 := v_1.Type _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpOffPtr { + p2 := v_1.Args[0] + oldmem := v_1.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t3 := mem.Aux + _ = mem.Args[2] + p3 := mem.Args[0] + if oldmem != mem.Args[2] { break } - if v_1_0.AuxInt != o1 { + if !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3))) { + break + } + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem))) + // cond: isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4)) + // result: mem + for { + t1 := v.Aux + _ = v.Args[2] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpLoad { break } - p2 := v_1_0.Args[0] + t2 := v_1.Type + _ = v_1.Args[1] + p2 := v_1.Args[0] oldmem := v_1.Args[1] mem := v.Args[2] if mem.Op != OpStore { @@ -25418,16 +27363,18 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { } t3 := mem.Aux _ = mem.Args[2] - mem_0 := mem.Args[0] - if mem_0.Op != OpOffPtr { + p3 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { break } - o3 := mem_0.AuxInt - p3 := mem_0.Args[0] - if oldmem != mem.Args[2] { + t4 := mem_2.Aux + _ = mem_2.Args[2] + p4 := mem_2.Args[0] + if oldmem != mem_2.Args[2] { break } - if !(isSamePtr(p1, p2) && isSamePtr(p1, p3) && t2.Size() == t1.(*types.Type).Size() && !overlap(o1, t2.Size(), o3, 
t3.(*types.Type).Size())) { + if !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4))) { break } v.reset(OpCopy) @@ -25435,32 +27382,20 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (Store {t1} (OffPtr [o1] p1) (Load (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ (Store {t4} (OffPtr [o4] p4) _ oldmem))) - // cond: isSamePtr(p1, p2) && isSamePtr(p1, p3) && isSamePtr(p1, p4) && t2.Size() == t1.(*types.Type).Size() && !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) && !overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) + // match: (Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem)))) + // cond: isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4)) && disjoint(p1, sizeof(t1), p5, sizeof(t5)) // result: mem for { t1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpOffPtr { - break - } - o1 := v_0.AuxInt - p1 := v_0.Args[0] + p1 := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpLoad { break } t2 := v_1.Type _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpOffPtr { - break - } - if v_1_0.AuxInt != o1 { - break - } - p2 := v_1_0.Args[0] + p2 := v_1.Args[0] oldmem := v_1.Args[1] mem := v.Args[2] if mem.Op != OpStore { @@ -25468,28 +27403,25 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { } t3 := mem.Aux _ = mem.Args[2] - mem_0 := mem.Args[0] - if mem_0.Op != OpOffPtr { - break - } - o3 := mem_0.AuxInt - p3 := mem_0.Args[0] + p3 := mem.Args[0] mem_2 := mem.Args[2] if mem_2.Op != OpStore { break } t4 := mem_2.Aux _ = mem_2.Args[2] - mem_2_0 := mem_2.Args[0] - if mem_2_0.Op != OpOffPtr { + p4 := mem_2.Args[0] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpStore { break } - o4 := mem_2_0.AuxInt - p4 := mem_2_0.Args[0] - if oldmem != mem_2.Args[2] { + t5 := mem_2_2.Aux + _ = mem_2_2.Args[2] + p5 := mem_2_2.Args[0] + if oldmem != mem_2_2.Args[2] { break } - if !(isSamePtr(p1, p2) && isSamePtr(p1, p3) && isSamePtr(p1, p4) && t2.Size() == t1.(*types.Type).Size() && !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) && !overlap(o1, t2.Size(), o4, t4.(*types.Type).Size())) { + if !(isSamePtr(p1, p2) && t2.Size() == sizeof(t1) && disjoint(p1, sizeof(t1), p3, sizeof(t3)) && disjoint(p1, sizeof(t1), p4, sizeof(t4)) && disjoint(p1, sizeof(t1), p5, sizeof(t5))) { break } v.reset(OpCopy) @@ -25497,73 +27429,153 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (Store {t1} (OffPtr [o1] p1) (Load (OffPtr [o1] p2) oldmem) mem:(Store {t3} (OffPtr [o3] p3) _ (Store {t4} (OffPtr [o4] p4) _ (Store {t5} (OffPtr [o5] p5) _ oldmem)))) - // cond: isSamePtr(p1, p2) && isSamePtr(p1, p3) && isSamePtr(p1, p4) && isSamePtr(p1, p5) && t2.Size() == t1.(*types.Type).Size() && !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) && !overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) && !overlap(o1, t2.Size(), o5, t5.(*types.Type).Size()) + // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _)) + // cond: isConstZero(x) && o >= 0 && sizeof(t) + o <= n && isSamePtr(p1, p2) // result: mem for { - t1 := v.Aux + t := v.Aux _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpOffPtr { break } - o1 := v_0.AuxInt + o := v_0.AuxInt p1 := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpLoad { + x := v.Args[1] + mem := v.Args[2] + if mem.Op != OpZero { + break + } + n := mem.AuxInt + _ = mem.Args[1] + p2 := 
mem.Args[0] + if !(isConstZero(x) && o >= 0 && sizeof(t)+o <= n && isSamePtr(p1, p2)) { + break + } + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _))) + // cond: isConstZero(x) && o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p3) && disjoint(op, sizeof(t1), p2, sizeof(t2)) + // result: mem + for { + t1 := v.Aux + _ = v.Args[2] + op := v.Args[0] + if op.Op != OpOffPtr { + break + } + o1 := op.AuxInt + p1 := op.Args[0] + x := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + p2 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpZero { + break + } + n := mem_2.AuxInt + _ = mem_2.Args[1] + p3 := mem_2.Args[0] + if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p3) && disjoint(op, sizeof(t1), p2, sizeof(t2))) { + break + } + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _)))) + // cond: isConstZero(x) && o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p4) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3)) + // result: mem + for { + t1 := v.Aux + _ = v.Args[2] + op := v.Args[0] + if op.Op != OpOffPtr { + break + } + o1 := op.AuxInt + p1 := op.Args[0] + x := v.Args[1] + mem := v.Args[2] + if mem.Op != OpStore { + break + } + t2 := mem.Aux + _ = mem.Args[2] + p2 := mem.Args[0] + mem_2 := mem.Args[2] + if mem_2.Op != OpStore { + break + } + t3 := mem_2.Aux + _ = mem_2.Args[2] + p3 := mem_2.Args[0] + mem_2_2 := mem_2.Args[2] + if mem_2_2.Op != OpZero { break } - t2 := v_1.Type - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpOffPtr { + n := mem_2_2.AuxInt + _ = mem_2_2.Args[1] + p4 := mem_2_2.Args[0] + if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p4) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3))) { break } - if v_1_0.AuxInt != o1 { + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _))))) + // cond: isConstZero(x) && o1 >= 0 && sizeof(t1) + o1 <= n && isSamePtr(p1, p5) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3)) && disjoint(op, sizeof(t1), p4, sizeof(t4)) + // result: mem + for { + t1 := v.Aux + _ = v.Args[2] + op := v.Args[0] + if op.Op != OpOffPtr { break } - p2 := v_1_0.Args[0] - oldmem := v_1.Args[1] + o1 := op.AuxInt + p1 := op.Args[0] + x := v.Args[1] mem := v.Args[2] if mem.Op != OpStore { break } - t3 := mem.Aux + t2 := mem.Aux _ = mem.Args[2] - mem_0 := mem.Args[0] - if mem_0.Op != OpOffPtr { - break - } - o3 := mem_0.AuxInt - p3 := mem_0.Args[0] + p2 := mem.Args[0] mem_2 := mem.Args[2] if mem_2.Op != OpStore { break } - t4 := mem_2.Aux + t3 := mem_2.Aux _ = mem_2.Args[2] - mem_2_0 := mem_2.Args[0] - if mem_2_0.Op != OpOffPtr { - break - } - o4 := mem_2_0.AuxInt - p4 := mem_2_0.Args[0] + p3 := mem_2.Args[0] mem_2_2 := mem_2.Args[2] if mem_2_2.Op != OpStore { break } - t5 := mem_2_2.Aux + t4 := mem_2_2.Aux _ = mem_2_2.Args[2] - mem_2_2_0 := mem_2_2.Args[0] - if mem_2_2_0.Op != OpOffPtr { - break - } - o5 := mem_2_2_0.AuxInt - p5 := mem_2_2_0.Args[0] - if oldmem != mem_2_2.Args[2] { + p4 := mem_2_2.Args[0] + mem_2_2_2 := mem_2_2.Args[2] + if mem_2_2_2.Op != OpZero { break } - if !(isSamePtr(p1, p2) && 
isSamePtr(p1, p3) && isSamePtr(p1, p4) && isSamePtr(p1, p5) && t2.Size() == t1.(*types.Type).Size() && !overlap(o1, t2.Size(), o3, t3.(*types.Type).Size()) && !overlap(o1, t2.Size(), o4, t4.(*types.Type).Size()) && !overlap(o1, t2.Size(), o5, t5.(*types.Type).Size())) { + n := mem_2_2_2.AuxInt + _ = mem_2_2_2.Args[1] + p5 := mem_2_2_2.Args[0] + if !(isConstZero(x) && o1 >= 0 && sizeof(t1)+o1 <= n && isSamePtr(p1, p5) && disjoint(op, sizeof(t1), p2, sizeof(t2)) && disjoint(op, sizeof(t1), p3, sizeof(t3)) && disjoint(op, sizeof(t1), p4, sizeof(t4))) { break } v.reset(OpCopy) @@ -25609,6 +27621,15 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValuegeneric_OpStore_10(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + fe := b.Func.fe + _ = fe // match: (Store dst (StructMake2 f0 f1) mem) // cond: // result: (Store {t.FieldType(1)} (OffPtr [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem)) @@ -25737,7 +27758,7 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { } // match: (Store {t} dst (Load src mem) mem) // cond: !fe.CanSSA(t.(*types.Type)) - // result: (Move {t} [t.(*types.Type).Size()] dst src mem) + // result: (Move {t} [sizeof(t)] dst src mem) for { t := v.Aux _ = v.Args[2] @@ -25756,25 +27777,16 @@ func rewriteValuegeneric_OpStore_0(v *Value) bool { break } v.reset(OpMove) - v.AuxInt = t.(*types.Type).Size() + v.AuxInt = sizeof(t) v.Aux = t v.AddArg(dst) v.AddArg(src) v.AddArg(mem) return true } - return false -} -func rewriteValuegeneric_OpStore_10(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - fe := b.Func.fe - _ = fe // match: (Store {t} dst (Load src mem) (VarDef {x} mem)) // cond: !fe.CanSSA(t.(*types.Type)) - // result: (Move {t} [t.(*types.Type).Size()] dst src (VarDef {x} mem)) + // result: (Move {t} [sizeof(t)] dst src (VarDef {x} mem)) for { t := v.Aux _ = v.Args[2] @@ -25798,7 +27810,7 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { break } v.reset(OpMove) - v.AuxInt = t.(*types.Type).Size() + v.AuxInt = sizeof(t) v.Aux = t v.AddArg(dst) v.AddArg(src) @@ -25910,6 +27922,425 @@ func rewriteValuegeneric_OpStore_10(v *Value) bool { v.AddArg(mem) return true } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem))) + // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == sizeof(t2) && n == sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2) && clobber(m3) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) + for { + t1 := v.Aux + _ = v.Args[2] + op1 := v.Args[0] + if op1.Op != OpOffPtr { + break + } + o1 := op1.AuxInt + p1 := op1.Args[0] + d1 := v.Args[1] + m2 := v.Args[2] + if m2.Op != OpStore { + break + } + t2 := m2.Aux + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + if op2.AuxInt != 0 { + break + } + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpMove { + break + } + n := m3.AuxInt + _ = m3.Args[2] + p3 := m3.Args[0] + mem := m3.Args[2] + if !(m2.Uses == 1 && m3.Uses == 1 && o1 == sizeof(t2) && n == sizeof(t2)+sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2) && clobber(m3)) { + break + } + v.reset(OpStore) + v.Aux = t1 + v.AddArg(op1) + v.AddArg(d1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = t2 + v0.AddArg(op2) + v0.AddArg(d2) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpStore_20(v *Value) bool 
{ + b := v.Block + _ = b + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem)))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t3) + sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2) && clobber(m3) && clobber(m4) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) + for { + t1 := v.Aux + _ = v.Args[2] + op1 := v.Args[0] + if op1.Op != OpOffPtr { + break + } + o1 := op1.AuxInt + p1 := op1.Args[0] + d1 := v.Args[1] + m2 := v.Args[2] + if m2.Op != OpStore { + break + } + t2 := m2.Aux + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := m3.Aux + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr { + break + } + if op3.AuxInt != 0 { + break + } + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpMove { + break + } + n := m4.AuxInt + _ = m4.Args[2] + p4 := m4.Args[0] + mem := m4.Args[2] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t3)+sizeof(t2)+sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2) && clobber(m3) && clobber(m4)) { + break + } + v.reset(OpStore) + v.Aux = t1 + v.AddArg(op1) + v.AddArg(d1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = t2 + v0.AddArg(op2) + v0.AddArg(d2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v1.AddArg(op3) + v1.AddArg(d3) + v1.AddArg(mem) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem))))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t4) + sizeof(t3) + sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + for { + t1 := v.Aux + _ = v.Args[2] + op1 := v.Args[0] + if op1.Op != OpOffPtr { + break + } + o1 := op1.AuxInt + p1 := op1.Args[0] + d1 := v.Args[1] + m2 := v.Args[2] + if m2.Op != OpStore { + break + } + t2 := m2.Aux + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := m3.Aux + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := op3.AuxInt + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpStore { + break + } + t4 := m4.Aux + _ = m4.Args[2] + op4 := m4.Args[0] + if op4.Op != OpOffPtr { + break + } + if op4.AuxInt != 0 { + break + } + p4 := op4.Args[0] + d4 := m4.Args[1] + m5 := m4.Args[2] + if m5.Op != OpMove { + break + } + n := m5.AuxInt + _ = m5.Args[2] + p5 := m5.Args[0] + mem := m5.Args[2] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t4)+sizeof(t3)+sizeof(t2)+sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && 
isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5)) { + break + } + v.reset(OpStore) + v.Aux = t1 + v.AddArg(op1) + v.AddArg(d1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = t2 + v0.AddArg(op2) + v0.AddArg(d2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v1.AddArg(op3) + v1.AddArg(d3) + v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v2.Aux = t4 + v2.AddArg(op4) + v2.AddArg(d4) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem))) + // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == sizeof(t2) && n == sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2) && clobber(m3) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) + for { + t1 := v.Aux + _ = v.Args[2] + op1 := v.Args[0] + if op1.Op != OpOffPtr { + break + } + o1 := op1.AuxInt + p1 := op1.Args[0] + d1 := v.Args[1] + m2 := v.Args[2] + if m2.Op != OpStore { + break + } + t2 := m2.Aux + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + if op2.AuxInt != 0 { + break + } + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpZero { + break + } + n := m3.AuxInt + _ = m3.Args[1] + p3 := m3.Args[0] + mem := m3.Args[1] + if !(m2.Uses == 1 && m3.Uses == 1 && o1 == sizeof(t2) && n == sizeof(t2)+sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2) && clobber(m3)) { + break + } + v.reset(OpStore) + v.Aux = t1 + v.AddArg(op1) + v.AddArg(d1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = t2 + v0.AddArg(op2) + v0.AddArg(d2) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem)))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t3) + sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2) && clobber(m3) && clobber(m4) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) + for { + t1 := v.Aux + _ = v.Args[2] + op1 := v.Args[0] + if op1.Op != OpOffPtr { + break + } + o1 := op1.AuxInt + p1 := op1.Args[0] + d1 := v.Args[1] + m2 := v.Args[2] + if m2.Op != OpStore { + break + } + t2 := m2.Aux + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := m3.Aux + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr { + break + } + if op3.AuxInt != 0 { + break + } + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpZero { + break + } + n := m4.AuxInt + _ = m4.Args[1] + p4 := m4.Args[0] + mem := m4.Args[1] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t3)+sizeof(t2)+sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2) && clobber(m3) && clobber(m4)) { + break + } + v.reset(OpStore) + v.Aux = t1 + v.AddArg(op1) + v.AddArg(d1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = t2 + v0.AddArg(op2) + v0.AddArg(d2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v1.AddArg(op3) + v1.AddArg(d3) + v1.AddArg(mem) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Store {t1} 
op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem))))) + // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t4) + sizeof(t3) + sizeof(t2) + sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5) + // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + for { + t1 := v.Aux + _ = v.Args[2] + op1 := v.Args[0] + if op1.Op != OpOffPtr { + break + } + o1 := op1.AuxInt + p1 := op1.Args[0] + d1 := v.Args[1] + m2 := v.Args[2] + if m2.Op != OpStore { + break + } + t2 := m2.Aux + _ = m2.Args[2] + op2 := m2.Args[0] + if op2.Op != OpOffPtr { + break + } + o2 := op2.AuxInt + p2 := op2.Args[0] + d2 := m2.Args[1] + m3 := m2.Args[2] + if m3.Op != OpStore { + break + } + t3 := m3.Aux + _ = m3.Args[2] + op3 := m3.Args[0] + if op3.Op != OpOffPtr { + break + } + o3 := op3.AuxInt + p3 := op3.Args[0] + d3 := m3.Args[1] + m4 := m3.Args[2] + if m4.Op != OpStore { + break + } + t4 := m4.Aux + _ = m4.Args[2] + op4 := m4.Args[0] + if op4.Op != OpOffPtr { + break + } + if op4.AuxInt != 0 { + break + } + p4 := op4.Args[0] + d4 := m4.Args[1] + m5 := m4.Args[2] + if m5.Op != OpZero { + break + } + n := m5.AuxInt + _ = m5.Args[1] + p5 := m5.Args[0] + mem := m5.Args[1] + if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && o1-o2 == sizeof(t2) && n == sizeof(t4)+sizeof(t3)+sizeof(t2)+sizeof(t1) && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2) && clobber(m3) && clobber(m4) && clobber(m5)) { + break + } + v.reset(OpStore) + v.Aux = t1 + v.AddArg(op1) + v.AddArg(d1) + v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v0.Aux = t2 + v0.AddArg(op2) + v0.AddArg(d2) + v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v1.Aux = t3 + v1.AddArg(op3) + v1.AddArg(d3) + v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem) + v2.Aux = t4 + v2.AddArg(op4) + v2.AddArg(d4) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } return false } func rewriteValuegeneric_OpStringLen_0(v *Value) bool { @@ -30139,6 +32570,107 @@ func rewriteValuegeneric_OpZero_0(v *Value) bool { v.AddArg(mem) return true } + // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) + // cond: isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + sizeof(t2) && clobber(store) + // result: (Zero {t1} [n] p1 mem) + for { + n := v.AuxInt + t1 := v.Aux + _ = v.Args[1] + p1 := v.Args[0] + store := v.Args[1] + if store.Op != OpStore { + break + } + t2 := store.Aux + _ = store.Args[2] + store_0 := store.Args[0] + if store_0.Op != OpOffPtr { + break + } + o2 := store_0.AuxInt + p2 := store_0.Args[0] + mem := store.Args[2] + if !(isSamePtr(p1, p2) && store.Uses == 1 && n >= o2+sizeof(t2) && clobber(store)) { + break + } + v.reset(OpZero) + v.AuxInt = n + v.Aux = t1 + v.AddArg(p1) + v.AddArg(mem) + return true + } + // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) + // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move) + // result: (Zero {t} [n] dst1 mem) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[1] + dst1 := v.Args[0] + move := v.Args[1] + if move.Op != OpMove { + break + } + if move.AuxInt != n { + break + } + if move.Aux != t { + break + } + _ = move.Args[2] + 
dst2 := move.Args[0] + mem := move.Args[2] + if !(move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)) { + break + } + v.reset(OpZero) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v.AddArg(mem) + return true + } + // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move) && clobber(vardef) + // result: (Zero {t} [n] dst1 (VarDef {x} mem)) + for { + n := v.AuxInt + t := v.Aux + _ = v.Args[1] + dst1 := v.Args[0] + vardef := v.Args[1] + if vardef.Op != OpVarDef { + break + } + x := vardef.Aux + move := vardef.Args[0] + if move.Op != OpMove { + break + } + if move.AuxInt != n { + break + } + if move.Aux != t { + break + } + _ = move.Args[2] + dst2 := move.Args[0] + mem := move.Args[2] + if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move) && clobber(vardef)) { + break + } + v.reset(OpZero) + v.AuxInt = n + v.Aux = t + v.AddArg(dst1) + v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem) + v0.Aux = x + v0.AddArg(mem) + v.AddArg(v0) + return true + } return false } func rewriteValuegeneric_OpZeroExt16to32_0(v *Value) bool { diff --git a/src/runtime/internal/atomic/atomic_test.go b/src/runtime/internal/atomic/atomic_test.go index b697aa8bd39d61..25ece4354eab75 100644 --- a/src/runtime/internal/atomic/atomic_test.go +++ b/src/runtime/internal/atomic/atomic_test.go @@ -93,8 +93,10 @@ func TestUnaligned64(t *testing.T) { } x := make([]uint32, 4) - up64 := (*uint64)(unsafe.Pointer(&x[1])) // misaligned - p64 := (*int64)(unsafe.Pointer(&x[1])) // misaligned + u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4 + + up64 := (*uint64)(u) // misaligned + p64 := (*int64)(u) // misaligned shouldPanic(t, "Load64", func() { atomic.Load64(up64) }) shouldPanic(t, "Loadint64", func() { atomic.Loadint64(p64) }) diff --git a/test/codegen/stack.go b/test/codegen/stack.go index da5ef24e13165f..4469b574491bcb 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -11,22 +11,81 @@ import "runtime" // This file contains code generation tests related to the use of the // stack. -// check that stack stores are optimized away - -// 386:"TEXT\t.*, [$]0-4" -// amd64:"TEXT\t.*, [$]0-8" -// arm:"TEXT\t.*, [$]-4-4" -// arm64:"TEXT\t.*, [$]-8-8" -// s390x:"TEXT\t.*, [$]0-8" -// ppc64le:"TEXT\t.*, [$]0-8" -// mips:"TEXT\t.*, [$]-4-4" +// Check that stack stores are optimized away. + +// 386:"TEXT\t.*, [$]0-" +// amd64:"TEXT\t.*, [$]0-" +// arm:"TEXT\t.*, [$]-4-" +// arm64:"TEXT\t.*, [$]-8-" +// mips:"TEXT\t.*, [$]-4-" +// ppc64le:"TEXT\t.*, [$]0-" +// s390x:"TEXT\t.*, [$]0-" func StackStore() int { var x int return *(&x) } +type T struct { + A, B, C, D int // keep exported fields + x, y, z int // reset unexported fields +} + +// Check that large structs are cleared directly (issue #24416). + +// 386:"TEXT\t.*, [$]0-" +// amd64:"TEXT\t.*, [$]0-" +// arm:"TEXT\t.*, [$]0-" (spills return address) +// arm64:"TEXT\t.*, [$]-8-" +// mips:"TEXT\t.*, [$]-4-" +// ppc64le:"TEXT\t.*, [$]0-" +// s390x:"TEXT\t.*, [$]0-" +func ZeroLargeStruct(x *T) { + t := T{} + *x = t +} + +// Check that structs are partially initialised directly (issue #24386). + +// Notes: +// - 386 fails due to spilling a register +// amd64:"TEXT\t.*, [$]0-" +// arm:"TEXT\t.*, [$]0-" (spills return address) +// arm64:"TEXT\t.*, [$]-8-" +// ppc64le:"TEXT\t.*, [$]0-" +// s390x:"TEXT\t.*, [$]0-" +// Note: that 386 currently has to spill a register. 
+func KeepWanted(t *T) { + *t = T{A: t.A, B: t.B, C: t.C, D: t.D} +} + +// Check that small array operations avoid using the stack (issue #15925). + +// Notes: +// - 386 fails due to spilling a register +// - arm & mips fail due to softfloat calls +// amd64:"TEXT\t.*, [$]0-" +// arm64:"TEXT\t.*, [$]-8-" +// ppc64le:"TEXT\t.*, [$]0-" +// s390x:"TEXT\t.*, [$]0-" +func ArrayAdd64(a, b [4]float64) [4]float64 { + return [4]float64{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]} +} + +// Check that small array initialization avoids using the stack. + +// 386:"TEXT\t.*, [$]0-" +// amd64:"TEXT\t.*, [$]0-" +// arm:"TEXT\t.*, [$]0-" (spills return address) +// arm64:"TEXT\t.*, [$]-8-" +// mips:"TEXT\t.*, [$]-4-" +// ppc64le:"TEXT\t.*, [$]0-" +// s390x:"TEXT\t.*, [$]0-" +func ArrayInit(i, j int) [4]int { + return [4]int{i, 0, j, 0} +} + // Check that assembly output has matching offset and base register -// (Issue #21064). +// (issue #21064). // amd64:`.*b\+24\(SP\)` // arm:`.*b\+4\(FP\)` diff --git a/test/fixedbugs/issue20529.go b/test/fixedbugs/issue20529.go index cd0c23da036f8c..669064c2eaf381 100644 --- a/test/fixedbugs/issue20529.go +++ b/test/fixedbugs/issue20529.go @@ -13,6 +13,9 @@ package p +import "runtime" + func f() { // ERROR "stack frame too large" - _ = [][]int{1e9: []int{}} + x := [][]int{1e9: []int{}} + runtime.KeepAlive(x) }
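
For anyone experimenting with this change locally: the new codegen tests
above assert on frame sizes, but the effect is also easy to observe by
hand. The sketch below is illustrative only and is not part of this CL;
the package name and identifiers (composite, Pair, Reset, Init) are made
up, and the comments describe the expected behaviour under the new
Move/Zero propagation rules rather than a guarantee. Compiling it with
go build -gcflags=-S before and after this change should show the stack
temporaries disappearing from the generated assembly.

// Illustrative sketch, not part of this CL.
package composite

// Pair mirrors the exported/unexported field split of the T type used in
// test/codegen/stack.go: exported fields are kept, unexported fields are
// reset to zero.
type Pair struct {
	A, B int // kept
	x, y int // cleared
}

// Reset keeps the wanted fields of *p and clears the rest. With the new
// rules the kept fields should be stored directly into *p instead of
// being staged in a stack temporary (same shape as KeepWanted above).
func Reset(p *Pair) {
	*p = Pair{A: p.A, B: p.B}
}

// Init partially initialises a small array; the variable elements should
// be written straight into the return value (same shape as ArrayInit).
func Init(i, j int) [4]int {
	return [4]int{i, 0, j, 0}
}

The frame-size annotations added to test/codegen/stack.go remain the
authoritative check; the sketch is only convenient for manual inspection
of the assembly.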