diff --git a/misc/ios/detect.go b/misc/ios/detect.go index 2594185c11e86..1d47e47c86097 100644 --- a/misc/ios/detect.go +++ b/misc/ios/detect.go @@ -33,9 +33,9 @@ func main() { fail("did not find mobile provision matching device udids %q", udids) } - fmt.Println("Available provisioning profiles below.") - fmt.Println("NOTE: Any existing app on the device with the app id specified by GOIOS_APP_ID") - fmt.Println("will be overwritten when running Go programs.") + fmt.Println("# Available provisioning profiles below.") + fmt.Println("# NOTE: Any existing app on the device with the app id specified by GOIOS_APP_ID") + fmt.Println("# will be overwritten when running Go programs.") for _, mp := range mps { fmt.Println() f, err := ioutil.TempFile("", "go_ios_detect_") diff --git a/misc/wasm/wasm_exec.js b/misc/wasm/wasm_exec.js index 78eb306253d66..e47663783e628 100644 --- a/misc/wasm/wasm_exec.js +++ b/misc/wasm/wasm_exec.js @@ -3,8 +3,18 @@ // license that can be found in the LICENSE file. (() => { + if (typeof global !== "undefined") { + // global already exists + } else if (typeof window !== "undefined") { + window.global = window; + } else if (typeof self !== "undefined") { + self.global = self; + } else { + throw new Error("cannot export Go (neither global, window nor self is defined)"); + } + // Map web browser API and Node.js API to a single common API (preferring web standards over Node.js API). - const isNodeJS = typeof process !== "undefined"; + const isNodeJS = global.process && global.process.title === "node"; if (isNodeJS) { global.require = require; global.fs = require("fs"); @@ -27,14 +37,6 @@ global.TextEncoder = util.TextEncoder; global.TextDecoder = util.TextDecoder; } else { - if (typeof window !== "undefined") { - window.global = window; - } else if (typeof self !== "undefined") { - self.global = self; - } else { - throw new Error("cannot export Go (neither window nor self is defined)"); - } - let outputBuf = ""; global.fs = { constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused diff --git a/src/cmd/asm/internal/asm/testdata/s390x.s b/src/cmd/asm/internal/asm/testdata/s390x.s index ad70d2af44822..0e50303d70e34 100644 --- a/src/cmd/asm/internal/asm/testdata/s390x.s +++ b/src/cmd/asm/internal/asm/testdata/s390x.s @@ -369,6 +369,9 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16- VSTEH $7, V31, (R2) // e7f020007809 VSTEB $15, V29, 4094(R12) // e7d0cffef808 VMSLG V21, V22, V23, V24 // e78563007fb8 + VMSLEG V21, V22, V23, V24 // e78563807fb8 + VMSLOG V21, V22, V23, V24 // e78563407fb8 + VMSLEOG V21, V22, V23, V24 // e78563c07fb8 RET RET foo(SB) diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index b6f059001f7b0..5bcb9754d71e5 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -211,6 +211,8 @@ var exportHeader = flag.String("exportheader", "", "where to write export header var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo") var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo") var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo") +var gccgoMangleCheckDone bool +var gccgoNewmanglingInEffect bool var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code") var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code") var goarch, goos string diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 8a26d5c063210..a93ff365b019c 
100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -15,7 +15,9 @@ import ( "go/printer" "go/token" "io" + "io/ioutil" "os" + "os/exec" "path/filepath" "regexp" "sort" @@ -1186,12 +1188,91 @@ func (p *Package) writeExportHeader(fgcch io.Writer) { fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog()) } -// Return the package prefix when using gccgo. -func (p *Package) gccgoSymbolPrefix() string { - if !*gccgo { - return "" +// gccgoUsesNewMangling returns whether gccgo uses the new collision-free +// packagepath mangling scheme (see determineGccgoManglingScheme for more +// info). +func gccgoUsesNewMangling() bool { + if !gccgoMangleCheckDone { + gccgoNewmanglingInEffect = determineGccgoManglingScheme() + gccgoMangleCheckDone = true + } + return gccgoNewmanglingInEffect +} + +const mangleCheckCode = ` +package läufer +func Run(x int) int { + return 1 +} +` + +// determineGccgoManglingScheme performs a runtime test to see which +// flavor of packagepath mangling gccgo is using. Older versions of +// gccgo use a simple mangling scheme where there can be collisions +// between packages whose paths are different but mangle to the same +// string. More recent versions of gccgo use a new mangler that avoids +// these collisions. Return value is whether gccgo uses the new mangling. +func determineGccgoManglingScheme() bool { + + // Emit a small Go file for gccgo to compile. + filepat := "*_gccgo_manglecheck.go" + var f *os.File + var err error + if f, err = ioutil.TempFile(*objDir, filepat); err != nil { + fatalf("%v", err) + } + gofilename := f.Name() + defer os.Remove(gofilename) + + if err = ioutil.WriteFile(gofilename, []byte(mangleCheckCode), 0666); err != nil { + fatalf("%v", err) + } + + // Compile with gccgo, capturing generated assembly. + gccgocmd := os.Getenv("GCCGO") + if gccgocmd == "" { + gpath, gerr := exec.LookPath("gccgo") + if gerr != nil { + fatalf("unable to locate gccgo: %v", gerr) + } + gccgocmd = gpath + } + cmd := exec.Command(gccgocmd, "-S", "-o", "-", gofilename) + buf, cerr := cmd.CombinedOutput() + if cerr != nil { + fatalf("%s", err) + } + + // New mangling: expect go.l..u00e4ufer.Run + // Old mangling: expect go.l__ufer.Run + return regexp.MustCompile(`go\.l\.\.u00e4ufer\.Run`).Match(buf) +} + +// gccgoPkgpathToSymbolNew converts a package path to a gccgo-style +// package symbol. +func gccgoPkgpathToSymbolNew(ppath string) string { + bsl := []byte{} + changed := false + for _, c := range []byte(ppath) { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', + '0' <= c && c <= '9', '_' == c: + bsl = append(bsl, c) + default: + changed = true + encbytes := []byte(fmt.Sprintf("..z%02x", c)) + bsl = append(bsl, encbytes...) + } + } + if !changed { + return ppath } + return string(bsl) +} +// gccgoPkgpathToSymbolOld converts a package path to a gccgo-style +// package symbol using the older mangling scheme. +func gccgoPkgpathToSymbolOld(ppath string) string { clean := func(r rune) rune { switch { case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', @@ -1200,14 +1281,32 @@ func (p *Package) gccgoSymbolPrefix() string { } return '_' } + return strings.Map(clean, ppath) +} + +// gccgoPkgpathToSymbol converts a package path to a mangled packagepath +// symbol. +func gccgoPkgpathToSymbol(ppath string) string { + if gccgoUsesNewMangling() { + return gccgoPkgpathToSymbolNew(ppath) + } else { + return gccgoPkgpathToSymbolOld(ppath) + } +} + +// Return the package prefix when using gccgo. 
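A quick worked trace of the two mangling flavors that determineGccgoManglingScheme distinguishes, before the rest of the gccgo prefix code below: the new scheme escapes every byte outside [A-Za-z0-9_] as a "..zXX" hex sequence, while the old scheme simply mapped such runes to '_'. The sketch mirrors gccgoPkgpathToSymbolNew above and is illustrative only, not part of the patch.

```go
package main

import "fmt"

// mangleNew mirrors the byte-wise "..zXX" escaping used by the new scheme.
func mangleNew(ppath string) string {
	var out []byte
	for _, c := range []byte(ppath) {
		switch {
		case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z',
			'0' <= c && c <= '9', c == '_':
			out = append(out, c)
		default:
			out = append(out, []byte(fmt.Sprintf("..z%02x", c))...)
		}
	}
	return string(out)
}

func main() {
	fmt.Println(mangleNew("example.com/foo")) // example..z2ecom..z2ffoo
	fmt.Println(mangleNew("läufer"))          // l..zc3..za4ufer ('ä' is two UTF-8 bytes)
}
```

Under the old scheme both '.' and '/' would have collapsed to '_', so distinct import paths could mangle to the same symbol, which is exactly the collision the runtime probe is checking for.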
+func (p *Package) gccgoSymbolPrefix() string { + if !*gccgo { + return "" + } if *gccgopkgpath != "" { - return strings.Map(clean, *gccgopkgpath) + return gccgoPkgpathToSymbol(*gccgopkgpath) } if *gccgoprefix == "" && p.PackageName == "main" { return "main" } - prefix := strings.Map(clean, *gccgoprefix) + prefix := gccgoPkgpathToSymbol(*gccgoprefix) if prefix == "" { prefix = "go" } diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 749dbf1d5d555..144a1f51f8fef 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -360,6 +360,41 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r + case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ: + r := v.Reg0() + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + switch r { + case r0: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case r1: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r0 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + default: + v.Fatalf("output not in same register as an input %s", v.LongString()) + } + + case ssa.OpAMD64SUBQborrow, ssa.OpAMD64SBBQ: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.OpAMD64ADDQconstcarry, ssa.OpAMD64ADCQconst, ssa.OpAMD64SUBQconstborrow, ssa.OpAMD64SBBQconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst: r := v.Reg() a := v.Args[0].Reg() @@ -946,6 +981,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG p.To.Reg = r + + case ssa.OpAMD64NEGLflags: + r := v.Reg0() + if r != v.Args[0].Reg() { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) + } + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 516c33d0bbb4d..22201e504460d 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -863,7 +863,7 @@ func methodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sy // Add a method, declared as a function. // - msym is the method symbol // - t is function type (with receiver) -// Returns a pointer to the existing or added Field. +// Returns a pointer to the existing or added Field; or nil if there's an error. func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { Fatalf("no method symbol") @@ -918,6 +918,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F for _, f := range mt.Fields().Slice() { if f.Sym == msym { yyerror("type %v has both field and method named %v", mt, msym) + f.SetBroke(true) return nil } } @@ -927,7 +928,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.F if msym.Name != f.Sym.Name { continue } - // eqtype only checks that incoming and result parameters match, + // types.Identical only checks that incoming and result parameters match, // so explicitly check that the receiver parameters match too. 
if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) { yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index ad43b3caec1bf..2310b1e5fd2e9 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -621,23 +621,23 @@ func (e *EscState) escloopdepth(n *Node) { switch n.Op { case OLABEL: - if n.Left == nil || n.Left.Sym == nil { + if n.Sym == nil { Fatalf("esc:label without label: %+v", n) } // Walk will complain about this label being already defined, but that's not until // after escape analysis. in the future, maybe pull label & goto analysis out of walk and put before esc - n.Left.Sym.Label = asTypesNode(&nonlooping) + n.Sym.Label = asTypesNode(&nonlooping) case OGOTO: - if n.Left == nil || n.Left.Sym == nil { + if n.Sym == nil { Fatalf("esc:goto without label: %+v", n) } // If we come past one that's uninitialized, this must be a (harmless) forward jump // but if it's set to nonlooping the label must have preceded this goto. - if asNode(n.Left.Sym.Label) == &nonlooping { - n.Left.Sym.Label = asTypesNode(&looping) + if asNode(n.Sym.Label) == &nonlooping { + n.Sym.Label = asTypesNode(&looping) } } @@ -851,18 +851,19 @@ opSwitch: } case OLABEL: - if asNode(n.Left.Sym.Label) == &nonlooping { + switch asNode(n.Sym.Label) { + case &nonlooping: if Debug['m'] > 2 { fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n) } - } else if asNode(n.Left.Sym.Label) == &looping { + case &looping: if Debug['m'] > 2 { fmt.Printf("%v: %v looping label\n", linestr(lineno), n) } e.loopdepth++ } - n.Left.Sym.Label = nil + n.Sym.Label = nil case ORANGE: if n.List.Len() >= 2 { diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 23ed3f7844cfa..0f4b6c9936b70 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1045,8 +1045,8 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) { mode.Fprintf(s, ": %v", n.Nbody) case OBREAK, OCONTINUE, OGOTO, OFALL: - if n.Left != nil { - mode.Fprintf(s, "%#v %v", n.Op, n.Left) + if n.Sym != nil { + mode.Fprintf(s, "%#v %v", n.Op, n.Sym) } else { mode.Fprintf(s, "%#v", n.Op) } @@ -1055,7 +1055,7 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) { break case OLABEL: - mode.Fprintf(s, "%v: ", n.Left) + mode.Fprintf(s, "%v: ", n.Sym) } if extrablock { diff --git a/src/cmd/compile/internal/gc/iexport.go b/src/cmd/compile/internal/gc/iexport.go index b141e5fc09a91..d21378df4ab71 100644 --- a/src/cmd/compile/internal/gc/iexport.go +++ b/src/cmd/compile/internal/gc/iexport.go @@ -1102,7 +1102,7 @@ func (w *exportWriter) stmt(n *Node) { case OGOTO, OLABEL: w.op(op) w.pos(n.Pos) - w.expr(n.Left) + w.string(n.Sym.Name) default: Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op) diff --git a/src/cmd/compile/internal/gc/iimport.go b/src/cmd/compile/internal/gc/iimport.go index 4fea314263e71..8614c7a14f383 100644 --- a/src/cmd/compile/internal/gc/iimport.go +++ b/src/cmd/compile/internal/gc/iimport.go @@ -1043,7 +1043,9 @@ func (r *importReader) node() *Node { // unreachable - not emitted by exporter case OGOTO, OLABEL: - return nodl(r.pos(), op, newname(r.expr().Sym), nil) + n := nodl(r.pos(), op, nil, nil) + n.Sym = lookup(r.string()) + return n case OEND: return nil diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index ae37c956a2520..0b91d4918844c 100644 --- 
a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -1072,7 +1072,7 @@ func mkinlcall(n, fn *Node, maxCost int32) *Node { body := subst.list(asNodes(fn.Func.Inl.Body)) - lab := nod(OLABEL, retlabel, nil) + lab := nodSym(OLABEL, nil, retlabel) body = append(body, lab) typecheckslice(body, Etop) @@ -1158,7 +1158,7 @@ func argvar(t *types.Type, i int) *Node { // function call. type inlsubst struct { // Target of the goto substituted in place of a return. - retlabel *Node + retlabel *types.Sym // Temporary result variables. retvars []*Node @@ -1218,7 +1218,7 @@ func (subst *inlsubst) node(n *Node) *Node { // dump("Return before substitution", n); case ORETURN: - m := nod(OGOTO, subst.retlabel, nil) + m := nodSym(OGOTO, nil, subst.retlabel) m.Ninit.Set(subst.list(n.Ninit)) if len(subst.retvars) != 0 && n.List.Len() != 0 { @@ -1245,8 +1245,8 @@ func (subst *inlsubst) node(n *Node) *Node { m := n.copy() m.Pos = subst.updatedPos(m.Pos) m.Ninit.Set(nil) - p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen) - m.Left = newname(lookup(p)) + p := fmt.Sprintf("%s·%d", n.Sym.Name, inlgen) + m.Sym = lookup(p) return m } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 68aac8b99ece9..339e8e08cda17 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -205,7 +205,6 @@ func Main(archInit func(*Arch)) { flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", true, "add location lists to DWARF in optimized mode") flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records") objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e']) - objabi.Flagcount("f", "debug stack frames", &Debug['f']) objabi.Flagcount("h", "halt on error", &Debug['h']) objabi.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap) objabi.Flagfn1("importcfg", "read import configuration from `file`", readImportCfg) @@ -478,9 +477,6 @@ func Main(archInit func(*Arch)) { finishUniverse() typecheckok = true - if Debug['f'] != 0 { - frame(1) - } // Process top-level declarations in phases. @@ -693,10 +689,14 @@ func Main(archInit func(*Arch)) { // Check whether any of the functions we have compiled have gigantic stack frames. 
obj.SortSlice(largeStackFrames, func(i, j int) bool { - return largeStackFrames[i].Before(largeStackFrames[j]) + return largeStackFrames[i].pos.Before(largeStackFrames[j].pos) }) - for _, largePos := range largeStackFrames { - yyerrorl(largePos, "stack frame too large (>1GB)") + for _, large := range largeStackFrames { + if large.callee != 0 { + yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20) + } else { + yyerrorl(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20) + } } if len(compilequeue) != 0 { diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 8a42fcefd1f2e..ca65c7ccca03f 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -819,13 +819,18 @@ func (p *noder) packname(expr syntax.Expr) *types.Sym { return name case *syntax.SelectorExpr: name := p.name(expr.X.(*syntax.Name)) + def := asNode(name.Def) + if def == nil { + yyerror("undefined: %v", name) + return name + } var pkg *types.Pkg - if asNode(name.Def) == nil || asNode(name.Def).Op != OPACK { + if def.Op != OPACK { yyerror("%v is not a package", name) pkg = localpkg } else { - asNode(name.Def).Name.SetUsed(true) - pkg = asNode(name.Def).Name.Pkg + def.Name.SetUsed(true) + pkg = def.Name.Pkg } return restrictlookup(expr.Sel.Value, pkg) } @@ -936,7 +941,7 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { } n := p.nod(stmt, op, nil, nil) if stmt.Label != nil { - n.Left = p.newname(stmt.Label) + n.Sym = p.name(stmt.Label) } return n case *syntax.CallStmt: @@ -1200,7 +1205,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []* } func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node { - lhs := p.nod(label, OLABEL, p.newname(label.Label), nil) + lhs := p.nodSym(label, OLABEL, nil, p.name(label.Label)) var ls *Node if label.Stmt != nil { // TODO(mdempsky): Should always be present. diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index e6bbf0440058d..01dacb783bf86 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -279,7 +279,7 @@ func compileSSA(fn *Node, worker int) { // Note: check arg size to fix issue 25507. if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type.ArgWidth() >= maxStackSize { largeStackFramesMu.Lock() - largeStackFrames = append(largeStackFrames, fn.Pos) + largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type.ArgWidth(), pos: fn.Pos}) largeStackFramesMu.Unlock() return } @@ -294,7 +294,8 @@ func compileSSA(fn *Node, worker int) { // the assembler may emit inscrutable complaints about invalid instructions. 
if pp.Text.To.Offset >= maxStackSize { largeStackFramesMu.Lock() - largeStackFrames = append(largeStackFrames, fn.Pos) + locals := f.Frontend().(*ssafn).stksize + largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos}) largeStackFramesMu.Unlock() return } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 303658a3e118b..549038e7d1445 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -845,7 +845,7 @@ func (s *state) stmt(n *Node) { } case OLABEL: - sym := n.Left.Sym + sym := n.Sym lab := s.label(sym) // Associate label with its control flow node, if any @@ -867,7 +867,7 @@ func (s *state) stmt(n *Node) { s.startBlock(lab.target) case OGOTO: - sym := n.Left.Sym + sym := n.Sym lab := s.label(sym) if lab.target == nil { @@ -1033,7 +1033,7 @@ func (s *state) stmt(n *Node) { case OCONTINUE, OBREAK: var to *ssa.Block - if n.Left == nil { + if n.Sym == nil { // plain break/continue switch n.Op { case OCONTINUE: @@ -1043,7 +1043,7 @@ func (s *state) stmt(n *Node) { } } else { // labeled break/continue; look up the target - sym := n.Left.Sym + sym := n.Sym lab := s.label(sym) switch n.Op { case OCONTINUE: @@ -3474,12 +3474,26 @@ func init() { addF("math/bits", "OnesCount", makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32), sys.AMD64) - alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64) addF("math/bits", "Mul64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1]) }, sys.AMD64, sys.ARM64, sys.PPC64) + alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64) + + addF("math/bits", "Add64", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) + }, + sys.AMD64) + alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64) + + addF("math/bits", "Sub64", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2]) + }, + sys.AMD64) + alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64) /******** sync/atomic ********/ @@ -5225,9 +5239,6 @@ func genssa(f *ssa.Func, pp *Progs) { } defframe(&s, e) - if Debug['f'] != 0 { - frame(0) - } f.HTMLWriter.Close() f.HTMLWriter = nil diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 8e643e6690a5b..df3bde86eace5 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -28,9 +28,17 @@ type Error struct { var errors []Error +// largeStack is info about a function whose stack frame is too large (rare). +type largeStack struct { + locals int64 + args int64 + callee int64 + pos src.XPos +} + var ( largeStackFramesMu sync.Mutex // protects largeStackFrames - largeStackFrames []src.XPos // positions of functions whose stack frames are too large (rare) + largeStackFrames []largeStack ) func errorexit() { @@ -234,7 +242,7 @@ func lookupN(prefix string, n int) *types.Sym { // to help with debugging. // It should begin with "." to avoid conflicts with // user labels. -func autolabel(prefix string) *Node { +func autolabel(prefix string) *types.Sym { if prefix[0] != '.' 
{ Fatalf("autolabel prefix must start with '.', have %q", prefix) } @@ -244,7 +252,7 @@ func autolabel(prefix string) *Node { } n := fn.Func.Label fn.Func.Label++ - return newname(lookupN(prefix, int(n))) + return lookupN(prefix, int(n)) } func restrictlookup(name string, pkg *types.Pkg) *types.Sym { @@ -954,36 +962,6 @@ func typehash(t *types.Type) uint32 { return binary.LittleEndian.Uint32(h[:4]) } -func frame(context int) { - if context != 0 { - fmt.Printf("--- external frame ---\n") - for _, n := range externdcl { - printframenode(n) - } - return - } - - if Curfn != nil { - fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym) - for _, ln := range Curfn.Func.Dcl { - printframenode(ln) - } - } -} - -func printframenode(n *Node) { - w := int64(-1) - if n.Type != nil { - w = n.Type.Width - } - switch n.Op { - case ONAME: - fmt.Printf("%v %v G%d %v width=%d\n", n.Op, n.Sym, n.Name.Vargen, n.Type, w) - case OTYPE: - fmt.Printf("%v %v width=%d\n", n.Op, n.Type, w) - } -} - // updateHasCall checks whether expression n contains any function // calls and sets the n.HasCall flag if so. func updateHasCall(n *Node) { diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 965c54566085d..f1c153937fba5 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -421,7 +421,8 @@ func casebody(sw *Node, typeswvar *Node) { n.Op = OCASE needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL - jmp := nod(OGOTO, autolabel(".s"), nil) + lbl := autolabel(".s") + jmp := nodSym(OGOTO, nil, lbl) switch n.List.Len() { case 0: // default @@ -486,7 +487,7 @@ func casebody(sw *Node, typeswvar *Node) { } } - stat = append(stat, nod(OLABEL, jmp.Left, nil)) + stat = append(stat, nodSym(OLABEL, nil, lbl)) if typeswvar != nil && needvar && n.Rlist.Len() != 0 { l := []*Node{ nod(ODCL, n.Rlist.First(), nil), @@ -778,10 +779,10 @@ func (s *typeSwitch) walk(sw *Node) { } else { // Jump to default case. lbl := autolabel(".s") - i.Nbody.Set1(nod(OGOTO, lbl, nil)) + i.Nbody.Set1(nodSym(OGOTO, nil, lbl)) // Wrap default case with label. blk := nod(OBLOCK, nil, nil) - blk.List.Set2(nod(OLABEL, lbl, nil), def) + blk.List.Set2(nodSym(OLABEL, nil, lbl), def) def = blk } i.Left = typecheck(i.Left, Erv) diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 9ea727fa64be5..87b6d036c5e58 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -574,7 +574,7 @@ const ( OXXX Op = iota // names - ONAME // var, const or func name + ONAME // var or func name ONONAME // unnamed arg or return value: f(int, string) (int, error) { etc } OTYPE // type name OPACK // import @@ -698,10 +698,10 @@ const ( // statements OBLOCK // { List } (block of code) - OBREAK // break + OBREAK // break [Sym] OCASE // case Left or List[0]..List[1]: Nbody (select case after processing; Left==nil and List==nil means default) OXCASE // case List: Nbody (select case before processing; List==nil means default) - OCONTINUE // continue + OCONTINUE // continue [Sym] ODEFER // defer Left (Left must be call) OEMPTY // no-op (empty statement) OFALL // fallthrough @@ -716,9 +716,9 @@ const ( // } // OFORUNTIL is created by walk. There's no way to write this in Go code. 
OFORUNTIL - OGOTO // goto Left + OGOTO // goto Sym OIF // if Ninit; Left { Nbody } else { Rlist } - OLABEL // Left: + OLABEL // Sym: OPROC // go Left (Left must be call) ORANGE // for List = range Right { Nbody } ORETURN // return List diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 0bbd89f05e4a1..cf26d84521904 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1984,7 +1984,7 @@ func typecheck1(n *Node, top int) *Node { case OLABEL: ok |= Etop decldepth++ - if n.Left.Sym.IsBlank() { + if n.Sym.IsBlank() { // Empty identifier is valid but useless. // Eliminate now to simplify life later. // See issues 7538, 11589, 11593. @@ -2443,7 +2443,7 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field { } if f1 != nil { - if dostrcmp > 1 { + if dostrcmp > 1 || f1.Broke() { // Already in the process of diagnosing an error. return f1 } @@ -3831,12 +3831,12 @@ func markbreak(n *Node, implicit *Node) { switch n.Op { case OBREAK: - if n.Left == nil { + if n.Sym == nil { if implicit != nil { implicit.SetHasBreak(true) } } else { - lab := asNode(n.Left.Sym.Label) + lab := asNode(n.Sym.Label) if lab != nil { lab.SetHasBreak(true) } @@ -3864,9 +3864,9 @@ func markbreaklist(l Nodes, implicit *Node) { if n.Op == OLABEL && i+1 < len(s) && n.Name.Defn == s[i+1] { switch n.Name.Defn.Op { case OFOR, OFORUNTIL, OSWITCH, OTYPESW, OSELECT, ORANGE: - n.Left.Sym.Label = asTypesNode(n.Name.Defn) + n.Sym.Label = asTypesNode(n.Name.Defn) markbreak(n.Name.Defn, n.Name.Defn) - n.Left.Sym.Label = nil + n.Sym.Label = nil i++ continue } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 9ec6f8286ef97..6c1110a2945dd 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1772,10 +1772,22 @@ func walkCall(n *Node, init *Nodes) { var tempAssigns []*Node for i, arg := range args { updateHasCall(arg) - if instrumenting || arg.HasCall() { + // Determine param type. 
+ var t *types.Type + if n.Op == OCALLMETH { + if i == 0 { + t = n.Left.Type.Recv().Type + } else { + t = params.Field(i - 1).Type + } + } else { + t = params.Field(i).Type + } + if instrumenting || fncall(arg, t) { // make assignment of fncall to tempname - tmp := temp(arg.Type) + tmp := temp(t) a := nod(OAS, tmp, arg) + a = convas(a, init) tempAssigns = append(tempAssigns, a) // replace arg with temp args[i] = tmp diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index e1680ec37ce56..2ee0b2a928889 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -771,14 +771,14 @@ (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) -> (MOVSDstoreidx8 [int64(int32(c+8*d))] {sym} ptr idx val mem) // Merge load/store to op -((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem) -((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) && canMergeLoad(v, l, x) && clobber(l) -> +((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem) +((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|AND|OR|XOR|SUB|MUL)Lloadidx4 x [off] {sym} ptr idx mem) ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> ((ADD|SUB|MUL|AND|OR|XOR)Lloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem) -((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) @@ -1217,8 +1217,8 @@ (MOVSSconst [c]) && config.ctxt.Flag_shared -> (MOVSSconst2 (MOVSSconst1 [c])) (MOVSDconst [c]) && config.ctxt.Flag_shared -> (MOVSDconst2 (MOVSDconst1 [c])) -(CMP(L|W|B) l:(MOV(L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (CMP(L|W|B)load {sym} [off] ptr x mem) -(CMP(L|W|B) x l:(MOV(L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (InvertFlags (CMP(L|W|B)load {sym} [off] ptr x mem)) +(CMP(L|W|B) l:(MOV(L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) -> (CMP(L|W|B)load {sym} [off] ptr x mem) +(CMP(L|W|B) x l:(MOV(L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) -> (InvertFlags (CMP(L|W|B)load {sym} [off] ptr x mem)) (CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c]) && l.Uses == 1 diff --git 
a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 86f7d921e4e0d..3a40d98495437 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -29,6 +29,28 @@ (Div8u x y) -> (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) (Div(32|64)F x y) -> (DIVS(S|D) x y) +(Select0 (Add64carry x y c)) -> + (Select0 (ADCQ x y (Select1 (NEGLflags c)))) +(Select1 (Add64carry x y c)) -> + (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) +(Select0 (Sub64borrow x y c)) -> + (Select0 (SBBQ x y (Select1 (NEGLflags c)))) +(Select1 (Sub64borrow x y c)) -> + (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) + +// Optimize ADCQ and friends +(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) -> (ADCQconst x [c] carry) +(ADCQ x y (FlagEQ)) -> (ADDQcarry x y) +(ADCQconst x [c] (FlagEQ)) -> (ADDQconstcarry x [c]) +(ADDQcarry x (MOVQconst [c])) && is32Bit(c) -> (ADDQconstcarry x [c]) +(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) -> (SBBQconst x [c] borrow) +(SBBQ x y (FlagEQ)) -> (SUBQborrow x y) +(SBBQconst x [c] (FlagEQ)) -> (SUBQconstborrow x [c]) +(SUBQborrow x (MOVQconst [c])) && is32Bit(c) -> (SUBQconstborrow x [c]) +(Select1 (NEGLflags (MOVQconst [0]))) -> (FlagEQ) +(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) -> x + + (Mul64uhilo x y) -> (MULQU2 x y) (Div128u xhi xlo y) -> (DIVQU2 xhi xlo y) @@ -2340,10 +2362,10 @@ // Merge load and op // TODO: add indexed variants? -((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem) -((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) -((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem) +((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) (MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) (MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) -> ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem) @@ -2492,8 +2514,8 @@ // Fold loads into compares // Note: these may be undone by the flagalloc pass. 
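The Add64carry/Sub64borrow lowering above feeds the Go-level carry bit (0 or 1) into the CPU carry flag by negating it: NEGL computes 0-c and sets the carry/borrow flag exactly when c is nonzero, and SBBQcarrymask plus NEGQ turn the flag back into a 0-or-1 carry-out. A tiny runnable check of that arithmetic identity using the portable math/bits API, as an illustration only (not part of the patch):

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// The borrow out of 0-c is 1 exactly when c != 0, so for a carry bit
	// c in {0,1} it reproduces c. This is the fact the NEGLflags rules rely on.
	for _, c := range []uint64{0, 1} {
		_, borrow := bits.Sub64(0, c, 0)
		fmt.Printf("c=%d  borrow out of 0-c = %d\n", c, borrow)
	}
}
```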
-(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (CMP(Q|L|W|B)load {sym} [off] ptr x mem) -(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem)) +(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) -> (CMP(Q|L|W|B)load {sym} [off] ptr x mem) +(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) -> (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem)) (CMP(Q|L|W|B)const l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) [c]) && l.Uses == 1 diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 29f208f0d032b..bd1339b43ab66 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -107,16 +107,18 @@ func init() { // Common regInfo var ( - gp01 = regInfo{inputs: nil, outputs: gponly} - gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} - gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} - gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} - gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} - gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} - gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} - gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} - gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}} - gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} + gp01 = regInfo{inputs: nil, outputs: gponly} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} + gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} + gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} + gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}} + gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} + gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}} gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} gp1flags = regInfo{inputs: []regMask{gpsp}} @@ -124,7 +126,8 @@ func init() { gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} flagsgp = regInfo{inputs: nil, outputs: gponly} - gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} + gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} + gp1flags1flags = regInfo{inputs: []regMask{gp, 0}, outputs: []regMask{gp, 0}} readflags = regInfo{inputs: nil, outputs: gponly} flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}} @@ -229,6 +232,21 @@ func init() { {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] + {name: "NEGLflags", argLength: 1, reg: gp11flags, typ: "(UInt32,Flags)", asm: "NEGL", resultInArg0: true}, // -arg0, flags set for 0-arg0. 
+ // The following 4 add opcodes return the low 64 bits of the sum in the first result and + // the carry (the 65th bit) in the carry flag. + {name: "ADDQcarry", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDQ", commutative: true, resultInArg0: true}, // r = arg0+arg1 + {name: "ADCQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", commutative: true, resultInArg0: true}, // r = arg0+arg1+carry(arg2) + {name: "ADDQconstcarry", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint + {name: "ADCQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint+carry(arg1) + + // The following 4 add opcodes return the low 64 bits of the difference in the first result and + // the borrow (if the result is negative) in the carry flag. + {name: "SUBQborrow", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBQ", resultInArg0: true}, // r = arg0-arg1 + {name: "SBBQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", resultInArg0: true}, // r = arg0-(arg1+carry(arg2)) + {name: "SUBQconstborrow", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "SUBQ", aux: "Int32", resultInArg0: true}, // r = arg0-auxint + {name: "SBBQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", aux: "Int32", resultInArg0: true}, // r = arg0-(auxint+carry(arg1)) + {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo) {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index 47766fa77dd85..de2c09c2d1de2 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -1125,71 +1125,71 @@ // Exclude global data (SB) because these instructions cannot handle relative addresses. // TODO(mundaym): use LARL in the assembler to handle SB? // TODO(mundaym): indexed versions of these? 
-(ADD x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ADD x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ADDload [off] {sym} x ptr mem) -(ADD g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ADD g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ADDload [off] {sym} x ptr mem) -(ADDW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ADDW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ADDWload [off] {sym} x ptr mem) -(ADDW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ADDW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ADDWload [off] {sym} x ptr mem) -(ADDW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ADDW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ADDWload [off] {sym} x ptr mem) -(ADDW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ADDW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ADDWload [off] {sym} x ptr mem) -(MULLD x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(MULLD x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (MULLDload [off] {sym} x ptr mem) -(MULLD g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(MULLD g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (MULLDload [off] {sym} x ptr mem) -(MULLW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(MULLW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (MULLWload [off] {sym} x ptr mem) -(MULLW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(MULLW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (MULLWload [off] {sym} x ptr mem) -(MULLW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(MULLW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (MULLWload [off] {sym} x ptr mem) -(MULLW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(MULLW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (MULLWload [off] {sym} x ptr mem) -(SUB x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(SUB x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && 
clobber(g) -> (SUBload [off] {sym} x ptr mem) -(SUBW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(SUBW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (SUBWload [off] {sym} x ptr mem) -(SUBW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(SUBW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (SUBWload [off] {sym} x ptr mem) -(AND x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(AND x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ANDload [off] {sym} x ptr mem) -(AND g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(AND g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ANDload [off] {sym} x ptr mem) -(ANDW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ANDW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ANDWload [off] {sym} x ptr mem) -(ANDW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ANDW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ANDWload [off] {sym} x ptr mem) -(ANDW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ANDW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ANDWload [off] {sym} x ptr mem) -(ANDW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ANDW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ANDWload [off] {sym} x ptr mem) -(OR x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(OR x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ORload [off] {sym} x ptr mem) -(OR g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(OR g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ORload [off] {sym} x ptr mem) -(ORW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ORW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ORWload [off] {sym} x ptr mem) -(ORW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ORW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ORWload [off] {sym} x ptr mem) -(ORW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ORW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) 
&& clobber(g) -> (ORWload [off] {sym} x ptr mem) -(ORW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(ORW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (ORWload [off] {sym} x ptr mem) -(XOR x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(XOR x g:(MOVDload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (XORload [off] {sym} x ptr mem) -(XOR g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(XOR g:(MOVDload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (XORload [off] {sym} x ptr mem) -(XORW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(XORW x g:(MOVWload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (XORWload [off] {sym} x ptr mem) -(XORW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(XORW g:(MOVWload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (XORWload [off] {sym} x ptr mem) -(XORW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(XORW x g:(MOVWZload [off] {sym} ptr mem)) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (XORWload [off] {sym} x ptr mem) -(XORW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) +(XORW g:(MOVWZload [off] {sym} ptr mem) x) && ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) -> (XORWload [off] {sym} x ptr mem) // Combine constant stores into larger (unaligned) stores. diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 7ff6da1b01f26..522ccbf893f52 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -491,6 +491,9 @@ var genericOps = []opData{ {name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry) {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1) + {name: "Add64carry", argLength: 3, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 + arg1 + arg2, arg2 must be 0 or 1. returns (value, value>>64) + {name: "Sub64borrow", argLength: 3, typ: "(UInt64,UInt64)"}, // arg0 - (arg1 + arg2), arg2 must be 0 or 1. returns (value, value>>64&1) + {name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0 {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0 {name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size. 
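The two generic ops just added (Add64carry, Sub64borrow) are what the new math/bits.Add64 and math/bits.Sub64 intrinsics registered in ssa.go lower to on amd64. A minimal, portable example of the multi-word arithmetic pattern they are meant to speed up; the helper names here are made up for the illustration:

```go
package main

import (
	"fmt"
	"math/bits"
)

// add128 adds two 128-bit values given as (hi, lo) pairs, chaining the carry
// from the low word into the high word.
func add128(xhi, xlo, yhi, ylo uint64) (hi, lo uint64) {
	lo, carry := bits.Add64(xlo, ylo, 0)
	hi, _ = bits.Add64(xhi, yhi, carry)
	return
}

// sub128 subtracts y from x the same way, chaining the borrow.
func sub128(xhi, xlo, yhi, ylo uint64) (hi, lo uint64) {
	lo, borrow := bits.Sub64(xlo, ylo, 0)
	hi, _ = bits.Sub64(xhi, yhi, borrow)
	return
}

func main() {
	hi, lo := add128(0, ^uint64(0), 0, 1) // (2^64-1) + 1 = 2^64
	fmt.Printf("add: hi=%d lo=%d\n", hi, lo)
	hi, lo = sub128(1, 0, 0, 1) // 2^64 - 1
	fmt.Printf("sub: hi=%d lo=%#x\n", hi, lo)
}
```

With the intrinsics in place, each Add64/Sub64 pair can compile down to an ADDQ/ADCQ or SUBQ/SBBQ chain rather than the longer compare-based fallback sequence.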
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index de52523afd3c7..faaad974c4a39 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -160,10 +160,12 @@ func genRules(arch arch) { fmt.Fprintln(w, "// generated with: cd gen; go run *.go") fmt.Fprintln(w) fmt.Fprintln(w, "package ssa") + fmt.Fprintln(w, "import \"fmt\"") fmt.Fprintln(w, "import \"math\"") fmt.Fprintln(w, "import \"cmd/internal/obj\"") fmt.Fprintln(w, "import \"cmd/internal/objabi\"") fmt.Fprintln(w, "import \"cmd/compile/internal/types\"") + fmt.Fprintln(w, "var _ = fmt.Println // in case not otherwise used") fmt.Fprintln(w, "var _ = math.MinInt8 // in case not otherwise used") fmt.Fprintln(w, "var _ = obj.ANOP // in case not otherwise used") fmt.Fprintln(w, "var _ = objabi.GOROOT // in case not otherwise used") diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index 499fda5af56c3..d76d7c7b33465 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -50,10 +50,14 @@ body { font-family: Arial, sans-serif; } +h1 { + font-size: 18px; + display: inline-block; + margin: 0 1em .5em 0; +} + #helplink { - margin-bottom: 15px; - display: block; - margin-top: -15px; + display: inline-block; } #help { diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 1435caf26a6ab..03837b5f63e82 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -523,6 +523,15 @@ const ( OpAMD64DIVQU OpAMD64DIVLU OpAMD64DIVWU + OpAMD64NEGLflags + OpAMD64ADDQcarry + OpAMD64ADCQ + OpAMD64ADDQconstcarry + OpAMD64ADCQconst + OpAMD64SUBQborrow + OpAMD64SBBQ + OpAMD64SUBQconstborrow + OpAMD64SBBQconst OpAMD64MULQU2 OpAMD64DIVQU2 OpAMD64ANDQ @@ -2393,6 +2402,8 @@ const ( OpAdd32withcarry OpSub32carry OpSub32withcarry + OpAdd64carry + OpSub64borrow OpSignmask OpZeromask OpSlicemask @@ -6540,6 +6551,151 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "NEGLflags", + argLen: 1, + resultInArg0: true, + asm: x86.ANEGL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADDQcarry", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADCQ", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: x86.AADCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADDQconstcarry", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADCQconst", + auxType: auxInt32, + 
argLen: 2, + resultInArg0: true, + asm: x86.AADCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SUBQborrow", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SBBQ", + argLen: 3, + resultInArg0: true, + asm: x86.ASBBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SUBQconstborrow", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SBBQconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: x86.ASBBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "MULQU2", argLen: 2, @@ -29629,6 +29785,17 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Add64carry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub64borrow", + argLen: 3, + generic: true, + }, { name: "Signmask", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index ae6af1c269603..13a2da9a2f02d 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -177,23 +177,11 @@ func canMergeSym(x, y interface{}) bool { return x == nil || y == nil } -// canMergeLoad reports whether the load can be merged into target without +// canMergeLoadClobber reports whether the load can be merged into target without // invalidating the schedule. // It also checks that the other non-load argument x is something we -// are ok with clobbering (all our current load+op instructions clobber -// their input register). -func canMergeLoad(target, load, x *Value) bool { - if target.Block.ID != load.Block.ID { - // If the load is in a different block do not merge it. - return false - } - - // We can't merge the load into the target if the load - // has more than one use. - if load.Uses != 1 { - return false - } - +// are ok with clobbering. +func canMergeLoadClobber(target, load, x *Value) bool { // The register containing x is going to get clobbered. // Don't merge if we still need the value of x. // We don't have liveness information here, but we can @@ -208,6 +196,22 @@ func canMergeLoad(target, load, x *Value) bool { if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) { return false } + return canMergeLoad(target, load) +} + +// canMergeLoad reports whether the load can be merged into target without +// invalidating the schedule. 
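The split above keeps the schedule check (canMergeLoad) separate from the clobber check (canMergeLoadClobber): load+op instructions such as ADDQ overwrite the register holding x, so x must not be live afterwards, while CMPQ only reads its operands and writes flags, which is why the CMP folding rules now call the weaker canMergeLoad. A rough Go-level picture of the liveness situation; whether the compiler folds the load in this exact function is not guaranteed:

```go
package demo

// x stays live after the comparison on both paths, so an instruction that
// clobbered x's register while folding the load of *p would be wrong.
// A compare can still fold the load, because CMPQ leaves both operands intact.
func pick(p *int64, x int64) int64 {
	if x > *p {
		return x
	}
	return x + 1
}
```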
+func canMergeLoad(target, load *Value) bool { + if target.Block.ID != load.Block.ID { + // If the load is in a different block do not merge it. + return false + } + + // We can't merge the load into the target if the load + // has more than one use. + if load.Uses != 1 { + return false + } mem := load.MemoryArg() diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 70aa51f3d12fc..b6fb6c8b97fec 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -1297,7 +1299,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { return true } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1311,7 +1313,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ADDLload) @@ -1323,7 +1325,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { return true } // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1337,7 +1339,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ADDLload) @@ -1349,7 +1351,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { return true } // match: (ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -1364,7 +1366,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { ptr := l.Args[0] idx := l.Args[1] mem := l.Args[2] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ADDLloadidx4) @@ -1377,7 +1379,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { return true } // match: (ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -1392,7 +1394,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { idx := l.Args[1] mem := l.Args[2] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ADDLloadidx4) @@ -2163,7 +2165,7 @@ func rewriteValue386_Op386ADDSD_0(v *Value) bool { config := b.Func.Config _ = config // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2177,7 +2179,7 
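Note on the canMergeLoad split above: the arithmetic/logic folding rules (ADDLload, ANDLload, MULLload and their 64-bit counterparts) keep the x-clobber check via canMergeLoadClobber because the merged read-modify instruction overwrites the register holding x, while the CMPBload/CMPLload/CMPWload/CMPQload rules switch to the narrower canMergeLoad, presumably because a merged compare only writes flags and leaves x intact. A minimal Go sketch of the two source shapes these rules target (addFromMem and cmpFromMem are illustrative names only; whether a given build actually folds the load also depends on scheduling and register pressure):

    package sketch

    // The add can become ADDQload: the folded instruction clobbers the
    // register holding x, so canMergeLoadClobber must also approve x.
    func addFromMem(x uint64, p *uint64) uint64 { return x + *p }

    // The compare can become CMPQload: only flags are written and x
    // survives, so the plain canMergeLoad check suffices.
    func cmpFromMem(x uint64, p *uint64) bool { return x == *p }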
@@ func rewriteValue386_Op386ADDSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386ADDSDload) @@ -2189,7 +2191,7 @@ func rewriteValue386_Op386ADDSD_0(v *Value) bool { return true } // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2203,7 +2205,7 @@ func rewriteValue386_Op386ADDSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386ADDSDload) @@ -2282,7 +2284,7 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { config := b.Func.Config _ = config // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2296,7 +2298,7 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386ADDSSload) @@ -2308,7 +2310,7 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { return true } // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2322,7 +2324,7 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386ADDSSload) @@ -2429,7 +2431,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { return true } // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2443,7 +2445,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ANDLload) @@ -2455,7 +2457,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { return true } // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2469,7 +2471,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ANDLload) @@ -2481,7 +2483,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { return true } // match: (ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: 
(ANDLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -2496,7 +2498,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { ptr := l.Args[0] idx := l.Args[1] mem := l.Args[2] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ANDLloadidx4) @@ -2509,7 +2511,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { return true } // match: (ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -2524,7 +2526,7 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { idx := l.Args[1] mem := l.Args[2] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ANDLloadidx4) @@ -3151,7 +3153,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { return true } // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPBload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -3165,7 +3167,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(Op386CMPBload) @@ -3177,7 +3179,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { return true } // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -3191,7 +3193,7 @@ func rewriteValue386_Op386CMPB_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(Op386InvertFlags) @@ -3455,7 +3457,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { return true } // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPLload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -3469,7 +3471,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(Op386CMPLload) @@ -3481,7 +3483,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { return true } // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -3495,7 +3497,7 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(Op386InvertFlags) @@ -3778,7 +3780,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { return true } // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPWload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -3792,7 +3794,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && 
clobber(l)) { break } v.reset(Op386CMPWload) @@ -3804,7 +3806,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { return true } // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPWload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -3818,7 +3820,7 @@ func rewriteValue386_Op386CMPW_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(Op386InvertFlags) @@ -4050,7 +4052,7 @@ func rewriteValue386_Op386DIVSD_0(v *Value) bool { config := b.Func.Config _ = config // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (DIVSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -4064,7 +4066,7 @@ func rewriteValue386_Op386DIVSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386DIVSDload) @@ -4143,7 +4145,7 @@ func rewriteValue386_Op386DIVSS_0(v *Value) bool { config := b.Func.Config _ = config // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (DIVSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -4157,7 +4159,7 @@ func rewriteValue386_Op386DIVSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386DIVSSload) @@ -12773,7 +12775,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { return true } // match: (MULL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -12787,7 +12789,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386MULLload) @@ -12799,7 +12801,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { return true } // match: (MULL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -12813,7 +12815,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386MULLload) @@ -12825,7 +12827,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { return true } // match: (MULL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -12840,7 +12842,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { ptr := l.Args[0] idx := l.Args[1] mem := l.Args[2] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && 
clobber(l)) { break } v.reset(Op386MULLloadidx4) @@ -12853,7 +12855,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { return true } // match: (MULL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -12868,7 +12870,7 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { idx := l.Args[1] mem := l.Args[2] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386MULLloadidx4) @@ -13575,7 +13577,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { config := b.Func.Config _ = config // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -13589,7 +13591,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSDload) @@ -13601,7 +13603,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { return true } // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -13615,7 +13617,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSDload) @@ -13694,7 +13696,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { config := b.Func.Config _ = config // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -13708,7 +13710,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSSload) @@ -13720,7 +13722,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { return true } // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -13734,7 +13736,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSSload) @@ -14039,7 +14041,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { return true } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -14053,7 +14055,7 @@ func 
rewriteValue386_Op386ORL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ORLload) @@ -14065,7 +14067,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { return true } // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -14079,7 +14081,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ORLload) @@ -14098,7 +14100,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -14113,7 +14115,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { ptr := l.Args[0] idx := l.Args[1] mem := l.Args[2] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ORLloadidx4) @@ -14126,7 +14128,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { return true } // match: (ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -14141,7 +14143,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { idx := l.Args[1] mem := l.Args[2] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ORLloadidx4) @@ -19618,7 +19620,7 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { return true } // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -19632,7 +19634,7 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386SUBLload) @@ -19644,7 +19646,7 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { return true } // match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -19659,7 +19661,7 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { ptr := l.Args[0] idx := l.Args[1] mem := l.Args[2] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386SUBLloadidx4) @@ -20098,7 +20100,7 @@ func rewriteValue386_Op386SUBSD_0(v *Value) bool { config := b.Func.Config _ = config // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (SUBSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -20112,7 +20114,7 @@ func rewriteValue386_Op386SUBSD_0(v *Value) bool { _ = l.Args[1] ptr := 
l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386SUBSDload) @@ -20191,7 +20193,7 @@ func rewriteValue386_Op386SUBSS_0(v *Value) bool { config := b.Func.Config _ = config // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (SUBSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -20205,7 +20207,7 @@ func rewriteValue386_Op386SUBSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386SUBSSload) @@ -20478,7 +20480,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { return true } // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -20492,7 +20494,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386XORLload) @@ -20504,7 +20506,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { return true } // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -20518,7 +20520,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386XORLload) @@ -20533,7 +20535,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { } func rewriteValue386_Op386XORL_10(v *Value) bool { // match: (XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -20548,7 +20550,7 @@ func rewriteValue386_Op386XORL_10(v *Value) bool { ptr := l.Args[0] idx := l.Args[1] mem := l.Args[2] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386XORLloadidx4) @@ -20561,7 +20563,7 @@ func rewriteValue386_Op386XORL_10(v *Value) bool { return true } // match: (XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLloadidx4 x [off] {sym} ptr idx mem) for { _ = v.Args[1] @@ -20576,7 +20578,7 @@ func rewriteValue386_Op386XORL_10(v *Value) bool { idx := l.Args[1] mem := l.Args[2] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386XORLloadidx4) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 09d17e00c85b4..3ac860c1a2c94 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import 
"cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -15,6 +17,10 @@ var _ = types.TypeMem // in case not otherwise used func rewriteValueAMD64(v *Value) bool { switch v.Op { + case OpAMD64ADCQ: + return rewriteValueAMD64_OpAMD64ADCQ_0(v) + case OpAMD64ADCQconst: + return rewriteValueAMD64_OpAMD64ADCQconst_0(v) case OpAMD64ADDL: return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v) case OpAMD64ADDLconst: @@ -27,6 +33,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64ADDLmodify_0(v) case OpAMD64ADDQ: return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v) + case OpAMD64ADDQcarry: + return rewriteValueAMD64_OpAMD64ADDQcarry_0(v) case OpAMD64ADDQconst: return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v) case OpAMD64ADDQconstmodify: @@ -451,8 +459,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SARWconst_0(v) case OpAMD64SBBLcarrymask: return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v) + case OpAMD64SBBQ: + return rewriteValueAMD64_OpAMD64SBBQ_0(v) case OpAMD64SBBQcarrymask: return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v) + case OpAMD64SBBQconst: + return rewriteValueAMD64_OpAMD64SBBQconst_0(v) case OpAMD64SETA: return rewriteValueAMD64_OpAMD64SETA_0(v) case OpAMD64SETAE: @@ -527,6 +539,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SUBLmodify_0(v) case OpAMD64SUBQ: return rewriteValueAMD64_OpAMD64SUBQ_0(v) + case OpAMD64SUBQborrow: + return rewriteValueAMD64_OpAMD64SUBQborrow_0(v) case OpAMD64SUBQconst: return rewriteValueAMD64_OpAMD64SUBQconst_0(v) case OpAMD64SUBQload: @@ -1142,6 +1156,86 @@ func rewriteValueAMD64(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool { + // match: (ADCQ x (MOVQconst [c]) carry) + // cond: is32Bit(c) + // result: (ADCQconst x [c] carry) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + carry := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADCQconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(carry) + return true + } + // match: (ADCQ (MOVQconst [c]) x carry) + // cond: is32Bit(c) + // result: (ADCQconst x [c] carry) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + carry := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADCQconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(carry) + return true + } + // match: (ADCQ x y (FlagEQ)) + // cond: + // result: (ADDQcarry x y) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64ADDQcarry) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool { + // match: (ADCQconst x [c] (FlagEQ)) + // cond: + // result: (ADDQconstcarry x [c]) + for { + c := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (MOVLconst [c])) // 
cond: @@ -1696,7 +1790,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { return true } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1710,7 +1804,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDLload) @@ -1722,7 +1816,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { return true } // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1736,7 +1830,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDLload) @@ -2614,7 +2708,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { return true } // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2628,7 +2722,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDQload) @@ -2640,7 +2734,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { return true } // match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2654,7 +2748,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDQload) @@ -2667,6 +2761,47 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool { + // match: (ADDQcarry x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ADDQconstcarry x [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADDQcarry (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (ADDQconstcarry x [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // match: (ADDQconst [c] (ADDQ x y)) // cond: @@ -3088,7 +3223,7 @@ func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSDload x [off] {sym} ptr 
mem) for { _ = v.Args[1] @@ -3102,7 +3237,7 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSDload) @@ -3114,7 +3249,7 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { return true } // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3128,7 +3263,7 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSDload) @@ -3234,7 +3369,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3248,7 +3383,7 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSSload) @@ -3260,7 +3395,7 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { return true } // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3274,7 +3409,7 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSSload) @@ -3530,7 +3665,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { return true } // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3544,7 +3679,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ANDLload) @@ -3556,7 +3691,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { return true } // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3570,7 +3705,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ANDLload) @@ -4057,7 +4192,7 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { return true } // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -4071,7 +4206,7 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { _ = 
l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ANDQload) @@ -4083,7 +4218,7 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { return true } // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -4097,7 +4232,7 @@ func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ANDQload) @@ -8697,7 +8832,7 @@ func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { return true } // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPBload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -8711,7 +8846,7 @@ func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64CMPBload) @@ -8723,7 +8858,7 @@ func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { return true } // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -8737,7 +8872,7 @@ func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64InvertFlags) @@ -9100,7 +9235,7 @@ func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { return true } // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPLload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -9114,7 +9249,7 @@ func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64CMPLload) @@ -9126,7 +9261,7 @@ func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { return true } // match: (CMPL x l:(MOVLload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPLload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -9140,7 +9275,7 @@ func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64InvertFlags) @@ -9528,7 +9663,7 @@ func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { return true } // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPQload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -9542,7 +9677,7 @@ func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64CMPQload) @@ -9554,7 +9689,7 @@ func rewriteValueAMD64_OpAMD64CMPQ_0(v 
*Value) bool { return true } // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPQload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -9568,7 +9703,7 @@ func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64InvertFlags) @@ -10067,7 +10202,7 @@ func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { return true } // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (CMPWload {sym} [off] ptr x mem) for { _ = v.Args[1] @@ -10081,7 +10216,7 @@ func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64CMPWload) @@ -10093,7 +10228,7 @@ func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { return true } // match: (CMPW x l:(MOVWload {sym} [off] ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoad(v, l) && clobber(l) // result: (InvertFlags (CMPWload {sym} [off] ptr x mem)) for { _ = v.Args[1] @@ -10107,7 +10242,7 @@ func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoad(v, l) && clobber(l)) { break } v.reset(OpAMD64InvertFlags) @@ -10496,7 +10631,7 @@ func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool { // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (DIVSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10510,7 +10645,7 @@ func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64DIVSDload) @@ -10581,7 +10716,7 @@ func rewriteValueAMD64_OpAMD64DIVSDload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool { // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (DIVSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10595,7 +10730,7 @@ func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64DIVSSload) @@ -26028,7 +26163,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { } func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -26042,7 +26177,7 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSDload) @@ -26054,7 +26189,7 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { 
return true } // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -26068,7 +26203,7 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSDload) @@ -26174,7 +26309,7 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -26188,7 +26323,7 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSSload) @@ -26200,7 +26335,7 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { return true } // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -26214,7 +26349,7 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSSload) @@ -34947,7 +35082,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { return true } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -34961,7 +35096,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ORLload) @@ -34973,7 +35108,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { return true } // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -34987,7 +35122,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ORLload) @@ -45998,7 +46133,7 @@ func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { return true } // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -46012,7 +46147,7 @@ func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ORQload) @@ -46024,7 +46159,7 @@ func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { return true } // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: 
canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -46038,7 +46173,7 @@ func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ORQload) @@ -47708,6 +47843,46 @@ func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool { + // match: (SBBQ x (MOVQconst [c]) borrow) + // cond: is32Bit(c) + // result: (SBBQconst x [c] borrow) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + borrow := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64SBBQconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(borrow) + return true + } + // match: (SBBQ x y (FlagEQ)) + // cond: + // result: (SUBQborrow x y) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64SUBQborrow) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { // match: (SBBQcarrymask (FlagEQ)) // cond: @@ -47771,6 +47946,25 @@ func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SBBQconst_0(v *Value) bool { + // match: (SBBQconst x [c] (FlagEQ)) + // cond: + // result: (SUBQconstborrow x [c]) + for { + c := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64SUBQconstborrow) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool { // match: (SETA (InvertFlags x)) // cond: @@ -54906,7 +55100,7 @@ func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { return true } // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -54920,7 +55114,7 @@ func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBLload) @@ -55164,7 +55358,7 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { return true } // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -55178,7 +55372,7 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBQload) @@ -55191,6 +55385,28 @@ func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SUBQborrow_0(v *Value) bool { + // match: (SUBQborrow x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (SUBQconstborrow x [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64SUBQconstborrow) + v.AuxInt = c + v.AddArg(x) + return true + } + return false 
+} func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { // match: (SUBQconst [0] x) // cond: @@ -55403,7 +55619,7 @@ func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -55417,7 +55633,7 @@ func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBSDload) @@ -55523,7 +55739,7 @@ func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -55537,7 +55753,7 @@ func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64SUBSSload) @@ -56577,7 +56793,7 @@ func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { return true } // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -56591,7 +56807,7 @@ func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64XORLload) @@ -56603,7 +56819,7 @@ func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { return true } // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -56617,7 +56833,7 @@ func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64XORLload) @@ -57276,7 +57492,7 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { return true } // match: (XORQ x l:(MOVQload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -57290,7 +57506,7 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64XORQload) @@ -57305,7 +57521,7 @@ func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool { // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -57319,7 +57535,7 @@ func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) 
&& clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64XORQload) @@ -64838,6 +65054,56 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Select0 (Add64carry x y c)) + // cond: + // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) + for { + v_0 := v.Args[0] + if v_0.Op != OpAdd64carry { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // cond: + // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) + for { + v_0 := v.Args[0] + if v_0.Op != OpSub64borrow { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } // match: (Select0 (AddTupleFirst32 val tuple)) // cond: // result: (ADDL val (Select0 tuple)) @@ -64923,6 +65189,104 @@ func rewriteValueAMD64_OpSelect1_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Select1 (Add64carry x y c)) + // cond: + // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) + for { + v_0 := v.Args[0] + if v_0.Op != OpAdd64carry { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // cond: + // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) + for { + v_0 := v.Args[0] + if v_0.Op != OpSub64borrow { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (NEGLflags (MOVQconst [0]))) + // cond: + // result: (FlagEQ) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst { + break + } + if v_0_0.AuxInt != 0 
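The Select0/Select1 rules for Add64carry and Sub64borrow above lower a 64-bit add/subtract with carry into an ADCQ/SBBQ chain: NEGLflags converts the incoming carry value into the CPU carry flag, and NEGQ of SBBQcarrymask materializes the outgoing carry back as a 0/1 value. A minimal sketch, assuming these generic ops back the math/bits.Add64 and bits.Sub64 intrinsics (as the rule shapes suggest), of source that can then compile to a plain ADDQ/ADCQ pair instead of branchy carry computation:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // add128 adds two 128-bit values represented as (hi, lo) pairs of uint64.
    func add128(xhi, xlo, yhi, ylo uint64) (hi, lo uint64) {
    	lo, carry := bits.Add64(xlo, ylo, 0) // Select0/Select1 of Add64carry
    	hi, _ = bits.Add64(xhi, yhi, carry)  // carry consumed via ADCQ
    	return
    }

    func main() {
    	hi, lo := add128(1, ^uint64(0), 0, 1)
    	fmt.Println(hi, lo) // prints: 2 0
    }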
{ + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64SBBQcarrymask { + break + } + x := v_0_0_0.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } // match: (Select1 (AddTupleFirst32 _ tuple)) // cond: // result: (Select1 tuple) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 4f6f61544e362..4fc7fdfbe1839 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index b9bb109b9e191..1e4b1ef0cf341 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 231949644ee8d..e513981852598 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 9cd0050e26d1a..04df5b8603d6b 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index dad036d3d26f5..abad10a2d1a93 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git 
a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 95c9a0d0fc78e..dce7a52529041 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -7117,7 +7119,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { return true } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7132,7 +7134,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7145,7 +7147,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { return true } // match: (ADD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7160,7 +7162,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7176,7 +7178,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { } func rewriteValueS390X_OpS390XADD_10(v *Value) bool { // match: (ADD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7191,7 +7193,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7204,7 +7206,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { return true } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7219,7 +7221,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7353,7 +7355,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) - // 
cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7368,7 +7370,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7381,7 +7383,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7396,7 +7398,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7409,7 +7411,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7424,7 +7426,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7437,7 +7439,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7452,7 +7454,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7468,7 +7470,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { } func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7483,7 +7485,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7496,7 +7498,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { return true } // match: (ADDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB 
&& is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7511,7 +7513,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7524,7 +7526,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { return true } // match: (ADDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7539,7 +7541,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7552,7 +7554,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { return true } // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7567,7 +7569,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -8136,7 +8138,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8151,7 +8153,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8164,7 +8166,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8179,7 +8181,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8192,7 +8194,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: 
ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8207,7 +8209,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8220,7 +8222,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8235,7 +8237,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8297,7 +8299,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8312,7 +8314,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8325,7 +8327,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8340,7 +8342,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8353,7 +8355,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8368,7 +8370,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8381,7 +8383,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && 
clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8396,7 +8398,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8409,7 +8411,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8424,7 +8426,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8437,7 +8439,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8452,7 +8454,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8465,7 +8467,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8480,7 +8482,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8496,7 +8498,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { } func rewriteValueS390X_OpS390XANDW_10(v *Value) bool { // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8511,7 +8513,7 @@ func rewriteValueS390X_OpS390XANDW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -22622,7 +22624,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // 
result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22637,7 +22639,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22650,7 +22652,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22665,7 +22667,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22678,7 +22680,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22693,7 +22695,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22706,7 +22708,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22721,7 +22723,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22963,7 +22965,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22978,7 +22980,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -22991,7 +22993,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload 
[off] {sym} x ptr mem) for { t := v.Type @@ -23006,7 +23008,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23019,7 +23021,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23034,7 +23036,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23047,7 +23049,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23062,7 +23064,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23075,7 +23077,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23090,7 +23092,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23103,7 +23105,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23118,7 +23120,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23131,7 +23133,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr 
mem) for { t := v.Type @@ -23146,7 +23148,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23159,7 +23161,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23174,7 +23176,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23808,7 +23810,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23823,7 +23825,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23836,7 +23838,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23851,7 +23853,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23864,7 +23866,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23879,7 +23881,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23892,7 +23894,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23907,7 +23909,7 @@ func 
rewriteValueS390X_OpS390XOR_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -33181,7 +33183,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33196,7 +33198,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33209,7 +33211,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33224,7 +33226,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33237,7 +33239,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33252,7 +33254,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33265,7 +33267,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33280,7 +33282,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33293,7 +33295,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33308,7 +33310,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { _ = g.Args[1] ptr := 
g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33328,7 +33330,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33343,7 +33345,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33356,7 +33358,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { return true } // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33371,7 +33373,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33384,7 +33386,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { return true } // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33399,7 +33401,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -40205,7 +40207,7 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { return true } // match: (SUB x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBload [off] {sym} x ptr mem) for { t := v.Type @@ -40220,7 +40222,7 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBload) @@ -40285,7 +40287,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { return true } // match: (SUBW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type @@ -40300,7 +40302,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if 
!(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBWload) @@ -40313,7 +40315,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { return true } // match: (SUBW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type @@ -40328,7 +40330,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBWload) @@ -40785,7 +40787,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return true } // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40800,7 +40802,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40813,7 +40815,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return true } // match: (XOR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40828,7 +40830,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40841,7 +40843,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return true } // match: (XOR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40856,7 +40858,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40872,7 +40874,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { } func rewriteValueS390X_OpS390XXOR_10(v *Value) bool { // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40887,7 +40889,7 @@ func rewriteValueS390X_OpS390XXOR_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && 
is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -41002,7 +41004,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41017,7 +41019,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41030,7 +41032,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41045,7 +41047,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41058,7 +41060,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41073,7 +41075,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41086,7 +41088,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41101,7 +41103,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41114,7 +41116,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41129,7 +41131,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && 
clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41145,7 +41147,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { } func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { // match: (XORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41160,7 +41162,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41173,7 +41175,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { return true } // match: (XORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41188,7 +41190,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41201,7 +41203,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { return true } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -41216,7 +41218,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index b92556db90b0f..c17ed54b3c952 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index 8ca737bed1f88..e980520376f34 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go 
b/src/cmd/compile/internal/ssa/rewritedec64.go index 500e274206bbc..f88fce8076fb6 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index f0a1346acf808..7869fec21ffe9 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go index 9e19bb85b03a7..e7ad5ac900e2c 100644 --- a/src/cmd/compile/internal/ssa/schedule.go +++ b/src/cmd/compile/internal/ssa/schedule.go @@ -13,6 +13,7 @@ const ( ScoreReadTuple ScoreVarDef ScoreMemory + ScoreReadFlags ScoreDefault ScoreFlags ScoreControl // towards bottom of block @@ -129,13 +130,19 @@ func schedule(f *Func) { // false dependency on the other part of the tuple. // Also ensures tuple is never spilled. score[v.ID] = ScoreReadTuple - case v.Type.IsFlags() || v.Type.IsTuple(): + case v.Type.IsFlags() || v.Type.IsTuple() && v.Type.FieldType(1).IsFlags(): // Schedule flag register generation as late as possible. // This makes sure that we only have one live flags // value at a time. score[v.ID] = ScoreFlags default: score[v.ID] = ScoreDefault + // If we're reading flags, schedule earlier to keep flag lifetime short. + for _, a := range v.Args { + if a.Type.IsFlags() { + score[v.ID] = ScoreReadFlags + } + } } } } diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index c0fc7adab5449..6fc0239ffe4d7 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -1,6 +1,7 @@ package ssa_test import ( + "cmd/internal/xcoff" "debug/dwarf" "debug/elf" "debug/macho" @@ -25,6 +26,10 @@ func open(path string) (*dwarf.Data, error) { return fh.DWARF() } + if fh, err := xcoff.Open(path); err == nil { + return fh.DWARF() + } + return nil, fmt.Errorf("unrecognized executable format") } diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 7866b397939e1..b0c10c85312a0 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -2499,7 +2499,7 @@ // In general, adding a new dependency may require upgrading // existing dependencies to keep a working build, and 'go get' does // this automatically. Similarly, downgrading one dependency may -// require downgrading other dependenceis, and 'go get' does +// require downgrading other dependencies, and 'go get' does // this automatically as well. 
// // The -m flag instructs get to stop here, after resolving, upgrading, diff --git a/src/cmd/go/internal/cache/default.go b/src/cmd/go/internal/cache/default.go index 02fc1e896f74d..4a69bf2a443c9 100644 --- a/src/cmd/go/internal/cache/default.go +++ b/src/cmd/go/internal/cache/default.go @@ -9,7 +9,6 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "sync" ) @@ -78,52 +77,19 @@ func defaultDir() (string, bool) { } // Compute default location. - // TODO(rsc): This code belongs somewhere else, - // like maybe ioutil.CacheDir or os.CacheDir. - showWarnings := true - switch runtime.GOOS { - case "windows": - dir = os.Getenv("LocalAppData") - if dir == "" { - // Fall back to %AppData%, the old name of - // %LocalAppData% on Windows XP. - dir = os.Getenv("AppData") - } - if dir == "" { - return "off", true - } - - case "darwin": - dir = os.Getenv("HOME") - if dir == "" { - return "off", true - } - dir += "/Library/Caches" - - case "plan9": - dir = os.Getenv("home") - if dir == "" { - return "off", true - } - // Plan 9 has no established per-user cache directory, - // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix. - dir += "/lib/cache" + dir, err := os.UserCacheDir() + if err != nil { + return "off", true + } + dir = filepath.Join(dir, "go-build") - default: // Unix - // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - dir = os.Getenv("XDG_CACHE_HOME") - if dir == "" { - dir = os.Getenv("HOME") - if dir == "" { - return "off", true - } - if dir == "/" { - // probably docker run with -u flag - // https://golang.org/issue/26280 - showWarnings = false - } - dir += "/.cache" - } + // Do this after filepath.Join, so that the path has been cleaned. + showWarnings := true + switch dir { + case "/.cache/go-build": + // probably docker run with -u flag + // https://golang.org/issue/26280 + showWarnings = false } - return filepath.Join(dir, "go-build"), showWarnings + return dir, showWarnings } diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index 62e74585359e7..7bd1d0b5718b8 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -45,7 +45,7 @@ func runVendor(cmd *base.Command, args []string) { vdir := filepath.Join(modload.ModRoot, "vendor") if err := os.RemoveAll(vdir); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } modpkgs := make(map[module.Version][]string) @@ -85,7 +85,7 @@ func runVendor(cmd *base.Command, args []string) { return } if err := ioutil.WriteFile(filepath.Join(vdir, "modules.txt"), buf.Bytes(), 0666); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } } @@ -172,10 +172,10 @@ func matchNonTest(info os.FileInfo) bool { func copyDir(dst, src string, match func(os.FileInfo) bool) { files, err := ioutil.ReadDir(src) if err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } if err := os.MkdirAll(dst, 0777); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } for _, file := range files { if file.IsDir() || !file.Mode().IsRegular() || !match(file) { @@ -183,18 +183,18 @@ func copyDir(dst, src string, match func(os.FileInfo) bool) { } r, err := os.Open(filepath.Join(src, file.Name())) if err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } w, err := os.Create(filepath.Join(dst, file.Name())) if err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: 
%v", err) } if _, err := io.Copy(w, r); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } r.Close() if err := w.Close(); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } } } diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 54a2b724d420d..ffc9a12f95364 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -78,7 +78,7 @@ to use newer patch releases when available. Continuing the previous example, In general, adding a new dependency may require upgrading existing dependencies to keep a working build, and 'go get' does this automatically. Similarly, downgrading one dependency may -require downgrading other dependenceis, and 'go get' does +require downgrading other dependencies, and 'go get' does this automatically as well. The -m flag instructs get to stop here, after resolving, upgrading, diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index a506c25dc7eb7..76068069087f7 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -145,34 +145,38 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { } } } - if cfg.BuildMod == "vendor" { - m.Dir = filepath.Join(ModRoot, "vendor", m.Path) - } } - complete(info) + if !fromBuildList { + complete(info) + return info + } - if fromBuildList { - if r := Replacement(m); r.Path != "" { - info.Replace = &modinfo.ModulePublic{ - Path: r.Path, - Version: r.Version, - GoVersion: info.GoVersion, - } - if r.Version == "" { - if filepath.IsAbs(r.Path) { - info.Replace.Dir = r.Path - } else { - info.Replace.Dir = filepath.Join(ModRoot, r.Path) - } - } - complete(info.Replace) - info.Dir = info.Replace.Dir - info.GoMod = filepath.Join(info.Dir, "go.mod") - info.Error = nil // ignore error loading original module version (it has been replaced) - } + r := Replacement(m) + if r.Path == "" { + complete(info) + return info } + // Don't hit the network to fill in extra data for replaced modules. + // The original resolved Version and Time don't matter enough to be + // worth the cost, and we're going to overwrite the GoMod and Dir from the + // replacement anyway. See https://golang.org/issue/27859. + info.Replace = &modinfo.ModulePublic{ + Path: r.Path, + Version: r.Version, + GoVersion: info.GoVersion, + } + if r.Version == "" { + if filepath.IsAbs(r.Path) { + info.Replace.Dir = r.Path + } else { + info.Replace.Dir = filepath.Join(ModRoot, r.Path) + } + } + complete(info.Replace) + info.Dir = info.Replace.Dir + info.GoMod = filepath.Join(info.Dir, "go.mod") return info } @@ -224,6 +228,10 @@ func findModule(target, path string) module.Version { if path == "." { return buildList[0] } + if cfg.BuildMod == "vendor" { + readVendorList() + return vendorMap[path] + } for _, mod := range buildList { if maybeInModule(path, mod.Path) { return mod diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go index 8ec9162dabcdc..aa109693f307e 100644 --- a/src/cmd/go/internal/mvs/mvs.go +++ b/src/cmd/go/internal/mvs/mvs.go @@ -68,6 +68,7 @@ func (e *MissingModuleError) Error() string { } // BuildList returns the build list for the target module. +// The first element is the target itself, with the remainder of the list sorted by path. 
func BuildList(target module.Version, reqs Reqs) ([]module.Version, error) { return buildList(target, reqs, nil) } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 6ae263431cc21..99a500f11ff75 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -2080,14 +2080,37 @@ func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []s } // gccld runs the gcc linker to create an executable from a set of object files. -func (b *Builder) gccld(p *load.Package, objdir, out string, flags []string, objs []string) error { +func (b *Builder) gccld(p *load.Package, objdir, outfile string, flags []string, objs []string) error { var cmd []string if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 { cmd = b.GxxCmd(p.Dir, objdir) } else { cmd = b.GccCmd(p.Dir, objdir) } - return b.run(nil, p.Dir, p.ImportPath, b.cCompilerEnv(), cmd, "-o", out, objs, flags) + + cmdargs := []interface{}{cmd, "-o", outfile, objs, flags} + dir := p.Dir + out, err := b.runOut(dir, b.cCompilerEnv(), cmdargs...) + if len(out) > 0 { + // Filter out useless linker warnings caused by bugs outside Go. + // See also cmd/link/internal/ld's hostlink method. + var save [][]byte + for _, line := range bytes.SplitAfter(out, []byte("\n")) { + // golang.org/issue/26073 - Apple Xcode bug + if bytes.Contains(line, []byte("ld: warning: text-based stub file")) { + continue + } + save = append(save, line) + } + out = bytes.Join(save, nil) + if len(out) > 0 { + b.showOutput(nil, dir, p.ImportPath, b.processOutput(out)) + if err != nil { + err = errPrintedOutput + } + } + } + return err } // Grab these before main helpfully overwrites them. diff --git a/src/cmd/go/testdata/script/mod_replace.txt b/src/cmd/go/testdata/script/mod_replace.txt index 5894ed69f3417..b9cf00c36cb5b 100644 --- a/src/cmd/go/testdata/script/mod_replace.txt +++ b/src/cmd/go/testdata/script/mod_replace.txt @@ -1,10 +1,14 @@ env GO111MODULE=on +cp go.mod go.mod.orig + +# Make sure the test builds without replacement. go build -o a1.exe . exec ./a1.exe stdout 'Don''t communicate by sharing memory' # Modules can be replaced by local packages. +cp go.mod.orig go.mod go mod edit -replace=rsc.io/quote/v3=./local/rsc.io/quote/v3 go build -o a2.exe . exec ./a2.exe @@ -12,16 +16,26 @@ stdout 'Concurrency is not parallelism.' # The module path of the replacement doesn't need to match. # (For example, it could be a long-running fork with its own import path.) +cp go.mod.orig go.mod go mod edit -replace=rsc.io/quote/v3=./local/not-rsc.io/quote/v3 go build -o a3.exe . exec ./a3.exe stdout 'Clear is better than clever.' # However, the same module can't be used as two different paths. -go mod edit -dropreplace=rsc.io/quote/v3 -replace=not-rsc.io/quote/v3@v3.0.0=rsc.io/quote/v3@v3.0.0 -require=not-rsc.io/quote/v3@v3.0.0 +cp go.mod.orig go.mod +go mod edit -replace=not-rsc.io/quote/v3@v3.0.0=rsc.io/quote/v3@v3.0.0 -require=not-rsc.io/quote/v3@v3.0.0 ! go build -o a4.exe . stderr 'rsc.io/quote/v3@v3.0.0 used for two different module paths \(not-rsc.io/quote/v3 and rsc.io/quote/v3\)' +# Modules that do not (yet) exist upstream can be replaced too. +cp go.mod.orig go.mod +go mod edit -require not-rsc.io/quote/v3@v3.0.0 -replace=not-rsc.io/quote/v3=./local/rsc.io/quote/v3 +go build -o a5.exe ./usenewmodule +! stderr 'finding not-rsc.io/quote/v3' +exec ./a5.exe +stdout 'Concurrency is not parallelism.' 
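// The gccld change above captures the linker's output with runOut and strips
// known-noisy warnings before deciding whether to show it, mirroring
// cmd/link's hostlink filtering. A standalone sketch of that line-filtering
// technique; the sample output in main is made up for illustration, while
// the matched warning text and golang.org/issue/26073 come from the hunk
// above:
package main

import (
	"bytes"
	"fmt"
)

// filterNoise drops lines containing the Xcode "text-based stub file"
// warning. SplitAfter keeps the trailing '\n' on each line, so joining the
// kept lines with nil reproduces the original layout.
func filterNoise(out []byte) []byte {
	var keep [][]byte
	for _, line := range bytes.SplitAfter(out, []byte("\n")) {
		if bytes.Contains(line, []byte("ld: warning: text-based stub file")) {
			continue // known-harmless warning, not worth surfacing
		}
		keep = append(keep, line)
	}
	return bytes.Join(keep, nil)
}

func main() {
	out := []byte("ld: warning: text-based stub file foo.tbd is out of sync\nundefined symbol: main\n")
	fmt.Printf("%s", filterNoise(out)) // only the undefined-symbol line remains
}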
+ -- go.mod -- module quoter @@ -39,6 +53,18 @@ func main() { fmt.Println(quote.GoV3()) } +-- usenewmodule/main.go -- +package main + +import ( + "fmt" + "not-rsc.io/quote/v3" +) + +func main() { + fmt.Println(quote.GoV3()) +} + -- local/rsc.io/quote/v3/go.mod -- module rsc.io/quote/v3 diff --git a/src/cmd/go/testdata/script/mod_tidy_replace.txt b/src/cmd/go/testdata/script/mod_tidy_replace.txt new file mode 100644 index 0000000000000..70c789afaad2a --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_replace.txt @@ -0,0 +1,71 @@ +env GO111MODULE=on + +# From inside the module, 'go list -m all' should NOT include transitive +# requirements of modules that have been replaced. +go list -m all +stdout 'rsc.io/quote/v3 v3.0.0' +! stdout 'rsc.io/sampler' +! stdout 'golang.org/x/text' + +# From outside the module, 'go list -m all' should include them. +cd outside +go list -m all +stdout 'rsc.io/quote/v3 v3.0.0' +stdout 'rsc.io/sampler v1.3.0' +stdout 'golang.org/x/text' +cd .. + +# 'go list all' should add indirect requirements to satisfy the packages +# imported from replacement modules. +! grep 'rsc.io/sampler' go.mod +! grep 'golang.org/x/text' go.mod +go list all +grep 'rsc.io/sampler' go.mod +grep 'golang.org/x/text' go.mod + +# 'go get' and 'go mod tidy' should follow the requirements of the replacements, +# not the originals, even if that results in a set of versions that are +# misleading or redundant without those replacements. +go get rsc.io/sampler@v1.2.0 +go mod tidy +go list -m all +stdout 'rsc.io/quote/v3 v3.0.0' +stdout 'rsc.io/sampler v1.2.0' +stdout 'golang.org/x/text' + +# The requirements seen from outside may be higher (or lower) +# than those seen from within the module. +grep 'rsc.io/sampler v1.2.0' go.mod +cd outside +go list -m all +stdout 'rsc.io/sampler v1.3.0' + +-- go.mod -- +module example.com/tidy + +require rsc.io/quote/v3 v3.0.0 +replace rsc.io/quote/v3 => ./not-rsc.io/quote/v3 + +-- imports.go -- +package tidy + +import _ "rsc.io/quote/v3" + +-- outside/go.mod -- +module example.com/tidy/outside + +require example.com/tidy v0.0.0 +replace example.com/tidy => ./.. + +-- not-rsc.io/quote/v3/go.mod -- +module not-rsc.io/quote/v3 + +// No requirements specified! 
+ +-- not-rsc.io/quote/v3/quote.go -- +package quote + +import ( + _ "rsc.io/sampler" + _ "golang.org/x/text/language" +) diff --git a/src/cmd/go/testdata/script/mod_vendor.txt b/src/cmd/go/testdata/script/mod_vendor.txt index b3769a850415f..203183be881f2 100644 --- a/src/cmd/go/testdata/script/mod_vendor.txt +++ b/src/cmd/go/testdata/script/mod_vendor.txt @@ -67,6 +67,7 @@ module m require ( a v1.0.0 + diamondroot v0.0.0 mysite/myname/mypkg v1.0.0 w v1.0.0 // indirect x v1.0.0 @@ -76,6 +77,10 @@ require ( replace ( a v1.0.0 => ./a + diamondleft => ./diamondleft + diamondpoint => ./diamondpoint + diamondright => ./diamondright + diamondroot => ./diamondroot mysite/myname/mypkg v1.0.0 => ./mypkg w v1.0.0 => ./w x v1.0.0 => ./x @@ -200,6 +205,10 @@ import _ "z" package m import _ "x/x1" +-- importdiamond.go -- +package m + +import _ "diamondroot" -- w/go.mod -- module w -- w/w.go -- @@ -228,3 +237,42 @@ package y module z -- z/z.go -- package z + +-- diamondroot/go.mod -- +module diamondroot + +require ( + diamondleft v0.0.0 + diamondright v0.0.0 +) +-- diamondroot/x.go -- +package diamondroot + +import ( + _ "diamondleft" + _ "diamondright" +) +-- diamondleft/go.mod -- +module diamondleft + +require ( + diamondpoint v0.0.0 +) +-- diamondleft/x.go -- +package diamondleft + +import _ "diamondpoint" +-- diamondright/go.mod -- +module diamondright + +require ( + diamondpoint v0.0.0 +) +-- diamondright/x.go -- +package diamondright + +import _ "diamondpoint" +-- diamondpoint/go.mod -- +module diamondpoint +-- diamondpoint/x.go -- +package diamondpoint diff --git a/src/cmd/go/testdata/script/mod_vendor_replace.txt b/src/cmd/go/testdata/script/mod_vendor_replace.txt new file mode 100644 index 0000000000000..6bc1c77ed3d0d --- /dev/null +++ b/src/cmd/go/testdata/script/mod_vendor_replace.txt @@ -0,0 +1,39 @@ +env GO111MODULE=on + +# Before vendoring, we expect to see the original directory. +go list -f '{{.Version}} {{.Dir}}' -m rsc.io/quote/v3 +stdout 'v3.0.0' +stdout '.*[/\\]not-rsc.io[/\\]quote[/\\]v3' + +# Since all dependencies are replaced, 'go mod vendor' should not +# have to download anything from the network. +go mod vendor +! stderr 'downloading' +! stderr 'finding' + +# After vendoring, we expect to see the replacement in the vendor directory, +# without attempting to look up the non-replaced version. +cmp vendor/rsc.io/quote/v3/quote.go local/not-rsc.io/quote/v3/quote.go + +go list -mod=vendor -f '{{.Version}} {{.Dir}}' -m rsc.io/quote/v3 +stdout 'v3.0.0' +stdout '.*[/\\]vendor[/\\]rsc.io[/\\]quote[/\\]v3' +! stderr 'finding' +! 
stderr 'lookup disabled' + +-- go.mod -- +module example.com/replace + +require rsc.io/quote/v3 v3.0.0 +replace rsc.io/quote/v3 => ./local/not-rsc.io/quote/v3 + +-- imports.go -- +package replace + +import _ "rsc.io/quote/v3" + +-- local/not-rsc.io/quote/v3/go.mod -- +module not-rsc.io/quote/v3 + +-- local/not-rsc.io/quote/v3/quote.go -- +package quote diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go index 355091fedaa20..21512a80bdf26 100644 --- a/src/cmd/internal/dwarf/dwarf.go +++ b/src/cmd/internal/dwarf/dwarf.go @@ -179,7 +179,7 @@ type Context interface { AddBytes(s Sym, b []byte) AddAddress(s Sym, t interface{}, ofs int64) AddSectionOffset(s Sym, size int, t interface{}, ofs int64) - AddDWARFSectionOffset(s Sym, size int, t interface{}, ofs int64) + AddDWARFAddrSectionOffset(s Sym, t interface{}, ofs int64) CurrentOffset(s Sym) int64 RecordDclReference(from Sym, to Sym, dclIdx int, inlIndex int) RecordChildDieOffsets(s Sym, vars []*Var, offsets []int32) @@ -895,7 +895,7 @@ func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, da case DW_FORM_data4: // constant, {line,loclist,mac,rangelist}ptr if cls == DW_CLS_PTR { // DW_AT_stmt_list and DW_AT_ranges - ctxt.AddDWARFSectionOffset(s, 4, data, value) + ctxt.AddDWARFAddrSectionOffset(s, data, value) break } ctxt.AddInt(s, 4, value) @@ -932,7 +932,7 @@ func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, da if data == nil { return fmt.Errorf("dwarf: null reference in %d", abbrev) } - ctxt.AddDWARFSectionOffset(s, 4, data, value) + ctxt.AddDWARFAddrSectionOffset(s, data, value) case DW_FORM_ref1, // reference within the compilation unit DW_FORM_ref2, // reference diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index ef9ce4c688d4a..b6cfec3b3e6f5 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -459,7 +459,12 @@ func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { panic("should be used only in the linker") } -func (c dwCtxt) AddDWARFSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { +func (c dwCtxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { + size := 4 + if isDwarf64(c.Link) { + size = 8 + } + ls := s.(*LSym) rsym := t.(*LSym) ls.WriteAddr(c.Link, ls.Size, size, rsym, ofs) @@ -500,6 +505,10 @@ func (c dwCtxt) Logf(format string, args ...interface{}) { c.Link.Logf(format, args...) 
} +func isDwarf64(ctxt *Link) bool { + return ctxt.Headtype == objabi.Haix +} + func (ctxt *Link) dwarfSym(s *LSym) (dwarfInfoSym, dwarfLocSym, dwarfRangesSym, dwarfAbsFnSym, dwarfIsStmtSym *LSym) { if s.Type != objabi.STEXT { ctxt.Diag("dwarfSym of non-TEXT %v", s) diff --git a/src/cmd/internal/obj/s390x/a.out.go b/src/cmd/internal/obj/s390x/a.out.go index 9ee02a2d0d72e..af321f6131a96 100644 --- a/src/cmd/internal/obj/s390x/a.out.go +++ b/src/cmd/internal/obj/s390x/a.out.go @@ -945,6 +945,9 @@ const ( AVUPLHW AVUPLF AVMSLG + AVMSLEG + AVMSLOG + AVMSLEOG // binary ABYTE diff --git a/src/cmd/internal/obj/s390x/anames.go b/src/cmd/internal/obj/s390x/anames.go index 2d6ea5abb4431..9cea9f962d93d 100644 --- a/src/cmd/internal/obj/s390x/anames.go +++ b/src/cmd/internal/obj/s390x/anames.go @@ -678,6 +678,9 @@ var Anames = []string{ "VUPLHW", "VUPLF", "VMSLG", + "VMSLEG", + "VMSLOG", + "VMSLEOG", "BYTE", "WORD", "DWORD", diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 359610c41df15..4e43d27790055 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -1379,6 +1379,9 @@ func buildop(ctxt *obj.Link) { opset(AVSBCBIQ, r) opset(AVSBIQ, r) opset(AVMSLG, r) + opset(AVMSLEG, r) + opset(AVMSLOG, r) + opset(AVMSLEOG, r) case AVSEL: opset(AVFMADB, r) opset(AWFMADB, r) diff --git a/src/cmd/internal/obj/s390x/vector.go b/src/cmd/internal/obj/s390x/vector.go index 3f1d900367d3b..62adcf6d4a523 100644 --- a/src/cmd/internal/obj/s390x/vector.go +++ b/src/cmd/internal/obj/s390x/vector.go @@ -45,7 +45,7 @@ func vop(as obj.As) (opcode, es, cs uint32) { return op_VAC, 0, 0 case AVACQ: return op_VAC, 4, 0 - case AVMSLG: + case AVMSLG, AVMSLEG, AVMSLOG, AVMSLEOG: return op_VMSL, 3, 0 case AVACCC: return op_VACCC, 0, 0 @@ -1058,6 +1058,12 @@ func singleElementMask(as obj.As) uint32 { AWFTCIDB, AWFIDB: return 8 + case AVMSLEG: + return 8 + case AVMSLOG: + return 4 + case AVMSLEOG: + return 12 } return 0 } diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 3cc9e294d29fe..f424f1d17b279 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -314,6 +314,18 @@ func relocsym(ctxt *Link, s *sym.Symbol) { break } + // On AIX, if a relocated symbol is in .data, a second relocation + // must be done by the loader, as the section .data will be moved. + // The "default" symbol address is still needed by the loader so + // the current relocation can't be skipped. + // runtime.algarray is different because it will end up in .rodata section + if ctxt.HeadType == objabi.Haix && r.Sym.Sect.Seg == &Segdata && r.Sym.Name != "runtime.algarray" { + // It's not possible to make a loader relocation to a DWARF section. 
+ // FIXME + if s.Sect.Seg != &Segdwarf { + xcoffaddloaderreloc(ctxt, s, r) + } + } o = Symaddr(r.Sym) + r.Add @@ -606,6 +618,7 @@ func dynrelocsym(ctxt *Link, s *sym.Symbol) { thearch.Adddynrel(ctxt, s, r) continue } + if r.Sym != nil && r.Sym.Type == sym.SDYNIMPORT || r.Type >= 256 { if r.Sym != nil && !r.Sym.Attr.Reachable() { Errorf(s, "dynamic relocation to unreachable symbol %s", r.Sym.Name) @@ -1329,6 +1342,14 @@ func (ctxt *Link) dodata() { gc.AddSym(s) datsize += s.Size } + // On AIX, TOC entries must be the last of .data + for _, s := range data[sym.SXCOFFTOC] { + s.Sect = sect + s.Type = sym.SDATA + datsize = aligndatsize(datsize, s) + s.Value = int64(uint64(datsize) - sect.Vaddr) + datsize += s.Size + } checkdatsize(ctxt, datsize, sym.SDATA) sect.Length = uint64(datsize) - sect.Vaddr gc.End(int64(sect.Length)) @@ -1688,6 +1709,10 @@ func (ctxt *Link) dodata() { } for _, sect := range Segdata.Sections { sect.Extnum = int16(n) + if ctxt.HeadType == objabi.Haix && (sect.Name == ".noptrdata" || sect.Name == ".bss") { + // On AIX, "noptr" sections are merged with their "ptr" section + continue + } n++ } for _, sect := range Segdwarf.Sections { diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index 827a1d931afad..7a20650d9cc0c 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -67,7 +67,12 @@ func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64 r.Add = ofs } -func (c dwctxt) AddDWARFSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { +func (c dwctxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { + size := 4 + if isDwarf64(c.linkctxt) { + size = 8 + } + c.AddSectionOffset(s, size, t, ofs) ls := s.(*sym.Symbol) ls.R[len(ls.R)-1].Type = objabi.R_DWARFSECREF @@ -95,6 +100,10 @@ func (c dwctxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets [] panic("should be used only in the compiler") } +func isDwarf64(ctxt *Link) bool { + return ctxt.HeadType == objabi.Haix +} + var gdbscript string var dwarfp []*sym.Symbol @@ -873,6 +882,33 @@ func defdwsymb(ctxt *Link, s *sym.Symbol, str string, t SymbolType, v int64, got } } +// createUnitLength creates the initial length field with value v and update +// offset of unit_length if needed. +func createUnitLength(ctxt *Link, s *sym.Symbol, v uint64) { + if isDwarf64(ctxt) { + s.AddUint32(ctxt.Arch, 0xFFFFFFFF) + } + addDwarfAddrField(ctxt, s, v) +} + +// addDwarfAddrField adds a DWARF field in DWARF 64bits or 32bits. +func addDwarfAddrField(ctxt *Link, s *sym.Symbol, v uint64) { + if isDwarf64(ctxt) { + s.AddUint(ctxt.Arch, v) + } else { + s.AddUint32(ctxt.Arch, uint32(v)) + } +} + +// addDwarfAddrRef adds a DWARF pointer in DWARF 64bits or 32bits. +func addDwarfAddrRef(ctxt *Link, s *sym.Symbol, t *sym.Symbol) { + if isDwarf64(ctxt) { + adddwarfref(ctxt, s, t, 8) + } else { + adddwarfref(ctxt, s, t, 4) + } +} + // compilationUnit is per-compilation unit (equivalently, per-package) // debug-related data. type compilationUnit struct { @@ -1081,11 +1117,11 @@ func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) { // Write .debug_line Line Number Program Header (sec 6.2.4) // Fields marked with (*) must be changed for 64-bit dwarf unitLengthOffset := ls.Size - ls.AddUint32(ctxt.Arch, 0) // unit_length (*), filled in at end. 
+ createUnitLength(ctxt, ls, 0) // unit_length (*), filled in at end unitstart = ls.Size ls.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) -- version 3 is incompatible w/ XCode 9.0's dsymutil, latest supported on OSX 10.12 as of 2018-05 headerLengthOffset := ls.Size - ls.AddUint32(ctxt.Arch, 0) // header_length (*), filled in at end. + addDwarfAddrField(ctxt, ls, 0) // header_length (*), filled in at end headerstart = ls.Size // cpos == unitstart + 4 + 2 + 4 @@ -1238,8 +1274,16 @@ func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) { dwarf.Uleb128put(dwarfctxt, ls, 1) ls.AddUint8(dwarf.DW_LNE_end_sequence) - ls.SetUint32(ctxt.Arch, unitLengthOffset, uint32(ls.Size-unitstart)) - ls.SetUint32(ctxt.Arch, headerLengthOffset, uint32(headerend-headerstart)) + if ctxt.HeadType == objabi.Haix { + saveDwsectCUSize(".debug_line", unit.lib.String(), uint64(ls.Size-unitLengthOffset)) + } + if isDwarf64(ctxt) { + ls.SetUint(ctxt.Arch, unitLengthOffset+4, uint64(ls.Size-unitstart)) // +4 because of 0xFFFFFFFF + ls.SetUint(ctxt.Arch, headerLengthOffset, uint64(headerend-headerstart)) + } else { + ls.SetUint32(ctxt.Arch, unitLengthOffset, uint32(ls.Size-unitstart)) + ls.SetUint32(ctxt.Arch, headerLengthOffset, uint32(headerend-headerstart)) + } // Apply any R_DWARFFILEREF relocations, since we now know the // line table file indices for this compilation unit. Note that @@ -1329,8 +1373,8 @@ func writeframes(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { if haslinkregister(ctxt) { cieReserve = 32 } - fs.AddUint32(ctxt.Arch, cieReserve) // initial length, must be multiple of thearch.ptrsize - fs.AddUint32(ctxt.Arch, 0xffffffff) // cid. + createUnitLength(ctxt, fs, uint64(cieReserve)) // initial length, must be multiple of thearch.ptrsize + addDwarfAddrField(ctxt, fs, 0xffffffff) // cid. fs.AddUint8(3) // dwarf version (appendix F) fs.AddUint8(0) // augmentation "" dwarf.Uleb128put(dwarfctxt, fs, 1) // code_alignment_factor @@ -1418,9 +1462,9 @@ func writeframes(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { // ptrsize: address range fs.AddUint32(ctxt.Arch, uint32(4+2*ctxt.Arch.PtrSize+len(deltaBuf))) // length (excludes itself) if ctxt.LinkMode == LinkExternal { - adddwarfref(ctxt, fs, fs, 4) + addDwarfAddrRef(ctxt, fs, fs) } else { - fs.AddUint32(ctxt.Arch, 0) // CIE offset + addDwarfAddrField(ctxt, fs, 0) // CIE offset } fs.AddAddr(ctxt.Arch, s) fs.AddUintXX(ctxt.Arch, uint64(s.Size), ctxt.Arch.PtrSize) // address range @@ -1455,11 +1499,11 @@ func writeinfo(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit, abbrevs // Write .debug_info Compilation Unit Header (sec 7.5.1) // Fields marked with (*) must be changed for 64-bit dwarf // This must match COMPUNITHEADERSIZE above. - s.AddUint32(ctxt.Arch, 0) // unit_length (*), will be filled in later. - s.AddUint16(ctxt.Arch, 4) // dwarf version (appendix F) + createUnitLength(ctxt, s, 0) // unit_length (*), will be filled in later. + s.AddUint16(ctxt.Arch, 4) // dwarf version (appendix F) // debug_abbrev_offset (*) - adddwarfref(ctxt, s, abbrevsym, 4) + addDwarfAddrRef(ctxt, s, abbrevsym) s.AddUint8(uint8(ctxt.Arch.PtrSize)) // address_size @@ -1477,8 +1521,17 @@ func writeinfo(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit, abbrevs for _, child := range cu { cusize += child.Size } - cusize -= 4 // exclude the length field. - s.SetUint32(ctxt.Arch, 0, uint32(cusize)) + // Save size for AIX symbol table. 
+ if ctxt.HeadType == objabi.Haix { + saveDwsectCUSize(".debug_info", getPkgFromCUSym(s), uint64(cusize)) + } + if isDwarf64(ctxt) { + cusize -= 12 // exclude the length field. + s.SetUint(ctxt.Arch, 4, uint64(cusize)) // 4 because of 0XFFFFFFFF + } else { + cusize -= 4 // exclude the length field. + s.SetUint32(ctxt.Arch, 0, uint32(cusize)) + } // Leave a breadcrumb for writepub. This does not // appear in the DWARF output. newattr(compunit, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, cusize, 0) @@ -1519,10 +1572,10 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*s culength := uint32(getattr(compunit, dwarf.DW_AT_byte_size).Value) + 4 // Write .debug_pubnames/types Header (sec 6.1.1) - s.AddUint32(ctxt.Arch, 0) // unit_length (*), will be filled in later. - s.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) - adddwarfref(ctxt, s, dtolsym(compunit.Sym), 4) // debug_info_offset (of the Comp unit Header) - s.AddUint32(ctxt.Arch, culength) // debug_info_length + createUnitLength(ctxt, s, 0) // unit_length (*), will be filled in later. + s.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) + addDwarfAddrRef(ctxt, s, dtolsym(compunit.Sym)) // debug_info_offset (of the Comp unit Header) + addDwarfAddrField(ctxt, s, uint64(culength)) // debug_info_length for die := compunit.Child; die != nil; die = die.Link { if !ispub(die) { @@ -1533,19 +1586,31 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*s if die.Sym == nil { fmt.Println("Missing sym for ", name) } - adddwarfref(ctxt, s, dtolsym(die.Sym), 4) + addDwarfAddrRef(ctxt, s, dtolsym(die.Sym)) Addstring(s, name) } - s.AddUint32(ctxt.Arch, 0) + addDwarfAddrField(ctxt, s, 0) // Null offset - s.SetUint32(ctxt.Arch, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field. + // On AIX, save the current size of this compilation unit. + if ctxt.HeadType == objabi.Haix { + saveDwsectCUSize(sname, getPkgFromCUSym(dtolsym(compunit.Sym)), uint64(s.Size-sectionstart)) + } + if isDwarf64(ctxt) { + s.SetUint(ctxt.Arch, sectionstart+4, uint64(s.Size-sectionstart)-12) // exclude the length field. + } else { + s.SetUint32(ctxt.Arch, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field. + } } return syms } func writegdbscript(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { + // TODO (aix): make it available + if ctxt.HeadType == objabi.Haix { + return syms + } if ctxt.LinkMode == LinkExternal && ctxt.HeadType == objabi.Hwindows && ctxt.BuildMode == BuildModeCArchive { // gcc on Windows places .debug_gdb_scripts in the wrong location, which // causes the program not to run. See https://golang.org/issue/20183 @@ -2019,3 +2084,27 @@ func (v compilationUnitByStartPC) Less(i, j int) bool { return v[i].lib.Textp[0].Value < v[j].lib.Textp[0].Value } } + +// On AIX, the symbol table needs to know where are the compilation units parts +// for a specific package in each .dw section. +// dwsectCUSize map will save the size of a compilation unit for +// the corresponding .dw section. +// This size can later be retrieved with the index "sectionName.pkgName". +var dwsectCUSize map[string]uint64 + +// getDwsectCUSize retrieves the corresponding package size inside the current section. 
+func getDwsectCUSize(sname string, pkgname string) uint64 { + return dwsectCUSize[sname+"."+pkgname] +} + +func saveDwsectCUSize(sname string, pkgname string, size uint64) { + dwsectCUSize[sname+"."+pkgname] = size +} + +// getPkgFromCUSym returns the package name for the compilation unit +// represented by s. +// The prefix dwarf.InfoPrefix+".pkg." needs to be removed in order to get +// the package name. +func getPkgFromCUSym(s *sym.Symbol) string { + return strings.TrimPrefix(s.Name, dwarf.InfoPrefix+".pkg.") +} diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go index f2dd799922e50..d6c6b53a44c52 100644 --- a/src/cmd/link/internal/ld/go.go +++ b/src/cmd/link/internal/ld/go.go @@ -163,6 +163,10 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { } havedynamic = 1 } + if ctxt.HeadType == objabi.Haix { + xcoffadddynimpsym(ctxt, s) + } + continue case "cgo_import_static": @@ -317,7 +321,8 @@ func fieldtrack(ctxt *Link) { } func (ctxt *Link) addexport() { - if ctxt.HeadType == objabi.Hdarwin { + // TODO(aix) + if ctxt.HeadType == objabi.Hdarwin || ctxt.HeadType == objabi.Haix { return } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index c304b858df4b3..42edd09510223 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -39,6 +39,7 @@ import ( "cmd/link/internal/loadelf" "cmd/link/internal/loadmacho" "cmd/link/internal/loadpe" + "cmd/link/internal/loadxcoff" "cmd/link/internal/objfile" "cmd/link/internal/sym" "crypto/sha1" @@ -1338,9 +1339,24 @@ func (ctxt *Link) hostlink() { ctxt.Logf("\n") } - if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil { + out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput() + if err != nil { Exitf("running %s failed: %v\n%s", argv[0], err, out) - } else if len(out) > 0 { + } + + // Filter out useless linker warnings caused by bugs outside Go. + // See also cmd/go/internal/work/exec.go's gccld method. + var save [][]byte + for _, line := range bytes.SplitAfter(out, []byte("\n")) { + // golang.org/issue/26073 - Apple Xcode bug + if bytes.Contains(line, []byte("ld: warning: text-based stub file")) { + continue + } + save = append(save, line) + } + out = bytes.Join(save, nil) + + if len(out) > 0 { // always print external output even if the command is successful, so that we don't // swallow linker warnings (see https://golang.org/issue/17935). ctxt.Logf("%s", out) @@ -1518,6 +1534,18 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, return ldhostobj(ldpe, ctxt.HeadType, f, pkg, length, pn, file) } + if c1 == 0x01 && (c2 == 0xD7 || c2 == 0xF7) { + ldxcoff := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { + textp, err := loadxcoff.Load(ctxt.Arch, ctxt.Syms, f, pkg, length, pn) + if err != nil { + Errorf(nil, "%v", err) + return + } + ctxt.Textp = append(ctxt.Textp, textp...) 
+ } + return ldhostobj(ldxcoff, ctxt.HeadType, f, pkg, length, pn, file) + } + /* check the header */ line, err := f.ReadString('\n') if err != nil { @@ -2244,7 +2272,7 @@ func Entryvalue(ctxt *Link) int64 { if s.Type == 0 { return *FlagTextAddr } - if s.Type != sym.STEXT { + if ctxt.HeadType != objabi.Haix && s.Type != sym.STEXT { Errorf(s, "entry not text") } return s.Value diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 2c5152f2e3d63..b87ee8094fb6a 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -224,6 +224,10 @@ func Main(arch *sys.Arch, theArch Arch) { ctxt.dope() ctxt.windynrelocsyms() } + if ctxt.HeadType == objabi.Haix { + ctxt.doxcoff() + } + ctxt.addexport() thearch.Gentext(ctxt) // trampolines, call stubs, etc. ctxt.textbuildid() diff --git a/src/cmd/link/internal/ld/sym.go b/src/cmd/link/internal/ld/sym.go index 3aa90c17dc8a1..a487b5e5f6ca1 100644 --- a/src/cmd/link/internal/ld/sym.go +++ b/src/cmd/link/internal/ld/sym.go @@ -66,7 +66,7 @@ func (ctxt *Link) computeTLSOffset() { default: log.Fatalf("unknown thread-local storage offset for %v", ctxt.HeadType) - case objabi.Hplan9, objabi.Hwindows, objabi.Hjs: + case objabi.Hplan9, objabi.Hwindows, objabi.Hjs, objabi.Haix: break /* diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go new file mode 100644 index 0000000000000..f06b498594493 --- /dev/null +++ b/src/cmd/link/internal/ld/xcoff.go @@ -0,0 +1,1232 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "bytes" + "cmd/internal/objabi" + "cmd/link/internal/sym" + "encoding/binary" + "strings" +) + +// This file handles all algorithms related to XCOFF files generation. +// Most of them are adaptations of the ones in cmd/link/internal/pe.go +// as PE and XCOFF are based on COFF files. +// XCOFF files generated are 64 bits. + +// Total amount of space to reserve at the start of the file +// for FileHeader, Auxiliary Header, and Section Headers. +// May waste some. +// Based on 24(fhdr) + 120(ahdr) + 23(max sections number) * 72(scnhdr) +const ( + XCOFFHDRRESERVE = FILHSZ_64 + AOUTHSZ_EXEC64 + SCNHSZ_64*23 +) + +const ( + XCOFFSECTALIGN int64 = 32 // base on dump -o + XCOFFBASE = 0x100000000 // Address on 64 bits must start at this value. +) + +// File Header +type XcoffFileHdr64 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint64 // Byte offset to symbol table start + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags + Fnsyms int32 // Number of entries in symbol table +} + +const ( + U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF +) + +// Flags that describe the type of the object file. 
+const ( + F_RELFLG = 0x0001 + F_EXEC = 0x0002 + F_LNNO = 0x0004 + F_FDPR_PROF = 0x0010 + F_FDPR_OPTI = 0x0020 + F_DSA = 0x0040 + F_VARPG = 0x0100 + F_DYNLOAD = 0x1000 + F_SHROBJ = 0x2000 + F_LOADONLY = 0x4000 +) + +// Auxiliary Header +type XcoffAoutHdr64 struct { + Omagic int16 // Flags - Ignored If Vstamp Is 1 + Ovstamp int16 // Version + Odebugger uint32 // Reserved For Debugger + Otextstart uint64 // Virtual Address Of Text + Odatastart uint64 // Virtual Address Of Data + Otoc uint64 // Toc Address + Osnentry int16 // Section Number For Entry Point + Osntext int16 // Section Number For Text + Osndata int16 // Section Number For Data + Osntoc int16 // Section Number For Toc + Osnloader int16 // Section Number For Loader + Osnbss int16 // Section Number For Bss + Oalgntext int16 // Max Text Alignment + Oalgndata int16 // Max Data Alignment + Omodtype [2]byte // Module Type Field + Ocpuflag uint8 // Bit Flags - Cputypes Of Objects + Ocputype uint8 // Reserved for CPU type + Otextpsize uint8 // Requested text page size + Odatapsize uint8 // Requested data page size + Ostackpsize uint8 // Requested stack page size + Oflags uint8 // Flags And TLS Alignment + Otsize uint64 // Text Size In Bytes + Odsize uint64 // Data Size In Bytes + Obsize uint64 // Bss Size In Bytes + Oentry uint64 // Entry Point Address + Omaxstack uint64 // Max Stack Size Allowed + Omaxdata uint64 // Max Data Size Allowed + Osntdata int16 // Section Number For Tdata Section + Osntbss int16 // Section Number For Tbss Section + Ox64flags uint16 // Additional Flags For 64-Bit Objects + Oresv3a int16 // Reserved + Oresv3 [2]int32 // Reserved + +} + +// Section Header +type XcoffScnHdr64 struct { + Sname [8]byte // Section Name + Spaddr uint64 // Physical Address + Svaddr uint64 // Virtual Address + Ssize uint64 // Section Size + Sscnptr uint64 // File Offset To Raw Data + Srelptr uint64 // File Offset To Relocation + Slnnoptr uint64 // File Offset To Line Numbers + Snreloc uint32 // Number Of Relocation Entries + Snlnno uint32 // Number Of Line Number Entries + Sflags uint32 // flags +} + +// Flags defining the section type. 
+const ( + STYP_DWARF = 0x0010 + STYP_TEXT = 0x0020 + STYP_DATA = 0x0040 + STYP_BSS = 0x0080 + STYP_EXCEPT = 0x0100 + STYP_INFO = 0x0200 + STYP_TDATA = 0x0400 + STYP_TBSS = 0x0800 + STYP_LOADER = 0x1000 + STYP_DEBUG = 0x2000 + STYP_TYPCHK = 0x4000 + STYP_OVRFLO = 0x8000 +) +const ( + SSUBTYP_DWINFO = 0x10000 // DWARF info section + SSUBTYP_DWLINE = 0x20000 // DWARF line-number section + SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section + SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section + SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section + SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section + SSUBTYP_DWSTR = 0x70000 // DWARF strings section + SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section + SSUBTYP_DWLOC = 0x90000 // DWARF location lists section + SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section + SSUBTYP_DWMAC = 0xB0000 // DWARF macros section +) + +// Headers size +const ( + FILHSZ_32 = 20 + FILHSZ_64 = 24 + AOUTHSZ_EXEC32 = 72 + AOUTHSZ_EXEC64 = 120 + SCNHSZ_32 = 40 + SCNHSZ_64 = 72 + LDHDRSZ_32 = 32 + LDHDRSZ_64 = 56 + LDSYMSZ_64 = 24 +) + +// Symbol Table Entry +type XcoffSymEnt64 struct { + Nvalue uint64 // Symbol value + Noffset uint32 // Offset of the name in string table or .debug section + Nscnum int16 // Section number of symbol + Ntype uint16 // Basic and derived type specification + Nsclass uint8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +const SYMESZ = 18 + +const ( + // Nscnum + N_DEBUG = -2 + N_ABS = -1 + N_UNDEF = 0 + + //Ntype + SYM_V_INTERNAL = 0x1000 + SYM_V_HIDDEN = 0x2000 + SYM_V_PROTECTED = 0x3000 + SYM_V_EXPORTED = 0x4000 + SYM_TYPE_FUNC = 0x0020 // is function +) + +// Storage Class. +const ( + C_NULL = 0 // Symbol table entry marked for deletion + C_EXT = 2 // External symbol + C_STAT = 3 // Static symbol + C_BLOCK = 100 // Beginning or end of inner block + C_FCN = 101 // Beginning or end of function + C_FILE = 103 // Source file name and compiler information + C_HIDEXT = 107 // Unnamed external symbol + C_BINCL = 108 // Beginning of include file + C_EINCL = 109 // End of include file + C_WEAKEXT = 111 // Weak external symbol + C_DWARF = 112 // DWARF symbol + C_GSYM = 128 // Global variable + C_LSYM = 129 // Automatic variable allocated on stack + C_PSYM = 130 // Argument to subroutine allocated on stack + C_RSYM = 131 // Register variable + C_RPSYM = 132 // Argument to function or procedure stored in register + C_STSYM = 133 // Statically allocated symbol + C_BCOMM = 135 // Beginning of common block + C_ECOML = 136 // Local member of common block + C_ECOMM = 137 // End of common block + C_DECL = 140 // Declaration of object + C_ENTRY = 141 // Alternate entry + C_FUN = 142 // Function or procedure + C_BSTAT = 143 // Beginning of static block + C_ESTAT = 144 // End of static block + C_GTLS = 145 // Global thread-local variable + C_STTLS = 146 // Static thread-local variable +) + +// File Auxiliary Entry +type XcoffAuxFile64 struct { + Xfname [8]byte // Name or offset inside string table + Xftype uint8 // Source file string type + Xauxtype uint8 // Type of auxiliary entry +} + +// Function Auxiliary Entry +type XcoffAuxFcn64 struct { + Xlnnoptr uint64 // File pointer to line number + Xfsize uint32 // Size of function in bytes + Xendndx uint32 // Symbol table index of next entry + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// csect Auxiliary Entry. 
+type XcoffAuxCSect64 struct { + Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xscnlenhi uint32 // Upper 4 bytes of length or symbol table index + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// Auxiliary type +const ( + _AUX_EXCEPT = 255 + _AUX_FCN = 254 + _AUX_SYM = 253 + _AUX_FILE = 252 + _AUX_CSECT = 251 + _AUX_SECT = 250 +) + +// Xftype field +const ( + XFT_FN = 0 // Source File Name + XFT_CT = 1 // Compile Time Stamp + XFT_CV = 2 // Compiler Version Number + XFT_CD = 128 // Compiler Defined Information/ + +) + +// Symbol type field. +const ( + XTY_ER = 0 // External reference + XTY_SD = 1 // Section definition + XTY_LD = 2 // Label definition + XTY_CM = 3 // Common csect definition + XTY_WK = 0x8 // Weak symbol + XTY_EXP = 0x10 // Exported symbol + XTY_ENT = 0x20 // Entry point symbol + XTY_IMP = 0x40 // Imported symbol +) + +// Storage-mapping class. +const ( + XMC_PR = 0 // Program code + XMC_RO = 1 // Read-only constant + XMC_DB = 2 // Debug dictionary table + XMC_TC = 3 // TOC entry + XMC_UA = 4 // Unclassified + XMC_RW = 5 // Read/Write data + XMC_GL = 6 // Global linkage + XMC_XO = 7 // Extended operation + XMC_SV = 8 // 32-bit supervisor call descriptor + XMC_BS = 9 // BSS class + XMC_DS = 10 // Function descriptor + XMC_UC = 11 // Unnamed FORTRAN common + XMC_TC0 = 15 // TOC anchor + XMC_TD = 16 // Scalar data entry in the TOC + XMC_SV64 = 17 // 64-bit supervisor call descriptor + XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit + XMC_TL = 20 // Read/Write thread-local data + XMC_UL = 21 // Read/Write thread-local data (.tbss) + XMC_TE = 22 // TOC entry +) + +// Loader Header +type XcoffLdHdr64 struct { + Lversion int32 // Loader section version number + Lnsyms int32 // Number of symbol table entries + Lnreloc int32 // Number of relocation table entries + Listlen uint32 // Length of import file ID string table + Lnimpid int32 // Number of import file IDs + Lstlen uint32 // Length of string table + Limpoff uint64 // Offset to start of import file IDs + Lstoff uint64 // Offset to start of string table + Lsymoff uint64 // Offset to start of symbol table + Lrldoff uint64 // Offset to start of relocation entries +} + +// Loader Symbol +type XcoffLdSym64 struct { + Lvalue uint64 // Address field + Loffset uint32 // Byte offset into string table of symbol name + Lscnum int16 // Section number containing symbol + Lsmtype int8 // Symbol type, export, import flags + Lsmclas int8 // Symbol storage class + Lifile int32 // Import file ID; ordinal of import file IDs + Lparm uint32 // Parameter type-check field +} + +type XcoffLdImportFile64 struct { + Limpidpath string + Limpidbase string + Limpidmem string +} + +type XcoffLdRel64 struct { + Lvaddr uint64 // Address Field + Lrtype uint16 // Relocation Size and Type + Lrsecnm int16 // Section Number being relocated + Lsymndx int32 // Loader-Section symbol table index +} + +const ( + XCOFF_R_POS = 0x00 // A(sym) Positive Relocation +) + +type XcoffLdStr64 struct { + size uint16 + name string +} + +// xcoffFile is used to build XCOFF file. 
+type xcoffFile struct { + xfhdr XcoffFileHdr64 + xahdr XcoffAoutHdr64 + sections []*XcoffScnHdr64 + stringTable xcoffStringTable + textSect *XcoffScnHdr64 + dataSect *XcoffScnHdr64 + bssSect *XcoffScnHdr64 + loaderSect *XcoffScnHdr64 + symtabOffset int64 // offset to the start of symbol table + symbolCount uint32 // number of symbol table records written + dynLibraries map[string]int // Dynamic libraries in .loader section. The integer represents its import file number (- 1) + dynSymbols []*sym.Symbol // Dynamic symbols in .loader section + loaderReloc []*XcoffLdRel64 // Reloc that must be made inside loader +} + +// Those values will latter be computed in XcoffInit +var ( + XCOFFFILEHDR int + XCOFFSECTHDR int +) + +// Var used by XCOFF Generation algorithms +var ( + xfile xcoffFile + loaderOff uint64 + loaderSize uint64 +) + +// xcoffStringTable is a XCOFF string table. +type xcoffStringTable struct { + strings []string + stringsLen int +} + +// size returns size of string table t. +func (t *xcoffStringTable) size() int { + // string table starts with 4-byte length at the beginning + return t.stringsLen + 4 +} + +// add adds string str to string table t. +func (t *xcoffStringTable) add(str string) int { + off := t.size() + t.strings = append(t.strings, str) + t.stringsLen += len(str) + 1 // each string will have 0 appended to it + return off +} + +// write writes string table t into the output file. +func (t *xcoffStringTable) write(out *OutBuf) { + out.Write32(uint32(t.size())) + for _, s := range t.strings { + out.WriteString(s) + out.Write8(0) + } +} + +// write writes XCOFF section sect into the output file. +func (sect *XcoffScnHdr64) write(ctxt *Link) { + binary.Write(ctxt.Out, binary.BigEndian, sect) + ctxt.Out.Write32(0) // Add 4 empty bytes at the end to match alignment +} + +// addSection adds section to the XCOFF file f. +func (f *xcoffFile) addSection(s *sym.Section) *XcoffScnHdr64 { + sect := &XcoffScnHdr64{ + Spaddr: s.Vaddr, + Svaddr: s.Vaddr, + Ssize: s.Length, + Sscnptr: s.Seg.Fileoff + s.Vaddr - s.Seg.Vaddr, + } + copy(sect.Sname[:], s.Name) // copy string to [8]byte ( pb if len(name) > 8 ) + f.sections = append(f.sections, sect) + return sect +} + +// addLoaderSection adds the loader section to the XCOFF file f. +func (f *xcoffFile) addLoaderSection(size uint64, off uint64) *XcoffScnHdr64 { + sect := &XcoffScnHdr64{ + Ssize: size, + Sscnptr: off, + Sflags: STYP_LOADER, + } + copy(sect.Sname[:], ".loader") // copy string to [8]byte ( pb if len(name) > 8 + f.xahdr.Osnloader = int16(len(f.sections) + 1) + f.sections = append(f.sections, sect) + f.loaderSect = sect + return sect +} + +// addDwarfSection adds a dwarf section to the XCOFF file f. +// This function is similar to addSection, but Dwarf section names +// must be modified to conventional names and they are various subtypes. +func (f *xcoffFile) addDwarfSection(s *sym.Section) *XcoffScnHdr64 { + sect := &XcoffScnHdr64{ + Ssize: s.Length, + Sscnptr: s.Seg.Fileoff + s.Vaddr - s.Seg.Vaddr, + Sflags: STYP_DWARF, + } + newName, subtype := xcoffGetDwarfSubtype(s.Name) + copy(sect.Sname[:], newName) + sect.Sflags |= subtype + f.sections = append(f.sections, sect) + return sect +} + +// xcoffGetDwarfSubtype returns the XCOFF name of the DWARF section str +// and its subtype constant. 
+func xcoffGetDwarfSubtype(str string) (string, uint32) { + switch str { + default: + Exitf("unknown DWARF section name for XCOFF: %s", str) + case ".debug_abbrev": + return ".dwabrev", SSUBTYP_DWABREV + case ".debug_info": + return ".dwinfo", SSUBTYP_DWINFO + case ".debug_frame": + return ".dwframe", SSUBTYP_DWFRAME + case ".debug_line": + return ".dwline", SSUBTYP_DWLINE + case ".debug_loc": + return ".dwloc", SSUBTYP_DWLOC + case ".debug_pubnames": + return ".dwpbnms", SSUBTYP_DWPBNMS + case ".debug_pubtypes": + return ".dwpbtyp", SSUBTYP_DWPBTYP + case ".debug_ranges": + return ".dwrnge", SSUBTYP_DWRNGES + } + // never used + return "", 0 +} + +// Xcoffinit initialised some internal value and setups +// already known header information +func Xcoffinit(ctxt *Link) { + xfile.dynLibraries = make(map[string]int) + + XCOFFFILEHDR = int(Rnd(XCOFFHDRRESERVE, XCOFFSECTALIGN)) + XCOFFSECTHDR = int(Rnd(int64(XCOFFFILEHDR), XCOFFSECTALIGN)) + + HEADR = int32(XCOFFFILEHDR) + if *FlagTextAddr != -1 { + Errorf(nil, "-T not available on AIX") + } + *FlagTextAddr = XCOFFBASE + int64(XCOFFSECTHDR) + *FlagDataAddr = 0 + if *FlagRound != -1 { + Errorf(nil, "-R not available on AIX") + } + *FlagRound = int(XCOFFSECTALIGN) + +} + +// SYMBOL TABLE + +// type records C_FILE information needed for genasmsym in XCOFF. +type xcoffSymSrcFile struct { + name string + fileSymNb uint32 // Symbol number of this C_FILE + csectSymNb uint64 // Symbol number for the current .csect + csectSize int64 +} + +var ( + currDwscnoff = make(map[string]uint64) // Needed to create C_DWARF symbols + currSymSrcFile xcoffSymSrcFile +) + +// writeSymbol writes a symbol or an auxiliary symbol entry on ctxt.out. +func (f *xcoffFile) writeSymbol(out *OutBuf, byteOrder binary.ByteOrder, sym interface{}) { + binary.Write(out, byteOrder, sym) + f.symbolCount++ +} + +// Write symbols needed when a new file appared : +// - a C_FILE with one auxiliary entry for its name +// - C_DWARF symbols to provide debug information +// - a C_HIDEXT which will be a csect containing all of its functions +// It needs several parameters to create .csect symbols such as its entry point and its section number. +// +// Currently, a new file is in fact a new package. It seems to be OK, but it might change +// in the future. +func (f *xcoffFile) writeSymbolNewFile(ctxt *Link, name string, firstEntry uint64, extnum int16) { + /* C_FILE */ + s := &XcoffSymEnt64{ + Noffset: uint32(f.stringTable.add(".file")), + Nsclass: C_FILE, + Nscnum: N_DEBUG, + Ntype: 0, // Go isn't inside predefined language. + Nnumaux: 1, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + + // Auxiliary entry for file name. + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(f.stringTable.add(name))) + ctxt.Out.Write32(0) // 6 bytes empty + ctxt.Out.Write16(0) + ctxt.Out.Write8(XFT_FN) + ctxt.Out.Write16(0) // 2 bytes empty + ctxt.Out.Write8(_AUX_FILE) + f.symbolCount++ + + /* Dwarf */ + for _, sect := range Segdwarf.Sections { + // Find the size of this corresponding package DWARF compilation unit. + // This size is set during DWARF generation (see dwarf.go). 
+ dwsize := getDwsectCUSize(sect.Name, name) + // .debug_abbrev is commun to all packages and not found with the previous function + if sect.Name == ".debug_abbrev" { + s := ctxt.Syms.Lookup(sect.Name, 0) + dwsize = uint64(s.Size) + } + + // get XCOFF name + name, _ := xcoffGetDwarfSubtype(sect.Name) + s := &XcoffSymEnt64{ + Nvalue: currDwscnoff[sect.Name], + Noffset: uint32(f.stringTable.add(name)), + Nsclass: C_DWARF, + Nscnum: sect.Extnum, + Nnumaux: 1, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + + // update the DWARF section offset in this file + if sect.Name != ".debug_abbrev" { + currDwscnoff[sect.Name] += dwsize + } + + // Auxiliary dwarf section + ctxt.Out.Write64(dwsize) // section length + ctxt.Out.Write64(0) // nreloc + ctxt.Out.Write8(0) // pad + ctxt.Out.Write8(_AUX_SECT) + f.symbolCount++ + } + + /* .csect */ + // Check if extnum is in text. + // This is temporary and only here to check if this algorithm is correct. + if extnum != 1 { + Exitf("XCOFF symtab: A new file was detected with its first symbol not in .text") + } + + currSymSrcFile.csectSymNb = uint64(f.symbolCount) + currSymSrcFile.csectSize = 0 + + // No offset because no name + s = &XcoffSymEnt64{ + Nvalue: firstEntry, + Nscnum: extnum, + Nsclass: C_HIDEXT, + Ntype: 0, // check visibility ? + Nnumaux: 1, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + + aux := &XcoffAuxCSect64{ + Xsmclas: XMC_PR, + Xsmtyp: XTY_SD | 5<<3, // align = 5 + Xauxtype: _AUX_CSECT, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, aux) + +} + +// Update values for the previous package. +// - Svalue of the C_FILE symbol: if it is the last one, this Svalue must be -1 +// - Xsclen of the csect symbol. +func (f *xcoffFile) updatePreviousFile(ctxt *Link, last bool) { + // first file + if currSymSrcFile.fileSymNb == 0 { + return + } + + prevOff := f.symtabOffset + int64(currSymSrcFile.fileSymNb*SYMESZ) + currOff := ctxt.Out.Offset() + + // Update C_FILE + ctxt.Out.SeekSet(prevOff) + if last { + ctxt.Out.Write64(0xFFFFFFFFFFFFFFFF) + } else { + ctxt.Out.Write64(uint64(f.symbolCount)) + } + + // update csect scnlen in this auxiliary entry + prevOff = f.symtabOffset + int64((currSymSrcFile.csectSymNb+1)*SYMESZ) + ctxt.Out.SeekSet(prevOff) + ctxt.Out.Write32(uint32(currSymSrcFile.csectSize & 0xFFFFFFFF)) + prevOff += 12 + ctxt.Out.SeekSet(prevOff) + ctxt.Out.Write32(uint32(currSymSrcFile.csectSize >> 32)) + + ctxt.Out.SeekSet(currOff) + +} + +// Write symbol representing a .text function. +// The symbol table is split with C_FILE corresponding to each package +// and not to each source file as it should be. +func (f *xcoffFile) writeSymbolFunc(ctxt *Link, x *sym.Symbol) []interface{} { + // New XCOFF symbols which will be written. + syms := []interface{}{} + + // Check if a new file is detected. + if x.File == "" { // Undefined global symbol + // If this happens, the algorithme must be redone. + if currSymSrcFile.name != "" { + Exitf("undefined global symbol found inside another file") + } + } else { + // Current file has changed. New C_FILE, C_DWARF, etc must be generated. 
+ if currSymSrcFile.name != x.File { + // update previous file values + xfile.updatePreviousFile(ctxt, false) + currSymSrcFile.name = x.File + currSymSrcFile.fileSymNb = f.symbolCount + f.writeSymbolNewFile(ctxt, x.File, uint64(x.Value), x.Sect.Extnum) + } + } + + s := &XcoffSymEnt64{ + Nsclass: C_EXT, + Noffset: uint32(xfile.stringTable.add(x.Name)), + Nvalue: uint64(x.Value), + Nscnum: x.Sect.Extnum, + Ntype: SYM_TYPE_FUNC, + Nnumaux: 2, + } + + if x.Version != 0 || x.Attr.VisibilityHidden() || x.Attr.Local() { + s.Nsclass = C_HIDEXT + } + + syms = append(syms, s) + + // Update current csect size + currSymSrcFile.csectSize += x.Size + + // create auxiliary entries + a2 := &XcoffAuxFcn64{ + Xfsize: uint32(x.Size), + Xlnnoptr: 0, // TODO + Xendndx: xfile.symbolCount + 3, // this symbol + 2 aux entries + Xauxtype: _AUX_FCN, + } + syms = append(syms, a2) + + a4 := &XcoffAuxCSect64{ + Xscnlenlo: uint32(currSymSrcFile.csectSymNb & 0xFFFFFFFF), + Xscnlenhi: uint32(currSymSrcFile.csectSymNb >> 32), + Xsmclas: XMC_PR, // Program Code + Xsmtyp: XTY_LD, // label definition (based on C) + Xauxtype: _AUX_CSECT, + } + syms = append(syms, a4) + return syms +} + +// put function used by genasmsym to write symbol table +func putaixsym(ctxt *Link, x *sym.Symbol, str string, t SymbolType, addr int64, go_ *sym.Symbol) { + + // All XCOFF symbols generated by this GO symbols + // Can be a symbol entry or a auxiliary entry + syms := []interface{}{} + + switch t { + default: + return + + case TextSym: + if x.FuncInfo != nil { + // Function within a file + syms = xfile.writeSymbolFunc(ctxt, x) + } else { + // Only runtime.text and runtime.etext come through this way + if x.Name != "runtime.text" && x.Name != "runtime.etext" && x.Name != "go.buildid" { + Exitf("putaixsym: unknown text symbol %s", x.Name) + } + s := &XcoffSymEnt64{ + Nsclass: C_HIDEXT, + Noffset: uint32(xfile.stringTable.add(str)), + Nvalue: uint64(x.Value), + Nscnum: x.Sect.Extnum, + Ntype: SYM_TYPE_FUNC, + Nnumaux: 1, + } + syms = append(syms, s) + + size := uint64(x.Size) + a4 := &XcoffAuxCSect64{ + Xauxtype: _AUX_CSECT, + Xscnlenlo: uint32(size & 0xFFFFFFFF), + Xscnlenhi: uint32(size >> 32), + Xsmclas: XMC_PR, + Xsmtyp: XTY_SD, + } + syms = append(syms, a4) + + } + + case DataSym, BSSSym: + s := &XcoffSymEnt64{ + Nsclass: C_EXT, + Noffset: uint32(xfile.stringTable.add(str)), + Nvalue: uint64(x.Value), + Nscnum: x.Sect.Extnum, + Nnumaux: 1, + } + + if x.Version != 0 || x.Attr.VisibilityHidden() || x.Attr.Local() { + // There is more symbols in the case of a global data + // which are related to the assembly generated + // to access such symbols. + // But as Golang as its own way to check if a symbol is + // global or local (the capital letter), we don't need to + // implement them yet. + s.Nsclass = C_HIDEXT + } + + syms = append(syms, s) + + // Create auxiliary entry + + // Normally, size should be the size of csect containing all + // the data and bss symbols of one file/package. + // However, it's easier to just have a csect for each symbol. 
+ // It might change + size := uint64(x.Size) + a4 := &XcoffAuxCSect64{ + Xauxtype: _AUX_CSECT, + Xscnlenlo: uint32(size & 0xFFFFFFFF), + Xscnlenhi: uint32(size >> 32), + } + // Read only data + if x.Type >= sym.STYPE && x.Type <= sym.SPCLNTAB { + a4.Xsmclas = XMC_RO + } else { + a4.Xsmclas = XMC_RW + } + if t == DataSym { + a4.Xsmtyp |= XTY_SD + } else { + a4.Xsmtyp |= XTY_CM + } + + syms = append(syms, a4) + + } + for _, s := range syms { + xfile.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + } +} + +// Generate XCOFF Symbol table and XCOFF String table +func Asmaixsym(ctxt *Link) { + // write symbol table + xfile.symtabOffset = ctxt.Out.Offset() + genasmsym(ctxt, putaixsym) + + // update last file Svalue + xfile.updatePreviousFile(ctxt, true) + + // write string table + xfile.stringTable.write(ctxt.Out) +} + +// xcoffadddynimpsym adds a dynamic symbol to a XCOFF file +func xcoffadddynimpsym(ctxt *Link, s *sym.Symbol) { + xfile.adddynimpsym(ctxt, s) +} + +// Add a new imported symbol and a new library if needed. +// Currently, dynamic symbols are considered as .data symbols which will receive +// their value by the loader. Their relocation is created during the creation +// of the .loader section, because it needs its symbol index. +// However, there is no writing protection on those symbols and +// it might need to be added. +// TODO(aix): Add writing protection. +// TODO(aix): Handles dynamic symbols without library. +func (f *xcoffFile) adddynimpsym(ctxt *Link, s *sym.Symbol) { + // Check that library name is given. + // Pattern is already checked when compiling. + if s.Dynimplib() == "" { + Errorf(s, "imported symbol must have a given library") + } + + for _, sf := range f.dynSymbols { + if sf == s { + return + } + } + + f.dynSymbols = append(f.dynSymbols, s) + s.Type = sym.SXCOFFTOC + // Function descriptor value + s.AddUint64(ctxt.Arch, 0) + + if _, ok := f.dynLibraries[s.Dynimplib()]; !ok { + f.dynLibraries[s.Dynimplib()] = len(f.dynLibraries) + } +} + +// Add a relocation to .loader relocation section +func xcoffaddloaderreloc(ctxt *Link, s *sym.Symbol, r *sym.Reloc) { + if s.Type <= sym.SPCLNTAB && r.Sym.Type >= sym.SELFSECT && r.Sym.Type <= sym.SXREF { + Errorf(s, "cannot have a relocation in a text section with a data symbol: %s ", r.Sym.Name) + } + + ldr := &XcoffLdRel64{ + Lvaddr: uint64(s.Value + int64(r.Off)), + Lrsecnm: s.Sect.Extnum, + } + + switch r.Type { + case objabi.R_ADDR: + // Relocation of a .data symbol + ldr.Lrtype = 0x3F<<8 + XCOFF_R_POS + ldr.Lsymndx = 1 // .data + default: + Errorf(s, "unexpected .loader relocation to symbol: %s (type: %s)", r.Sym.Name, r.Type.String()) + } + + xfile.loaderReloc = append(xfile.loaderReloc, ldr) + +} + +func (ctxt *Link) doxcoff() { + // Initial map used to store compilation unit size for each DWARF section (see dwarf.go). + dwsectCUSize = make(map[string]uint64) + + // TOC + toc := ctxt.Syms.Lookup("TOC", 0) + toc.Type = sym.SXCOFFTOC + toc.Attr |= sym.AttrReachable +} + +// Loader section +// Currently, this section is created from scratch when assembling the XCOFF file +// according to information retrieved in xfile object. 
+ +// Create loader section and returns its size +func Loaderblk(ctxt *Link, off uint64) uint64 { + xfile.writeLdrScn(ctxt, off) + return loaderSize +} + +func (f *xcoffFile) writeLdrScn(ctxt *Link, globalOff uint64) { + var symtab []*XcoffLdSym64 + var strtab []*XcoffLdStr64 + var importtab []*XcoffLdImportFile64 + var reloctab []*XcoffLdRel64 + var dynimpreloc []*XcoffLdRel64 + + // As the string table is updated in any loader subsection, + // its length must be computed at the same time. + stlen := uint32(0) + + // Loader Header + hdr := &XcoffLdHdr64{ + Lversion: 2, + Lsymoff: LDHDRSZ_64, + } + + /* Symbol table */ + // Entry point symbol + ep := ctxt.Syms.ROLookup(*flagEntrySymbol, 0) + if !ep.Attr.Reachable() { + Exitf("wrong entry point") + } + lds := &XcoffLdSym64{ + Lvalue: uint64(ep.Value), + Loffset: uint32(stlen + 2), // +2 because it must have the first byte of the symbol not its size field + Lscnum: ep.Sect.Extnum, + Lsmtype: XTY_ENT | XTY_SD, + Lsmclas: XMC_DS, + Lifile: 0, + Lparm: 0, + } + ldstr := &XcoffLdStr64{ + size: uint16(len(ep.String()) + 1), // + null terminator + name: ep.String(), + } + stlen += uint32(2 + ldstr.size) // 2 = sizeof ldstr.size + symtab = append(symtab, lds) + strtab = append(strtab, ldstr) + + nbldsym := int32(4) + + // dynamic import + for _, s := range f.dynSymbols { + lds = &XcoffLdSym64{ + Loffset: uint32(stlen + 2), + Lsmtype: XTY_IMP, + Lsmclas: XMC_DS, + Lifile: int32(f.dynLibraries[s.Dynimplib()] + 1), + } + ldstr := &XcoffLdStr64{ + size: uint16(len(s.Extname()) + 1), // + null terminator + name: s.Extname(), + } + stlen += uint32(2 + ldstr.size) // 2 = sizeof ldstr.size + symtab = append(symtab, lds) + strtab = append(strtab, ldstr) + + // Create relocation entry at the same moment to get symndx + ldr := &XcoffLdRel64{ + Lvaddr: uint64(s.Value), + Lrtype: 0x3F00, + Lrsecnm: s.Sect.Extnum, + Lsymndx: int32(nbldsym), + } + dynimpreloc = append(dynimpreloc, ldr) + nbldsym++ + + } + + hdr.Lnsyms = int32(len(symtab)) + hdr.Lrldoff = hdr.Lsymoff + uint64(24*hdr.Lnsyms) // 24 = sizeof one symbol + off := hdr.Lrldoff // current offset is the same of reloc offset + + /* Reloc */ + ldr := &XcoffLdRel64{ + Lvaddr: uint64(ep.Value), + Lrtype: 0x3F00, + Lrsecnm: ep.Sect.Extnum, + Lsymndx: 0, + } + off += 16 + reloctab = append(reloctab, ldr) + + off += uint64(16 * len(f.loaderReloc)) + reloctab = append(reloctab, (f.loaderReloc)...) + + off += uint64(16 * len(dynimpreloc)) + reloctab = append(reloctab, dynimpreloc...) 
+ + hdr.Lnreloc = int32(len(reloctab)) + hdr.Limpoff = off + + /* Import */ + // Default import: /usr/lib:/lib + ldimpf := &XcoffLdImportFile64{ + Limpidpath: "/usr/lib:/lib", + } + off += uint64(len(ldimpf.Limpidpath) + len(ldimpf.Limpidbase) + len(ldimpf.Limpidmem) + 3) // + null delimiter + importtab = append(importtab, ldimpf) + + // The map created by adddynimpsym associates the name to a number + // This number represents the librairie index (- 1) in this import files section + // Therefore, they must be sorted before being put inside the section + libsOrdered := make([]string, len(f.dynLibraries)) + for key, val := range f.dynLibraries { + if libsOrdered[val] != "" { + continue + } + libsOrdered[val] = key + } + + for _, lib := range libsOrdered { + // lib string is defined as base.a/mem.o or path/base.a/mem.o + n := strings.Split(lib, "/") + path := "" + base := n[len(n)-2] + mem := n[len(n)-1] + if len(n) > 2 { + path = lib[:len(lib)-len(base)-len(mem)-2] + + } + ldimpf = &XcoffLdImportFile64{ + Limpidpath: path, + Limpidbase: base, + Limpidmem: mem, + } + off += uint64(len(ldimpf.Limpidpath) + len(ldimpf.Limpidbase) + len(ldimpf.Limpidmem) + 3) // + null delimiter + importtab = append(importtab, ldimpf) + } + + hdr.Lnimpid = int32(len(importtab)) + hdr.Listlen = uint32(off - hdr.Limpoff) + hdr.Lstoff = off + hdr.Lstlen = stlen + + /* Writing */ + ctxt.Out.SeekSet(int64(globalOff)) + binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, hdr) + + for _, s := range symtab { + binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, s) + + } + for _, r := range reloctab { + binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, r) + } + for _, f := range importtab { + ctxt.Out.WriteString(f.Limpidpath) + ctxt.Out.Write8(0) + ctxt.Out.WriteString(f.Limpidbase) + ctxt.Out.Write8(0) + ctxt.Out.WriteString(f.Limpidmem) + ctxt.Out.Write8(0) + } + for _, s := range strtab { + ctxt.Out.Write16(s.size) + ctxt.Out.WriteString(s.name) + ctxt.Out.Write8(0) // null terminator + } + + loaderOff = globalOff + loaderSize = off + uint64(stlen) + ctxt.Out.Flush() + + /* again for printing */ + if !*flagA { + return + } + + ctxt.Logf("\n.loader section") + // write in buf + var buf bytes.Buffer + + binary.Write(&buf, ctxt.Arch.ByteOrder, hdr) + for _, s := range symtab { + binary.Write(&buf, ctxt.Arch.ByteOrder, s) + + } + for _, f := range importtab { + buf.WriteString(f.Limpidpath) + buf.WriteByte(0) + buf.WriteString(f.Limpidbase) + buf.WriteByte(0) + buf.WriteString(f.Limpidmem) + buf.WriteByte(0) + } + for _, s := range strtab { + binary.Write(&buf, ctxt.Arch.ByteOrder, s.size) + buf.WriteString(s.name) + buf.WriteByte(0) // null terminator + } + + // Log buffer + ctxt.Logf("\n\t%.8x|", globalOff) + for i, b := range buf.Bytes() { + if i > 0 && i%16 == 0 { + ctxt.Logf("\n\t%.8x|", uint64(globalOff)+uint64(i)) + } + ctxt.Logf(" %.2x", b) + } + ctxt.Logf("\n") + +} + +// XCOFF assembling and writing file + +func (f *xcoffFile) writeFileHeader(ctxt *Link) { + // File header + f.xfhdr.Fmagic = U64_TOCMAGIC + f.xfhdr.Fnscns = uint16(len(f.sections)) + f.xfhdr.Ftimedat = 0 + + if !*FlagS { + f.xfhdr.Fsymptr = uint64(f.symtabOffset) + f.xfhdr.Fnsyms = int32(f.symbolCount) + } + + if ctxt.BuildMode == BuildModeExe { + f.xfhdr.Fopthdr = AOUTHSZ_EXEC64 + f.xfhdr.Fflags = F_EXEC + + // auxiliary header + f.xahdr.Ovstamp = 1 // based on dump -o + f.xahdr.Omagic = 0x10b + copy(f.xahdr.Omodtype[:], "1L") + f.xahdr.Oentry = uint64(Entryvalue(ctxt)) + f.xahdr.Otoc = uint64(ctxt.Syms.ROLookup("TOC", 0).Value) + + // Based on dump -o + 
f.xahdr.Oalgntext = 0x5 + f.xahdr.Oalgndata = 0x5 + + binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr) + binary.Write(ctxt.Out, binary.BigEndian, &f.xahdr) + } else { + f.xfhdr.Fopthdr = 0 + binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr) + } + +} + +func xcoffwrite(ctxt *Link) { + ctxt.Out.SeekSet(0) + + xfile.writeFileHeader(ctxt) + + for _, sect := range xfile.sections { + sect.write(ctxt) + } +} + +// Generate XCOFF assembly file +func Asmbxcoff(ctxt *Link) { + // initial offset for sections + if ctxt.BuildMode == BuildModeExe { + // search entry section number + eaddr := uint64(Entryvalue(ctxt)) + for _, sect := range append(Segtext.Sections, Segdata.Sections...) { + if eaddr-sect.Vaddr <= sect.Length { + xfile.xahdr.Osnentry = int16(sect.Extnum) + } + } + + // check + if xfile.xahdr.Osnentry == 0 { + Exitf("internal error: Section number for entry point (addr = 0x%x) not found", eaddr) + } + + } + + // add text sections + for _, sect := range Segtext.Sections { + // ctxt.Logf(".text: %s \n", sect.Name) + s := xfile.addSection(sect) + s.Sflags = STYP_TEXT + + // use sect.Name because of convertion inside scnhdr + if sect.Name == ".text" { + xfile.xahdr.Otextstart = s.Spaddr + xfile.xahdr.Otsize = s.Ssize + xfile.xahdr.Osntext = sect.Extnum + } + } + + // add data sections + var ( + snoptrdata, + sdata, + sbss, + snoptrbss *sym.Section + ) + for _, sect := range Segdata.Sections { + if sect.Name == ".noptrdata" { + snoptrdata = sect + } + if sect.Name == ".noptrbss" { + snoptrbss = sect + } + if sect.Name == ".data" { + sdata = sect + } + if sect.Name == ".bss" { + sbss = sect + } + } + + // On AIX, there must be only one data and one bss section. + // Therefore, their noptr section is merged within them. + // The length of the new section must be recomputed to handle defautl gap + // between GO sections as AIX doesn't allow it. + + // Merge .noptrdata inside .data + sdata.Vaddr = snoptrdata.Vaddr + sdata.Length = sbss.Vaddr - sdata.Vaddr + s := xfile.addSection(sdata) + s.Sflags = STYP_DATA + xfile.xahdr.Odatastart = s.Spaddr + xfile.xahdr.Odsize = s.Ssize + xfile.xahdr.Osndata = sdata.Extnum + + // Merge .noptrbss inside .bss + sbss.Length = snoptrbss.Vaddr + snoptrbss.Length - sbss.Vaddr + s = xfile.addSection(sbss) + s.Sflags = STYP_BSS + xfile.xahdr.Obsize = s.Ssize + xfile.xahdr.Osnbss = sbss.Extnum + s.Sscnptr = 0 + + // add dwarf section + for _, sect := range Segdwarf.Sections { + xfile.addDwarfSection(sect) + } + + // Loader section must be add at the end because of sect.Extnum + // in others sections + xfile.addLoaderSection(loaderSize, loaderOff) + + xcoffwrite(ctxt) +} diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index 3e833b686eaa1..c4a49c6a1e595 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -692,6 +692,11 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bo // Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI // Specification". v := r.Sym.Value - 0x7000 + if ctxt.HeadType == objabi.Haix { + // On AIX, the thread pointer points 0x7800 bytes after + // the TLS. 
+ v -= 0x800 + } if int64(int16(v)) != v { ld.Errorf(s, "TLS offset out of range %d", v) } @@ -941,6 +946,13 @@ func asmb(ctxt *ld.Link) { ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) + loadersize := uint64(0) + if ctxt.HeadType == objabi.Haix && ctxt.BuildMode == ld.BuildModeExe { + loadero := uint64(ld.Rnd(int64(ld.Segdwarf.Fileoff+ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + ctxt.Out.SeekSet(int64(loadero)) + loadersize = ld.Loaderblk(ctxt, loadero) + } + /* output symbol table */ ld.Symsize = 0 @@ -960,6 +972,16 @@ func asmb(ctxt *ld.Link) { case objabi.Hplan9: symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) + + case objabi.Haix: + symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) + + // Add loader size if needed + if ctxt.BuildMode == ld.BuildModeExe { + symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) + symo += uint32(loadersize) + } + symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) } ctxt.Out.SeekSet(int64(symo)) @@ -988,6 +1010,10 @@ func asmb(ctxt *ld.Link) { ctxt.Out.Write(sym.P) ctxt.Out.Flush() } + + case objabi.Haix: + ld.Asmaixsym(ctxt) + ctxt.Out.Flush() } } @@ -1013,6 +1039,9 @@ func asmb(ctxt *ld.Link) { objabi.Hopenbsd, objabi.Hnacl: ld.Asmbelf(ctxt, int64(symo)) + + case objabi.Haix: + ld.Asmbxcoff(ctxt) } ctxt.Out.Flush() diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go index 273d9b42cb9da..e630f8c062b6b 100644 --- a/src/cmd/link/internal/ppc64/obj.go +++ b/src/cmd/link/internal/ppc64/obj.go @@ -121,6 +121,10 @@ func archinit(ctxt *ld.Link) { if *ld.FlagRound == -1 { *ld.FlagRound = 0x10000 } + + case objabi.Haix: + ld.Xcoffinit(ctxt) + } if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 { diff --git a/src/container/list/list.go b/src/container/list/list.go index dc4260e131665..b8b599aabb11d 100644 --- a/src/container/list/list.go +++ b/src/container/list/list.go @@ -116,6 +116,23 @@ func (l *List) remove(e *Element) *Element { return e } +// move moves e to next to at and returns e. +func (l *List) move(e, at *Element) *Element { + if e == at { + return e + } + e.prev.next = e.next + e.next.prev = e.prev + + n := at.next + at.next = e + e.prev = at + e.next = n + n.prev = e + + return e +} + // Remove removes e from l if e is an element of list l. // It returns the element value e.Value. // The element must not be nil. @@ -170,7 +187,7 @@ func (l *List) MoveToFront(e *Element) { return } // see comment in List.Remove about initialization of l - l.insert(l.remove(e), &l.root) + l.move(e, &l.root) } // MoveToBack moves element e to the back of list l. @@ -181,7 +198,7 @@ func (l *List) MoveToBack(e *Element) { return } // see comment in List.Remove about initialization of l - l.insert(l.remove(e), l.root.prev) + l.move(e, l.root.prev) } // MoveBefore moves element e to its new position before mark. @@ -191,7 +208,7 @@ func (l *List) MoveBefore(e, mark *Element) { if e.list != l || e == mark || mark.list != l { return } - l.insert(l.remove(e), mark.prev) + l.move(e, mark.prev) } // MoveAfter moves element e to its new position after mark. @@ -201,7 +218,7 @@ func (l *List) MoveAfter(e, mark *Element) { if e.list != l || e == mark || mark.list != l { return } - l.insert(l.remove(e), mark) + l.move(e, mark) } // PushBackList inserts a copy of an other list at the back of list l. 
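Editor's note on the container/list change above: MoveToFront, MoveToBack, MoveBefore and MoveAfter now relink the element in place through the new move helper instead of calling insert(remove(e), at), so a move becomes a single pointer splice with no intermediate removed state; the exported behavior should be unchanged. The snippet below is a minimal, illustrative sketch (not part of the CL) and assumes nothing beyond the standard library:

	package main

	import (
		"container/list"
		"fmt"
	)

	func main() {
		l := list.New()
		l.PushBack("a")
		l.PushBack("b")
		c := l.PushBack("c")

		// With this change, MoveToFront relinks c next to the root element
		// via move; previously it removed c and re-inserted it.
		l.MoveToFront(c)

		// Prints: c a b
		for e := l.Front(); e != nil; e = e.Next() {
			fmt.Print(e.Value, " ")
		}
		fmt.Println()
	}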
diff --git a/src/crypto/tls/boring_test.go b/src/crypto/tls/boring_test.go index 2ac62a554053c..6868f1a370ed1 100644 --- a/src/crypto/tls/boring_test.go +++ b/src/crypto/tls/boring_test.go @@ -28,6 +28,7 @@ func TestBoringServerProtocolVersion(t *testing.T) { serverConfig.MinVersion = VersionSSL30 clientHello := &clientHelloMsg{ vers: v, + random: make([]byte, 32), cipherSuites: allCipherSuites(), compressionMethods: []uint8{compressionNone}, } @@ -110,6 +111,7 @@ func TestBoringServerCipherSuites(t *testing.T) { t.Run(fmt.Sprintf("suite=%#x", id), func(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS12, + random: make([]byte, 32), cipherSuites: []uint16{id}, compressionMethods: []uint8{compressionNone}, supportedCurves: defaultCurvePreferences, @@ -141,6 +143,7 @@ func TestBoringServerCurves(t *testing.T) { t.Run(fmt.Sprintf("curve=%d", curveid), func(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS12, + random: make([]byte, 32), cipherSuites: []uint16{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, compressionMethods: []uint8{compressionNone}, supportedCurves: []CurveID{curveid}, diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go index 13cebc9042dd3..dae5fd103a1ab 100644 --- a/src/crypto/tls/conn.go +++ b/src/crypto/tls/conn.go @@ -478,12 +478,18 @@ type RecordHeaderError struct { // RecordHeader contains the five bytes of TLS record header that // triggered the error. RecordHeader [5]byte + // Conn provides the underlying net.Conn in the case that a client + // sent an initial handshake that didn't look like TLS. + // It is nil if there's already been a handshake or a TLS alert has + // been written to the connection. + Conn net.Conn } func (e RecordHeaderError) Error() string { return "tls: " + e.Msg } -func (c *Conn) newRecordHeaderError(msg string) (err RecordHeaderError) { +func (c *Conn) newRecordHeaderError(conn net.Conn, msg string) (err RecordHeaderError) { err.Msg = msg + err.Conn = conn copy(err.RecordHeader[:], c.rawInput.Bytes()) return err } @@ -535,7 +541,7 @@ func (c *Conn) readRecord(want recordType) error { // an SSLv2 client. if want == recordTypeHandshake && typ == 0x80 { c.sendAlert(alertProtocolVersion) - return c.in.setErrorLocked(c.newRecordHeaderError("unsupported SSLv2 handshake received")) + return c.in.setErrorLocked(c.newRecordHeaderError(nil, "unsupported SSLv2 handshake received")) } vers := uint16(hdr[1])<<8 | uint16(hdr[2]) @@ -543,12 +549,7 @@ func (c *Conn) readRecord(want recordType) error { if c.haveVers && vers != c.vers { c.sendAlert(alertProtocolVersion) msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers) - return c.in.setErrorLocked(c.newRecordHeaderError(msg)) - } - if n > maxCiphertext { - c.sendAlert(alertRecordOverflow) - msg := fmt.Sprintf("oversized record received with length %d", n) - return c.in.setErrorLocked(c.newRecordHeaderError(msg)) + return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg)) } if !c.haveVers { // First message, be extra suspicious: this might not be a TLS @@ -556,10 +557,14 @@ func (c *Conn) readRecord(want recordType) error { // The current max version is 3.3 so if the version is >= 16.0, // it's probably not real. 
if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 { - c.sendAlert(alertUnexpectedMessage) - return c.in.setErrorLocked(c.newRecordHeaderError("first record does not look like a TLS handshake")) + return c.in.setErrorLocked(c.newRecordHeaderError(c.conn, "first record does not look like a TLS handshake")) } } + if n > maxCiphertext { + c.sendAlert(alertRecordOverflow) + msg := fmt.Sprintf("oversized record received with length %d", n) + return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg)) + } if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil { if e, ok := err.(net.Error); !ok || !e.Temporary() { c.in.setErrorLocked(err) @@ -894,7 +899,7 @@ func (c *Conn) readHandshake() (interface{}, error) { m = new(certificateMsg) case typeCertificateRequest: m = &certificateRequestMsg{ - hasSignatureAndHash: c.vers >= VersionTLS12, + hasSignatureAlgorithm: c.vers >= VersionTLS12, } case typeCertificateStatus: m = new(certificateStatusMsg) @@ -906,7 +911,7 @@ func (c *Conn) readHandshake() (interface{}, error) { m = new(clientKeyExchangeMsg) case typeCertificateVerify: m = &certificateVerifyMsg{ - hasSignatureAndHash: c.vers >= VersionTLS12, + hasSignatureAlgorithm: c.vers >= VersionTLS12, } case typeNextProtocol: m = new(nextProtoMsg) diff --git a/src/crypto/tls/handshake_client.go b/src/crypto/tls/handshake_client.go index 3ea81b7949fd0..a1f0731730ee4 100644 --- a/src/crypto/tls/handshake_client.go +++ b/src/crypto/tls/handshake_client.go @@ -476,7 +476,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { if chainToSend != nil && len(chainToSend.Certificate) > 0 { certVerify := &certificateVerifyMsg{ - hasSignatureAndHash: c.vers >= VersionTLS12, + hasSignatureAlgorithm: c.vers >= VersionTLS12, } key, ok := chainToSend.PrivateKey.(crypto.Signer) @@ -491,7 +491,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } // SignatureAndHashAlgorithm was introduced in TLS 1.2. - if certVerify.hasSignatureAndHash { + if certVerify.hasSignatureAlgorithm { certVerify.signatureAlgorithm = signatureAlgorithm } digest, err := hs.finishedHash.hashForClientCertificate(sigType, hashFunc, hs.masterSecret) @@ -744,7 +744,7 @@ func (hs *clientHandshakeState) getCertificate(certReq *certificateRequestMsg) ( if c.config.GetClientCertificate != nil { var signatureSchemes []SignatureScheme - if !certReq.hasSignatureAndHash { + if !certReq.hasSignatureAlgorithm { // Prior to TLS 1.2, the signature schemes were not // included in the certificate request message. 
In this // case we use a plausible list based on the acceptable diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go index dcd69140984d3..437aaed4625ae 100644 --- a/src/crypto/tls/handshake_client_test.go +++ b/src/crypto/tls/handshake_client_test.go @@ -384,10 +384,12 @@ func (test *clientTest) run(t *testing.T, write bool) { } for i, b := range flows { if i%2 == 1 { + serverConn.SetWriteDeadline(time.Now().Add(1 * time.Minute)) serverConn.Write(b) continue } bb := make([]byte, len(b)) + serverConn.SetReadDeadline(time.Now().Add(1 * time.Minute)) _, err := io.ReadFull(serverConn, bb) if err != nil { t.Fatalf("%s #%d: %s", test.name, i, err) @@ -1644,7 +1646,7 @@ func TestCloseClientConnectionOnIdleServer(t *testing.T) { serverConn.Read(b[:]) client.Close() }() - client.SetWriteDeadline(time.Now().Add(time.Second)) + client.SetWriteDeadline(time.Now().Add(time.Minute)) err := client.Handshake() if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { diff --git a/src/crypto/tls/handshake_messages.go b/src/crypto/tls/handshake_messages.go index 27004b2d698f9..d6785550a2994 100644 --- a/src/crypto/tls/handshake_messages.go +++ b/src/crypto/tls/handshake_messages.go @@ -5,10 +5,49 @@ package tls import ( - "bytes" + "fmt" + "golang_org/x/crypto/cryptobyte" "strings" ) +// The marshalingFunction type is an adapter to allow the use of ordinary +// functions as cryptobyte.MarshalingValue. +type marshalingFunction func(b *cryptobyte.Builder) error + +func (f marshalingFunction) Marshal(b *cryptobyte.Builder) error { + return f(b) +} + +// addBytesWithLength appends a sequence of bytes to the cryptobyte.Builder. If +// the length of the sequence is not the value specified, it produces an error. +func addBytesWithLength(b *cryptobyte.Builder, v []byte, n int) { + b.AddValue(marshalingFunction(func(b *cryptobyte.Builder) error { + if len(v) != n { + return fmt.Errorf("invalid value length: expected %d, got %d", n, len(v)) + } + b.AddBytes(v) + return nil + })) +} + +// readUint8LengthPrefixed acts like s.ReadUint8LengthPrefixed, but targets a +// []byte instead of a cryptobyte.String. +func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool { + return s.ReadUint8LengthPrefixed((*cryptobyte.String)(out)) +} + +// readUint16LengthPrefixed acts like s.ReadUint16LengthPrefixed, but targets a +// []byte instead of a cryptobyte.String. +func readUint16LengthPrefixed(s *cryptobyte.String, out *[]byte) bool { + return s.ReadUint16LengthPrefixed((*cryptobyte.String)(out)) +} + +// readUint24LengthPrefixed acts like s.ReadUint24LengthPrefixed, but targets a +// []byte instead of a cryptobyte.String. 
+func readUint24LengthPrefixed(s *cryptobyte.String, out *[]byte) bool { + return s.ReadUint24LengthPrefixed((*cryptobyte.String)(out)) +} + type clientHelloMsg struct { raw []byte vers uint16 @@ -30,473 +69,294 @@ type clientHelloMsg struct { alpnProtocols []string } -func (m *clientHelloMsg) equal(i interface{}) bool { - m1, ok := i.(*clientHelloMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.vers == m1.vers && - bytes.Equal(m.random, m1.random) && - bytes.Equal(m.sessionId, m1.sessionId) && - eqUint16s(m.cipherSuites, m1.cipherSuites) && - bytes.Equal(m.compressionMethods, m1.compressionMethods) && - m.nextProtoNeg == m1.nextProtoNeg && - m.serverName == m1.serverName && - m.ocspStapling == m1.ocspStapling && - m.scts == m1.scts && - eqCurveIDs(m.supportedCurves, m1.supportedCurves) && - bytes.Equal(m.supportedPoints, m1.supportedPoints) && - m.ticketSupported == m1.ticketSupported && - bytes.Equal(m.sessionTicket, m1.sessionTicket) && - eqSignatureAlgorithms(m.supportedSignatureAlgorithms, m1.supportedSignatureAlgorithms) && - m.secureRenegotiationSupported == m1.secureRenegotiationSupported && - bytes.Equal(m.secureRenegotiation, m1.secureRenegotiation) && - eqStrings(m.alpnProtocols, m1.alpnProtocols) -} - func (m *clientHelloMsg) marshal() []byte { if m.raw != nil { return m.raw } - length := 2 + 32 + 1 + len(m.sessionId) + 2 + len(m.cipherSuites)*2 + 1 + len(m.compressionMethods) - numExtensions := 0 - extensionsLength := 0 - if m.nextProtoNeg { - numExtensions++ - } - if m.ocspStapling { - extensionsLength += 1 + 2 + 2 - numExtensions++ - } - if len(m.serverName) > 0 { - extensionsLength += 5 + len(m.serverName) - numExtensions++ - } - if len(m.supportedCurves) > 0 { - extensionsLength += 2 + 2*len(m.supportedCurves) - numExtensions++ - } - if len(m.supportedPoints) > 0 { - extensionsLength += 1 + len(m.supportedPoints) - numExtensions++ - } - if m.ticketSupported { - extensionsLength += len(m.sessionTicket) - numExtensions++ - } - if len(m.supportedSignatureAlgorithms) > 0 { - extensionsLength += 2 + 2*len(m.supportedSignatureAlgorithms) - numExtensions++ - } - if m.secureRenegotiationSupported { - extensionsLength += 1 + len(m.secureRenegotiation) - numExtensions++ - } - if len(m.alpnProtocols) > 0 { - extensionsLength += 2 - for _, s := range m.alpnProtocols { - if l := len(s); l == 0 || l > 255 { - panic("invalid ALPN protocol") + var b cryptobyte.Builder + b.AddUint8(typeClientHello) + b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16(m.vers) + addBytesWithLength(b, m.random, 32) + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.sessionId) + }) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, suite := range m.cipherSuites { + b.AddUint16(suite) + } + }) + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.compressionMethods) + }) + + // If extensions aren't present, omit them. 
+ var extensionsPresent bool + bWithoutExtensions := *b + + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + if m.nextProtoNeg { + // draft-agl-tls-nextprotoneg-04 + b.AddUint16(extensionNextProtoNeg) + b.AddUint16(0) // empty extension_data + } + if len(m.serverName) > 0 { + // RFC 6066, Section 3 + b.AddUint16(extensionServerName) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8(0) // name_type = host_name + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(m.serverName)) + }) + }) + }) + } + if m.ocspStapling { + // RFC 4366, Section 3.6 + b.AddUint16(extensionStatusRequest) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8(1) // status_type = ocsp + b.AddUint16(0) // empty responder_id_list + b.AddUint16(0) // empty request_extensions + }) + } + if len(m.supportedCurves) > 0 { + // RFC 4492, Section 5.1.1 + b.AddUint16(extensionSupportedCurves) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, curve := range m.supportedCurves { + b.AddUint16(uint16(curve)) + } + }) + }) + } + if len(m.supportedPoints) > 0 { + // RFC 4492, Section 5.1.2 + b.AddUint16(extensionSupportedPoints) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.supportedPoints) + }) + }) + } + if m.ticketSupported { + // RFC 5077, Section 3.2 + b.AddUint16(extensionSessionTicket) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.sessionTicket) + }) + } + if len(m.supportedSignatureAlgorithms) > 0 { + // RFC 5246, Section 7.4.1.4.1 + b.AddUint16(extensionSignatureAlgorithms) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, sigAlgo := range m.supportedSignatureAlgorithms { + b.AddUint16(uint16(sigAlgo)) + } + }) + }) + } + if m.secureRenegotiationSupported { + // RFC 5746, Section 3.2 + b.AddUint16(extensionRenegotiationInfo) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.secureRenegotiation) + }) + }) + } + if len(m.alpnProtocols) > 0 { + // RFC 7301, Section 3.1 + b.AddUint16(extensionALPN) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, proto := range m.alpnProtocols { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(proto)) + }) + } + }) + }) + } + if m.scts { + // RFC 6962, Section 3.3.1 + b.AddUint16(extensionSCT) + b.AddUint16(0) // empty extension_data } - extensionsLength++ - extensionsLength += len(s) - } - numExtensions++ - } - if m.scts { - numExtensions++ - } - if numExtensions > 0 { - extensionsLength += 4 * numExtensions - length += 2 + extensionsLength - } - - x := make([]byte, 4+length) - x[0] = typeClientHello - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(m.vers >> 8) - x[5] = uint8(m.vers) - copy(x[6:38], m.random) - x[38] = uint8(len(m.sessionId)) - copy(x[39:39+len(m.sessionId)], m.sessionId) - y := x[39+len(m.sessionId):] - y[0] = uint8(len(m.cipherSuites) >> 7) - y[1] = uint8(len(m.cipherSuites) << 1) - for i, suite := range m.cipherSuites { - y[2+i*2] = uint8(suite >> 8) - y[3+i*2] = uint8(suite) - } - z := y[2+len(m.cipherSuites)*2:] - z[0] = uint8(len(m.compressionMethods)) - 
copy(z[1:], m.compressionMethods) - - z = z[1+len(m.compressionMethods):] - if numExtensions > 0 { - z[0] = byte(extensionsLength >> 8) - z[1] = byte(extensionsLength) - z = z[2:] - } - if m.nextProtoNeg { - z[0] = byte(extensionNextProtoNeg >> 8) - z[1] = byte(extensionNextProtoNeg & 0xff) - // The length is always 0 - z = z[4:] - } - if len(m.serverName) > 0 { - z[0] = byte(extensionServerName >> 8) - z[1] = byte(extensionServerName & 0xff) - l := len(m.serverName) + 5 - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - - // RFC 3546, Section 3.1 - // - // struct { - // NameType name_type; - // select (name_type) { - // case host_name: HostName; - // } name; - // } ServerName; - // - // enum { - // host_name(0), (255) - // } NameType; - // - // opaque HostName<1..2^16-1>; - // - // struct { - // ServerName server_name_list<1..2^16-1> - // } ServerNameList; - - z[0] = byte((len(m.serverName) + 3) >> 8) - z[1] = byte(len(m.serverName) + 3) - z[3] = byte(len(m.serverName) >> 8) - z[4] = byte(len(m.serverName)) - copy(z[5:], []byte(m.serverName)) - z = z[l:] - } - if m.ocspStapling { - // RFC 4366, Section 3.6 - z[0] = byte(extensionStatusRequest >> 8) - z[1] = byte(extensionStatusRequest) - z[2] = 0 - z[3] = 5 - z[4] = 1 // OCSP type - // Two zero valued uint16s for the two lengths. - z = z[9:] - } - if len(m.supportedCurves) > 0 { - // RFC 4492, Section 5.5.1 - z[0] = byte(extensionSupportedCurves >> 8) - z[1] = byte(extensionSupportedCurves) - l := 2 + 2*len(m.supportedCurves) - z[2] = byte(l >> 8) - z[3] = byte(l) - l -= 2 - z[4] = byte(l >> 8) - z[5] = byte(l) - z = z[6:] - for _, curve := range m.supportedCurves { - z[0] = byte(curve >> 8) - z[1] = byte(curve) - z = z[2:] - } - } - if len(m.supportedPoints) > 0 { - // RFC 4492, Section 5.5.2 - z[0] = byte(extensionSupportedPoints >> 8) - z[1] = byte(extensionSupportedPoints) - l := 1 + len(m.supportedPoints) - z[2] = byte(l >> 8) - z[3] = byte(l) - l-- - z[4] = byte(l) - z = z[5:] - for _, pointFormat := range m.supportedPoints { - z[0] = pointFormat - z = z[1:] - } - } - if m.ticketSupported { - // RFC 5077, Section 3.2 - z[0] = byte(extensionSessionTicket >> 8) - z[1] = byte(extensionSessionTicket) - l := len(m.sessionTicket) - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - copy(z, m.sessionTicket) - z = z[len(m.sessionTicket):] - } - if len(m.supportedSignatureAlgorithms) > 0 { - // RFC 5246, Section 7.4.1.4.1 - z[0] = byte(extensionSignatureAlgorithms >> 8) - z[1] = byte(extensionSignatureAlgorithms) - l := 2 + 2*len(m.supportedSignatureAlgorithms) - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - - l -= 2 - z[0] = byte(l >> 8) - z[1] = byte(l) - z = z[2:] - for _, sigAlgo := range m.supportedSignatureAlgorithms { - z[0] = byte(sigAlgo >> 8) - z[1] = byte(sigAlgo) - z = z[2:] - } - } - if m.secureRenegotiationSupported { - z[0] = byte(extensionRenegotiationInfo >> 8) - z[1] = byte(extensionRenegotiationInfo & 0xff) - z[2] = 0 - z[3] = byte(len(m.secureRenegotiation) + 1) - z[4] = byte(len(m.secureRenegotiation)) - z = z[5:] - copy(z, m.secureRenegotiation) - z = z[len(m.secureRenegotiation):] - } - if len(m.alpnProtocols) > 0 { - z[0] = byte(extensionALPN >> 8) - z[1] = byte(extensionALPN & 0xff) - lengths := z[2:] - z = z[6:] - - stringsLength := 0 - for _, s := range m.alpnProtocols { - l := len(s) - z[0] = byte(l) - copy(z[1:], s) - z = z[1+l:] - stringsLength += 1 + l - } - lengths[2] = byte(stringsLength >> 8) - lengths[3] = byte(stringsLength) - stringsLength += 2 - lengths[0] = byte(stringsLength >> 8) - lengths[1] = 
byte(stringsLength) - } - if m.scts { - // RFC 6962, Section 3.3.1 - z[0] = byte(extensionSCT >> 8) - z[1] = byte(extensionSCT) - // zero uint16 for the zero-length extension_data - z = z[4:] - } + extensionsPresent = len(b.BytesOrPanic()) > 2 + }) - m.raw = x + if !extensionsPresent { + *b = bWithoutExtensions + } + }) - return x + m.raw = b.BytesOrPanic() + return m.raw } func (m *clientHelloMsg) unmarshal(data []byte) bool { - if len(data) < 42 { - return false - } - m.raw = data - m.vers = uint16(data[4])<<8 | uint16(data[5]) - m.random = data[6:38] - sessionIdLen := int(data[38]) - if sessionIdLen > 32 || len(data) < 39+sessionIdLen { - return false - } - m.sessionId = data[39 : 39+sessionIdLen] - data = data[39+sessionIdLen:] - if len(data) < 2 { + *m = clientHelloMsg{raw: data} + s := cryptobyte.String(data) + + if !s.Skip(4) || // message type and uint24 length field + !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) || + !readUint8LengthPrefixed(&s, &m.sessionId) { return false } - // cipherSuiteLen is the number of bytes of cipher suite numbers. Since - // they are uint16s, the number must be even. - cipherSuiteLen := int(data[0])<<8 | int(data[1]) - if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen { + + var cipherSuites cryptobyte.String + if !s.ReadUint16LengthPrefixed(&cipherSuites) { return false } - numCipherSuites := cipherSuiteLen / 2 - m.cipherSuites = make([]uint16, numCipherSuites) - for i := 0; i < numCipherSuites; i++ { - m.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i]) - if m.cipherSuites[i] == scsvRenegotiation { + m.cipherSuites = []uint16{} + m.secureRenegotiationSupported = false + for !cipherSuites.Empty() { + var suite uint16 + if !cipherSuites.ReadUint16(&suite) { + return false + } + if suite == scsvRenegotiation { m.secureRenegotiationSupported = true } + m.cipherSuites = append(m.cipherSuites, suite) } - data = data[2+cipherSuiteLen:] - if len(data) < 1 { - return false - } - compressionMethodsLen := int(data[0]) - if len(data) < 1+compressionMethodsLen { + + if !readUint8LengthPrefixed(&s, &m.compressionMethods) { return false } - m.compressionMethods = data[1 : 1+compressionMethodsLen] - - data = data[1+compressionMethodsLen:] - - m.nextProtoNeg = false - m.serverName = "" - m.ocspStapling = false - m.ticketSupported = false - m.sessionTicket = nil - m.supportedSignatureAlgorithms = nil - m.alpnProtocols = nil - m.scts = false - if len(data) == 0 { + if s.Empty() { // ClientHello is optionally followed by extension data return true } - if len(data) < 2 { - return false - } - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if extensionsLength != len(data) { + var extensions cryptobyte.String + if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() { return false } - for len(data) != 0 { - if len(data) < 4 { - return false - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { + for !extensions.Empty() { + var extension uint16 + var extData cryptobyte.String + if !extensions.ReadUint16(&extension) || + !extensions.ReadUint16LengthPrefixed(&extData) { return false } switch extension { case extensionServerName: - d := data[:length] - if len(d) < 2 { - return false - } - namesLen := int(d[0])<<8 | int(d[1]) - d = d[2:] - if len(d) != namesLen { + // RFC 6066, Section 3 + var nameList cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() { return false } - for len(d) > 0 { - if len(d) < 
3 { + for !nameList.Empty() { + var nameType uint8 + var serverName cryptobyte.String + if !nameList.ReadUint8(&nameType) || + !nameList.ReadUint16LengthPrefixed(&serverName) || + serverName.Empty() { return false } - nameType := d[0] - nameLen := int(d[1])<<8 | int(d[2]) - d = d[3:] - if len(d) < nameLen { + if nameType != 0 { + continue + } + if len(m.serverName) != 0 { + // Multiple names of the same name_type are prohibited. return false } - if nameType == 0 { - m.serverName = string(d[:nameLen]) - // An SNI value may not include a trailing dot. - // See RFC 6066, Section 3. - if strings.HasSuffix(m.serverName, ".") { - return false - } - break + m.serverName = string(serverName) + // An SNI value may not include a trailing dot. + if strings.HasSuffix(m.serverName, ".") { + return false } - d = d[nameLen:] } case extensionNextProtoNeg: - if length > 0 { - return false - } + // draft-agl-tls-nextprotoneg-04 m.nextProtoNeg = true case extensionStatusRequest: - m.ocspStapling = length > 0 && data[0] == statusTypeOCSP - case extensionSupportedCurves: - // RFC 4492, Section 5.5.1 - if length < 2 { + // RFC 4366, Section 3.6 + var statusType uint8 + var ignored cryptobyte.String + if !extData.ReadUint8(&statusType) || + !extData.ReadUint16LengthPrefixed(&ignored) || + !extData.ReadUint16LengthPrefixed(&ignored) { return false } - l := int(data[0])<<8 | int(data[1]) - if l%2 == 1 || length != l+2 { + m.ocspStapling = statusType == statusTypeOCSP + case extensionSupportedCurves: + // RFC 4492, Section 5.1.1 + var curves cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&curves) || curves.Empty() { return false } - numCurves := l / 2 - m.supportedCurves = make([]CurveID, numCurves) - d := data[2:] - for i := 0; i < numCurves; i++ { - m.supportedCurves[i] = CurveID(d[0])<<8 | CurveID(d[1]) - d = d[2:] + for !curves.Empty() { + var curve uint16 + if !curves.ReadUint16(&curve) { + return false + } + m.supportedCurves = append(m.supportedCurves, CurveID(curve)) } case extensionSupportedPoints: - // RFC 4492, Section 5.5.2 - if length < 1 { + // RFC 4492, Section 5.1.2 + if !readUint8LengthPrefixed(&extData, &m.supportedPoints) || + len(m.supportedPoints) == 0 { return false } - l := int(data[0]) - if length != l+1 { - return false - } - m.supportedPoints = make([]uint8, l) - copy(m.supportedPoints, data[1:]) case extensionSessionTicket: // RFC 5077, Section 3.2 m.ticketSupported = true - m.sessionTicket = data[:length] + extData.ReadBytes(&m.sessionTicket, len(extData)) case extensionSignatureAlgorithms: // RFC 5246, Section 7.4.1.4.1 - if length < 2 || length&1 != 0 { + var sigAndAlgs cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() { return false } - l := int(data[0])<<8 | int(data[1]) - if l != length-2 { - return false - } - n := l / 2 - d := data[2:] - m.supportedSignatureAlgorithms = make([]SignatureScheme, n) - for i := range m.supportedSignatureAlgorithms { - m.supportedSignatureAlgorithms[i] = SignatureScheme(d[0])<<8 | SignatureScheme(d[1]) - d = d[2:] + for !sigAndAlgs.Empty() { + var sigAndAlg uint16 + if !sigAndAlgs.ReadUint16(&sigAndAlg) { + return false + } + m.supportedSignatureAlgorithms = append( + m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg)) } case extensionRenegotiationInfo: - if length == 0 { - return false - } - d := data[:length] - l := int(d[0]) - d = d[1:] - if l != len(d) { + // RFC 5746, Section 3.2 + if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) { return false } - - m.secureRenegotiation = d 
m.secureRenegotiationSupported = true case extensionALPN: - if length < 2 { - return false - } - l := int(data[0])<<8 | int(data[1]) - if l != length-2 { + // RFC 7301, Section 3.1 + var protoList cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() { return false } - d := data[2:length] - for len(d) != 0 { - stringLen := int(d[0]) - d = d[1:] - if stringLen == 0 || stringLen > len(d) { + for !protoList.Empty() { + var proto cryptobyte.String + if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() { return false } - m.alpnProtocols = append(m.alpnProtocols, string(d[:stringLen])) - d = d[stringLen:] + m.alpnProtocols = append(m.alpnProtocols, string(proto)) } case extensionSCT: + // RFC 6962, Section 3.3.1 m.scts = true - if length != 0 { - return false - } + default: + // Ignore unknown extensions. + continue + } + + if !extData.Empty() { + return false } - data = data[length:] } return true @@ -519,315 +379,170 @@ type serverHelloMsg struct { alpnProtocol string } -func (m *serverHelloMsg) equal(i interface{}) bool { - m1, ok := i.(*serverHelloMsg) - if !ok { - return false - } - - if len(m.scts) != len(m1.scts) { - return false - } - for i, sct := range m.scts { - if !bytes.Equal(sct, m1.scts[i]) { - return false - } - } - - return bytes.Equal(m.raw, m1.raw) && - m.vers == m1.vers && - bytes.Equal(m.random, m1.random) && - bytes.Equal(m.sessionId, m1.sessionId) && - m.cipherSuite == m1.cipherSuite && - m.compressionMethod == m1.compressionMethod && - m.nextProtoNeg == m1.nextProtoNeg && - eqStrings(m.nextProtos, m1.nextProtos) && - m.ocspStapling == m1.ocspStapling && - m.ticketSupported == m1.ticketSupported && - m.secureRenegotiationSupported == m1.secureRenegotiationSupported && - bytes.Equal(m.secureRenegotiation, m1.secureRenegotiation) && - m.alpnProtocol == m1.alpnProtocol -} - func (m *serverHelloMsg) marshal() []byte { if m.raw != nil { return m.raw } - length := 38 + len(m.sessionId) - numExtensions := 0 - extensionsLength := 0 - - nextProtoLen := 0 - if m.nextProtoNeg { - numExtensions++ - for _, v := range m.nextProtos { - nextProtoLen += len(v) - } - nextProtoLen += len(m.nextProtos) - extensionsLength += nextProtoLen - } - if m.ocspStapling { - numExtensions++ - } - if m.ticketSupported { - numExtensions++ - } - if m.secureRenegotiationSupported { - extensionsLength += 1 + len(m.secureRenegotiation) - numExtensions++ - } - if alpnLen := len(m.alpnProtocol); alpnLen > 0 { - if alpnLen >= 256 { - panic("invalid ALPN protocol") - } - extensionsLength += 2 + 1 + alpnLen - numExtensions++ - } - sctLen := 0 - if len(m.scts) > 0 { - for _, sct := range m.scts { - sctLen += len(sct) + 2 - } - extensionsLength += 2 + sctLen - numExtensions++ - } + var b cryptobyte.Builder + b.AddUint8(typeServerHello) + b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16(m.vers) + addBytesWithLength(b, m.random, 32) + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.sessionId) + }) + b.AddUint16(m.cipherSuite) + b.AddUint8(m.compressionMethod) + + // If extensions aren't present, omit them. 
+ var extensionsPresent bool + bWithoutExtensions := *b + + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + if m.nextProtoNeg { + b.AddUint16(extensionNextProtoNeg) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, proto := range m.nextProtos { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(proto)) + }) + } + }) + } + if m.ocspStapling { + b.AddUint16(extensionStatusRequest) + b.AddUint16(0) // empty extension_data + } + if m.ticketSupported { + b.AddUint16(extensionSessionTicket) + b.AddUint16(0) // empty extension_data + } + if m.secureRenegotiationSupported { + b.AddUint16(extensionRenegotiationInfo) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.secureRenegotiation) + }) + }) + } + if len(m.alpnProtocol) > 0 { + b.AddUint16(extensionALPN) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(m.alpnProtocol)) + }) + }) + }) + } + if len(m.scts) > 0 { + b.AddUint16(extensionSCT) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, sct := range m.scts { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(sct) + }) + } + }) + }) + } - if numExtensions > 0 { - extensionsLength += 4 * numExtensions - length += 2 + extensionsLength - } + extensionsPresent = len(b.BytesOrPanic()) > 2 + }) - x := make([]byte, 4+length) - x[0] = typeServerHello - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(m.vers >> 8) - x[5] = uint8(m.vers) - copy(x[6:38], m.random) - x[38] = uint8(len(m.sessionId)) - copy(x[39:39+len(m.sessionId)], m.sessionId) - z := x[39+len(m.sessionId):] - z[0] = uint8(m.cipherSuite >> 8) - z[1] = uint8(m.cipherSuite) - z[2] = m.compressionMethod - - z = z[3:] - if numExtensions > 0 { - z[0] = byte(extensionsLength >> 8) - z[1] = byte(extensionsLength) - z = z[2:] - } - if m.nextProtoNeg { - z[0] = byte(extensionNextProtoNeg >> 8) - z[1] = byte(extensionNextProtoNeg & 0xff) - z[2] = byte(nextProtoLen >> 8) - z[3] = byte(nextProtoLen) - z = z[4:] - - for _, v := range m.nextProtos { - l := len(v) - if l > 255 { - l = 255 - } - z[0] = byte(l) - copy(z[1:], []byte(v[0:l])) - z = z[1+l:] + if !extensionsPresent { + *b = bWithoutExtensions } - } - if m.ocspStapling { - z[0] = byte(extensionStatusRequest >> 8) - z[1] = byte(extensionStatusRequest) - z = z[4:] - } - if m.ticketSupported { - z[0] = byte(extensionSessionTicket >> 8) - z[1] = byte(extensionSessionTicket) - z = z[4:] - } - if m.secureRenegotiationSupported { - z[0] = byte(extensionRenegotiationInfo >> 8) - z[1] = byte(extensionRenegotiationInfo & 0xff) - z[2] = 0 - z[3] = byte(len(m.secureRenegotiation) + 1) - z[4] = byte(len(m.secureRenegotiation)) - z = z[5:] - copy(z, m.secureRenegotiation) - z = z[len(m.secureRenegotiation):] - } - if alpnLen := len(m.alpnProtocol); alpnLen > 0 { - z[0] = byte(extensionALPN >> 8) - z[1] = byte(extensionALPN & 0xff) - l := 2 + 1 + alpnLen - z[2] = byte(l >> 8) - z[3] = byte(l) - l -= 2 - z[4] = byte(l >> 8) - z[5] = byte(l) - l -= 1 - z[6] = byte(l) - copy(z[7:], []byte(m.alpnProtocol)) - z = z[7+alpnLen:] - } - if sctLen > 0 { - z[0] = byte(extensionSCT >> 8) - z[1] = byte(extensionSCT) - l := sctLen + 2 - z[2] = byte(l >> 8) - z[3] = byte(l) - z[4] = byte(sctLen >> 8) - z[5] = 
byte(sctLen) - - z = z[6:] - for _, sct := range m.scts { - z[0] = byte(len(sct) >> 8) - z[1] = byte(len(sct)) - copy(z[2:], sct) - z = z[len(sct)+2:] - } - } + }) - m.raw = x - - return x + m.raw = b.BytesOrPanic() + return m.raw } func (m *serverHelloMsg) unmarshal(data []byte) bool { - if len(data) < 42 { - return false - } - m.raw = data - m.vers = uint16(data[4])<<8 | uint16(data[5]) - m.random = data[6:38] - sessionIdLen := int(data[38]) - if sessionIdLen > 32 || len(data) < 39+sessionIdLen { + *m = serverHelloMsg{raw: data} + s := cryptobyte.String(data) + + if !s.Skip(4) || // message type and uint24 length field + !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) || + !readUint8LengthPrefixed(&s, &m.sessionId) || + !s.ReadUint16(&m.cipherSuite) || + !s.ReadUint8(&m.compressionMethod) { return false } - m.sessionId = data[39 : 39+sessionIdLen] - data = data[39+sessionIdLen:] - if len(data) < 3 { - return false - } - m.cipherSuite = uint16(data[0])<<8 | uint16(data[1]) - m.compressionMethod = data[2] - data = data[3:] - - m.nextProtoNeg = false - m.nextProtos = nil - m.ocspStapling = false - m.scts = nil - m.ticketSupported = false - m.alpnProtocol = "" - - if len(data) == 0 { + + if s.Empty() { // ServerHello is optionally followed by extension data return true } - if len(data) < 2 { - return false - } - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if len(data) != extensionsLength { + var extensions cryptobyte.String + if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() { return false } - for len(data) != 0 { - if len(data) < 4 { - return false - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { + for !extensions.Empty() { + var extension uint16 + var extData cryptobyte.String + if !extensions.ReadUint16(&extension) || + !extensions.ReadUint16LengthPrefixed(&extData) { return false } switch extension { case extensionNextProtoNeg: m.nextProtoNeg = true - d := data[:length] - for len(d) > 0 { - l := int(d[0]) - d = d[1:] - if l == 0 || l > len(d) { + for !extData.Empty() { + var proto cryptobyte.String + if !extData.ReadUint8LengthPrefixed(&proto) || + proto.Empty() { return false } - m.nextProtos = append(m.nextProtos, string(d[:l])) - d = d[l:] + m.nextProtos = append(m.nextProtos, string(proto)) } case extensionStatusRequest: - if length > 0 { - return false - } m.ocspStapling = true case extensionSessionTicket: - if length > 0 { - return false - } m.ticketSupported = true case extensionRenegotiationInfo: - if length == 0 { + if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) { return false } - d := data[:length] - l := int(d[0]) - d = d[1:] - if l != len(d) { - return false - } - - m.secureRenegotiation = d m.secureRenegotiationSupported = true case extensionALPN: - d := data[:length] - if len(d) < 3 { - return false - } - l := int(d[0])<<8 | int(d[1]) - if l != len(d)-2 { + var protoList cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() { return false } - d = d[2:] - l = int(d[0]) - if l != len(d)-1 { + var proto cryptobyte.String + if !protoList.ReadUint8LengthPrefixed(&proto) || + proto.Empty() || !protoList.Empty() { return false } - d = d[1:] - if len(d) == 0 { - // ALPN protocols must not be empty. 
- return false - } - m.alpnProtocol = string(d) + m.alpnProtocol = string(proto) case extensionSCT: - d := data[:length] - - if len(d) < 2 { - return false - } - l := int(d[0])<<8 | int(d[1]) - d = d[2:] - if len(d) != l || l == 0 { + var sctList cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() { return false } - - m.scts = make([][]byte, 0, 3) - for len(d) != 0 { - if len(d) < 2 { - return false - } - sctLen := int(d[0])<<8 | int(d[1]) - d = d[2:] - if sctLen == 0 || len(d) < sctLen { + for !sctList.Empty() { + var sct []byte + if !readUint16LengthPrefixed(&sctList, &sct) || + len(sct) == 0 { return false } - m.scts = append(m.scts, d[:sctLen]) - d = d[sctLen:] + m.scts = append(m.scts, sct) } + default: + // Ignore unknown extensions. + continue + } + + if !extData.Empty() { + return false } - data = data[length:] } return true @@ -838,16 +553,6 @@ type certificateMsg struct { certificates [][]byte } -func (m *certificateMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - eqByteSlices(m.certificates, m1.certificates) -} - func (m *certificateMsg) marshal() (x []byte) { if m.raw != nil { return m.raw @@ -925,16 +630,6 @@ type serverKeyExchangeMsg struct { key []byte } -func (m *serverKeyExchangeMsg) equal(i interface{}) bool { - m1, ok := i.(*serverKeyExchangeMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.key, m1.key) -} - func (m *serverKeyExchangeMsg) marshal() []byte { if m.raw != nil { return m.raw @@ -966,17 +661,6 @@ type certificateStatusMsg struct { response []byte } -func (m *certificateStatusMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateStatusMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.statusType == m1.statusType && - bytes.Equal(m.response, m1.response) -} - func (m *certificateStatusMsg) marshal() []byte { if m.raw != nil { return m.raw @@ -1028,11 +712,6 @@ func (m *certificateStatusMsg) unmarshal(data []byte) bool { type serverHelloDoneMsg struct{} -func (m *serverHelloDoneMsg) equal(i interface{}) bool { - _, ok := i.(*serverHelloDoneMsg) - return ok -} - func (m *serverHelloDoneMsg) marshal() []byte { x := make([]byte, 4) x[0] = typeServerHelloDone @@ -1048,16 +727,6 @@ type clientKeyExchangeMsg struct { ciphertext []byte } -func (m *clientKeyExchangeMsg) equal(i interface{}) bool { - m1, ok := i.(*clientKeyExchangeMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.ciphertext, m1.ciphertext) -} - func (m *clientKeyExchangeMsg) marshal() []byte { if m.raw != nil { return m.raw @@ -1092,36 +761,27 @@ type finishedMsg struct { verifyData []byte } -func (m *finishedMsg) equal(i interface{}) bool { - m1, ok := i.(*finishedMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.verifyData, m1.verifyData) -} - -func (m *finishedMsg) marshal() (x []byte) { +func (m *finishedMsg) marshal() []byte { if m.raw != nil { return m.raw } - x = make([]byte, 4+len(m.verifyData)) - x[0] = typeFinished - x[3] = byte(len(m.verifyData)) - copy(x[4:], m.verifyData) - m.raw = x - return + var b cryptobyte.Builder + b.AddUint8(typeFinished) + b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.verifyData) + }) + + m.raw = b.BytesOrPanic() + return m.raw } func (m *finishedMsg) unmarshal(data []byte) bool { m.raw = data - if len(data) < 4 { - return false - } - m.verifyData = data[4:] - 
return true + s := cryptobyte.String(data) + return s.Skip(1) && + readUint24LengthPrefixed(&s, &m.verifyData) && + s.Empty() } type nextProtoMsg struct { @@ -1129,16 +789,6 @@ type nextProtoMsg struct { proto string } -func (m *nextProtoMsg) equal(i interface{}) bool { - m1, ok := i.(*nextProtoMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.proto == m1.proto -} - func (m *nextProtoMsg) marshal() []byte { if m.raw != nil { return m.raw @@ -1196,28 +846,15 @@ func (m *nextProtoMsg) unmarshal(data []byte) bool { type certificateRequestMsg struct { raw []byte - // hasSignatureAndHash indicates whether this message includes a list - // of signature and hash functions. This change was introduced with TLS - // 1.2. - hasSignatureAndHash bool + // hasSignatureAlgorithm indicates whether this message includes a list of + // supported signature algorithms. This change was introduced with TLS 1.2. + hasSignatureAlgorithm bool certificateTypes []byte supportedSignatureAlgorithms []SignatureScheme certificateAuthorities [][]byte } -func (m *certificateRequestMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateRequestMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.certificateTypes, m1.certificateTypes) && - eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities) && - eqSignatureAlgorithms(m.supportedSignatureAlgorithms, m1.supportedSignatureAlgorithms) -} - func (m *certificateRequestMsg) marshal() (x []byte) { if m.raw != nil { return m.raw @@ -1231,7 +868,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) { } length += casLength - if m.hasSignatureAndHash { + if m.hasSignatureAlgorithm { length += 2 + 2*len(m.supportedSignatureAlgorithms) } @@ -1246,7 +883,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) { copy(x[5:], m.certificateTypes) y := x[5+len(m.certificateTypes):] - if m.hasSignatureAndHash { + if m.hasSignatureAlgorithm { n := len(m.supportedSignatureAlgorithms) * 2 y[0] = uint8(n >> 8) y[1] = uint8(n) @@ -1298,7 +935,7 @@ func (m *certificateRequestMsg) unmarshal(data []byte) bool { data = data[numCertTypes:] - if m.hasSignatureAndHash { + if m.hasSignatureAlgorithm { if len(data) < 2 { return false } @@ -1350,22 +987,10 @@ func (m *certificateRequestMsg) unmarshal(data []byte) bool { } type certificateVerifyMsg struct { - raw []byte - hasSignatureAndHash bool - signatureAlgorithm SignatureScheme - signature []byte -} - -func (m *certificateVerifyMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateVerifyMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.hasSignatureAndHash == m1.hasSignatureAndHash && - m.signatureAlgorithm == m1.signatureAlgorithm && - bytes.Equal(m.signature, m1.signature) + raw []byte + hasSignatureAlgorithm bool // format change introduced in TLS 1.2 + signatureAlgorithm SignatureScheme + signature []byte } func (m *certificateVerifyMsg) marshal() (x []byte) { @@ -1373,62 +998,34 @@ func (m *certificateVerifyMsg) marshal() (x []byte) { return m.raw } - // See RFC 4346, Section 7.4.8. 
- siglength := len(m.signature) - length := 2 + siglength - if m.hasSignatureAndHash { - length += 2 - } - x = make([]byte, 4+length) - x[0] = typeCertificateVerify - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - y := x[4:] - if m.hasSignatureAndHash { - y[0] = uint8(m.signatureAlgorithm >> 8) - y[1] = uint8(m.signatureAlgorithm) - y = y[2:] - } - y[0] = uint8(siglength >> 8) - y[1] = uint8(siglength) - copy(y[2:], m.signature) - - m.raw = x + var b cryptobyte.Builder + b.AddUint8(typeCertificateVerify) + b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { + if m.hasSignatureAlgorithm { + b.AddUint16(uint16(m.signatureAlgorithm)) + } + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(m.signature) + }) + }) - return + m.raw = b.BytesOrPanic() + return m.raw } func (m *certificateVerifyMsg) unmarshal(data []byte) bool { m.raw = data + s := cryptobyte.String(data) - if len(data) < 6 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { + if !s.Skip(4) { // message type and uint24 length field return false } - - data = data[4:] - if m.hasSignatureAndHash { - m.signatureAlgorithm = SignatureScheme(data[0])<<8 | SignatureScheme(data[1]) - data = data[2:] - } - - if len(data) < 2 { - return false - } - siglength := int(data[0])<<8 + int(data[1]) - data = data[2:] - if len(data) != siglength { - return false + if m.hasSignatureAlgorithm { + if !s.ReadUint16((*uint16)(&m.signatureAlgorithm)) { + return false + } } - - m.signature = data - - return true + return readUint16LengthPrefixed(&s, &m.signature) && s.Empty() } type newSessionTicketMsg struct { @@ -1436,16 +1033,6 @@ type newSessionTicketMsg struct { ticket []byte } -func (m *newSessionTicketMsg) equal(i interface{}) bool { - m1, ok := i.(*newSessionTicketMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.ticket, m1.ticket) -} - func (m *newSessionTicketMsg) marshal() (x []byte) { if m.raw != nil { return m.raw @@ -1500,63 +1087,3 @@ func (*helloRequestMsg) marshal() []byte { func (*helloRequestMsg) unmarshal(data []byte) bool { return len(data) == 4 } - -func eqUint16s(x, y []uint16) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqCurveIDs(x, y []CurveID) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqStrings(x, y []string) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqByteSlices(x, y [][]byte) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if !bytes.Equal(v, y[i]) { - return false - } - } - return true -} - -func eqSignatureAlgorithms(x, y []SignatureScheme) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if v != y[i] { - return false - } - } - return true -} diff --git a/src/crypto/tls/handshake_messages_test.go b/src/crypto/tls/handshake_messages_test.go index e24089b4ba8e7..08622eac05acb 100644 --- a/src/crypto/tls/handshake_messages_test.go +++ b/src/crypto/tls/handshake_messages_test.go @@ -20,7 +20,9 @@ var tests = []interface{}{ &certificateMsg{}, &certificateRequestMsg{}, - &certificateVerifyMsg{}, + &certificateVerifyMsg{ + hasSignatureAlgorithm: true, + }, &certificateStatusMsg{}, &clientKeyExchangeMsg{}, &nextProtoMsg{}, @@ 
-28,12 +30,6 @@ var tests = []interface{}{ &sessionState{}, } -type testMessage interface { - marshal() []byte - unmarshal([]byte) bool - equal(interface{}) bool -} - func TestMarshalUnmarshal(t *testing.T) { rand := rand.New(rand.NewSource(0)) @@ -51,16 +47,16 @@ func TestMarshalUnmarshal(t *testing.T) { break } - m1 := v.Interface().(testMessage) + m1 := v.Interface().(handshakeMessage) marshaled := m1.marshal() - m2 := iface.(testMessage) + m2 := iface.(handshakeMessage) if !m2.unmarshal(marshaled) { t.Errorf("#%d failed to unmarshal %#v %x", i, m1, marshaled) break } m2.marshal() // to fill any marshal cache in the message - if !m1.equal(m2) { + if !reflect.DeepEqual(m1, m2) { t.Errorf("#%d got:%#v want:%#v %x", i, m2, m1, marshaled) break } @@ -85,7 +81,7 @@ func TestMarshalUnmarshal(t *testing.T) { func TestFuzz(t *testing.T) { rand := rand.New(rand.NewSource(0)) for _, iface := range tests { - m := iface.(testMessage) + m := iface.(handshakeMessage) for j := 0; j < 1000; j++ { len := rand.Intn(100) @@ -142,18 +138,23 @@ func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { m.ticketSupported = true if rand.Intn(10) > 5 { m.sessionTicket = randomBytes(rand.Intn(300), rand) + } else { + m.sessionTicket = make([]byte, 0) } } if rand.Intn(10) > 5 { m.supportedSignatureAlgorithms = supportedSignatureAlgorithms() } - m.alpnProtocols = make([]string, rand.Intn(5)) - for i := range m.alpnProtocols { - m.alpnProtocols[i] = randomString(rand.Intn(20)+1, rand) + for i := 0; i < rand.Intn(5); i++ { + m.alpnProtocols = append(m.alpnProtocols, randomString(rand.Intn(20)+1, rand)) } if rand.Intn(10) > 5 { m.scts = true } + if rand.Intn(10) > 5 { + m.secureRenegotiationSupported = true + m.secureRenegotiation = randomBytes(rand.Intn(50)+1, rand) + } return reflect.ValueOf(m) } @@ -168,11 +169,8 @@ func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { if rand.Intn(10) > 5 { m.nextProtoNeg = true - - n := rand.Intn(10) - m.nextProtos = make([]string, n) - for i := 0; i < n; i++ { - m.nextProtos[i] = randomString(20, rand) + for i := 0; i < rand.Intn(10); i++ { + m.nextProtos = append(m.nextProtos, randomString(20, rand)) } } @@ -184,12 +182,13 @@ func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { } m.alpnProtocol = randomString(rand.Intn(32)+1, rand) + for i := 0; i < rand.Intn(4); i++ { + m.scts = append(m.scts, randomBytes(rand.Intn(500)+1, rand)) + } + if rand.Intn(10) > 5 { - numSCTs := rand.Intn(4) - m.scts = make([][]byte, numSCTs) - for i := range m.scts { - m.scts[i] = randomBytes(rand.Intn(500)+1, rand) - } + m.secureRenegotiationSupported = true + m.secureRenegotiation = randomBytes(rand.Intn(50)+1, rand) } return reflect.ValueOf(m) @@ -208,16 +207,16 @@ func (*certificateMsg) Generate(rand *rand.Rand, size int) reflect.Value { func (*certificateRequestMsg) Generate(rand *rand.Rand, size int) reflect.Value { m := &certificateRequestMsg{} m.certificateTypes = randomBytes(rand.Intn(5)+1, rand) - numCAs := rand.Intn(100) - m.certificateAuthorities = make([][]byte, numCAs) - for i := 0; i < numCAs; i++ { - m.certificateAuthorities[i] = randomBytes(rand.Intn(15)+1, rand) + for i := 0; i < rand.Intn(100); i++ { + m.certificateAuthorities = append(m.certificateAuthorities, randomBytes(rand.Intn(15)+1, rand)) } return reflect.ValueOf(m) } func (*certificateVerifyMsg) Generate(rand *rand.Rand, size int) reflect.Value { m := &certificateVerifyMsg{} + m.hasSignatureAlgorithm = true + m.signatureAlgorithm = 
SignatureScheme(rand.Intn(30000)) m.signature = randomBytes(rand.Intn(15)+1, rand) return reflect.ValueOf(m) } diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go index edd48de1da80f..fc458f6b01710 100644 --- a/src/crypto/tls/handshake_server.go +++ b/src/crypto/tls/handshake_server.go @@ -418,7 +418,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { byte(certTypeECDSASign), } if c.vers >= VersionTLS12 { - certReq.hasSignatureAndHash = true + certReq.hasSignatureAlgorithm = true certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms() } diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go index 44c67ed063899..01de92d97108b 100644 --- a/src/crypto/tls/handshake_server_test.go +++ b/src/crypto/tls/handshake_server_test.go @@ -101,13 +101,17 @@ var badProtocolVersions = []uint16{0x0000, 0x0005, 0x0100, 0x0105, 0x0200, 0x020 func TestRejectBadProtocolVersion(t *testing.T) { for _, v := range badProtocolVersions { - testClientHelloFailure(t, testConfig, &clientHelloMsg{vers: v}, "unsupported, maximum protocol version") + testClientHelloFailure(t, testConfig, &clientHelloMsg{ + vers: v, + random: make([]byte, 32), + }, "unsupported, maximum protocol version") } } func TestNoSuiteOverlap(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{0xff00}, compressionMethods: []uint8{compressionNone}, } @@ -117,6 +121,7 @@ func TestNoSuiteOverlap(t *testing.T) { func TestNoCompressionOverlap(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, compressionMethods: []uint8{0xff}, } @@ -126,6 +131,7 @@ func TestNoCompressionOverlap(t *testing.T) { func TestNoRC4ByDefault(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, compressionMethods: []uint8{compressionNone}, } @@ -137,7 +143,11 @@ func TestNoRC4ByDefault(t *testing.T) { } func TestRejectSNIWithTrailingDot(t *testing.T) { - testClientHelloFailure(t, testConfig, &clientHelloMsg{vers: VersionTLS12, serverName: "foo.com."}, "unexpected message") + testClientHelloFailure(t, testConfig, &clientHelloMsg{ + vers: VersionTLS12, + random: make([]byte, 32), + serverName: "foo.com.", + }, "unexpected message") } func TestDontSelectECDSAWithRSAKey(t *testing.T) { @@ -145,6 +155,7 @@ func TestDontSelectECDSAWithRSAKey(t *testing.T) { // won't be selected if the server's private key doesn't support it. clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}, compressionMethods: []uint8{compressionNone}, supportedCurves: []CurveID{CurveP256}, @@ -170,6 +181,7 @@ func TestDontSelectRSAWithECDSAKey(t *testing.T) { // won't be selected if the server's private key doesn't support it. clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA}, compressionMethods: []uint8{compressionNone}, supportedCurves: []CurveID{CurveP256}, @@ -242,11 +254,9 @@ func TestRenegotiationExtension(t *testing.T) { func TestTLS12OnlyCipherSuites(t *testing.T) { // Test that a Server doesn't select a TLS 1.2-only cipher suite when // the client negotiates TLS 1.1. 
- var zeros [32]byte - clientHello := &clientHelloMsg{ vers: VersionTLS11, - random: zeros[:], + random: make([]byte, 32), cipherSuites: []uint16{ // The Server, by default, will use the client's // preference order. So the GCM cipher suite @@ -615,10 +625,12 @@ func (test *serverTest) run(t *testing.T, write bool) { } for i, b := range flows { if i%2 == 0 { + clientConn.SetWriteDeadline(time.Now().Add(1 * time.Minute)) clientConn.Write(b) continue } bb := make([]byte, len(b)) + clientConn.SetReadDeadline(time.Now().Add(1 * time.Minute)) n, err := io.ReadFull(clientConn, bb) if err != nil { t.Fatalf("%s #%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", test.name, i+1, err, n, len(bb), bb[:n], b) @@ -876,6 +888,7 @@ func TestHandshakeServerSNIGetCertificateError(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, compressionMethods: []uint8{compressionNone}, serverName: "test", @@ -896,6 +909,7 @@ func TestHandshakeServerEmptyCertificates(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, compressionMethods: []uint8{compressionNone}, } @@ -907,6 +921,7 @@ func TestHandshakeServerEmptyCertificates(t *testing.T) { clientHello = &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, compressionMethods: []uint8{compressionNone}, } @@ -1210,6 +1225,7 @@ func TestSNIGivenOnFailure(t *testing.T) { clientHello := &clientHelloMsg{ vers: VersionTLS10, + random: make([]byte, 32), cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, compressionMethods: []uint8{compressionNone}, serverName: expectedServerName, @@ -1432,7 +1448,7 @@ func TestCloseServerConnectionOnIdleClient(t *testing.T) { clientConn.Write([]byte{'0'}) server.Close() }() - server.SetReadDeadline(time.Now().Add(time.Second)) + server.SetReadDeadline(time.Now().Add(time.Minute)) err := server.Handshake() if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { diff --git a/src/crypto/tls/ticket.go b/src/crypto/tls/ticket.go index 3e7aa93c82a7f..c1077e5ab2368 100644 --- a/src/crypto/tls/ticket.go +++ b/src/crypto/tls/ticket.go @@ -27,31 +27,6 @@ type sessionState struct { usedOldKey bool } -func (s *sessionState) equal(i interface{}) bool { - s1, ok := i.(*sessionState) - if !ok { - return false - } - - if s.vers != s1.vers || - s.cipherSuite != s1.cipherSuite || - !bytes.Equal(s.masterSecret, s1.masterSecret) { - return false - } - - if len(s.certificates) != len(s1.certificates) { - return false - } - - for i := range s.certificates { - if !bytes.Equal(s.certificates[i], s1.certificates[i]) { - return false - } - } - - return true -} - func (s *sessionState) marshal() []byte { length := 2 + 2 + 2 + len(s.masterSecret) + 2 for _, cert := range s.certificates { diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index 1ffe252ee3a40..31db7a47d6d7b 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -2605,6 +2605,15 @@ type Rows struct { lastcols []driver.Value } +// lasterrOrErrLocked returns either lasterr or the provided err. +// rs.closemu must be read-locked. 
+func (rs *Rows) lasterrOrErrLocked(err error) error { + if rs.lasterr != nil && rs.lasterr != io.EOF { + return rs.lasterr + } + return err +} + func (rs *Rows) initContextClose(ctx, txctx context.Context) { if ctx.Done() == nil && (txctx == nil || txctx.Done() == nil) { return @@ -2728,22 +2737,22 @@ func (rs *Rows) NextResultSet() bool { func (rs *Rows) Err() error { rs.closemu.RLock() defer rs.closemu.RUnlock() - if rs.lasterr == io.EOF { - return nil - } - return rs.lasterr + return rs.lasterrOrErrLocked(nil) } +var errRowsClosed = errors.New("sql: Rows are closed") +var errNoRows = errors.New("sql: no Rows available") + // Columns returns the column names. // Columns returns an error if the rows are closed. func (rs *Rows) Columns() ([]string, error) { rs.closemu.RLock() defer rs.closemu.RUnlock() if rs.closed { - return nil, errors.New("sql: Rows are closed") + return nil, rs.lasterrOrErrLocked(errRowsClosed) } if rs.rowsi == nil { - return nil, errors.New("sql: no Rows available") + return nil, rs.lasterrOrErrLocked(errNoRows) } rs.dc.Lock() defer rs.dc.Unlock() @@ -2757,10 +2766,10 @@ func (rs *Rows) ColumnTypes() ([]*ColumnType, error) { rs.closemu.RLock() defer rs.closemu.RUnlock() if rs.closed { - return nil, errors.New("sql: Rows are closed") + return nil, rs.lasterrOrErrLocked(errRowsClosed) } if rs.rowsi == nil { - return nil, errors.New("sql: no Rows available") + return nil, rs.lasterrOrErrLocked(errNoRows) } rs.dc.Lock() defer rs.dc.Unlock() @@ -2916,8 +2925,9 @@ func (rs *Rows) Scan(dest ...interface{}) error { return rs.lasterr } if rs.closed { + err := rs.lasterrOrErrLocked(errRowsClosed) rs.closemu.RUnlock() - return errors.New("sql: Rows are closed") + return err } rs.closemu.RUnlock() diff --git a/src/encoding/json/example_test.go b/src/encoding/json/example_test.go index 39b3231850a95..2031cba793892 100644 --- a/src/encoding/json/example_test.go +++ b/src/encoding/json/example_test.go @@ -292,3 +292,12 @@ func ExampleMarshalIndent() { // "b": 2 // } } + +func ExampleValid() { + goodJSON := `{"example": 1}` + badJSON := `{"example":2:]}}` + + fmt.Println(json.Valid([]byte(goodJSON)), json.Valid([]byte(badJSON))) + // Output: + // true false +} diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index c3ed0b4983c8f..b40bd46672874 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -166,6 +166,7 @@ var pkgDeps = map[string][]string{ // Other time dependencies: "internal/syscall/windows/registry", "syscall", + "syscall/js", }, "internal/poll": {"L0", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows"}, @@ -395,7 +396,7 @@ var pkgDeps = map[string][]string{ // SSL/TLS. "crypto/tls": { - "L4", "CRYPTO-MATH", "OS", + "L4", "CRYPTO-MATH", "OS", "golang_org/x/crypto/cryptobyte", "container/list", "crypto/x509", "encoding/pem", "net", "syscall", }, "crypto/x509": { diff --git a/src/go/types/expr.go b/src/go/types/expr.go index 35e9b36f31c63..0dc007069fadf 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -461,7 +461,11 @@ func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) { check.invalidOp(x.Pos(), "shifted operand %s (type %s) must be integer", x, typ) return } - } else if old.val != nil { + // Even if we have an integer, if the value is a constant we + // still must check that it is representable as the specific + // int type requested (was issue #22969). Fall through here. 
+ } + if old.val != nil { // If x is a constant, it must be representable as a value of typ. c := operand{old.mode, x, old.typ, old.val, 0} check.convertUntyped(&c, typ) diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 229d2030995b6..84908fd190c4e 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -46,8 +46,11 @@ func TestStdlib(t *testing.T) { } } -// firstComment returns the contents of the first comment in -// the given file, assuming there's one within the first KB. +// firstComment returns the contents of the first non-empty comment in +// the given file, "skip", or the empty string. No matter the present +// comments, if any of them contains a build tag, the result is always +// "skip". Only comments before the "package" token and within the first +// 4K of the file are considered. func firstComment(filename string) string { f, err := os.Open(filename) if err != nil { @@ -55,11 +58,12 @@ func firstComment(filename string) string { } defer f.Close() - var src [1 << 10]byte // read at most 1KB + var src [4 << 10]byte // read at most 4KB n, _ := f.Read(src[:]) + var first string var s scanner.Scanner - s.Init(fset.AddFile("", fset.Base(), n), src[:n], nil, scanner.ScanComments) + s.Init(fset.AddFile("", fset.Base(), n), src[:n], nil /* ignore errors */, scanner.ScanComments) for { _, tok, lit := s.Scan() switch tok { @@ -68,9 +72,17 @@ func firstComment(filename string) string { if lit[1] == '*' { lit = lit[:len(lit)-2] } - return strings.TrimSpace(lit[2:]) - case token.EOF: - return "" + contents := strings.TrimSpace(lit[2:]) + if strings.HasPrefix(contents, "+build ") { + return "skip" + } + if first == "" { + first = contents // contents may be "" but that's ok + } + // continue as we may still see build tags + + case token.PACKAGE, token.EOF: + return first } } } @@ -142,15 +154,8 @@ func TestStdTest(t *testing.T) { t.Skip("skipping in short mode") } - // test/recover4.go is only built for Linux and Darwin. - // TODO(gri) Remove once tests consider +build tags (issue 10370). 
- if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { - return - } - testTestDir(t, filepath.Join(runtime.GOROOT(), "test"), "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore - "sigchld.go", // don't work on Windows; testTestDir should consult build tags ) } @@ -166,7 +171,6 @@ func TestStdFixed(t *testing.T) { "issue6889.go", // gc-specific test "issue7746.go", // large constants - consumes too much memory "issue11362.go", // canonical import path check - "issue15002.go", // uses Mmap; testTestDir should consult build tags "issue16369.go", // go/types handles this correctly - not an issue "issue18459.go", // go/types doesn't check validity of //go:xxx directives "issue18882.go", // go/types doesn't check validity of //go:xxx directives diff --git a/src/go/types/testdata/shifts.src b/src/go/types/testdata/shifts.src index ca288290d69ce..52e340ec65cb3 100644 --- a/src/go/types/testdata/shifts.src +++ b/src/go/types/testdata/shifts.src @@ -354,3 +354,15 @@ func issue21727() { var _ = string(1 << s) var _ = string(1.0 /* ERROR "cannot convert" */ << s) } + +func issue22969() { + var s uint + var a []byte + _ = a[0xffffffffffffffff /* ERROR "overflows int" */ <= v { diff --git a/src/math/bits/bits_test.go b/src/math/bits/bits_test.go index bd6b618f3557d..0bd52bee773b0 100644 --- a/src/math/bits/bits_test.go +++ b/src/math/bits/bits_test.go @@ -899,6 +899,21 @@ func BenchmarkAdd64(b *testing.B) { Output = int(z + c) } +func BenchmarkAdd64multiple(b *testing.B) { + var z0 = uint64(Input) + var z1 = uint64(Input) + var z2 = uint64(Input) + var z3 = uint64(Input) + for i := 0; i < b.N; i++ { + var c uint64 + z0, c = Add64(z0, uint64(i), c) + z1, c = Add64(z1, uint64(i), c) + z2, c = Add64(z2, uint64(i), c) + z3, _ = Add64(z3, uint64(i), c) + } + Output = int(z0 + z1 + z2 + z3) +} + func BenchmarkSub(b *testing.B) { var z, c uint for i := 0; i < b.N; i++ { @@ -918,11 +933,26 @@ func BenchmarkSub32(b *testing.B) { func BenchmarkSub64(b *testing.B) { var z, c uint64 for i := 0; i < b.N; i++ { - z, c = Add64(uint64(Input), uint64(i), c) + z, c = Sub64(uint64(Input), uint64(i), c) } Output = int(z + c) } +func BenchmarkSub64multiple(b *testing.B) { + var z0 = uint64(Input) + var z1 = uint64(Input) + var z2 = uint64(Input) + var z3 = uint64(Input) + for i := 0; i < b.N; i++ { + var c uint64 + z0, c = Sub64(z0, uint64(i), c) + z1, c = Sub64(z1, uint64(i), c) + z2, c = Sub64(z2, uint64(i), c) + z3, _ = Sub64(z3, uint64(i), c) + } + Output = int(z0 + z1 + z2 + z3) +} + func BenchmarkMul(b *testing.B) { var hi, lo uint for i := 0; i < b.N; i++ { diff --git a/src/net/cgo_stub.go b/src/net/cgo_stub.go index 51259722aec8e..041f8af12966a 100644 --- a/src/net/cgo_stub.go +++ b/src/net/cgo_stub.go @@ -24,7 +24,7 @@ func cgoLookupPort(ctx context.Context, network, service string) (port int, err return 0, nil, false } -func cgoLookupIP(ctx context.Context, name string) (addrs []IPAddr, err error, completed bool) { +func cgoLookupIP(ctx context.Context, network, name string) (addrs []IPAddr, err error, completed bool) { return nil, nil, false } diff --git a/src/net/cgo_unix.go b/src/net/cgo_unix.go index 3db867a080e1f..b7cbcfe77a9d3 100644 --- a/src/net/cgo_unix.go +++ b/src/net/cgo_unix.go @@ -49,7 +49,7 @@ type reverseLookupResult struct { } func cgoLookupHost(ctx context.Context, name string) (hosts []string, err error, completed bool) { - addrs, err, completed := cgoLookupIP(ctx, name) + addrs, err, completed := cgoLookupIP(ctx, "ip", name) for _, addr := range addrs { hosts = append(hosts, 
addr.String()) } @@ -69,13 +69,11 @@ func cgoLookupPort(ctx context.Context, network, service string) (port int, err default: return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}, true } - if len(network) >= 4 { - switch network[3] { - case '4': - hints.ai_family = C.AF_INET - case '6': - hints.ai_family = C.AF_INET6 - } + switch ipVersion(network) { + case '4': + hints.ai_family = C.AF_INET + case '6': + hints.ai_family = C.AF_INET6 } if ctx.Done() == nil { port, err := cgoLookupServicePort(&hints, network, service) @@ -135,13 +133,20 @@ func cgoPortLookup(result chan<- portLookupResult, hints *C.struct_addrinfo, net result <- portLookupResult{port, err} } -func cgoLookupIPCNAME(name string) (addrs []IPAddr, cname string, err error) { +func cgoLookupIPCNAME(network, name string) (addrs []IPAddr, cname string, err error) { acquireThread() defer releaseThread() var hints C.struct_addrinfo hints.ai_flags = cgoAddrInfoFlags hints.ai_socktype = C.SOCK_STREAM + hints.ai_family = C.AF_UNSPEC + switch ipVersion(network) { + case '4': + hints.ai_family = C.AF_INET + case '6': + hints.ai_family = C.AF_INET6 + } h := make([]byte, len(name)+1) copy(h, name) @@ -197,18 +202,18 @@ func cgoLookupIPCNAME(name string) (addrs []IPAddr, cname string, err error) { return addrs, cname, nil } -func cgoIPLookup(result chan<- ipLookupResult, name string) { - addrs, cname, err := cgoLookupIPCNAME(name) +func cgoIPLookup(result chan<- ipLookupResult, network, name string) { + addrs, cname, err := cgoLookupIPCNAME(network, name) result <- ipLookupResult{addrs, cname, err} } -func cgoLookupIP(ctx context.Context, name string) (addrs []IPAddr, err error, completed bool) { +func cgoLookupIP(ctx context.Context, network, name string) (addrs []IPAddr, err error, completed bool) { if ctx.Done() == nil { - addrs, _, err = cgoLookupIPCNAME(name) + addrs, _, err = cgoLookupIPCNAME(network, name) return addrs, err, true } result := make(chan ipLookupResult, 1) - go cgoIPLookup(result, name) + go cgoIPLookup(result, network, name) select { case r := <-result: return r.addrs, r.err, true @@ -219,11 +224,11 @@ func cgoLookupIP(ctx context.Context, name string) (addrs []IPAddr, err error, c func cgoLookupCNAME(ctx context.Context, name string) (cname string, err error, completed bool) { if ctx.Done() == nil { - _, cname, err = cgoLookupIPCNAME(name) + _, cname, err = cgoLookupIPCNAME("ip", name) return cname, err, true } result := make(chan ipLookupResult, 1) - go cgoIPLookup(result, name) + go cgoIPLookup(result, "ip", name) select { case r := <-result: return r.cname, r.err, true diff --git a/src/net/cgo_unix_test.go b/src/net/cgo_unix_test.go index b476a6d62686e..c3eab5b3b2a5e 100644 --- a/src/net/cgo_unix_test.go +++ b/src/net/cgo_unix_test.go @@ -15,7 +15,7 @@ import ( func TestCgoLookupIP(t *testing.T) { defer dnsWaitGroup.Wait() ctx := context.Background() - _, err, ok := cgoLookupIP(ctx, "localhost") + _, err, ok := cgoLookupIP(ctx, "ip", "localhost") if !ok { t.Errorf("cgoLookupIP must not be a placeholder") } @@ -28,7 +28,7 @@ func TestCgoLookupIPWithCancel(t *testing.T) { defer dnsWaitGroup.Wait() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, err, ok := cgoLookupIP(ctx, "localhost") + _, err, ok := cgoLookupIP(ctx, "ip", "localhost") if !ok { t.Errorf("cgoLookupIP must not be a placeholder") } diff --git a/src/net/dial_test.go b/src/net/dial_test.go index 3a45c0d2ec9a7..983338885dac5 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -346,7 +346,7 @@ 
func TestDialParallel(t *testing.T) { } } -func lookupSlowFast(ctx context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { +func lookupSlowFast(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { switch host { case "slow6loopback4": // Returns a slow IPv6 address, and a local IPv4 address. @@ -355,7 +355,7 @@ func lookupSlowFast(ctx context.Context, fn func(context.Context, string) ([]IPA {IP: ParseIP("127.0.0.1")}, }, nil default: - return fn(ctx, host) + return fn(ctx, network, host) } } diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go index 3b0293025d9fd..73630faa498c6 100644 --- a/src/net/dnsclient_unix.go +++ b/src/net/dnsclient_unix.go @@ -9,7 +9,6 @@ // TODO(rsc): // Could potentially handle many outstanding lookups faster. -// Could have a small cache. // Random UDP source port (net.Dial should do that for us). // Random request IDs. diff --git a/src/net/error_test.go b/src/net/error_test.go index e09670e5ce0e2..2819986c0cd7c 100644 --- a/src/net/error_test.go +++ b/src/net/error_test.go @@ -144,7 +144,7 @@ func TestDialError(t *testing.T) { origTestHookLookupIP := testHookLookupIP defer func() { testHookLookupIP = origTestHookLookupIP }() - testHookLookupIP = func(ctx context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { + testHookLookupIP = func(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { return nil, &DNSError{Err: "dial error test", Name: "name", Server: "server", IsTimeout: true} } sw.Set(socktest.FilterConnect, func(so *socktest.Status) (socktest.AfterFilter, error) { @@ -293,7 +293,7 @@ func TestListenError(t *testing.T) { origTestHookLookupIP := testHookLookupIP defer func() { testHookLookupIP = origTestHookLookupIP }() - testHookLookupIP = func(_ context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { + testHookLookupIP = func(_ context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { return nil, &DNSError{Err: "listen error test", Name: "name", Server: "server", IsTimeout: true} } sw.Set(socktest.FilterListen, func(so *socktest.Status) (socktest.AfterFilter, error) { @@ -353,7 +353,7 @@ func TestListenPacketError(t *testing.T) { origTestHookLookupIP := testHookLookupIP defer func() { testHookLookupIP = origTestHookLookupIP }() - testHookLookupIP = func(_ context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { + testHookLookupIP = func(_ context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { return nil, &DNSError{Err: "listen error test", Name: "name", Server: "server", IsTimeout: true} } diff --git a/src/net/hook.go b/src/net/hook.go index d7316ea4383f5..5a1156378b569 100644 --- a/src/net/hook.go +++ b/src/net/hook.go @@ -13,10 +13,11 @@ var ( testHookHostsPath = "/etc/hosts" testHookLookupIP = func( ctx context.Context, - fn func(context.Context, string) ([]IPAddr, error), + fn func(context.Context, string, string) ([]IPAddr, error), + network string, host string, ) ([]IPAddr, error) { - return fn(ctx, host) + return fn(ctx, network, host) } testHookSetKeepAlive = func() {} ) diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index a282c4bc1776d..6eb0088a96375 100644 --- 
a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -1556,6 +1556,32 @@ func TestServeTLS(t *testing.T) { } } +// Test that the HTTPS server nicely rejects plaintext HTTP/1.x requests. +func TestTLSServerRejectHTTPRequests(t *testing.T) { + setParallel(t) + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + t.Error("unexpected HTTPS request") + })) + var errBuf bytes.Buffer + ts.Config.ErrorLog = log.New(&errBuf, "", 0) + defer ts.Close() + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + io.WriteString(conn, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n") + slurp, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + const wantPrefix = "HTTP/1.0 400 Bad Request\r\n" + if !strings.HasPrefix(string(slurp), wantPrefix) { + t.Errorf("response = %q; wanted prefix %q", slurp, wantPrefix) + } +} + // Issue 15908 func TestAutomaticHTTP2_Serve_NoTLSConfig(t *testing.T) { testAutomaticHTTP2_Serve(t, nil, true) diff --git a/src/net/http/server.go b/src/net/http/server.go index 4227343fbe4d2..6e1ccff4cd413 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -1782,6 +1782,14 @@ func (c *conn) serve(ctx context.Context) { c.rwc.SetWriteDeadline(time.Now().Add(d)) } if err := tlsConn.Handshake(); err != nil { + // If the handshake failed due to the client not speaking + // TLS, assume they're speaking plaintext HTTP and write a + // 400 response on the TLS conn's underlying net.Conn. + if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) { + io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n") + re.Conn.Close() + return + } c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) return } @@ -3390,3 +3398,13 @@ func strSliceContains(ss []string, s string) bool { } return false } + +// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header +// looks like it might've been a misdirected plaintext HTTP request. +func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool { + switch string(hdr[:]) { + case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO": + return true + } + return false +} diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 211f8cb467d5c..3f9750392c61d 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -3825,9 +3825,9 @@ func testTransportEventTrace(t *testing.T, h2 bool, noHooks bool) { } // Install a fake DNS server. 
- ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) { + ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, network, host string) ([]net.IPAddr, error) { if host != "dns-is-faked.golang" { - t.Errorf("unexpected DNS host lookup for %q", host) + t.Errorf("unexpected DNS host lookup for %q/%q", network, host) return nil, nil } return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil @@ -4176,7 +4176,7 @@ func TestTransportMaxIdleConns(t *testing.T) { if err != nil { t.Fatal(err) } - ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) { + ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, _, host string) ([]net.IPAddr, error) { return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil }) @@ -4416,9 +4416,9 @@ func testTransportIDNA(t *testing.T, h2 bool) { } // Install a fake DNS server. - ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) { + ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, network, host string) ([]net.IPAddr, error) { if host != punyDomain { - t.Errorf("got DNS host lookup for %q; want %q", host, punyDomain) + t.Errorf("got DNS host lookup for %q/%q; want %q", network, host, punyDomain) return nil, nil } return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil diff --git a/src/net/ipsock.go b/src/net/ipsock.go index 84fa0ac0a3270..7d0684d176400 100644 --- a/src/net/ipsock.go +++ b/src/net/ipsock.go @@ -277,7 +277,7 @@ func (r *Resolver) internetAddrList(ctx context.Context, net, addr string) (addr } // Try as a literal IP address, then as a DNS name. - ips, err := r.LookupIPAddr(ctx, host) + ips, err := r.lookupIPAddr(ctx, net, host) if err != nil { return nil, err } diff --git a/src/net/lookup.go b/src/net/lookup.go index e0f21fa9a8d3f..cb810dea267bd 100644 --- a/src/net/lookup.go +++ b/src/net/lookup.go @@ -97,6 +97,19 @@ func lookupPortMap(network, service string) (port int, error error) { return 0, &AddrError{Err: "unknown port", Addr: network + "/" + service} } +// ipVersion returns the provided network's IP version: '4', '6' or 0 +// if network does not end in a '4' or '6' byte. +func ipVersion(network string) byte { + if network == "" { + return 0 + } + n := network[len(network)-1] + if n != '4' && n != '6' { + n = 0 + } + return n +} + // DefaultResolver is the resolver used by the package-level Lookup // functions and by Dialers without a specified Resolver. var DefaultResolver = &Resolver{} @@ -189,6 +202,12 @@ func LookupIP(host string) ([]IP, error) { // LookupIPAddr looks up host using the local resolver. // It returns a slice of that host's IPv4 and IPv6 addresses. func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, error) { + return r.lookupIPAddr(ctx, "ip", host) +} + +// lookupIPAddr looks up host using the local resolver and particular network. +// It returns a slice of that host's IPv4 and IPv6 addresses. +func (r *Resolver) lookupIPAddr(ctx context.Context, network, host string) ([]IPAddr, error) { // Make sure that no matter what we do later, host=="" is rejected. // parseIP, for example, does accept empty strings. 
if host == "" { @@ -205,7 +224,7 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err // can be overridden by tests. This is needed by net/http, so it // uses a context key instead of unexported variables. resolverFunc := r.lookupIP - if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string) ([]IPAddr, error)); alt != nil { + if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string, string) ([]IPAddr, error)); alt != nil { resolverFunc = alt } @@ -218,7 +237,7 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err dnsWaitGroup.Add(1) ch, called := r.getLookupGroup().DoChan(host, func() (interface{}, error) { defer dnsWaitGroup.Done() - return testHookLookupIP(lookupGroupCtx, resolverFunc, host) + return testHookLookupIP(lookupGroupCtx, resolverFunc, network, host) }) if !called { dnsWaitGroup.Done() @@ -289,6 +308,13 @@ func LookupPort(network, service string) (port int, err error) { func (r *Resolver) LookupPort(ctx context.Context, network, service string) (port int, err error) { port, needsLookup := parsePort(service) if needsLookup { + switch network { + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + case "": // a hint wildcard for Go 1.0 undocumented behavior + network = "ip" + default: + return 0, &AddrError{Err: "unknown network", Addr: network} + } port, err = r.lookupPort(ctx, network, service) if err != nil { return 0, err diff --git a/src/net/lookup_fake.go b/src/net/lookup_fake.go index d3d1dbc90032f..6c8a151bcac1c 100644 --- a/src/net/lookup_fake.go +++ b/src/net/lookup_fake.go @@ -19,7 +19,7 @@ func (*Resolver) lookupHost(ctx context.Context, host string) (addrs []string, e return nil, syscall.ENOPROTOOPT } -func (*Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) { +func (*Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) { return nil, syscall.ENOPROTOOPT } diff --git a/src/net/lookup_plan9.go b/src/net/lookup_plan9.go index d5ae9b2fd9d98..70805ddf4cd05 100644 --- a/src/net/lookup_plan9.go +++ b/src/net/lookup_plan9.go @@ -176,7 +176,7 @@ loop: return } -func (r *Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) { +func (r *Resolver) lookupIP(ctx context.Context, _, host string) (addrs []IPAddr, err error) { lits, err := r.lookupHost(ctx, host) if err != nil { return diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go index 5c66dfa2603ad..aeeda8f7d0eb7 100644 --- a/src/net/lookup_test.go +++ b/src/net/lookup_test.go @@ -20,7 +20,7 @@ import ( "time" ) -func lookupLocalhost(ctx context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { +func lookupLocalhost(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { switch host { case "localhost": return []IPAddr{ @@ -28,7 +28,7 @@ func lookupLocalhost(ctx context.Context, fn func(context.Context, string) ([]IP {IP: IPv6loopback}, }, nil default: - return fn(ctx, host) + return fn(ctx, network, host) } } @@ -1008,3 +1008,29 @@ func TestConcurrentPreferGoResolversDial(t *testing.T) { } } } + +var ipVersionTests = []struct { + network string + version byte +}{ + {"tcp", 0}, + {"tcp4", '4'}, + {"tcp6", '6'}, + {"udp", 0}, + {"udp4", '4'}, + {"udp6", '6'}, + {"ip", 0}, + {"ip4", '4'}, + {"ip6", '6'}, + {"ip7", 0}, + {"", 0}, +} + +func TestIPVersion(t *testing.T) { + for _, tt := range 
ipVersionTests { + if version := ipVersion(tt.network); version != tt.version { + t.Errorf("Family for: %s. Expected: %s, Got: %s", tt.network, + string(tt.version), string(version)) + } + } +} diff --git a/src/net/lookup_unix.go b/src/net/lookup_unix.go index e8e7a9bf5a291..bef9dcfe14634 100644 --- a/src/net/lookup_unix.go +++ b/src/net/lookup_unix.go @@ -87,13 +87,13 @@ func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string, return r.goLookupHostOrder(ctx, host, order) } -func (r *Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) { +func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) { if r.preferGo() { return r.goLookupIP(ctx, host) } order := systemConf().hostLookupOrder(r, host) if order == hostLookupCgo { - if addrs, err, ok := cgoLookupIP(ctx, host); ok { + if addrs, err, ok := cgoLookupIP(ctx, network, host); ok { return addrs, err } // cgo not available (or netgo); fall back to Go's DNS resolver diff --git a/src/net/lookup_windows.go b/src/net/lookup_windows.go index f76e0af400dcc..8a68d18a674b3 100644 --- a/src/net/lookup_windows.go +++ b/src/net/lookup_windows.go @@ -65,7 +65,7 @@ func lookupProtocol(ctx context.Context, name string) (int, error) { } func (r *Resolver) lookupHost(ctx context.Context, name string) ([]string, error) { - ips, err := r.lookupIP(ctx, name) + ips, err := r.lookupIP(ctx, "ip", name) if err != nil { return nil, err } @@ -76,14 +76,22 @@ func (r *Resolver) lookupHost(ctx context.Context, name string) ([]string, error return addrs, nil } -func (r *Resolver) lookupIP(ctx context.Context, name string) ([]IPAddr, error) { +func (r *Resolver) lookupIP(ctx context.Context, network, name string) ([]IPAddr, error) { // TODO(bradfitz,brainman): use ctx more. See TODO below. + var family int32 = syscall.AF_UNSPEC + switch ipVersion(network) { + case '4': + family = syscall.AF_INET + case '6': + family = syscall.AF_INET6 + } + getaddr := func() ([]IPAddr, error) { acquireThread() defer releaseThread() hints := syscall.AddrinfoW{ - Family: syscall.AF_UNSPEC, + Family: family, Socktype: syscall.SOCK_STREAM, Protocol: syscall.IPPROTO_IP, } diff --git a/src/net/netgo_unix_test.go b/src/net/netgo_unix_test.go index f2244ea69c40b..c672d3e8ebfbe 100644 --- a/src/net/netgo_unix_test.go +++ b/src/net/netgo_unix_test.go @@ -16,7 +16,7 @@ func TestGoLookupIP(t *testing.T) { defer dnsWaitGroup.Wait() host := "localhost" ctx := context.Background() - _, err, ok := cgoLookupIP(ctx, host) + _, err, ok := cgoLookupIP(ctx, "ip", host) if ok { t.Errorf("cgoLookupIP must be a placeholder") } diff --git a/src/os/exec_windows.go b/src/os/exec_windows.go index d5d553a2f6b33..86b52f69bf976 100644 --- a/src/os/exec_windows.go +++ b/src/os/exec_windows.go @@ -35,11 +35,6 @@ func (p *Process) wait() (ps *ProcessState, err error) { return nil, NewSyscallError("GetProcessTimes", e) } p.setDone() - // NOTE(brainman): It seems that sometimes process is not dead - // when WaitForSingleObject returns. But we do not know any - // other way to wait for it. Sleeping for a while seems to do - // the trick sometimes. So we will sleep and smell the roses. 
- defer time.Sleep(5 * time.Millisecond) defer p.Release() return &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil } diff --git a/src/os/executable_test.go b/src/os/executable_test.go index 4a9a8837be415..d513c8760ee6d 100644 --- a/src/os/executable_test.go +++ b/src/os/executable_test.go @@ -36,8 +36,8 @@ func TestExecutable(t *testing.T) { // forge argv[0] for child, so that we can verify we could correctly // get real path of the executable without influenced by argv[0]. cmd.Args = []string{"-", "-test.run=XXXX"} - if runtime.GOOS == "openbsd" { - // OpenBSD relies on argv[0] + if runtime.GOOS == "openbsd" || runtime.GOOS == "aix" { + // OpenBSD and AIX rely on argv[0] cmd.Args[0] = fn } cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", executable_EnvVar)) diff --git a/src/os/file_posix.go b/src/os/file_posix.go index 544d0ad55d4d9..1c0de5c3a163e 100644 --- a/src/os/file_posix.go +++ b/src/os/file_posix.go @@ -7,6 +7,7 @@ package os import ( + "runtime" "syscall" "time" ) @@ -19,6 +20,10 @@ func Readlink(name string) (string, error) { for len := 128; ; len *= 2 { b := make([]byte, len) n, e := fixCount(syscall.Readlink(fixLongPath(name), b)) + // buffer too small + if runtime.GOOS == "aix" && e == syscall.ERANGE { + continue + } if e != nil { return "", &PathError{"readlink", name, e} } diff --git a/src/os/os_test.go b/src/os/os_test.go index 876058e73ad9a..d838272215594 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -178,7 +178,6 @@ func TestStatError(t *testing.T) { defer chtmpdir(t)() path := "no-such-file" - Remove(path) // Just in case fi, err := Stat(path) if err == nil { @@ -194,12 +193,10 @@ func TestStatError(t *testing.T) { testenv.MustHaveSymlink(t) link := "symlink" - Remove(link) // Just in case err = Symlink(path, link) if err != nil { t.Fatal(err) } - defer Remove(link) fi, err = Stat(link) if err == nil { @@ -688,12 +685,10 @@ func TestHardLink(t *testing.T) { defer chtmpdir(t)() from, to := "hardlinktestfrom", "hardlinktestto" - Remove(from) // Just in case. file, err := Create(to) if err != nil { t.Fatalf("open %q failed: %v", to, err) } - defer Remove(to) if err = file.Close(); err != nil { t.Errorf("close %q failed: %v", to, err) } @@ -709,7 +704,6 @@ func TestHardLink(t *testing.T) { t.Errorf("link %q, %q failed to return a valid error", none, none) } - defer Remove(from) tostat, err := Stat(to) if err != nil { t.Fatalf("stat %q failed: %v", to, err) @@ -745,11 +739,8 @@ func TestHardLink(t *testing.T) { } // chtmpdir changes the working directory to a new temporary directory and -// provides a cleanup function. Used when PWD is read-only. +// provides a cleanup function. func chtmpdir(t *testing.T) func() { - if runtime.GOOS != "darwin" || (runtime.GOARCH != "arm" && runtime.GOARCH != "arm64") { - return func() {} // only needed on darwin/arm{,64} - } oldwd, err := Getwd() if err != nil { t.Fatalf("chtmpdir: %v", err) @@ -774,12 +765,10 @@ func TestSymlink(t *testing.T) { defer chtmpdir(t)() from, to := "symlinktestfrom", "symlinktestto" - Remove(from) // Just in case. 
file, err := Create(to) if err != nil { t.Fatalf("Create(%q) failed: %v", to, err) } - defer Remove(to) if err = file.Close(); err != nil { t.Errorf("Close(%q) failed: %v", to, err) } @@ -787,7 +776,6 @@ func TestSymlink(t *testing.T) { if err != nil { t.Fatalf("Symlink(%q, %q) failed: %v", to, from, err) } - defer Remove(from) tostat, err := Lstat(to) if err != nil { t.Fatalf("Lstat(%q) failed: %v", to, err) @@ -841,12 +829,10 @@ func TestLongSymlink(t *testing.T) { // Long, but not too long: a common limit is 255. s = s + s + s + s + s + s + s + s + s + s + s + s + s + s + s from := "longsymlinktestfrom" - Remove(from) // Just in case. err := Symlink(s, from) if err != nil { t.Fatalf("symlink %q, %q failed: %v", s, from, err) } - defer Remove(from) r, err := Readlink(from) if err != nil { t.Fatalf("readlink %q failed: %v", from, err) @@ -859,9 +845,6 @@ func TestLongSymlink(t *testing.T) { func TestRename(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - // Ensure we are not testing the overwrite case here. - Remove(from) - Remove(to) file, err := Create(from) if err != nil { @@ -874,7 +857,6 @@ func TestRename(t *testing.T) { if err != nil { t.Fatalf("rename %q, %q failed: %v", to, from, err) } - defer Remove(to) _, err = Stat(to) if err != nil { t.Errorf("stat %q failed: %v", to, err) @@ -884,9 +866,6 @@ func TestRename(t *testing.T) { func TestRenameOverwriteDest(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - // Just in case. - Remove(from) - Remove(to) toData := []byte("to") fromData := []byte("from") @@ -904,7 +883,6 @@ func TestRenameOverwriteDest(t *testing.T) { if err != nil { t.Fatalf("rename %q, %q failed: %v", to, from, err) } - defer Remove(to) _, err = Stat(from) if err == nil { @@ -925,9 +903,6 @@ func TestRenameOverwriteDest(t *testing.T) { func TestRenameFailed(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - // Ensure we are not testing the overwrite case here. 
- Remove(from) - Remove(to) err := Rename(from, to) switch err := err.(type) { @@ -943,9 +918,6 @@ func TestRenameFailed(t *testing.T) { } case nil: t.Errorf("rename %q, %q: expected error, got nil", from, to) - - // cleanup whatever was placed in "renameto" - Remove(to) default: t.Errorf("rename %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) } @@ -956,7 +928,6 @@ func TestRenameNotExisting(t *testing.T) { from, to := "doesnt-exist", "dest" Mkdir(to, 0777) - defer Remove(to) if err := Rename(from, to); !IsNotExist(err) { t.Errorf("Rename(%q, %q) = %v; want an IsNotExist error", from, to, err) @@ -967,12 +938,8 @@ func TestRenameToDirFailed(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - Remove(from) - Remove(to) Mkdir(from, 0777) Mkdir(to, 0777) - defer Remove(from) - defer Remove(to) err := Rename(from, to) switch err := err.(type) { @@ -988,9 +955,6 @@ func TestRenameToDirFailed(t *testing.T) { } case nil: t.Errorf("rename %q, %q: expected error, got nil", from, to) - - // cleanup whatever was placed in "renameto" - Remove(to) default: t.Errorf("rename %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) } @@ -1493,7 +1457,11 @@ func runBinHostname(t *testing.T) string { } defer r.Close() const path = "/bin/hostname" - p, err := StartProcess(path, []string{"hostname"}, &ProcAttr{Files: []*File{nil, w, Stderr}}) + argv := []string{"hostname"} + if runtime.GOOS == "aix" { + argv = []string{"hostname", "-s"} + } + p, err := StartProcess(path, argv, &ProcAttr{Files: []*File{nil, w, Stderr}}) if err != nil { if _, err := Stat(path); IsNotExist(err) { t.Skipf("skipping test; test requires %s but it does not exist", path) @@ -1698,7 +1666,6 @@ func writeFile(t *testing.T, fname string, flag int, text string) string { func TestAppend(t *testing.T) { defer chtmpdir(t)() const f = "append.txt" - defer Remove(f) s := writeFile(t, f, O_CREATE|O_TRUNC|O_RDWR, "new") if s != "new" { t.Fatalf("writeFile: have %q want %q", s, "new") @@ -1765,13 +1732,11 @@ func TestSameFile(t *testing.T) { if err != nil { t.Fatalf("Create(a): %v", err) } - defer Remove(fa.Name()) fa.Close() fb, err := Create("b") if err != nil { t.Fatalf("Create(b): %v", err) } - defer Remove(fb.Name()) fb.Close() ia1, err := Stat("a") diff --git a/src/os/sticky_bsd.go b/src/os/sticky_bsd.go index 6b54c758c70a1..ae2744f81756b 100644 --- a/src/os/sticky_bsd.go +++ b/src/os/sticky_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd solaris +// +build aix darwin dragonfly freebsd netbsd openbsd solaris package os diff --git a/src/os/sticky_notbsd.go b/src/os/sticky_notbsd.go index 834e79b0b5901..edb5f69bf0580 100644 --- a/src/os/sticky_notbsd.go +++ b/src/os/sticky_notbsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// +build !aix // +build !darwin // +build !dragonfly // +build !freebsd diff --git a/src/path/filepath/example_unix_test.go b/src/path/filepath/example_unix_test.go index cd8233ceb6a60..20ec8927b4692 100644 --- a/src/path/filepath/example_unix_test.go +++ b/src/path/filepath/example_unix_test.go @@ -79,3 +79,18 @@ func ExampleJoin() { // a/b/c // a/b/c } + +func ExampleMatch() { + fmt.Println("On Unix:") + fmt.Println(filepath.Match("/home/catch/*", "/home/catch/foo")) + fmt.Println(filepath.Match("/home/catch/*", "/home/catch/foo/bar")) + fmt.Println(filepath.Match("/home/?opher", "/home/gopher")) + fmt.Println(filepath.Match("/home/\\*", "/home/*")) + + // Output: + // On Unix: + // true + // false + // true + // true +} diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 87076817f977f..21ed2bfcfdb34 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -519,9 +519,6 @@ again: // the BL deferreturn and jmpdefer rewinds to that. TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 MOVD 0(R1), R31 -#ifdef GOOS_aix - MOVD 16(R31), R31 // caller LR is on the previous stack frame on AIX -#endif SUB $8, R31 MOVD R31, LR diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 844e023715741..365e516ec8a30 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -483,7 +483,8 @@ func cpuinit() { const prefix = "GODEBUGCPU=" var env string - if GOOS == "linux" || GOOS == "darwin" { + switch GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "solaris", "linux": cpu.DebugOptions = true // Similar to goenv_unix but extracts the environment value for diff --git a/src/syscall/exec_libc.go b/src/syscall/exec_libc.go index d6d34c04c36dd..0133139000b69 100644 --- a/src/syscall/exec_libc.go +++ b/src/syscall/exec_libc.go @@ -124,14 +124,14 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr } if sys.Foreground { - pgrp := sys.Pgid + pgrp := _Pid_t(sys.Pgid) if pgrp == 0 { r1, err1 = getpid() if err1 != 0 { goto childerror } - pgrp = int(r1) + pgrp = _Pid_t(r1) } // Place process group in foreground. diff --git a/src/syscall/export_freebsd_test.go b/src/syscall/export_freebsd_test.go new file mode 100644 index 0000000000000..d47f09024f763 --- /dev/null +++ b/src/syscall/export_freebsd_test.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall + +type Dirent_freebsd11 = dirent_freebsd11 + +var ( + Roundup = roundup + ConvertFromDirents11 = convertFromDirents11 +) diff --git a/src/syscall/js/callback.go b/src/syscall/js/callback.go index 9d573074cbd39..2801e00b68399 100644 --- a/src/syscall/js/callback.go +++ b/src/syscall/js/callback.go @@ -20,6 +20,8 @@ var ( nextCallbackID uint32 = 1 ) +var _ Wrapper = Callback{} // Callback must implement Wrapper + // Callback is a Go function that got wrapped for use as a JavaScript callback. type Callback struct { Value // the JavaScript function that queues the callback for execution diff --git a/src/syscall/js/js.go b/src/syscall/js/js.go index 9d826c38864f4..19cdedc309ad6 100644 --- a/src/syscall/js/js.go +++ b/src/syscall/js/js.go @@ -26,11 +26,22 @@ type ref uint64 // nanHead are the upper 32 bits of a ref which are set if the value is not encoded as an IEEE 754 number (see above). const nanHead = 0x7FF80000 +// Wrapper is implemented by types that are backed by a JavaScript value. 
+type Wrapper interface { + // JSValue returns a JavaScript value associated with an object. + JSValue() Value +} + // Value represents a JavaScript value. The zero value is the JavaScript value "undefined". type Value struct { ref ref } +// JSValue implements Wrapper interface. +func (v Value) JSValue() Value { + return v +} + func makeValue(v ref) Value { return Value{ref: v} } @@ -105,12 +116,10 @@ func Global() Value { // | map[string]interface{} | new object | func ValueOf(x interface{}) Value { switch x := x.(type) { - case Value: + case Value: // should precede Wrapper to avoid a loop return x - case TypedArray: - return x.Value - case Callback: - return x.Value + case Wrapper: + return x.JSValue() case nil: return valueNull case bool: @@ -361,6 +370,26 @@ func (v Value) Bool() bool { } } +// Truthy returns the JavaScript "truthiness" of the value v. In JavaScript, +// false, 0, "", null, undefined, and NaN are "falsy", and everything else is +// "truthy". See https://developer.mozilla.org/en-US/docs/Glossary/Truthy. +func (v Value) Truthy() bool { + switch v.Type() { + case TypeUndefined, TypeNull: + return false + case TypeBoolean: + return v.Bool() + case TypeNumber: + return v.ref != valueNaN.ref && v.ref != valueZero.ref + case TypeString: + return v.String() != "" + case TypeSymbol, TypeFunction, TypeObject: + return true + default: + panic("bad type") + } +} + // String returns the value v converted to string according to JavaScript type conversions. func (v Value) String() string { str, length := valuePrepareString(v.ref) diff --git a/src/syscall/js/js_test.go b/src/syscall/js/js_test.go index ed39fe3714187..73d112a2e8f51 100644 --- a/src/syscall/js/js_test.go +++ b/src/syscall/js/js_test.go @@ -4,6 +4,15 @@ // +build js,wasm +// To run these tests: +// +// - Install Node +// - Add /path/to/go/misc/wasm to your $PATH (so that "go test" can find +// "go_js_wasm_exec"). +// - GOOS=js GOARCH=wasm go test +// +// See -exec in "go help test", and "go help run" for details. + package js_test import ( @@ -19,11 +28,19 @@ var dummys = js.Global().Call("eval", `({ someInt: 42, someFloat: 42.123, someArray: [41, 42, 43], + someDate: new Date(), add: function(a, b) { return a + b; }, zero: 0, + stringZero: "0", NaN: NaN, + emptyObj: {}, + emptyArray: [], + Infinity: Infinity, + NegInfinity: -Infinity, + objNumber0: new Number(0), + objBooleanFalse: new Boolean(false), })`) func TestBool(t *testing.T) { @@ -331,3 +348,40 @@ func ExampleNewCallback() { }) js.Global().Get("document").Call("getElementById", "myButton").Call("addEventListener", "click", cb) } + +// See +// - https://developer.mozilla.org/en-US/docs/Glossary/Truthy +// - https://stackoverflow.com/questions/19839952/all-falsey-values-in-javascript/19839953#19839953 +// - http://www.ecma-international.org/ecma-262/5.1/#sec-9.2 +func TestTruthy(t *testing.T) { + want := true + for _, key := range []string{ + "someBool", "someString", "someInt", "someFloat", "someArray", "someDate", + "stringZero", // "0" is truthy + "add", // functions are truthy + "emptyObj", "emptyArray", "Infinity", "NegInfinity", + // All objects are truthy, even if they're Number(0) or Boolean(false). 
+ "objNumber0", "objBooleanFalse", + } { + if got := dummys.Get(key).Truthy(); got != want { + t.Errorf("%s: got %#v, want %#v", key, got, want) + } + } + + want = false + if got := dummys.Get("zero").Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := dummys.Get("NaN").Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := js.ValueOf("").Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := js.Null().Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := js.Undefined().Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } +} diff --git a/src/syscall/js/typedarray.go b/src/syscall/js/typedarray.go index afa15488ec7a4..aa56cf69f3c5e 100644 --- a/src/syscall/js/typedarray.go +++ b/src/syscall/js/typedarray.go @@ -22,6 +22,8 @@ var ( float64Array = Global().Get("Float64Array") ) +var _ Wrapper = TypedArray{} // TypedArray must implement Wrapper + // TypedArray represents a JavaScript typed array. type TypedArray struct { Value diff --git a/src/syscall/syscall_aix.go b/src/syscall/syscall_aix.go index bddc590c3406f..6512761c3381a 100644 --- a/src/syscall/syscall_aix.go +++ b/src/syscall/syscall_aix.go @@ -184,15 +184,15 @@ func ReadDirent(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } -//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) +//sys wait4(pid _Pid_t, status *_C_int, options int, rusage *Rusage) (wpid _Pid_t, err error) func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int - var r Pid_t + var r _Pid_t err = ERESTART // AIX wait4 may return with ERESTART errno, while the processus is still // active. 
for err == ERESTART { - r, err = wait4(Pid_t(pid), &status, options, rusage) + r, err = wait4(_Pid_t(pid), &status, options, rusage) } wpid = int(r) if wstatus != nil { diff --git a/src/syscall/syscall_freebsd.go b/src/syscall/syscall_freebsd.go index 9ae024131dbe6..d6f75098c0981 100644 --- a/src/syscall/syscall_freebsd.go +++ b/src/syscall/syscall_freebsd.go @@ -223,31 +223,31 @@ func Fstat(fd int, st *Stat_t) (err error) { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { +func Statfs(path string, st *Statfs_t) (err error) { var oldStatfs statfs_freebsd11_t if supportsABI(_ino64First) { - return statfs_freebsd12(path, stat) + return statfs_freebsd12(path, st) } err = statfs(path, &oldStatfs) if err != nil { return err } - stat.convertFrom(&oldStatfs) + st.convertFrom(&oldStatfs) return nil } -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func Fstatfs(fd int, st *Statfs_t) (err error) { var oldStatfs statfs_freebsd11_t if supportsABI(_ino64First) { - return fstatfs_freebsd12(fd, stat) + return fstatfs_freebsd12(fd, st) } err = fstatfs(fd, &oldStatfs) if err != nil { return err } - stat.convertFrom(&oldStatfs) + st.convertFrom(&oldStatfs) return nil } @@ -262,7 +262,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { oldBuf := make([]byte, oldBufLen) n, err = getdirentries(fd, oldBuf, basep) if err == nil && n > 0 { - n = convertFromDirents11(oldBuf[:n], buf) + n = convertFromDirents11(buf, oldBuf[:n]) } return } @@ -344,17 +344,20 @@ func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { copy(s.Mntonname[:], old.Mntonname[:n]) } -func convertFromDirents11(old []byte, buf []byte) int { - oldFixedSize := int(unsafe.Offsetof((*dirent_freebsd11)(nil).Name)) - fixedSize := int(unsafe.Offsetof((*Dirent)(nil).Name)) - srcPos := 0 +func convertFromDirents11(buf []byte, old []byte) int { + const ( + fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) + oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) + ) + dstPos := 0 + srcPos := 0 for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos])) dstDirent := (*Dirent)(unsafe.Pointer(&buf[dstPos])) + srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos])) reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) - if dstPos+reclen >= len(buf) { + if dstPos+reclen > len(buf) { break } diff --git a/src/syscall/syscall_freebsd_test.go b/src/syscall/syscall_freebsd_test.go new file mode 100644 index 0000000000000..3ccfe5d463f0e --- /dev/null +++ b/src/syscall/syscall_freebsd_test.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build freebsd + +package syscall_test + +import ( + "fmt" + "syscall" + "testing" + "unsafe" +) + +func TestConvertFromDirent11(t *testing.T) { + const ( + filenameFmt = "%04d" + numFiles = 64 + fixedHdrSize = int(unsafe.Offsetof(syscall.Dirent_freebsd11{}.Name)) + ) + + namlen := len(fmt.Sprintf(filenameFmt, 0)) + reclen := syscall.Roundup(fixedHdrSize+namlen+1, 4) + old := make([]byte, numFiles*reclen) + for i := 0; i < numFiles; i++ { + dent := syscall.Dirent_freebsd11{ + Fileno: uint32(i + 1), + Reclen: uint16(reclen), + Type: syscall.DT_REG, + Namlen: uint8(namlen), + } + rec := make([]byte, reclen) + copy(rec, (*[fixedHdrSize]byte)(unsafe.Pointer(&dent))[:]) + copy(rec[fixedHdrSize:], fmt.Sprintf(filenameFmt, i+1)) + copy(old[i*reclen:], rec) + } + + buf := make([]byte, 2*len(old)) + n := syscall.ConvertFromDirents11(buf, old) + + names := make([]string, 0, numFiles) + _, _, names = syscall.ParseDirent(buf[:n], -1, names) + + if len(names) != numFiles { + t.Errorf("expected %d files, have %d; names: %q", numFiles, len(names), names) + } + + for i, name := range names { + if expected := fmt.Sprintf(filenameFmt, i+1); name != expected { + t.Errorf("expected names[%d] to be %q; got %q", i, expected, name) + } + } +} diff --git a/src/syscall/types_aix.go b/src/syscall/types_aix.go index f9f05af6673bb..b961bdb197eaa 100644 --- a/src/syscall/types_aix.go +++ b/src/syscall/types_aix.go @@ -86,7 +86,7 @@ type Rusage C.struct_rusage type Rlimit C.struct_rlimit -type Pid_t C.pid_t +type _Pid_t C.pid_t type _Gid_t C.gid_t diff --git a/src/syscall/types_solaris.go b/src/syscall/types_solaris.go index a9e6d6bdd63b7..76a74508d2b4e 100644 --- a/src/syscall/types_solaris.go +++ b/src/syscall/types_solaris.go @@ -101,6 +101,8 @@ type Rusage C.struct_rusage type Rlimit C.struct_rlimit +type _Pid_t C.pid_t + type _Gid_t C.gid_t // Files diff --git a/src/syscall/zsyscall_aix_ppc64.go b/src/syscall/zsyscall_aix_ppc64.go index 3ea11f8af3f17..fe27dcadf2802 100644 --- a/src/syscall/zsyscall_aix_ppc64.go +++ b/src/syscall/zsyscall_aix_ppc64.go @@ -420,9 +420,9 @@ func getdirent(fd int, buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) { +func wait4(pid _Pid_t, status *_C_int, options int, rusage *Rusage) (wpid _Pid_t, err error) { r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_wait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(status)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = Pid_t(r0) + wpid = _Pid_t(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/src/syscall/ztypes_aix_ppc64.go b/src/syscall/ztypes_aix_ppc64.go index 4fbbe23134663..314266ea79776 100644 --- a/src/syscall/ztypes_aix_ppc64.go +++ b/src/syscall/ztypes_aix_ppc64.go @@ -64,7 +64,7 @@ type Rlimit struct { Max uint64 } -type Pid_t int32 +type _Pid_t int32 type _Gid_t uint32 diff --git a/src/syscall/ztypes_solaris_amd64.go b/src/syscall/ztypes_solaris_amd64.go index b892cd6612ff0..f846666fff2cb 100644 --- a/src/syscall/ztypes_solaris_amd64.go +++ b/src/syscall/ztypes_solaris_amd64.go @@ -60,6 +60,8 @@ type Rlimit struct { Max uint64 } +type _Pid_t int32 + type _Gid_t uint32 const ( diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go index efdad3297c686..7c35b0ff3d891 100644 --- a/src/text/template/parse/parse.go +++ b/src/text/template/parse/parse.go @@ -380,46 +380,44 @@ func (t *Tree) action() (n Node) { // Pipeline: // declarations? 
command ('|' command)* func (t *Tree) pipeline(context string) (pipe *PipeNode) { - decl := false - var vars []*VariableNode token := t.peekNonSpace() - pos := token.pos + pipe = t.newPipeline(token.pos, token.line, nil) // Are there declarations or assignments? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. - tokenAfterVariable := t.peek() - next := t.peekNonSpace() - switch { - case next.typ == itemAssign, next.typ == itemDeclare, - next.typ == itemChar && next.val == ",": - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - vars = append(vars, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemDeclare { - decl = true - } - if next.typ == itemChar && next.val == "," { - if context == "range" && len(vars) < 2 { - continue - } - t.errorf("too many declarations in %s", context) +decls: + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. + tokenAfterVariable := t.peek() + next := t.peekNonSpace() + switch { + case next.typ == itemAssign, next.typ == itemDeclare: + pipe.IsAssign = next.typ == itemAssign + t.nextNonSpace() + pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val)) + t.vars = append(t.vars, v.val) + case next.typ == itemChar && next.val == ",": + t.nextNonSpace() + pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val)) + t.vars = append(t.vars, v.val) + if context == "range" && len(pipe.Decl) < 2 { + switch t.peekNonSpace().typ { + case itemVariable, itemRightDelim, itemRightParen: + // second initialized variable in a range pipeline + goto decls + default: + t.errorf("range can only initialize variables") } - case tokenAfterVariable.typ == itemSpace: - t.backup3(v, tokenAfterVariable) - default: - t.backup2(v) } + t.errorf("too many declarations in %s", context) + case tokenAfterVariable.typ == itemSpace: + t.backup3(v, tokenAfterVariable) + default: + t.backup2(v) } - break } - pipe = t.newPipeline(pos, token.line, vars) - pipe.IsAssign = !decl for { switch token := t.nextNonSpace(); token.typ { case itemRightDelim, itemRightParen: diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go index d03987581c958..15cc65670adbb 100644 --- a/src/text/template/parse/parse_test.go +++ b/src/text/template/parse/parse_test.go @@ -450,18 +450,37 @@ var errorTests = []parseTest{ {"multilinerawstring", "{{ $v := `\n` }} {{", hasError, `multilinerawstring:2: unexpected unclosed action`}, + {"rangeundefvar", + "{{range $k}}{{end}}", + hasError, `undefined variable`}, + {"rangeundefvars", + "{{range $k, $v}}{{end}}", + hasError, `undefined variable`}, + {"rangemissingvalue1", + "{{range $k,}}{{end}}", + hasError, `missing value for range`}, + {"rangemissingvalue2", + "{{range $k, $v := }}{{end}}", + hasError, `missing value for range`}, + {"rangenotvariable1", + "{{range $k, .}}{{end}}", + hasError, `range can only initialize variables`}, + {"rangenotvariable2", + "{{range $k, 123 := .}}{{end}}", + 
hasError, `range can only initialize variables`}, } func TestErrors(t *testing.T) { for _, test := range errorTests { - _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree)) - if err == nil { - t.Errorf("%q: expected error", test.name) - continue - } - if !strings.Contains(err.Error(), test.result) { - t.Errorf("%q: error %q does not contain %q", test.name, err, test.result) - } + t.Run(test.name, func(t *testing.T) { + _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree)) + if err == nil { + t.Fatalf("expected error %q, got nil", test.result) + } + if !strings.Contains(err.Error(), test.result) { + t.Fatalf("error %q does not contain %q", err, test.result) + } + }) } } diff --git a/src/time/zoneinfo_js.go b/src/time/zoneinfo_js.go new file mode 100644 index 0000000000000..d640c831bfe82 --- /dev/null +++ b/src/time/zoneinfo_js.go @@ -0,0 +1,66 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js,wasm + +package time + +import ( + "runtime" + "syscall/js" +) + +var zoneSources = []string{ + "/usr/share/zoneinfo/", + "/usr/share/lib/zoneinfo/", + "/usr/lib/locale/TZ/", + runtime.GOROOT() + "/lib/time/zoneinfo.zip", +} + +func initLocal() { + localLoc.name = "Local" + + z := zone{} + d := js.Global().Get("Date").New() + offset := d.Call("getTimezoneOffset").Int() * -1 + z.offset = offset * 60 + // According to https://tc39.github.io/ecma262/#sec-timezoneestring, + // the timezone name from (new Date()).toTimeString() is an implementation-dependent + // result, and in Google Chrome, it gives the fully expanded name rather than + // the abbreviation. + // Hence, we construct the name from the offset. + z.name = "UTC" + if offset < 0 { + z.name += "-" + } else { + z.name += "+" + } + z.name += itoa(offset / 60) + min := offset % 60 + if min != 0 { + z.name += ":" + itoa(min) + } + localLoc.zone = []zone{z} +} + +// itoa is just the fast path implementation copied from strconv.Itoa. +// No timezone hour can exceed 100, so the fast path will always satisfy. +func itoa(i int) string { + if i < 10 { + return digits[i : i+1] + } + return smallsString[i*2 : i*2+2] +} + +const smallsString = "00010203040506070809" + + "10111213141516171819" + + "20212223242526272829" + + "30313233343536373839" + + "40414243444546474849" + + "50515253545556575859" + + "60616263646566676869" + + "70717273747576777879" + + "80818283848586878889" + + "90919293949596979899" +const digits = "0123456789" diff --git a/src/time/zoneinfo_unix.go b/src/time/zoneinfo_unix.go index fca8e5497b928..d6bcabfb8083b 100644 --- a/src/time/zoneinfo_unix.go +++ b/src/time/zoneinfo_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin,386 darwin,amd64 dragonfly freebsd js,wasm linux,!android nacl netbsd openbsd solaris +// +build aix darwin,386 darwin,amd64 dragonfly freebsd linux,!android nacl netbsd openbsd solaris // Parse "zoneinfo" time zone file. // This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others. 
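The new time/zoneinfo_js.go above names the local zone purely from the JavaScript Date offset (for example "UTC+5:30"). For reference, here is a minimal standalone sketch of that offset-to-name mapping, not tied to syscall/js; the helper name zoneNameFromOffset and the use of strconv.Itoa are illustrative choices, not part of the CL:

package main

import (
	"fmt"
	"strconv"
)

// zoneNameFromOffset builds a zone name of the form "UTC+H[:MM]" or
// "UTC-H[:MM]" from an offset given in minutes east of UTC, the same
// naming scheme the new initLocal in zoneinfo_js.go derives from
// (new Date()).getTimezoneOffset(). The sign is normalized before
// formatting so negative offsets render as "UTC-...".
func zoneNameFromOffset(offsetMinutes int) string {
	name := "UTC"
	if offsetMinutes < 0 {
		name += "-"
		offsetMinutes = -offsetMinutes
	} else {
		name += "+"
	}
	name += strconv.Itoa(offsetMinutes / 60)
	if min := offsetMinutes % 60; min != 0 {
		name += ":" + strconv.Itoa(min)
	}
	return name
}

func main() {
	fmt.Println(zoneNameFromOffset(330))  // UTC+5:30
	fmt.Println(zoneNameFromOffset(-240)) // UTC-4
	fmt.Println(zoneNameFromOffset(0))    // UTC+0
}
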
diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go
index c21de19707d1f..977cbe6eb1317 100644
--- a/test/codegen/mathbits.go
+++ b/test/codegen/mathbits.go
@@ -326,6 +326,126 @@ func IterateBits8(n uint8) int {
 	return i
 }
 
+// --------------- //
+//    bits.Add*    //
+// --------------- //
+
+func Add(x, y, ci uint) (r, co uint) {
+	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
+	return bits.Add(x, y, ci)
+}
+
+func AddC(x, ci uint) (r, co uint) {
+	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
+	return bits.Add(x, 7, ci)
+}
+
+func AddZ(x, y uint) (r, co uint) {
+	// amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ"
+	return bits.Add(x, y, 0)
+}
+
+func AddR(x, y, ci uint) uint {
+	// amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ"
+	r, _ := bits.Add(x, y, ci)
+	return r
+}
+func AddM(p, q, r *[3]uint) {
+	var c uint
+	r[0], c = bits.Add(p[0], q[0], c)
+	// amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ"
+	r[1], c = bits.Add(p[1], q[1], c)
+	r[2], c = bits.Add(p[2], q[2], c)
+}
+
+func Add64(x, y, ci uint64) (r, co uint64) {
+	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
+	return bits.Add64(x, y, ci)
+}
+
+func Add64C(x, ci uint64) (r, co uint64) {
+	// amd64:"NEGL","ADCQ","SBBQ","NEGQ"
+	return bits.Add64(x, 7, ci)
+}
+
+func Add64Z(x, y uint64) (r, co uint64) {
+	// amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ"
+	return bits.Add64(x, y, 0)
+}
+
+func Add64R(x, y, ci uint64) uint64 {
+	// amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ"
+	r, _ := bits.Add64(x, y, ci)
+	return r
+}
+func Add64M(p, q, r *[3]uint64) {
+	var c uint64
+	r[0], c = bits.Add64(p[0], q[0], c)
+	// amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ"
+	r[1], c = bits.Add64(p[1], q[1], c)
+	r[2], c = bits.Add64(p[2], q[2], c)
+}
+
+// --------------- //
+//    bits.Sub*    //
+// --------------- //
+
+func Sub(x, y, ci uint) (r, co uint) {
+	// amd64:"NEGL","SBBQ","NEGQ"
+	return bits.Sub(x, y, ci)
+}
+
+func SubC(x, ci uint) (r, co uint) {
+	// amd64:"NEGL","SBBQ","NEGQ"
+	return bits.Sub(x, 7, ci)
+}
+
+func SubZ(x, y uint) (r, co uint) {
+	// amd64:"SUBQ","SBBQ","NEGQ",-"NEGL"
+	return bits.Sub(x, y, 0)
+}
+
+func SubR(x, y, ci uint) uint {
+	// amd64:"NEGL","SBBQ",-"NEGQ"
+	r, _ := bits.Sub(x, y, ci)
+	return r
+}
+func SubM(p, q, r *[3]uint) {
+	var c uint
+	r[0], c = bits.Sub(p[0], q[0], c)
+	// amd64:"SBBQ",-"NEGL",-"NEGQ"
+	r[1], c = bits.Sub(p[1], q[1], c)
+	r[2], c = bits.Sub(p[2], q[2], c)
+}
+
+func Sub64(x, y, ci uint64) (r, co uint64) {
+	// amd64:"NEGL","SBBQ","NEGQ"
+	return bits.Sub64(x, y, ci)
+}
+
+func Sub64C(x, ci uint64) (r, co uint64) {
+	// amd64:"NEGL","SBBQ","NEGQ"
+	return bits.Sub64(x, 7, ci)
+}
+
+func Sub64Z(x, y uint64) (r, co uint64) {
+	// amd64:"SUBQ","SBBQ","NEGQ",-"NEGL"
+	return bits.Sub64(x, y, 0)
+}
+
+func Sub64R(x, y, ci uint64) uint64 {
+	// amd64:"NEGL","SBBQ",-"NEGQ"
+	r, _ := bits.Sub64(x, y, ci)
+	return r
+}
+func Sub64M(p, q, r *[3]uint64) {
+	var c uint64
+	r[0], c = bits.Sub64(p[0], q[0], c)
+	// amd64:"SBBQ",-"NEGL",-"NEGQ"
+	r[1], c = bits.Sub64(p[1], q[1], c)
+	r[2], c = bits.Sub64(p[2], q[2], c)
+}
+
 // --------------- //
 //    bits.Mul*    //
 // --------------- //
diff --git a/test/codegen/memops.go b/test/codegen/memops.go
index 223375ac11fd5..dcf5863666521 100644
--- a/test/codegen/memops.go
+++ b/test/codegen/memops.go
@@ -84,3 +84,12 @@ func compMem2() int {
 	}
 	return 0
 }
+
+func compMem3(x, y *int) (int, bool) {
+	// We can do comparisons of a register with memory even if
+	// the register is used subsequently.
+	r := *x
+	// amd64:`CMPQ\t\(`
+	// 386:`CMPL\t\(`
+	return r, r < *y
+}
diff --git a/test/fixedbugs/issue27938.go b/test/fixedbugs/issue27938.go
new file mode 100644
index 0000000000000..b0007be928361
--- /dev/null
+++ b/test/fixedbugs/issue27938.go
@@ -0,0 +1,23 @@
+// errorcheck
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Verify that we get a single non-confusing error
+// message for embedded fields/interfaces that use
+// a qualified identifier with non-existing package.
+
+package p
+
+type _ struct {
+	F sync.Mutex // ERROR "undefined: sync"
+}
+
+type _ struct {
+	sync.Mutex // ERROR "undefined: sync"
+}
+
+type _ interface {
+	sync.Mutex // ERROR "undefined: sync"
+}
diff --git a/test/fixedbugs/issue28268.go b/test/fixedbugs/issue28268.go
new file mode 100644
index 0000000000000..fdc6974d1c0fb
--- /dev/null
+++ b/test/fixedbugs/issue28268.go
@@ -0,0 +1,30 @@
+// errorcheck
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Verify that follow-on errors due to conflicting
+// struct field and method names are suppressed.
+
+package p
+
+type T struct {
+	a, b, c int
+	E
+}
+
+type E struct{}
+
+func (T) b() {}  // ERROR "field and method named b"
+func (*T) E() {} // ERROR "field and method named E"
+
+func _() {
+	var x T
+	_ = x.a
+	_ = x.b // no follow-on error here
+	x.b()   // no follow-on error here
+	_ = x.c
+	_ = x.E // no follow-on error here
+	x.E()   // no follow-on error here
+}
diff --git a/test/fixedbugs/issue28390.go b/test/fixedbugs/issue28390.go
new file mode 100644
index 0000000000000..0a4d873182d48
--- /dev/null
+++ b/test/fixedbugs/issue28390.go
@@ -0,0 +1,39 @@
+// run
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 28390/28430: Function call arguments were not
+// converted correctly under some circumstances.
+
+package main
+
+import "fmt"
+
+type A struct {
+	K int
+	S string
+	M map[string]string
+}
+
+func newA(k int, s string) (a A) {
+	a.K = k
+	a.S = s
+	a.M = make(map[string]string)
+	a.M[s] = s
+	return
+}
+
+func proxy() (x int, a A) {
+	return 1, newA(2, "3")
+}
+
+func consume(x int, a interface{}) {
+	fmt.Println(x)
+	fmt.Println(a) // used to panic here
+}
+
+func main() {
+	consume(proxy())
+}
diff --git a/test/fixedbugs/issue28390.out b/test/fixedbugs/issue28390.out
new file mode 100644
index 0000000000000..c923108f817ad
--- /dev/null
+++ b/test/fixedbugs/issue28390.out
@@ -0,0 +1,2 @@
+1
+{2 3 map[3:3]}
diff --git a/test/fixedbugs/issue28430.go b/test/fixedbugs/issue28430.go
new file mode 100644
index 0000000000000..b59259abdc0f0
--- /dev/null
+++ b/test/fixedbugs/issue28430.go
@@ -0,0 +1,17 @@
+// compile
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 28390/28430: Function call arguments were not
+// converted correctly under some circumstances.
+
+package main
+
+func g(_ interface{}, e error)
+func h() (int, error)
+
+func f() {
+	g(h())
+}
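
The issue28390/issue28430 tests above exercise a multi-valued call used as the entire argument list of another call, where one result has to be implicitly converted (here to an interface type) while being forwarded. A minimal standalone sketch of the same pattern, not part of the diff (pair and show are hypothetical names):

package main

import "fmt"

func pair() (int, string) { return 42, "hello" }

// show takes an interface{}, so the string result of pair
// must be converted when it is passed through.
func show(n int, v interface{}) {
	fmt.Println(n, v)
}

func main() {
	show(pair()) // prints: 42 hello
}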
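The bits.Add64/bits.Sub64 codegen tests added in test/codegen/mathbits.go above check that a carry threaded through successive calls is lowered to an ADCQ/SBBQ chain on amd64. A minimal sketch of that multi-word pattern, not part of the diff (add192 is a hypothetical helper):

package main

import (
	"fmt"
	"math/bits"
)

// add192 adds two 192-bit integers held as [3]uint64 limbs
// (least significant limb first), threading the carry through
// bits.Add64 the same way the Add64M test does.
func add192(x, y [3]uint64) (sum [3]uint64, carry uint64) {
	var c uint64
	sum[0], c = bits.Add64(x[0], y[0], 0)
	sum[1], c = bits.Add64(x[1], y[1], c)
	sum[2], c = bits.Add64(x[2], y[2], c)
	return sum, c
}

func main() {
	x := [3]uint64{^uint64(0), ^uint64(0), 0} // 2^128 - 1
	y := [3]uint64{1, 0, 0}
	fmt.Println(add192(x, y)) // [0 0 1] 0
}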