diff --git a/content/7-lang/defer.md b/content/7-lang/defer.md
index f8931236..616076a5 100644
--- a/content/7-lang/defer.md
+++ b/content/7-lang/defer.md
@@ -266,7 +266,8 @@ func deferreturn(arg0 uintptr) {
 只是函数的入口地址和参数,以及它的调用方 `deferreturn` 的 sp:
 
 ```asm
-// void jmpdefer(fn, sp);
+// func jmpdefer(fv *funcval, argp uintptr)
+// argp 为调用方 SP
 // 从 deferreturn 调用
 // 1. 出栈调用方
 // 2. 将调用方的返回地址减去 5 个字节
diff --git a/gosrc/runtime/README.md b/gosrc/runtime/README.md
index 0359a29a..f241aae2 100644
--- a/gosrc/runtime/README.md
+++ b/gosrc/runtime/README.md
@@ -116,11 +116,8 @@ P 可以理解为一个 OS 调度器中的 CPU,`p` 类型的内容类似于每个
 
 在非托管内存中分配的对象**不得包含**堆指针,除非遵循下列原则:
 
-1. 任何来自非托管内存的堆指针必须在 `runtime.markroot` 中添加为显式垃圾回收的 root。
-2. If the memory is reused, the heap pointers must be zero-initialized
-   before they become visible as GC roots. Otherwise, the GC may
-   observe stale heap pointers. See "Zero-initialization versus
-   zeroing". 如果内存被重用,那么堆指针必须进行在他们作为 GC root 可见前进行零初始化。否则,GC 可能会回收已经过时的堆指针。请参考「零初始化与归零」
+1. 任何来自非托管内存的指向堆的指针必须为垃圾回收的 root。具体而言,所有指针必须要么能够被一个全局变量访问到,要么能够在 `runtime.markroot` 中添加为显式垃圾回收的 root。
+2. 如果内存被重用,那么堆指针必须在它们作为 GC root 可见前进行零初始化。否则,GC 可能会观察到过时的堆指针。请参考「零初始化与归零」
 
 零初始化 v.s. 归零
 ==================================
diff --git a/gosrc/runtime/alg.go b/gosrc/runtime/alg.go
index 826b079d..8c5df4eb 100644
--- a/gosrc/runtime/alg.go
+++ b/gosrc/runtime/alg.go
@@ -296,6 +296,10 @@ func alginit() {
 }
 
 func initAlgAES() {
+	if GOOS == "aix" {
+		// runtime.algarray is immutable on AIX: see cmd/link/internal/ld/xcoff.go
+		return
+	}
 	useAeshash = true
 	algarray[alg_MEM32].hash = aeshash32
 	algarray[alg_MEM64].hash = aeshash64
diff --git a/gosrc/runtime/asm.s b/gosrc/runtime/asm.s
index 6b209b2d..d7e5bbb6 100644
--- a/gosrc/runtime/asm.s
+++ b/gosrc/runtime/asm.s
@@ -38,3 +38,11 @@ GLOBL runtime·memstats(SB), NOPTR, $0
 // This function must be sizeofSkipFunction bytes.
 TEXT runtime·skipPleaseUseCallersFrames(SB),NOSPLIT,$0-0
 	SKIP64; SKIP64; SKIP64; SKIP64
+
+// abi0Syms is a dummy symbol that creates ABI0 wrappers for Go
+// functions called from assembly in other packages.
+TEXT abi0Syms<>(SB),NOSPLIT,$0-0
+	// obj assumes it can call morestack* using ABI0, but
+	// morestackc is actually defined in Go.
+	CALL	·morestackc(SB)
+	// References from syscall are automatically collected by cmd/go.
\ No newline at end of file
diff --git a/gosrc/runtime/asm_amd64.s b/gosrc/runtime/asm_amd64.s
index e26f7ad6..f49fb0ca 100644
--- a/gosrc/runtime/asm_amd64.s
+++ b/gosrc/runtime/asm_amd64.s
@@ -229,7 +229,7 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0
  *  go-routine
  */
 
-// void gosave(Gobuf*)
+// func gosave(buf *gobuf)
 // save state in Gobuf; setjmp
 TEXT runtime·gosave(SB), NOSPLIT, $0-8
 	MOVQ	buf+0(FP), AX		// gobuf
@@ -249,7 +249,7 @@ TEXT runtime·gosave(SB), NOSPLIT, $0-8
 	MOVQ	BX, gobuf_g(AX)
 	RET
 
-// void gogo(Gobuf*)
+// func gogo(buf *gobuf)
 // 从 Gobuf 恢复状态; longjmp
 TEXT runtime·gogo(SB), NOSPLIT, $16-8
 	MOVQ	buf+0(FP), BX		// 运行现场
@@ -448,9 +448,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
 	JMP	AX
 // Note: can't just "JMP NAME(SB)" - bad inlining results.
 
-TEXT reflect·call(SB), NOSPLIT, $0-0
-	JMP	·reflectcall(SB)
-
 TEXT ·reflectcall(SB), NOSPLIT, $0-32
 	MOVLQZX argsize+24(FP), CX
 	DISPATCH(runtime·call32, 32)
@@ -560,7 +557,8 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
 	// compile barrier.
 	RET
 
-// void jmpdefer(fn, sp);
+// func jmpdefer(fv *funcval, argp uintptr)
+// argp 为调用方 SP
 // 从 deferreturn 调用
 // 1. 出栈调用方
 // 2. 将调用方的返回地址减去 5 个字节
@@ -662,7 +660,7 @@ nosave:
 	MOVL	AX, ret+16(FP)
 	RET
 
-// cgocallback(void (*fn)(void*), void *frame, uintptr framesize, uintptr ctxt)
+// func cgocallback(fn, frame unsafe.Pointer, framesize, ctxt uintptr)
 // Turn the fn into a Go func (by taking its address) and call
 // cgocallback_gofunc.
 TEXT runtime·cgocallback(SB),NOSPLIT,$32-32
@@ -678,7 +676,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$32-32
 	CALL	AX
 	RET
 
-// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize, uintptr ctxt)
+// func cgocallback_gofunc(fn, frame, framesize, ctxt uintptr)
 // See cgocall.go for more details.
 TEXT ·cgocallback_gofunc(SB),NOSPLIT,$16-32
 	NO_LOCAL_POINTERS
@@ -708,7 +706,7 @@ needm:
 	get_tls(CX)
 	MOVQ	g(CX), BX
 	MOVQ	g_m(BX), BX
-	
+
 	// Set m->sched.sp = SP, so that if a panic happens
 	// during the function we are about to execute, it will
 	// have a valid SP to run on the g0 stack.
@@ -803,7 +801,8 @@ havem:
 	// Done!
 	RET
 
-// void setg(G*); set g. for use by needm.
+// func setg(gg *g)
+// set g. for use by needm.
 TEXT runtime·setg(SB), NOSPLIT, $0-8
 	MOVQ	gg+0(FP), BX
 #ifdef GOOS_windows
@@ -858,6 +857,7 @@ done:
 	MOVQ	AX, ret+0(FP)
 	RET
 
+// func aeshash(p unsafe.Pointer, h, s uintptr) uintptr
 // hash function using AES hardware instructions
 TEXT runtime·aeshash(SB),NOSPLIT,$0-32
 	MOVQ	p+0(FP), AX	// ptr to data
@@ -865,6 +865,7 @@ TEXT runtime·aeshash(SB),NOSPLIT,$0-32
 	LEAQ	ret+24(FP), DX
 	JMP	runtime·aeshashbody(SB)
 
+// func aeshashstr(p unsafe.Pointer, h uintptr) uintptr
 TEXT runtime·aeshashstr(SB),NOSPLIT,$0-24
 	MOVQ	p+0(FP), AX	// ptr to string struct
 	MOVQ	8(AX), CX	// length of string
@@ -1202,7 +1203,8 @@ aesloop:
 	PXOR	X9, X8
 	MOVQ	X8, (DX)
 	RET
-	
+
+// func aeshash32(p unsafe.Pointer, h uintptr) uintptr
TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
 	MOVQ	p+0(FP), AX	// ptr to data
 	MOVQ	h+8(FP), X0	// seed
@@ -1213,6 +1215,7 @@ TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
 	MOVQ	X0, ret+16(FP)
 	RET
 
+// func aeshash64(p unsafe.Pointer, h uintptr) uintptr
 TEXT runtime·aeshash64(SB),NOSPLIT,$0-24
 	MOVQ	p+0(FP), AX	// ptr to data
 	MOVQ	h+8(FP), X0	// seed
@@ -1258,6 +1261,7 @@ DATA masks<>+0xf0(SB)/8, $0xffffffffffffffff
 DATA masks<>+0xf8(SB)/8, $0x00ffffffffffffff
 GLOBL masks<>(SB),RODATA,$256
 
+// func checkASM() bool
 TEXT ·checkASM(SB),NOSPLIT,$0-1
 	// check that masks<>(SB) and shifts<>(SB) are aligned to 16-byte
 	MOVQ	$masks<>(SB), AX
@@ -1457,7 +1461,7 @@ GLOBL debugCallFrameTooLarge<>(SB), RODATA, $0x14	// Size duplicated below
 // This function communicates back to the debugger by setting RAX and
 // invoking INT3 to raise a breakpoint signal. See the comments in the
 // implementation for the protocol the debugger is expected to
-// follow. InjectDebugCall in the runtime tests demonstates this protocol.
+// follow. InjectDebugCall in the runtime tests demonstrates this protocol.
 //
 // The debugger must ensure that any pointers passed to the function
 // obey escape analysis requirements. Specifically, it must not pass
@@ -1608,6 +1612,7 @@ DEBUG_CALL_FN(debugCall16384<>, 16384)
 DEBUG_CALL_FN(debugCall32768<>, 32768)
 DEBUG_CALL_FN(debugCall65536<>, 65536)
 
+// func debugCallPanicked(val interface{})
 TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
 	// Copy the panic value to the top of stack.
MOVQ val_type+0(FP), AX diff --git a/gosrc/runtime/asm_wasm.s b/gosrc/runtime/asm_wasm.s index baf840d0..374b9f73 100644 --- a/gosrc/runtime/asm_wasm.s +++ b/gosrc/runtime/asm_wasm.s @@ -293,9 +293,6 @@ TEXT ·cgocallback_gofunc(SB), NOSPLIT, $16-32 JMP NAME(SB); \ End -TEXT reflect·call(SB), NOSPLIT, $0-0 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT, $0-32 I64Load fn+8(FP) I64Eqz diff --git a/gosrc/runtime/atomic_pointer.go b/gosrc/runtime/atomic_pointer.go index 8bde86c1..a9069d68 100644 --- a/gosrc/runtime/atomic_pointer.go +++ b/gosrc/runtime/atomic_pointer.go @@ -11,7 +11,6 @@ import ( // 这些函数不能拥有 go:noescape 标记,因为虽然 ptr 没有逃逸,但是 new 会逃逸。 // 如果 new 被标记为非逃逸,则编译器将不正确的对该指针变量存储的值进行逃逸分析决策。 -// 相反,他们实际上是围绕原子 (casp1 等) 的封装,它们使用 noescape 来传递哪些参数不会逃逸。 // atomicwb 在原子指针写入之前执行 write barrier,调用方应使用 "if writeBarrier.enabled" 对调用 // 进行保护 @@ -34,17 +33,6 @@ func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) { atomic.StorepNoWB(noescape(ptr), new) } -//go:nosplit -func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { - // The write barrier is only necessary if the CAS succeeds, - // but since it needs to happen before the write becomes - // public, we have to do it conservatively all the time. - if writeBarrier.enabled { - atomicwb(ptr, new) - } - return atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) -} - // Like above, but implement in terms of sync/atomic's uintptr operations. // We cannot just call the runtime routines, because the race detector expects // to be able to intercept the sync/atomic forms but not the runtime forms. diff --git a/gosrc/runtime/chan.go b/gosrc/runtime/chan.go index 76699914..b9ad508a 100644 --- a/gosrc/runtime/chan.go +++ b/gosrc/runtime/chan.go @@ -125,7 +125,8 @@ func makechan(t *chantype, size int) *hchan { throw("makechan: bad alignment") } - if size < 0 || uintptr(size) > maxSliceCap(elem.size) || uintptr(size)*elem.size > maxAlloc-hchanSize { + mem, overflow := math.MulUintptr(elem.size, uintptr(size)) + if overflow || mem > maxAlloc-hchanSize || size < 0 { panic(plainError("makechan: size out of range")) } @@ -135,7 +136,7 @@ func makechan(t *chantype, size int) *hchan { // TODO(dvyukov,rlh): Rethink when collector can move allocated objects. var c *hchan switch { - case size == 0 || elem.size == 0: + case mem == 0: // Queue or element size is zero. c = (*hchan)(mallocgc(hchanSize, nil, true)) // Race detector uses this location for synchronization. @@ -143,12 +144,12 @@ func makechan(t *chantype, size int) *hchan { case elem.kind&kindNoPointers != 0: // Elements do not contain pointers. // Allocate hchan and buf in one call. - c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true)) + c = (*hchan)(mallocgc(hchanSize+mem, nil, true)) c.buf = add(unsafe.Pointer(c), hchanSize) default: // 元素包含指针 c = new(hchan) - c.buf = mallocgc(uintptr(size)*elem.size, elem, true) + c.buf = mallocgc(mem, elem, true) } c.elemsize = uint16(elem.size) @@ -277,6 +278,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { gp.param = nil c.sendq.enqueue(mysg) goparkunlock(&c.lock, waitReasonChanSend, traceEvGoBlockSend, 3) + // Ensure the value being sent is kept alive until the + // receiver copies it out. The sudog has a pointer to the + // stack object, but sudogs aren't considered as roots of the + // stack tracer. + KeepAlive(ep) // someone woke us up. 
if mysg != gp.waiting { @@ -388,7 +394,7 @@ func closechan(c *hchan) { c.closed = 1 - var glist *g + var glist gList // release all readers for { @@ -408,8 +414,7 @@ func closechan(c *hchan) { if raceenabled { raceacquireg(gp, c.raceaddr()) } - gp.schedlink.set(glist) - glist = gp + glist.push(gp) } // release all writers (they will panic) @@ -427,15 +432,13 @@ func closechan(c *hchan) { if raceenabled { raceacquireg(gp, c.raceaddr()) } - gp.schedlink.set(glist) - glist = gp + glist.push(gp) } unlock(&c.lock) // Ready all Gs now that we've dropped the channel lock. - for glist != nil { - gp := glist - glist = glist.schedlink.ptr() + for !glist.empty() { + gp := glist.pop() gp.schedlink = 0 goready(gp, 3) } diff --git a/gosrc/runtime/defs_linux_amd64.go b/gosrc/runtime/defs_linux_amd64.go index e8c6a212..c0a0ef0d 100644 --- a/gosrc/runtime/defs_linux_amd64.go +++ b/gosrc/runtime/defs_linux_amd64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/gosrc/runtime/env_posix.go b/gosrc/runtime/env_posix.go index 637a6343..dece11ef 100644 --- a/gosrc/runtime/env_posix.go +++ b/gosrc/runtime/env_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package runtime @@ -14,13 +14,36 @@ func gogetenv(key string) string { throw("getenv before env init") } for _, s := range env { - if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key { + if len(s) > len(key) && s[len(key)] == '=' && envKeyEqual(s[:len(key)], key) { return s[len(key)+1:] } } return "" } +// envKeyEqual reports whether a == b, with ASCII-only case insensitivity +// on Windows. The two strings must have the same length. +func envKeyEqual(a, b string) bool { + if GOOS == "windows" { // case insensitive + for i := 0; i < len(a); i++ { + ca, cb := a[i], b[i] + if ca == cb || lowerASCII(ca) == lowerASCII(cb) { + continue + } + return false + } + return true + } + return a == b +} + +func lowerASCII(c byte) byte { + if 'A' <= c && c <= 'Z' { + return c + ('a' - 'A') + } + return c +} + var _cgo_setenv unsafe.Pointer // 指向 C 函数的指针 var _cgo_unsetenv unsafe.Pointer // 指向 C 函数的指针 diff --git a/gosrc/runtime/extern.go b/gosrc/runtime/extern.go index bf2d3184..92be5af5 100644 --- a/gosrc/runtime/extern.go +++ b/gosrc/runtime/extern.go @@ -49,19 +49,13 @@ It is a comma-separated list of name=val pairs setting these named variables: gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines onto smaller stacks. In this mode, a goroutine's stack can only grow. - gcrescanstacks: setting gcrescanstacks=1 enables stack - re-scanning during the STW mark termination phase. This is - helpful for debugging if objects are being prematurely - garbage collected. - gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection, making every garbage collection a stop-the-world event. Setting gcstoptheworld=2 also disables concurrent sweeping after the garbage collection finishes. gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard error at each collection, summarizing the amount of memory collected and the - length of the pause. Setting gctrace=2 emits the same summary but also - repeats each collection. The format of this line is subject to change. 
+ length of the pause. The format of this line is subject to change. Currently, it is: gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P where the fields are as follows: @@ -94,6 +88,11 @@ It is a comma-separated list of name=val pairs setting these named variables: released: # MB released to the system consumed: # MB allocated from the system + madvdontneed: setting madvdontneed=1 will use MADV_DONTNEED + instead of MADV_FREE on Linux when returning memory to the + kernel. This is less efficient, but causes RSS numbers to drop + more quickly. + memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate. When set to 0 memory profiling is disabled. Refer to the description of MemProfileRate for the default value. @@ -169,29 +168,13 @@ import "runtime/internal/sys" // (由于历史原因,Caller 和 Callers 之间 skip 的含义不同。)返回值报告相应调用文件中的程序计数器, // 文件名和行号。如果无法恢复信息,则布尔值 ok 为 false。 func Caller(skip int) (pc uintptr, file string, line int, ok bool) { - // Make room for three PCs: the one we were asked for, - // what it called, so that CallersFrames can see if it "called" - // sigpanic, and possibly a PC for skipPleaseUseCallersFrames. - // 为三个 PC 让出空间,包括我们要求的 PC、调用它的,从而 CallersFrames 可观察到它是被调用、 - // sigpanic 和可能 - var rpc [3]uintptr - if callers(1+skip-1, rpc[:]) < 2 { - return - } - var stackExpander stackExpander - callers := stackExpander.init(rpc[:]) - // We asked for one extra, so skip that one. If this is sigpanic, - // stepping over this frame will set up state in Frames so the - // next frame is correct. - callers, _, ok = stackExpander.next(callers, true) - if !ok { + rpc := make([]uintptr, 1) + n := callers(skip+1, rpc[:]) + if n < 1 { return } - _, frame, _ := stackExpander.next(callers, true) - pc = frame.PC - file = frame.File - line = frame.Line - return + frame, _ := CallersFrames(rpc).Next() + return frame.PC, frame.File, frame.Line, frame.PC != 0 } // Callers 使用调用 goroutine 栈上的函数调用的返回程序计数器数组 pc。 @@ -202,6 +185,7 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) { // CallersFrames 考虑内联函数并将返回程序计数器调整为调用程序计数器。 // 不鼓励迭代返回的 PC 片,就像在任何返回的 PC 上使用 FuncForPC 一样, // 因为这些不能解释内联或返回程序计数器调整。 +//go:noinline func Callers(skip int, pc []uintptr) int { // runtime.callers uses pc.array==nil as a signal // to print a stack trace. Pick off 0-length pc here @@ -229,6 +213,7 @@ func Version() string { // GOOS 是正在运行的程序的操作系统目标: // darwin,freebsd,linux 等之一。 +// 查看所有 GOOS 和 GOARCH 的组合,请运行 "go tool dist list" const GOOS string = sys.GOOS // GOARCH 是正在运行的程序的架构目标: diff --git a/gosrc/runtime/fastlog2table.go b/gosrc/runtime/fastlog2table.go index c36d5835..6ba4a7d3 100644 --- a/gosrc/runtime/fastlog2table.go +++ b/gosrc/runtime/fastlog2table.go @@ -1,4 +1,4 @@ -// AUTO-GENERATED by mkfastlog2table.go +// Code generated by mkfastlog2table.go; DO NOT EDIT. // Run go generate from src/runtime to update. // See mkfastlog2table.go for comments. diff --git a/gosrc/runtime/funcdata.h b/gosrc/runtime/funcdata.h index e6e0306e..1ee67c86 100644 --- a/gosrc/runtime/funcdata.h +++ b/gosrc/runtime/funcdata.h @@ -16,6 +16,7 @@ #define FUNCDATA_LocalsPointerMaps 1 #define FUNCDATA_InlTree 2 #define FUNCDATA_RegPointerMaps 3 +#define FUNCDATA_StackObjects 4 // Pseudo-assembly statements. 
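
上面 `extern.go` 中 `Caller` 的新实现不再经过 `stackExpander`,而是只取一个 PC 并交给 `CallersFrames` 解析,从而正确处理内联帧。下面给出一个用户侧的小示例(仅为示意:`where` 是本文虚构的函数名,并非 runtime 中的代码),演示与新实现相同的 skip 约定:

```go
package main

import (
	"fmt"
	"runtime"
)

// where 返回调用它的那一行的位置信息,思路与新版 runtime.Caller 相同:
// 先用 runtime.Callers 取出一个 PC,再交给 CallersFrames 解析。
func where(skip int) string {
	pc := make([]uintptr, 1)
	// skip+2:跳过 runtime.Callers 自身与 where 这一层
	if runtime.Callers(skip+2, pc) < 1 {
		return "unknown"
	}
	frame, _ := runtime.CallersFrames(pc).Next()
	return fmt.Sprintf("%s:%d (%s)", frame.File, frame.Line, frame.Function)
}

func main() {
	fmt.Println(where(0)) // 输出 main 中调用 where 的位置
}
```
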
diff --git a/gosrc/runtime/heapdump.go b/gosrc/runtime/heapdump.go index 0fc02a8e..ca56708a 100644 --- a/gosrc/runtime/heapdump.go +++ b/gosrc/runtime/heapdump.go @@ -346,7 +346,7 @@ func dumpgoroutine(gp *g) { dumpint(uint64(gp.goid)) dumpint(uint64(gp.gopc)) dumpint(uint64(readgstatus(gp))) - dumpbool(isSystemGoroutine(gp)) + dumpbool(isSystemGoroutine(gp, false)) dumpbool(false) // isbackground dumpint(uint64(gp.waitsince)) dumpstr(gp.waitreason.String()) @@ -428,9 +428,9 @@ func dumproots() { dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss) dumpfields(firstmoduledata.gcbssmask) - // MSpan.types + // mspan.types for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { // Finalizers for sp := s.specials; sp != nil; sp = sp.next { if sp.kind != _KindSpecialFinalizer { @@ -453,7 +453,7 @@ var freemark [_PageSize / 8]bool func dumpobjs() { for _, s := range mheap_.allspans { - if s.state != _MSpanInUse { + if s.state != mSpanInUse { continue } p := s.base() @@ -616,7 +616,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, func dumpmemprof() { iterate_memprof(dumpmemprof_callback) for _, s := range mheap_.allspans { - if s.state != _MSpanInUse { + if s.state != mSpanInUse { continue } for sp := s.specials; sp != nil; sp = sp.next { @@ -637,7 +637,7 @@ var dumphdr = []byte("go1.7 heap dump\n") func mdump() { // make sure we're done sweeping for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { s.ensureSwept() } } @@ -661,7 +661,7 @@ func writeheapdump_m(fd uintptr) { _g_.waitreason = waitReasonDumpingHeap // Update stats so we can dump them. - // As a side effect, flushes all the MCaches so the MSpan.freelist + // As a side effect, flushes all the mcaches so the mspan.freelist // lists contain all the free objects. updatememstats() diff --git a/gosrc/runtime/iface.go b/gosrc/runtime/iface.go index ab311afd..b55ca9a6 100644 --- a/gosrc/runtime/iface.go +++ b/gosrc/runtime/iface.go @@ -267,6 +267,34 @@ func panicnildottype(want *_type) { // Just to match other nil conversion errors, we don't for now. } +// The specialized convTx routines need a type descriptor to use when calling mallocgc. +// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. +// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, +// so we use named types here. +// We then construct interface values of these types, +// and then extract the type word to use as needed. +type ( + uint16InterfacePtr uint16 + uint32InterfacePtr uint32 + uint64InterfacePtr uint64 + stringInterfacePtr string + sliceInterfacePtr []byte +) + +var ( + uint16Eface interface{} = uint16InterfacePtr(0) + uint32Eface interface{} = uint32InterfacePtr(0) + uint64Eface interface{} = uint64InterfacePtr(0) + stringEface interface{} = stringInterfacePtr("") + sliceEface interface{} = sliceInterfacePtr(nil) + + uint16Type *_type = (*eface)(unsafe.Pointer(&uint16Eface))._type + uint32Type *_type = (*eface)(unsafe.Pointer(&uint32Eface))._type + uint64Type *_type = (*eface)(unsafe.Pointer(&uint64Eface))._type + stringType *_type = (*eface)(unsafe.Pointer(&stringEface))._type + sliceType *_type = (*eface)(unsafe.Pointer(&sliceEface))._type +) + // The conv and assert functions below do very similar things. // The convXXX functions are guaranteed by the compiler to succeed. 
// The assertXXX functions may fail (either panicking or returning false, @@ -290,80 +318,54 @@ func convT2E(t *_type, elem unsafe.Pointer) (e eface) { return } -func convT2E16(t *_type, val uint16) (e eface) { - var x unsafe.Pointer +func convT16(val uint16) (x unsafe.Pointer) { if val == 0 { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(2, t, false) + x = mallocgc(2, uint16Type, false) *(*uint16)(x) = val } - e._type = t - e.data = x return } -func convT2E32(t *_type, val uint32) (e eface) { - var x unsafe.Pointer +func convT32(val uint32) (x unsafe.Pointer) { if val == 0 { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(4, t, false) + x = mallocgc(4, uint32Type, false) *(*uint32)(x) = val } - e._type = t - e.data = x return } -func convT2E64(t *_type, val uint64) (e eface) { - var x unsafe.Pointer +func convT64(val uint64) (x unsafe.Pointer) { if val == 0 { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(8, t, false) + x = mallocgc(8, uint64Type, false) *(*uint64)(x) = val } - e._type = t - e.data = x return } -func convT2Estring(t *_type, elem unsafe.Pointer) (e eface) { - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Estring)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if *(*string)(elem) == "" { +func convTstring(val string) (x unsafe.Pointer) { + if val == "" { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(t.size, t, true) - *(*string)(x) = *(*string)(elem) + x = mallocgc(unsafe.Sizeof(val), stringType, true) + *(*string)(x) = val } - e._type = t - e.data = x return } -func convT2Eslice(t *_type, elem unsafe.Pointer) (e eface) { - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Eslice)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if v := *(*slice)(elem); uintptr(v.array) == 0 { +func convTslice(val []byte) (x unsafe.Pointer) { + // Note: this must work for any element type, not just byte. 
+ if (*slice)(unsafe.Pointer(&val)).array == nil { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(t.size, t, true) - *(*slice)(x) = *(*slice)(elem) + x = mallocgc(unsafe.Sizeof(val), sliceType, true) + *(*[]byte)(x) = val } - e._type = t - e.data = x return } @@ -396,88 +398,6 @@ func convT2I(tab *itab, elem unsafe.Pointer) (i iface) { return } -func convT2I16(tab *itab, val uint16) (i iface) { - t := tab._type - var x unsafe.Pointer - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(2, t, false) - *(*uint16)(x) = val - } - i.tab = tab - i.data = x - return -} - -func convT2I32(tab *itab, val uint32) (i iface) { - t := tab._type - var x unsafe.Pointer - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(4, t, false) - *(*uint32)(x) = val - } - i.tab = tab - i.data = x - return -} - -func convT2I64(tab *itab, val uint64) (i iface) { - t := tab._type - var x unsafe.Pointer - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(8, t, false) - *(*uint64)(x) = val - } - i.tab = tab - i.data = x - return -} - -func convT2Istring(tab *itab, elem unsafe.Pointer) (i iface) { - t := tab._type - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Istring)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if *(*string)(elem) == "" { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(t.size, t, true) - *(*string)(x) = *(*string)(elem) - } - i.tab = tab - i.data = x - return -} - -func convT2Islice(tab *itab, elem unsafe.Pointer) (i iface) { - t := tab._type - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Islice)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if v := *(*slice)(elem); uintptr(v.array) == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(t.size, t, true) - *(*slice)(x) = *(*slice)(elem) - } - i.tab = tab - i.data = x - return -} - func convT2Inoptr(tab *itab, elem unsafe.Pointer) (i iface) { t := tab._type if raceenabled { diff --git a/gosrc/runtime/internal/atomic/asm_amd64.s b/gosrc/runtime/internal/atomic/asm_amd64.s index 6fb5211c..e18aee7d 100644 --- a/gosrc/runtime/internal/atomic/asm_amd64.s +++ b/gosrc/runtime/internal/atomic/asm_amd64.s @@ -43,6 +43,9 @@ TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25 TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25 JMP runtime∕internal∕atomic·Cas64(SB) +TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17 + JMP runtime∕internal∕atomic·Cas(SB) + TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16 JMP runtime∕internal∕atomic·Load64(SB) @@ -130,6 +133,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12 XCHGL AX, 0(BX) RET +TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12 + JMP runtime∕internal∕atomic·Store(SB) + TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16 MOVQ ptr+0(FP), BX MOVQ val+8(FP), AX diff --git a/gosrc/runtime/internal/atomic/atomic_amd64x.go b/gosrc/runtime/internal/atomic/atomic_amd64x.go index 54851d30..d4fe4616 100644 --- a/gosrc/runtime/internal/atomic/atomic_amd64x.go +++ b/gosrc/runtime/internal/atomic/atomic_amd64x.go @@ -26,6 +26,12 @@ func Load64(ptr *uint64) uint64 { return *ptr } +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + //go:noescape func Xadd(ptr *uint32, delta int32) uint32 @@ -55,12 +61,18 @@ func Or8(ptr *uint8, val uint8) //go:noescape func Cas64(ptr *uint64, old, new uint64) bool +//go:noescape +func CasRel(ptr 
*uint32, old, new uint32) bool + //go:noescape func Store(ptr *uint32, val uint32) //go:noescape func Store64(ptr *uint64, val uint64) +//go:noescape +func StoreRel(ptr *uint32, val uint32) + // StorepNoWB performs *ptr = val atomically and without a write // barrier. // diff --git a/gosrc/runtime/internal/atomic/atomic_wasm.go b/gosrc/runtime/internal/atomic/atomic_wasm.go index cbf254fc..71288e90 100644 --- a/gosrc/runtime/internal/atomic/atomic_wasm.go +++ b/gosrc/runtime/internal/atomic/atomic_wasm.go @@ -21,6 +21,12 @@ func Loadp(ptr unsafe.Pointer) unsafe.Pointer { return *(*unsafe.Pointer)(ptr) } +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + //go:nosplit //go:noinline func Load64(ptr *uint64) uint64 { @@ -105,6 +111,12 @@ func Store(ptr *uint32, val uint32) { *ptr = val } +//go:nosplit +//go:noinline +func StoreRel(ptr *uint32, val uint32) { + *ptr = val +} + //go:nosplit //go:noinline func Store64(ptr *uint64, val uint64) { @@ -147,6 +159,16 @@ func Casuintptr(ptr *uintptr, old, new uintptr) bool { return false } +//go:nosplit +//go:noinline +func CasRel(ptr *uint32, old, new uint32) bool { + if *ptr == old { + *ptr = new + return true + } + return false +} + //go:nosplit //go:noinline func Storeuintptr(ptr *uintptr, new uintptr) { diff --git a/gosrc/runtime/internal/math/math.go b/gosrc/runtime/internal/math/math.go new file mode 100644 index 00000000..5385f5dd --- /dev/null +++ b/gosrc/runtime/internal/math/math.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math + +import "runtime/internal/sys" + +const MaxUintptr = ^uintptr(0) + +// MulUintptr returns a * b and whether the multiplication overflowed. +// On supported platforms this is an intrinsic lowered by the compiler. +func MulUintptr(a, b uintptr) (uintptr, bool) { + if a|b < 1<<(4*sys.PtrSize) || a == 0 { + return a * b, false + } + overflow := b > MaxUintptr/a + return a * b, overflow +} diff --git a/gosrc/runtime/internal/sys/arch_amd64.go b/gosrc/runtime/internal/sys/arch_amd64.go index 2f32bc46..86fed4d5 100644 --- a/gosrc/runtime/internal/sys/arch_amd64.go +++ b/gosrc/runtime/internal/sys/arch_amd64.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = AMD64 BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = 4096 PCQuantum = 1 Int64Align = 8 diff --git a/gosrc/runtime/internal/sys/arch_wasm.go b/gosrc/runtime/internal/sys/arch_wasm.go index 5463f934..203fc2e4 100644 --- a/gosrc/runtime/internal/sys/arch_wasm.go +++ b/gosrc/runtime/internal/sys/arch_wasm.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = WASM BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = 65536 PCQuantum = 1 Int64Align = 8 diff --git a/gosrc/runtime/internal/sys/stubs.go b/gosrc/runtime/internal/sys/stubs.go index 53280232..10b0173f 100644 --- a/gosrc/runtime/internal/sys/stubs.go +++ b/gosrc/runtime/internal/sys/stubs.go @@ -11,3 +11,6 @@ const RegSize = 4 << (^Uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) const SpAlign = 1*(1-GoarchArm64) + 16*GoarchArm64 // SP alignment: 1 normally, 16 for ARM64 var DefaultGoroot string // set at link time + +// AIX requires a larger stack for syscalls. 
+const StackGuardMultiplier = StackGuardMultiplierDefault*(1-GoosAix) + 2*GoosAix
diff --git a/gosrc/runtime/internal/sys/zgoos_darwin.go b/gosrc/runtime/internal/sys/zgoos_darwin.go
index 1c4667f6..b645d1cf 100644
--- a/gosrc/runtime/internal/sys/zgoos_darwin.go
+++ b/gosrc/runtime/internal/sys/zgoos_darwin.go
@@ -6,10 +6,12 @@ package sys
 
 const GOOS = `darwin`
 
+const GoosAix = 0
 const GoosAndroid = 0
 const GoosDarwin = 1
 const GoosDragonfly = 0
 const GoosFreebsd = 0
+const GoosHurd = 0
 const GoosJs = 0
 const GoosLinux = 0
 const GoosNacl = 0
diff --git a/gosrc/runtime/internal/sys/zgoos_js.go b/gosrc/runtime/internal/sys/zgoos_js.go
index cc8eef08..49956b8f 100644
--- a/gosrc/runtime/internal/sys/zgoos_js.go
+++ b/gosrc/runtime/internal/sys/zgoos_js.go
@@ -6,10 +6,12 @@ package sys
 
 const GOOS = `js`
 
+const GoosAix = 0
 const GoosAndroid = 0
 const GoosDarwin = 0
 const GoosDragonfly = 0
 const GoosFreebsd = 0
+const GoosHurd = 0
 const GoosJs = 1
 const GoosLinux = 0
 const GoosNacl = 0
diff --git a/gosrc/runtime/internal/sys/zgoos_linux.go b/gosrc/runtime/internal/sys/zgoos_linux.go
index 289400c6..61fa4168 100644
--- a/gosrc/runtime/internal/sys/zgoos_linux.go
+++ b/gosrc/runtime/internal/sys/zgoos_linux.go
@@ -7,11 +7,13 @@ package sys
 
 const GOOS = `linux`
 
+const GoosAix = 0
 const GoosAndroid = 0
 const GoosDarwin = 0
 const GoosDragonfly = 0
 const GoosFreebsd = 0
+const GoosHurd = 0
 const GoosJs = 0
 const GoosLinux = 1
 const GoosNacl = 0
 const GoosNetbsd = 0
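
`zgoos_*.go` 中生成的 `GoosXxx` 常量均取 0/1 两值,平台差异因此可以在编译期用常量算术表达,`stubs.go` 新增的 `StackGuardMultiplier` 正是这种写法。下面用一个独立的小示例演示该模式(其中的常量为演示而手写,并非 runtime 的真实定义):

```go
package main

import "fmt"

// 手写的 0/1 平台标志,模仿 gengoos.go 生成的 GoosXxx 常量(仅为演示)。
const (
	goosAix                     = 0 // 在 AIX 上生成器会将其写为 1
	stackGuardMultiplierDefault = 1
)

// 与 sys.StackGuardMultiplier 相同的写法:整个表达式是编译期常量,
// 不引入任何运行时分支;AIX 上取 2,其余平台取默认值。
const stackGuardMultiplier = stackGuardMultiplierDefault*(1-goosAix) + 2*goosAix

func main() {
	fmt.Println(stackGuardMultiplier) // 此处输出 1;若 goosAix 为 1 则输出 2
}
```
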
diff --git a/gosrc/runtime/lfstack_64bit.go b/gosrc/runtime/lfstack_64bit.go
index aa9e5540..fee0992a 100644
--- a/gosrc/runtime/lfstack_64bit.go
+++ b/gosrc/runtime/lfstack_64bit.go
@@ -23,13 +23,23 @@ const (
 	// 除了从顶部取 16 位之外,我们可以从底部取 3,因为节点必须是指针对齐的,总计 19 位计数。
 	cntBits = 64 - addrBits + 3
-
 	// 0x0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000
 	// cntBits -------|
 	//                |------------ addrBits ----------
+
+	// On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit
+	// offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA)
+	// are available for mmap.
+	// We assume all lfnode addresses are from memory allocated with mmap.
+	// We use one bit to distinguish between the two ranges.
+	aixAddrBits = 57
+	aixCntBits = 64 - aixAddrBits + 3
 )
 
 func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+	if GOARCH == "ppc64" && GOOS == "aix" {
+		return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<<aixCntBits-1))
+	}
 	return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
 }
 
 func lfstackUnpack(val uint64) *lfnode {
 	if GOARCH == "amd64" {
 		// amd64 systems can place the stack above the VA hole, so we need to sign extend
 		// val before unpacking.
 		return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> cntBits << 3)))
 	}
+	if GOARCH == "ppc64" && GOOS == "aix" {
+		return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56)))
+	}
 	return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
 }
diff --git a/gosrc/runtime/lock_futex.go b/gosrc/runtime/lock_futex.go
index f5aa53f3..96536148 100644
--- a/gosrc/runtime/lock_futex.go
+++ b/gosrc/runtime/lock_futex.go
@@ -227,7 +227,7 @@ func notetsleepg(n *note, ns int64) bool {
 	return ok
 }
 
-func pauseSchedulerUntilCallback() bool {
+func beforeIdle() bool {
 	return false
 }
 
diff --git a/gosrc/runtime/lock_js.go b/gosrc/runtime/lock_js.go
index d5be4071..84d72abe 100644
--- a/gosrc/runtime/lock_js.go
+++ b/gosrc/runtime/lock_js.go
@@ -90,7 +90,7 @@ func notetsleepg(n *note, ns int64) bool {
 		delay = 1<<31 - 1 // cap to max int32
 	}
 
-	id := scheduleCallback(delay)
+	id := scheduleTimeoutEvent(delay)
 	mp := acquirem()
 	notes[n] = gp
 	notesWithTimeout[n] = noteWithTimeout{gp: gp, deadline: deadline}
@@ -98,7 +98,7 @@ func notetsleepg(n *note, ns int64) bool {
 
 	gopark(nil, nil, waitReasonSleep, traceEvNone, 1)
 
-	clearScheduledCallback(id) // note might have woken early, clear timeout
+	clearTimeoutEvent(id) // note might have woken early, clear timeout
 	mp = acquirem()
 	delete(notes, n)
 	delete(notesWithTimeout, n)
@@ -125,46 +125,66 @@ func notetsleepg(n *note, ns int64) bool {
 func checkTimeouts() {
 	now := nanotime()
 	for n, nt := range notesWithTimeout {
-		if n.key == note_cleared && now > nt.deadline {
+		if n.key == note_cleared && now >= nt.deadline {
 			n.key = note_timeout
 			goready(nt.gp, 1)
 		}
 	}
 }
 
-var waitingForCallback *g
+var returnedEventHandler *g
 
-// sleepUntilCallback puts the current goroutine to sleep until a callback is triggered.
-// It is currently only used by the callback routine of the syscall/js package.
-//go:linkname sleepUntilCallback syscall/js.sleepUntilCallback
-func sleepUntilCallback() {
-	waitingForCallback = getg()
+func init() {
+	// At the toplevel we need an extra goroutine that handles asynchronous events.
+	initg := getg()
+	go func() {
+		returnedEventHandler = getg()
+		goready(initg, 1)
+
+		gopark(nil, nil, waitReasonZero, traceEvNone, 1)
+		returnedEventHandler = nil
+
+		pause(getcallersp() - 16)
+	}()
 	gopark(nil, nil, waitReasonZero, traceEvNone, 1)
-	waitingForCallback = nil
 }
 
-// pauseSchedulerUntilCallback gets called from the scheduler and pauses the execution
-// of Go's WebAssembly code until a callback is triggered. Then it checks for note timeouts
-// and resumes goroutines that are waiting for a callback.
-func pauseSchedulerUntilCallback() bool {
-	if waitingForCallback == nil && len(notesWithTimeout) == 0 {
-		return false
+
+// beforeIdle gets called by the scheduler if no goroutine is awake.
+// We resume the event handler (if available) which will pause the execution.
+func beforeIdle() bool {
+	if returnedEventHandler != nil {
+		goready(returnedEventHandler, 1)
+		return true
 	}
+	return false
+}
+
+// pause sets SP to newsp and pauses the execution of Go's WebAssembly code until an event is triggered.
+func pause(newsp uintptr)
+
+// scheduleTimeoutEvent tells the WebAssembly environment to trigger an event after ms milliseconds.
+// It returns a timer id that can be used with clearTimeoutEvent.
+func scheduleTimeoutEvent(ms int64) int32 + +// clearTimeoutEvent clears a timeout event scheduled by scheduleTimeoutEvent. +func clearTimeoutEvent(id int32) - pause() +func handleEvent() { + prevReturnedEventHandler := returnedEventHandler + returnedEventHandler = nil checkTimeouts() - if waitingForCallback != nil { - goready(waitingForCallback, 1) - } - return true -} + eventHandler() -// pause pauses the execution of Go's WebAssembly code until a callback is triggered. -func pause() + returnedEventHandler = getg() + gopark(nil, nil, waitReasonZero, traceEvNone, 1) -// scheduleCallback tells the WebAssembly environment to trigger a callback after ms milliseconds. -// It returns a timer id that can be used with clearScheduledCallback. -func scheduleCallback(ms int64) int32 + returnedEventHandler = prevReturnedEventHandler + pause(getcallersp() - 16) +} + +var eventHandler func() -// clearScheduledCallback clears a callback scheduled by scheduleCallback. -func clearScheduledCallback(id int32) +//go:linkname setEventHandler syscall/js.setEventHandler +func setEventHandler(fn func()) { + eventHandler = fn +} diff --git a/gosrc/runtime/lock_sema.go b/gosrc/runtime/lock_sema.go index acdb9825..585e0669 100644 --- a/gosrc/runtime/lock_sema.go +++ b/gosrc/runtime/lock_sema.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin nacl netbsd openbsd plan9 solaris windows +// +build aix darwin nacl netbsd openbsd plan9 solaris windows package runtime @@ -290,7 +290,7 @@ func notetsleepg(n *note, ns int64) bool { return ok } -func pauseSchedulerUntilCallback() bool { +func beforeIdle() bool { return false } diff --git a/gosrc/runtime/type.go b/gosrc/runtime/type.go index 39e8ae8a..ea51c508 100644 --- a/gosrc/runtime/type.go +++ b/gosrc/runtime/type.go @@ -113,10 +113,6 @@ func (t *_type) uncommon() *uncommontype { } } -func hasPrefix(s, prefix string) bool { - return len(s) >= len(prefix) && s[:len(prefix)] == prefix -} - func (t *_type) name() string { if t.tflag&tflagNamed == 0 { return "" @@ -372,17 +368,33 @@ type interfacetype struct { } type maptype struct { - typ _type - key *_type - elem *_type - bucket *_type // internal type representing a hash bucket - keysize uint8 // size of key slot - indirectkey bool // store ptr to key instead of key itself - valuesize uint8 // size of value slot - indirectvalue bool // store ptr to value instead of value itself - bucketsize uint16 // size of bucket - reflexivekey bool // true if k==k for all keys - needkeyupdate bool // true if we need to update key on an overwrite + typ _type + key *_type + elem *_type + bucket *_type // internal type representing a hash bucket + keysize uint8 // size of key slot + indirectkey bool // store ptr to key instead of key itself + valuesize uint8 // size of value slot + bucketsize uint16 // size of bucket + flags uint32 +} + +// Note: flag values must match those used in the TMAP case +// in ../cmd/compile/internal/gc/reflect.go:dtypesym. 
+func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself + return mt.flags&1 != 0 +} +func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself + return mt.flags&2 != 0 +} +func (mt *maptype) reflexivekey() bool { // true if k==k for all keys + return mt.flags&4 != 0 +} +func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite + return mt.flags&8 != 0 +} +func (mt *maptype) hashMightPanic() bool { // true if hash function might panic + return mt.flags&16 != 0 } type arraytype struct { diff --git a/gosrc/runtime/vdso_elf64.go b/gosrc/runtime/vdso_elf64.go index c0373d54..4993d68f 100644 --- a/gosrc/runtime/vdso_elf64.go +++ b/gosrc/runtime/vdso_elf64.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build linux -// +build amd64 arm64 +// +build amd64 arm64 ppc64 ppc64le package runtime diff --git a/gosrc/runtime/vdso_linux.go b/gosrc/runtime/vdso_linux.go index 440f0f32..66345394 100644 --- a/gosrc/runtime/vdso_linux.go +++ b/gosrc/runtime/vdso_linux.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build linux -// +build 386 amd64 arm arm64 +// +build 386 amd64 arm arm64 ppc64 ppc64le package runtime @@ -40,7 +40,8 @@ const ( _SHT_DYNSYM = 11 /* Dynamic linker symbol table */ - _STT_FUNC = 2 /* Symbol is a code object */ + _STT_FUNC = 2 /* Symbol is a code object */ + _STT_NOTYPE = 0 /* Symbol type is not specified */ _STB_GLOBAL = 1 /* Global symbol */ _STB_WEAK = 2 /* Weak symbol */ @@ -212,7 +213,8 @@ func vdsoParseSymbols(info *vdsoInfo, version int32) { sym := &info.symtab[symIndex] typ := _ELF_ST_TYPE(sym.st_info) bind := _ELF_ST_BIND(sym.st_info) - if typ != _STT_FUNC || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF { + // On ppc64x, VDSO functions are of type _STT_NOTYPE. + if typ != _STT_FUNC && typ != _STT_NOTYPE || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF { return false } if k.name != gostringnocopy(&info.symstrings[sym.st_name]) { @@ -277,7 +279,7 @@ func vdsoauxv(tag, val uintptr) { } } -// vdsoMarker returns whether PC is on the VDSO page. +// vdsoMarker reports whether PC is on the VDSO page. func inVDSOPage(pc uintptr) bool { for _, k := range vdsoSymbolKeys { if *k.ptr != 0 {
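
`type.go` 的这一改动将 `maptype` 中零散的 bool 字段合并为一个 `flags` 位集,并通过方法读取,既缩小了类型描述符,也为新增的 `hashMightPanic` 留出了扩展位。下面用一个独立的小示例还原这种位标志模式(类型与标志名为演示虚构,标志取值与上文 `maptype` 方法中的掩码一致):

```go
package main

import "fmt"

// mapFlags 模仿 maptype.flags:用一个 uint32 的位集合替代多个 bool 字段。
type mapFlags uint32

const (
	flagIndirectKey    mapFlags = 1 << iota // 1:桶中存 key 的指针而非 key 本身
	flagIndirectValue                       // 2:桶中存 value 的指针而非 value 本身
	flagReflexiveKey                        // 4:对所有 key 都有 k==k
	flagNeedKeyUpdate                       // 8:覆盖写时需要更新 key
	flagHashMightPanic                      // 16:哈希函数可能 panic
)

func (f mapFlags) indirectKey() bool    { return f&flagIndirectKey != 0 }
func (f mapFlags) hashMightPanic() bool { return f&flagHashMightPanic != 0 }

func main() {
	f := flagIndirectKey | flagHashMightPanic
	fmt.Println(f.indirectKey(), f.hashMightPanic()) // true true
}
```
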