diff --git a/src/runtime/mem_js.go b/src/runtime/mem_js.go
index 78eda47b1fbb0..a9e15bf63be22 100644
--- a/src/runtime/mem_js.go
+++ b/src/runtime/mem_js.go
@@ -6,72 +6,21 @@ package runtime
 
-import (
-	"unsafe"
-)
+import "unsafe"
 
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//
-//go:nosplit
-func sysAllocOS(n uintptr) unsafe.Pointer {
-	p := sysReserveOS(nil, n)
-	sysMapOS(p, n)
-	return p
-}
-
-func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysUsedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysHugePageOS(v unsafe.Pointer, n uintptr) {
-}
-
-// Don't split the stack as this function may be invoked without a valid G,
-// which prevents us from allocating more stack.
-//
-//go:nosplit
-func sysFreeOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysFaultOS(v unsafe.Pointer, n uintptr) {
-}
+// https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
+const _PAGESIZE = 64 * 1024
 
-var reserveEnd uintptr
+func sbrk(n uintptr) unsafe.Pointer {
+	grow := (int32(n) + _PAGESIZE - 1) / _PAGESIZE
+	size := currentMemory()
 
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
-	// TODO(neelance): maybe unify with mem_plan9.go, depending on how https://github.com/WebAssembly/design/blob/master/FutureFeatures.md#finer-grained-control-over-memory turns out
-
-	if v != nil {
-		// The address space of WebAssembly's linear memory is contiguous,
-		// so requesting specific addresses is not supported. We could use
-		// a different address, but then mheap.sysAlloc discards the result
-		// right away and we don't reuse chunks passed to sysFree.
+	if growMemory(grow) < 0 {
 		return nil
 	}
-
-	// Round up the initial reserveEnd to 64 KiB so that
-	// reservations are always aligned to the page size.
-	initReserveEnd := alignUp(lastmoduledatap.end, physPageSize)
-	if reserveEnd < initReserveEnd {
-		reserveEnd = initReserveEnd
-	}
-	v = unsafe.Pointer(reserveEnd)
-	reserveEnd += alignUp(n, physPageSize)
-
-	current := currentMemory()
-	// reserveEnd is always at a page boundary.
-	needed := int32(reserveEnd / physPageSize)
-	if current < needed {
-		if growMemory(needed-current) == -1 {
-			return nil
-		}
-		resetMemoryDataView()
-	}
-
-	return v
+	resetMemoryDataView()
+	return unsafe.Pointer(uintptr(size) * _PAGESIZE)
 }
 
 func currentMemory() int32
@@ -82,6 +31,3 @@ func growMemory(pages int32) int32
 //
 //go:wasmimport gojs runtime.resetMemoryDataView
 func resetMemoryDataView()
-
-func sysMapOS(v unsafe.Pointer, n uintptr) {
-}
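The new wasm sbrk drops the reserve/map bookkeeping above in favor of the raw memory.grow protocol: round the request up to whole 64 KiB pages, grow linear memory by that many pages, and hand back the old end of memory as the start of the block. A standalone sketch of that protocol (not part of the patch; the page limit and all names here are invented for illustration):

package main

import "fmt"

const pageSize = 64 * 1024

var memPages int32 = 16 // simulated wasm linear memory, in pages

func currentMemory() int32 { return memPages }

// growMemory models wasm memory.grow: it returns the old size in pages,
// or -1 when the engine refuses to grow.
func growMemory(pages int32) int32 {
	old := memPages
	if memPages+pages > 1024 { // simulated engine limit
		return -1
	}
	memPages += pages
	return old
}

// sbrk mirrors the wasm version in the patch: round the request up to
// whole pages, grow, and return the offset of the old end of memory.
func sbrk(n uintptr) uintptr {
	grow := (int32(n) + pageSize - 1) / pageSize
	size := currentMemory()
	if growMemory(grow) < 0 {
		return 0
	}
	return uintptr(size) * pageSize
}

func main() {
	fmt.Println(sbrk(1))          // old end: 16 pages = 1048576
	fmt.Println(sbrk(pageSize*2)) // next block starts one page later
	fmt.Println(currentMemory())  // pages grew by 1 + 2
}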
diff --git a/src/runtime/mem_plan9.go b/src/runtime/mem_plan9.go
index 88e7d92a7c5d9..9b18a2919d84c 100644
--- a/src/runtime/mem_plan9.go
+++ b/src/runtime/mem_plan9.go
@@ -6,126 +6,6 @@ package runtime
 
 import "unsafe"
 
-const memDebug = false
-
-var bloc uintptr
-var blocMax uintptr
-var memlock mutex
-
-type memHdr struct {
-	next memHdrPtr
-	size uintptr
-}
-
-var memFreelist memHdrPtr // sorted in ascending order
-
-type memHdrPtr uintptr
-
-func (p memHdrPtr) ptr() *memHdr   { return (*memHdr)(unsafe.Pointer(p)) }
-func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
-
-func memAlloc(n uintptr) unsafe.Pointer {
-	n = memRound(n)
-	var prevp *memHdr
-	for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
-		if p.size >= n {
-			if p.size == n {
-				if prevp != nil {
-					prevp.next = p.next
-				} else {
-					memFreelist = p.next
-				}
-			} else {
-				p.size -= n
-				p = (*memHdr)(add(unsafe.Pointer(p), p.size))
-			}
-			*p = memHdr{}
-			return unsafe.Pointer(p)
-		}
-		prevp = p
-	}
-	return sbrk(n)
-}
-
-func memFree(ap unsafe.Pointer, n uintptr) {
-	n = memRound(n)
-	memclrNoHeapPointers(ap, n)
-	bp := (*memHdr)(ap)
-	bp.size = n
-	bpn := uintptr(ap)
-	if memFreelist == 0 {
-		bp.next = 0
-		memFreelist.set(bp)
-		return
-	}
-	p := memFreelist.ptr()
-	if bpn < uintptr(unsafe.Pointer(p)) {
-		memFreelist.set(bp)
-		if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
-			bp.size += p.size
-			bp.next = p.next
-			*p = memHdr{}
-		} else {
-			bp.next.set(p)
-		}
-		return
-	}
-	for ; p.next != 0; p = p.next.ptr() {
-		if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
-			break
-		}
-	}
-	if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
-		bp.size += p.next.ptr().size
-		bp.next = p.next.ptr().next
-		*p.next.ptr() = memHdr{}
-	} else {
-		bp.next = p.next
-	}
-	if uintptr(unsafe.Pointer(p))+p.size == bpn {
-		p.size += bp.size
-		p.next = bp.next
-		*bp = memHdr{}
-	} else {
-		p.next.set(bp)
-	}
-}
-
-func memCheck() {
-	if !memDebug {
-		return
-	}
-	for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
-		if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
-			print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
-			throw("mem: infinite loop")
-		}
-		if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
-			print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
-			throw("mem: unordered list")
-		}
-		if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
-			print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
-			throw("mem: overlapping blocks")
-		}
-		for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
-			if *(*byte)(b) != 0 {
-				print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
-				throw("mem: uninitialised memory")
-			}
-		}
-	}
-}
-
-func memRound(p uintptr) uintptr {
-	return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1)
-}
-
-func initBloc() {
-	bloc = memRound(firstmoduledata.end)
-	blocMax = bloc
-}
-
 func sbrk(n uintptr) unsafe.Pointer {
 	// Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
 	bl := bloc
@@ -139,57 +19,3 @@ func sbrk(n uintptr) unsafe.Pointer {
 	bloc += n
 	return unsafe.Pointer(bl)
 }
-
-func sysAllocOS(n uintptr) unsafe.Pointer {
-	lock(&memlock)
-	p := memAlloc(n)
-	memCheck()
-	unlock(&memlock)
-	return p
-}
-
-func sysFreeOS(v unsafe.Pointer, n uintptr) {
-	lock(&memlock)
-	if uintptr(v)+n == bloc {
-		// Address range being freed is at the end of memory,
-		// so record a new lower value for end of memory.
-		// Can't actually shrink address space because segment is shared.
-		memclrNoHeapPointers(v, n)
-		bloc -= n
-	} else {
-		memFree(v, n)
-		memCheck()
-	}
-	unlock(&memlock)
-}
-
-func sysUnusedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysUsedOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysHugePageOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysMapOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysFaultOS(v unsafe.Pointer, n uintptr) {
-}
-
-func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
-	lock(&memlock)
-	var p unsafe.Pointer
-	if uintptr(v) == bloc {
-		// Address hint is the current end of memory,
-		// so try to extend the address space.
-		p = sbrk(n)
-	}
-	if p == nil && v == nil {
-		p = memAlloc(n)
-		memCheck()
-	}
-	unlock(&memlock)
-	return p
-}
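mem_plan9.go now keeps only the Plan 9 flavor of sbrk; the free-list allocator it used to carry moves to the shared mem_sbrk.go below. One detail worth noting: memRound's mask form, (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1), is equivalent to ordinary round-up division only because _PAGESIZE is a power of two. A standalone check of that equivalence (not part of the patch; all names invented):

package main

import "fmt"

const pageSize = 64 * 1024 // must be a power of two for the mask form

func roundMask(p uintptr) uintptr { return (p + pageSize - 1) &^ (pageSize - 1) }
func roundDiv(p uintptr) uintptr  { return (p + pageSize - 1) / pageSize * pageSize }

func main() {
	for _, p := range []uintptr{0, 1, pageSize - 1, pageSize, pageSize + 1, 10*pageSize + 123} {
		if roundMask(p) != roundDiv(p) {
			panic("mismatch")
		}
		fmt.Printf("%8d -> %8d\n", p, roundMask(p))
	}
}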
diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go
new file mode 100644
index 0000000000000..83d63af866c26
--- /dev/null
+++ b/src/runtime/mem_sbrk.go
@@ -0,0 +1,183 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9 || wasm
+
+package runtime
+
+import "unsafe"
+
+const memDebug = false
+
+var bloc uintptr
+var blocMax uintptr
+var memlock mutex
+
+type memHdr struct {
+	next memHdrPtr
+	size uintptr
+}
+
+var memFreelist memHdrPtr // sorted in ascending order
+
+type memHdrPtr uintptr
+
+func (p memHdrPtr) ptr() *memHdr   { return (*memHdr)(unsafe.Pointer(p)) }
+func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }
+
+func memAlloc(n uintptr) unsafe.Pointer {
+	n = memRound(n)
+	var prevp *memHdr
+	for p := memFreelist.ptr(); p != nil; p = p.next.ptr() {
+		if p.size >= n {
+			if p.size == n {
+				if prevp != nil {
+					prevp.next = p.next
+				} else {
+					memFreelist = p.next
+				}
+			} else {
+				p.size -= n
+				p = (*memHdr)(add(unsafe.Pointer(p), p.size))
+			}
+			*p = memHdr{}
+			return unsafe.Pointer(p)
+		}
+		prevp = p
+	}
+	return sbrk(n)
+}
+
+func memFree(ap unsafe.Pointer, n uintptr) {
+	n = memRound(n)
+	memclrNoHeapPointers(ap, n)
+	bp := (*memHdr)(ap)
+	bp.size = n
+	bpn := uintptr(ap)
+	if memFreelist == 0 {
+		bp.next = 0
+		memFreelist.set(bp)
+		return
+	}
+	p := memFreelist.ptr()
+	if bpn < uintptr(unsafe.Pointer(p)) {
+		memFreelist.set(bp)
+		if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
+			bp.size += p.size
+			bp.next = p.next
+			*p = memHdr{}
+		} else {
+			bp.next.set(p)
+		}
+		return
+	}
+	for ; p.next != 0; p = p.next.ptr() {
+		if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
+			break
+		}
+	}
+	if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
+		bp.size += p.next.ptr().size
+		bp.next = p.next.ptr().next
+		*p.next.ptr() = memHdr{}
+	} else {
+		bp.next = p.next
+	}
+	if uintptr(unsafe.Pointer(p))+p.size == bpn {
+		p.size += bp.size
+		p.next = bp.next
+		*bp = memHdr{}
+	} else {
+		p.next.set(bp)
+	}
+}
+
+func memCheck() {
+	if !memDebug {
+		return
+	}
+	for p := memFreelist.ptr(); p != nil && p.next != 0; p = p.next.ptr() {
+		if uintptr(unsafe.Pointer(p)) == uintptr(unsafe.Pointer(p.next)) {
+			print("runtime: ", unsafe.Pointer(p), " == ", unsafe.Pointer(p.next), "\n")
+			throw("mem: infinite loop")
+		}
+		if uintptr(unsafe.Pointer(p)) > uintptr(unsafe.Pointer(p.next)) {
+			print("runtime: ", unsafe.Pointer(p), " > ", unsafe.Pointer(p.next), "\n")
+			throw("mem: unordered list")
+		}
+		if uintptr(unsafe.Pointer(p))+p.size > uintptr(unsafe.Pointer(p.next)) {
+			print("runtime: ", unsafe.Pointer(p), "+", p.size, " > ", unsafe.Pointer(p.next), "\n")
+			throw("mem: overlapping blocks")
+		}
+		for b := add(unsafe.Pointer(p), unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(unsafe.Pointer(p))+p.size; b = add(b, 1) {
+			if *(*byte)(b) != 0 {
+				print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(unsafe.Pointer(p)), " in block ", p, " of size ", p.size, " is not zero\n")
+				throw("mem: uninitialised memory")
+			}
+		}
+	}
+}
+
+func memRound(p uintptr) uintptr {
+	return (p + _PAGESIZE - 1) &^ (_PAGESIZE - 1)
+}
+
+func initBloc() {
+	bloc = memRound(firstmoduledata.end)
+	blocMax = bloc
+}
+
+func sysAllocOS(n uintptr) unsafe.Pointer {
+	lock(&memlock)
+	p := memAlloc(n)
+	memCheck()
+	unlock(&memlock)
+	return p
+}
+
+func sysFreeOS(v unsafe.Pointer, n uintptr) {
+	lock(&memlock)
+	if uintptr(v)+n == bloc {
+		// Address range being freed is at the end of memory,
+		// so record a new lower value for end of memory.
+		// Can't actually shrink address space because segment is shared.
+		memclrNoHeapPointers(v, n)
+		bloc -= n
+	} else {
+		memFree(v, n)
+		memCheck()
+	}
+	unlock(&memlock)
+}
+
+func sysUnusedOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysUsedOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysHugePageOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysMapOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysFaultOS(v unsafe.Pointer, n uintptr) {
+}
+
+func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
+	lock(&memlock)
+	var p unsafe.Pointer
+	if uintptr(v) == bloc {
+		// Address hint is the current end of memory,
+		// so try to extend the address space.
+		p = sbrk(n)
+	}
+	if p == nil && v == nil {
+		p = memAlloc(n)
+		memCheck()
+	}
+	unlock(&memlock)
+	return p
+}
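The shared allocator is a first-fit free list: memAlloc takes the first block that is large enough, carves the request out of the block's tail, and falls back to sbrk when nothing fits; sysFreeOS returns a block at the very end of memory by simply lowering bloc. A toy model of that policy (not runtime code; it uses plain integer addresses and omits the neighbor coalescing that memFree performs):

package main

import "fmt"

const pageSize = 64 * 1024

type block struct{ addr, size uintptr }

var (
	freelist []block           // sorted by addr, like memFreelist
	brk      uintptr = 1 << 20 // simulated end of memory (bloc)
)

func round(n uintptr) uintptr { return (n + pageSize - 1) &^ (pageSize - 1) }

// alloc mirrors memAlloc: first fit, split from the tail, else grow.
func alloc(n uintptr) uintptr {
	n = round(n)
	for i, b := range freelist {
		if b.size >= n {
			if b.size == n {
				freelist = append(freelist[:i], freelist[i+1:]...)
				return b.addr
			}
			freelist[i].size -= n
			return b.addr + freelist[i].size // carve from the tail
		}
	}
	p := brk
	brk += n // simulated sbrk
	return p
}

// free mirrors the end-of-memory fast path in sysFreeOS.
func free(addr, n uintptr) {
	n = round(n)
	if addr+n == brk {
		brk -= n // give the tail back, like bloc -= n
		return
	}
	freelist = append(freelist, block{addr, n}) // coalescing omitted
}

func main() {
	a := alloc(100)          // rounds up to one 64 KiB page
	b := alloc(3 * pageSize) // three pages
	free(b, 3*pageSize)      // at the end of memory: brk shrinks
	c := alloc(pageSize)     // reuses the freed tail via the simulated sbrk
	fmt.Println(a, b, c, brk)
}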
diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go
index 63a3d95afa512..5b614bb165cfc 100644
--- a/src/runtime/os_js.go
+++ b/src/runtime/os_js.go
@@ -102,9 +102,10 @@ func mdestroy(mp *m) {
 }
 
 func osinit() {
+	initBloc()
 	ncpu = 1
 	getg().m.procid = 2
-	physPageSize = 64 * 1024
+	physPageSize = _PAGESIZE
 }
 
 // wasm has no signals
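The osinit change is about ordering: initBloc must seed bloc from the end of the module's static data before the first sysAllocOS call, and physPageSize must equal _PAGESIZE so that memRound and wasm page growth agree on the unit. A minimal sketch of that seeding (not runtime code; the static-data end address is made up):

package main

import "fmt"

const pageSize = 64 * 1024

var bloc uintptr // zero until initBloc runs

func round(p uintptr) uintptr { return (p + pageSize - 1) &^ (pageSize - 1) }

// initBloc mirrors the runtime's: start the break at the first page
// boundary past the end of static data.
func initBloc(staticEnd uintptr) { bloc = round(staticEnd) }

// sbrk hands out the current break and advances it by whole pages.
func sbrk(n uintptr) uintptr {
	p := bloc
	bloc += round(n)
	return p
}

func main() {
	initBloc(123456) // hypothetical end of the module's static data
	p := sbrk(100)
	fmt.Println(p, bloc) // first block starts page-aligned above static data
}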