Skip to content

Commit

Permalink
runtime: reuse freed memory blocks on wasm
Browse files Browse the repository at this point in the history
When compiling Go programs to WebAssembly, the memory allocation strategy
was neither releasing memory to the OS nor reusing blocks freed by calls to
`runtime.sysFreeOS`.

This CL unifies the plan9 and wasm memory management strategy since both
platforms use a linear memory space and do not have a mechanism for
returning memory blocks to the OS.

Fixes golang#59061
  • Loading branch information
achille-roussel committed Mar 15, 2023
1 parent 70308d1 commit 3612863
Show file tree
Hide file tree
Showing 4 changed files with 194 additions and 238 deletions.
72 changes: 9 additions & 63 deletions src/runtime/mem_js.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,72 +6,21 @@

package runtime

import (
"unsafe"
)
import "unsafe"

// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer {
	// On wasm an allocation is simply reserve-then-map; sysMapOS is a
	// no-op here, so this amounts to grabbing n bytes of linear memory.
	ptr := sysReserveOS(nil, n)
	sysMapOS(ptr, n)
	return ptr
}

// sysUnusedOS is a no-op: wasm linear memory cannot be released or
// advised away to the host (see the commit message above).
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
}

// sysUsedOS is a no-op: memory is usable as soon as it is allocated.
func sysUsedOS(v unsafe.Pointer, n uintptr) {
}

// sysHugePageOS is a no-op: wasm has a fixed 64 KiB page size and no
// huge-page concept.
func sysHugePageOS(v unsafe.Pointer, n uintptr) {
}

// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
// NOTE(review): this empty body neither returns memory to the host nor
// records the block for reuse — this is the leak described in the commit
// message; the fix tracks freed blocks on a free list (see mem_plan9.go).
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
}

// sysFaultOS is a no-op: there is no mprotect-like facility on wasm.
func sysFaultOS(v unsafe.Pointer, n uintptr) {
}
// https://webassembly.github.io/spec/core/exec/runtime.html#memory-instances
const _PAGESIZE = 64 * 1024

var reserveEnd uintptr
func sbrk(n uintptr) unsafe.Pointer {
grow := (int32(n) + _PAGESIZE - 1) / _PAGESIZE
size := currentMemory()

func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
// TODO(neelance): maybe unify with mem_plan9.go, depending on how https://github.com/WebAssembly/design/blob/master/FutureFeatures.md#finer-grained-control-over-memory turns out

if v != nil {
// The address space of WebAssembly's linear memory is contiguous,
// so requesting specific addresses is not supported. We could use
// a different address, but then mheap.sysAlloc discards the result
// right away and we don't reuse chunks passed to sysFree.
if growMemory(grow) < 0 {
return nil
}

// Round up the initial reserveEnd to 64 KiB so that
// reservations are always aligned to the page size.
initReserveEnd := alignUp(lastmoduledatap.end, physPageSize)
if reserveEnd < initReserveEnd {
reserveEnd = initReserveEnd
}
v = unsafe.Pointer(reserveEnd)
reserveEnd += alignUp(n, physPageSize)

current := currentMemory()
// reserveEnd is always at a page boundary.
needed := int32(reserveEnd / physPageSize)
if current < needed {
if growMemory(needed-current) == -1 {
return nil
}
resetMemoryDataView()
}

return v
resetMemoryDataView()
return unsafe.Pointer(uintptr(size) * _PAGESIZE)
}

func currentMemory() int32
Expand All @@ -82,6 +31,3 @@ func growMemory(pages int32) int32
//
//go:wasmimport gojs runtime.resetMemoryDataView
func resetMemoryDataView()

// sysMapOS is a no-op: reserved wasm linear memory is already mapped
// and usable (see sysAllocOS above).
func sysMapOS(v unsafe.Pointer, n uintptr) {
}
174 changes: 0 additions & 174 deletions src/runtime/mem_plan9.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,126 +6,6 @@ package runtime

import "unsafe"

// memDebug enables the consistency checks in memCheck.
const memDebug = false

// bloc is the current end of the heap (the "break").
var bloc uintptr

// blocMax appears to be the high-water mark of bloc — maintained in
// sbrk, whose body is elided from this view; confirm there.
var blocMax uintptr

// memlock protects bloc, blocMax, and memFreelist.
var memlock mutex

// memHdr is the header stored at the start of every free block.
type memHdr struct {
	next memHdrPtr
	size uintptr
}

var memFreelist memHdrPtr // sorted in ascending order

// memHdrPtr is the address of a memHdr. NOTE(review): stored as a
// uintptr rather than *memHdr, presumably so the GC does not treat
// free-list links as live pointers — confirm.
type memHdrPtr uintptr

func (p memHdrPtr) ptr() *memHdr { return (*memHdr)(unsafe.Pointer(p)) }
func (p *memHdrPtr) set(x *memHdr) { *p = memHdrPtr(unsafe.Pointer(x)) }

// memAlloc returns a page-rounded block of at least n bytes, taking the
// first fit from the free list and falling back to sbrk when no free
// block is large enough. Caller must hold memlock.
func memAlloc(n uintptr) unsafe.Pointer {
	n = memRound(n)
	var prev *memHdr
	for blk := memFreelist.ptr(); blk != nil; blk = blk.next.ptr() {
		if blk.size < n {
			prev = blk
			continue
		}
		if blk.size == n {
			// Exact fit: unlink the whole block from the list.
			if prev == nil {
				memFreelist = blk.next
			} else {
				prev.next = blk.next
			}
		} else {
			// Larger block: carve the allocation off its tail so the
			// remainder stays on the list unchanged.
			blk.size -= n
			blk = (*memHdr)(add(unsafe.Pointer(blk), blk.size))
		}
		// Clear the header area before handing the block out.
		*blk = memHdr{}
		return unsafe.Pointer(blk)
	}
	return sbrk(n)
}

// memFree returns the n-byte block at ap to the free list, keeping the
// list sorted by ascending address and coalescing with adjacent free
// blocks. Caller must hold memlock.
func memFree(ap unsafe.Pointer, n uintptr) {
	n = memRound(n)
	// memCheck expects free memory beyond each header to be zero, so
	// clear the whole block before linking it in.
	memclrNoHeapPointers(ap, n)
	bp := (*memHdr)(ap)
	bp.size = n
	bpn := uintptr(ap)
	if memFreelist == 0 {
		// Empty list: this block becomes the entire list.
		bp.next = 0
		memFreelist.set(bp)
		return
	}
	p := memFreelist.ptr()
	if bpn < uintptr(unsafe.Pointer(p)) {
		// New block sorts before the current head.
		memFreelist.set(bp)
		if bpn+bp.size == uintptr(unsafe.Pointer(p)) {
			// Touches the old head: absorb it into bp.
			bp.size += p.size
			bp.next = p.next
			*p = memHdr{}
		} else {
			bp.next.set(p)
		}
		return
	}
	// Walk to the insertion point: p ends as the last block whose
	// address precedes bpn.
	for ; p.next != 0; p = p.next.ptr() {
		if bpn > uintptr(unsafe.Pointer(p)) && bpn < uintptr(unsafe.Pointer(p.next)) {
			break
		}
	}
	// Coalesce with the following block if the two are contiguous.
	if bpn+bp.size == uintptr(unsafe.Pointer(p.next)) {
		bp.size += p.next.ptr().size
		bp.next = p.next.ptr().next
		*p.next.ptr() = memHdr{}
	} else {
		bp.next = p.next
	}
	// Coalesce with the preceding block if contiguous; otherwise link
	// bp in after p.
	if uintptr(unsafe.Pointer(p))+p.size == bpn {
		p.size += bp.size
		p.next = bp.next
		*bp = memHdr{}
	} else {
		p.next.set(bp)
	}
}

// memCheck validates free-list invariants (strict address ordering, no
// overlap, zeroed payloads) when memDebug is on; otherwise it does
// nothing. Caller must hold memlock.
func memCheck() {
	if !memDebug {
		return
	}
	for hdr := memFreelist.ptr(); hdr != nil && hdr.next != 0; hdr = hdr.next.ptr() {
		cur := unsafe.Pointer(hdr)
		nxt := unsafe.Pointer(hdr.next)
		if uintptr(cur) == uintptr(nxt) {
			print("runtime: ", cur, " == ", nxt, "\n")
			throw("mem: infinite loop")
		}
		if uintptr(cur) > uintptr(nxt) {
			print("runtime: ", cur, " > ", nxt, "\n")
			throw("mem: unordered list")
		}
		if uintptr(cur)+hdr.size > uintptr(nxt) {
			print("runtime: ", cur, "+", hdr.size, " > ", nxt, "\n")
			throw("mem: overlapping blocks")
		}
		// Everything past the header in a free block must be zero.
		for b := add(cur, unsafe.Sizeof(memHdr{})); uintptr(b) < uintptr(cur)+hdr.size; b = add(b, 1) {
			if *(*byte)(b) != 0 {
				print("runtime: value at addr ", b, " with offset ", uintptr(b)-uintptr(cur), " in block ", hdr, " of size ", hdr.size, " is not zero\n")
				throw("mem: uninitialised memory")
			}
		}
	}
}

// memRound rounds p up to the next multiple of _PAGESIZE.
func memRound(p uintptr) uintptr {
	const mask = _PAGESIZE - 1
	return (p + mask) &^ mask
}

// initBloc sets the break to the first page boundary at or past the end
// of the program image.
func initBloc() {
	end := memRound(firstmoduledata.end)
	bloc = end
	blocMax = end
}

func sbrk(n uintptr) unsafe.Pointer {
// Plan 9 sbrk from /sys/src/libc/9sys/sbrk.c
bl := bloc
Expand All @@ -139,57 +19,3 @@ func sbrk(n uintptr) unsafe.Pointer {
bloc += n
return unsafe.Pointer(bl)
}

// sysAllocOS allocates n bytes, serialized on memlock.
func sysAllocOS(n uintptr) unsafe.Pointer {
	lock(&memlock)
	ptr := memAlloc(n)
	memCheck()
	unlock(&memlock)
	return ptr
}

// sysFreeOS releases the n-byte region at v back to the allocator.
func sysFreeOS(v unsafe.Pointer, n uintptr) {
	lock(&memlock)
	switch {
	case uintptr(v)+n == bloc:
		// The region ends exactly at the break, so just pull the break
		// back down. The address space itself can't be shrunk because
		// the segment is shared, so zero the memory instead.
		memclrNoHeapPointers(v, n)
		bloc -= n
	default:
		// Interior region: put it on the free list for reuse.
		memFree(v, n)
		memCheck()
	}
	unlock(&memlock)
}

// sysUnusedOS is a no-op: the shared segment cannot be released back
// to the system (see sysFreeOS below).
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
}

// sysUsedOS is a no-op: memory is usable as soon as it is allocated.
func sysUsedOS(v unsafe.Pointer, n uintptr) {
}

// sysHugePageOS is a no-op: no huge-page control on this platform.
func sysHugePageOS(v unsafe.Pointer, n uintptr) {
}

// sysMapOS is a no-op: memory obtained from sysReserveOS is already
// mapped and usable.
func sysMapOS(v unsafe.Pointer, n uintptr) {
}

// sysFaultOS is a no-op: no mprotect-like facility here.
func sysFaultOS(v unsafe.Pointer, n uintptr) {
}

// sysReserveOS reserves n bytes. If the hint v is exactly the current
// break it tries to extend the address space in place; with no hint it
// falls back to the free list. Any other hint yields nil, because a
// reservation elsewhere could not be honored at that address.
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer {
	lock(&memlock)
	var p unsafe.Pointer
	switch {
	case uintptr(v) == bloc:
		// Hint matches the current end of memory: grow in place.
		p = sbrk(n)
		if p == nil && v == nil {
			// (Only reachable when bloc == 0.) No hint and sbrk
			// failed: try the free list.
			p = memAlloc(n)
			memCheck()
		}
	case v == nil:
		// No address hint: any block will do.
		p = memAlloc(n)
		memCheck()
	}
	unlock(&memlock)
	return p
}
Loading

0 comments on commit 3612863

Please sign in to comment.