runtime: fix ReadMemStatsSlow's and CheckScavengedBits' chunk iteration
Both ReadMemStatsSlow and CheckScavengedBits iterate over the page
allocator's chunks but never check whether those chunks actually exist.
The chunks index became sparse during development, so a missing chunk
is now a real possibility. If the runtime tests' heap is sparse, either
function may segfault, though in practice this is very rare.

The pattern of returning nil for a nonexistent chunk is useful
elsewhere too, so this change introduces tryChunkOf, which never throws
but may return nil. It also updates the documentation of chunkOf.
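
For intuition, here is a minimal, self-contained sketch of the pattern
(sparseIndex and tryGet are hypothetical names, not the runtime's actual
types): a two-level index where a second-level block may be nil, so an
unchecked lookup crashes while a try-style accessor lets callers skip
missing entries.

package main

import "fmt"

// sparseIndex mimics a two-level chunk index: the first level is always
// allocated, but a second-level block is only present once something in
// its range has been mapped. All names here are illustrative.
type sparseIndex struct {
	l1 [4]*[4]int // nil entry means no chunks exist in that range
}

// tryGet returns a pointer to entry i, or nil if its second-level block
// is missing, mirroring the tryChunkOf pattern.
func (s *sparseIndex) tryGet(i int) *int {
	l2 := s.l1[i/4]
	if l2 == nil {
		return nil
	}
	return &l2[i%4]
}

func main() {
	var s sparseIndex
	s.l1[1] = &[4]int{10, 11, 12, 13} // only one second-level block is mapped

	for i := 0; i < 16; i++ {
		// Indexing s.l1[i/4][i%4] unconditionally would nil-panic for
		// the three unmapped ranges; checking first skips them safely.
		p := s.tryGet(i)
		if p == nil {
			continue
		}
		fmt.Println(i, *p)
	}
}

In the runtime itself, tryChunkOf plays the role of tryGet, and callers
such as ReadMemStatsSlow simply continue past nil chunks, as the diff
below shows.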

Fixes #41296.

Change-Id: Id5ae0ca3234480de1724fdf2e3677eeedcf76fa0
Reviewed-on: https://go-review.googlesource.com/c/go/+/253777
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
mknyszek committed Sep 9, 2020
1 parent 9ef3ee3 commit 34835df
Showing 2 changed files with 23 additions and 7 deletions.
17 changes: 10 additions & 7 deletions src/runtime/export_test.go
@@ -358,7 +358,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		}
 
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			pg := mheap_.pages.chunkOf(i).scavenged.popcntRange(0, pallocChunkPages)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
+			pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
 			slow.HeapReleased += uint64(pg) * pageSize
 		}
 		for _, p := range allp {
@@ -756,11 +760,7 @@ func (p *PageAlloc) InUse() []AddrRange {
 // Returns nil if the PallocData's L2 is missing.
 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
 	ci := chunkIdx(i)
-	l2 := (*pageAlloc)(p).chunks[ci.l1()]
-	if l2 == nil {
-		return nil
-	}
-	return (*PallocData)(&l2[ci.l2()])
+	return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
 }
 
 // AddrRange represents a range over addresses.
@@ -900,7 +900,10 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 		lock(&mheap_.lock)
 	chunkLoop:
 		for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
-			chunk := mheap_.pages.chunkOf(i)
+			chunk := mheap_.pages.tryChunkOf(i)
+			if chunk == nil {
+				continue
+			}
 			for j := 0; j < pallocChunkPages/64; j++ {
 				// Run over each 64-bit bitmap section and ensure
 				// scavenged is being cleared properly on allocation.
13 changes: 13 additions & 0 deletions src/runtime/mpagealloc.go
@@ -326,7 +326,20 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 	s.scav.scavLWM = maxSearchAddr
 }
 
+// tryChunkOf returns the bitmap data for the given chunk.
+//
+// Returns nil if the chunk data has not been mapped.
+func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+	l2 := s.chunks[ci.l1()]
+	if l2 == nil {
+		return nil
+	}
+	return &l2[ci.l2()]
+}
+
 // chunkOf returns the chunk at the given chunk index.
+//
+// The chunk index must be valid or this method may throw.
 func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
 	return &s.chunks[ci.l1()][ci.l2()]
 }
