Skip to content

Commit

Permalink
Merge pull request #466 from capnproto/cleanup/fix-bufpool
Browse files Browse the repository at this point in the history
Improvements to buffer pool
  • Loading branch information
lthibault committed Feb 28, 2023
2 parents 05c5d1a + 907b4a0 commit 29a3330
Show file tree
Hide file tree
Showing 2 changed files with 176 additions and 27 deletions.
128 changes: 101 additions & 27 deletions exp/bufferpool/pool.go
Original file line number Diff line number Diff line change
@@ -1,52 +1,126 @@
// Package bufferpool supports object pooling for byte buffers.
package bufferpool

import (
	"sync"
)

const (
	// defaultMinSize is the MinAlloc applied to a zero-value Pool.
	defaultMinSize = 1024
	// defaultBucketCount is the BucketCount applied to a zero-value
	// Pool; the largest bucket then holds ~1MiB buffers.
	defaultBucketCount = 20
)

// Default is a global pool, ready to use.
var Default Pool

// A pool of buffers, in variable sizes.
// Pool maintains a list of BucketCount buckets that contain buffers
// of exponentially-increasing capacity, 1 << 0 to 1 << BucketCount.
//
// The MinAlloc field specifies the minimum capacity of new buffers
// allocated by Pool, which improves reuse of small buffers. For the
// avoidance of doubt: calls to Get() with size < MinAlloc return a
// buffer of len(buf) = size and cap(buf) >= MinAlloc. MinAlloc MUST
// NOT exceed 1 << BucketCount, or method calls to Pool will panic.
//
// The zero-value Pool is ready to use, defaulting to BucketCount=20
// and MinAlloc=1024 (max size = ~1MiB). Most applications will not
// benefit from tuning these parameters.
//
// As a general rule, increasing MinAlloc reduces GC latency at the
// expense of increased memory usage. Increasing BucketCount can
// reduce GC latency in applications that frequently allocate large
// buffers.
type Pool struct {
buckets [nBuckets]sync.Pool
once sync.Once
MinAlloc, BucketCount int
buckets bucketSlice
}

// Get a buffer of the given length. Its capacity may be larger than the
// requested size.
// Get a buffer of len(buf) == size and cap >= size.
func (p *Pool) Get(size int) []byte {
for i := 0; i < nBuckets; i++ {
capacity := 1 << i
if capacity >= size {
var ret []byte
item := p.buckets[i].Get()
if item == nil {
ret = make([]byte, capacity)
} else {
ret = item.([]byte)
}
ret = ret[:size]
return ret
}
p.init()

if buf := p.buckets.Get(size); buf != nil {
return buf[:size]
}

return make([]byte, size)
}

// Return a buffer to the pool. Zeros the slice (but not the full backing array)
// before making it available for future use.
// Put returns the buffer to the pool. The first len(buf) bytes
// of the buffer are zeroed.
func (p *Pool) Put(buf []byte) {
for i := 0; i < len(buf); i++ {
p.init()

for i := range buf {
buf[i] = 0
}

capacity := cap(buf)
for i := 0; i < nBuckets; i++ {
if (1 << i) == capacity {
p.buckets[i].Put(buf[:capacity])
return
p.buckets.Put(buf[:cap(buf)])
}

// init lazily applies defaults and builds the bucket list. It is safe
// for concurrent use; the body runs exactly once per Pool.
func (p *Pool) init() {
	p.once.Do(func() {
		if p.MinAlloc <= 0 {
			p.MinAlloc = defaultMinSize
		}

		if p.BucketCount <= 0 {
			p.BucketCount = defaultBucketCount
		}

		if p.MinAlloc > (1 << p.BucketCount) {
			panic("MinAlloc greater than largest bucket")
		}

		// BUG FIX: allocate the bucket slice BEFORE ranging over it.
		// Previously p.buckets was still nil at the range below, so
		// idx always stayed 0 and the MinAlloc redirection never took
		// effect (small Gets returned buffers with cap < MinAlloc).
		p.buckets = make(bucketSlice, p.BucketCount)

		// Get the index of the bucket responsible for MinAlloc.
		var idx int
		for idx = range p.buckets {
			if 1<<idx >= p.MinAlloc {
				break
			}
		}

		for i := range p.buckets {
			if i < idx {
				// Set the 'New' function for all "small" buckets to
				// p.buckets[idx].Get, so as to allow reuse of buffers
				// smaller than MinAlloc that are passed to Put, while
				// still maximizing reuse of buffers allocated by Get.
				// Note that we cannot simply use p.buckets[idx].New,
				// as this would side-step pooling.
				p.buckets[i].New = p.buckets[idx].Get
			} else {
				p.buckets[i].New = newAllocFunc(i)
			}
		}
	})
}

// bucketSlice indexes sync.Pools by power-of-two capacity: bucket i
// serves buffers whose capacity c satisfies 1<<i <= c < 1<<(i+1).
type bucketSlice []sync.Pool

// Get returns a buffer from the smallest bucket able to hold size
// bytes, or nil when size exceeds every bucket.
func (bs bucketSlice) Get(size int) []byte {
	for i := range bs {
		if size <= 1<<i {
			return bs[i].Get().([]byte)
		}
	}
	return nil
}

// Put files buf into the bucket matching its capacity. Buffers whose
// capacity falls outside every bucket are dropped for the GC to take.
func (bs bucketSlice) Put(buf []byte) {
	c := cap(buf)
	for i := range bs {
		if c >= 1<<i && c < 1<<(i+1) {
			bs[i].Put(buf)
			return
		}
	}
}

// newAllocFunc builds a sync.Pool 'New' callback that allocates a
// zeroed buffer of capacity 1<<i.
func newAllocFunc(i int) func() any {
	size := 1 << i
	return func() any {
		return make([]byte, size)
	}
}
75 changes: 75 additions & 0 deletions exp/bufferpool/pool_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
package bufferpool_test

import (
"testing"

"capnproto.org/go/capnp/v3/exp/bufferpool"
"github.com/stretchr/testify/assert"
)

// TestInvariants checks that Pool enforces its documented invariant:
// MinAlloc must not exceed 1 << BucketCount.
func TestInvariants(t *testing.T) {
	t.Parallel()

	// One bucket can hold at most 1<<1 bytes, far below MinAlloc.
	p := bufferpool.Pool{
		BucketCount: 1,
		MinAlloc:    1024,
	}

	// The invariant is checked lazily, so the first method call panics.
	assert.Panics(t, func() { p.Get(32) },
		"should panic when BucketCount cannot satisfy MinAlloc.")
}

// TestGet verifies the Get contract: len(buf) == size, and for
// requests smaller than MinAlloc, cap(buf) == MinAlloc.
func TestGet(t *testing.T) {
	t.Parallel()

	pool := bufferpool.Pool{
		BucketCount: 8,
		MinAlloc:    64,
	}

	t.Run("Size<MinAlloc", func(t *testing.T) {
		assert.Len(t, pool.Get(32), 32, "should return buffer with len=32")
		// Small requests are served from the MinAlloc bucket, so the
		// capacity is MinAlloc (64), not the requested size.
		assert.Equal(t, pool.MinAlloc, cap(pool.Get(32)), "should return buffer with cap=MinAlloc")
	})

	t.Run("Size=MinAlloc", func(t *testing.T) {
		assert.Len(t, pool.Get(pool.MinAlloc), pool.MinAlloc, "should return buffer with len=MinAlloc")
		assert.Equal(t, pool.MinAlloc, cap(pool.Get(pool.MinAlloc)), "should return buffer with cap=MinAlloc")
	})

	t.Run("Size>MinAlloc", func(t *testing.T) {
		assert.Len(t, pool.Get(33), 33, "should return buffer with len=33")
		assert.Equal(t, 128, cap(pool.Get(128)), "should return buffer with cap=128")
	})

	t.Run("Size>MaxSize", func(t *testing.T) {
		// 512 exceeds the largest bucket (1<<7 = 128), so the buffer
		// is freshly allocated with exactly the requested capacity.
		assert.Len(t, pool.Get(512), 512, "should return buffer with len=512")
		assert.GreaterOrEqual(t, cap(pool.Get(512)), 512, "should return buffer with cap>=512")
	})
}

// TestPut verifies that a buffer handed to Put comes back from a
// subsequent Get with its first len(buf) bytes zeroed.
func TestPut(t *testing.T) {
	t.Parallel()

	var pool bufferpool.Pool

	// Fill a 1 KiB buffer with nonzero bytes, then shrink it to 8
	// bytes before returning it to the pool.
	scratch := make([]byte, 1024)
	for i := range scratch {
		scratch[i] = byte(i)
	}
	scratch = scratch[:8]

	pool.Put(scratch)
	got := pool.Get(8)

	assert.Equal(t, make([]byte, 8), got, "should zero first 8 bytes")
}

// BenchmarkPool measures the cost of a Get/Put round-trip on the
// default global pool.
func BenchmarkPool(b *testing.B) {
	for n := 0; n < b.N; n++ {
		bufferpool.Default.Put(bufferpool.Default.Get(32))
	}
}

0 comments on commit 29a3330

Please sign in to comment.