backporting fork changes to v1.17.1 #5

Merged
merged 7 commits on Oct 3, 2024
6 changes: 2 additions & 4 deletions pkg/kgo/client.go
@@ -78,8 +78,7 @@ type Client struct {
 	producer producer
 	consumer consumer
 
-	compressor   *compressor
-	decompressor *decompressor
+	compressor *compressor
 
 	coordinatorsMu sync.Mutex
 	coordinators   map[coordinatorKey]*coordinatorLoad
@@ -482,8 +481,7 @@ func NewClient(opts ...Opt) (*Client, error) {
 		bufPool: newBufPool(),
 		prsPool: newPrsPool(),
 
-		compressor:   compressor,
-		decompressor: newDecompressor(),
+		compressor: compressor,
 
 		coordinators: make(map[coordinatorKey]*coordinatorLoad),

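With the struct field gone, decompression state is no longer per-client: compression.go below introduces a package-level defaultDecompressor shared by every client. A minimal sketch of why a shared instance is safe, using only what the diff shows (a decompressor is just a bundle of sync.Pools, and sync.Pool is safe for concurrent use):

```go
// Illustrative only; the field set mirrors the pools visible in the diff below.
package sketch

import "sync"

type decompressor struct {
	ungzPool   sync.Pool // pooled gzip readers
	unlz4Pool  sync.Pool // pooled lz4 readers
	unzstdPool sync.Pool // pooled zstd decoders
}

func newDecompressor() *decompressor { return &decompressor{} }

// One shared instance can serve all clients concurrently, so the Client
// struct no longer needs its own decompressor field.
var defaultDecompressor = newDecompressor()
```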
59 changes: 51 additions & 8 deletions pkg/kgo/compression.go
@@ -12,6 +12,8 @@ import (
 	"github.com/klauspost/compress/s2"
 	"github.com/klauspost/compress/zstd"
 	"github.com/pierrec/lz4/v4"
+
+	"github.com/twmb/franz-go/pkg/kgo/internal/pool"
 )
 
 var byteBuffers = sync.Pool{New: func() any { return bytes.NewBuffer(make([]byte, 8<<10)) }}
@@ -235,6 +237,8 @@ type decompressor struct {
 	unzstdPool sync.Pool
 }
 
+var defaultDecompressor = newDecompressor()
+
 func newDecompressor() *decompressor {
 	d := &decompressor{
 		ungzPool: sync.Pool{
@@ -264,15 +268,23 @@ type zstdDecoder struct {
 	inner *zstd.Decoder
 }
 
-func (d *decompressor) decompress(src []byte, codec byte) ([]byte, error) {
+func (d *decompressor) decompress(src []byte, codec byte, pool *pool.BucketedPool[byte]) ([]byte, error) {
 	// Early return in case there is no compression
 	compCodec := codecType(codec)
 	if compCodec == codecNone {
 		return src, nil
 	}
-	out := byteBuffers.Get().(*bytes.Buffer)
-	out.Reset()
-	defer byteBuffers.Put(out)
+
+	out, buf, err := d.getDecodedBuffer(src, compCodec, pool)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if compCodec == codecSnappy {
+			return
+		}
+		pool.Put(buf)
+	}()
 
 	switch compCodec {
 	case codecGzip:
@@ -284,7 +296,7 @@ func (d *decompressor) decompress(src []byte, codec byte) ([]byte, error) {
 		if _, err := io.Copy(out, ungz); err != nil {
 			return nil, err
 		}
-		return append([]byte(nil), out.Bytes()...), nil
+		return d.copyDecodedBuffer(out.Bytes(), compCodec, pool), nil
 	case codecSnappy:
 		if len(src) > 16 && bytes.HasPrefix(src, xerialPfx) {
 			return xerialDecode(src)
@@ -293,28 +305,59 @@ func (d *decompressor) decompress(src []byte, codec byte) ([]byte, error) {
 		if err != nil {
 			return nil, err
 		}
-		return append([]byte(nil), decoded...), nil
+		return d.copyDecodedBuffer(decoded, compCodec, pool), nil
 	case codecLZ4:
 		unlz4 := d.unlz4Pool.Get().(*lz4.Reader)
 		defer d.unlz4Pool.Put(unlz4)
 		unlz4.Reset(bytes.NewReader(src))
 		if _, err := io.Copy(out, unlz4); err != nil {
 			return nil, err
 		}
-		return append([]byte(nil), out.Bytes()...), nil
+		return d.copyDecodedBuffer(out.Bytes(), compCodec, pool), nil
 	case codecZstd:
 		unzstd := d.unzstdPool.Get().(*zstdDecoder)
 		defer d.unzstdPool.Put(unzstd)
 		decoded, err := unzstd.inner.DecodeAll(src, out.Bytes())
 		if err != nil {
 			return nil, err
 		}
-		return append([]byte(nil), decoded...), nil
+		return d.copyDecodedBuffer(decoded, compCodec, pool), nil
 	default:
 		return nil, errors.New("unknown compression codec")
 	}
 }
 
+func (d *decompressor) getDecodedBuffer(src []byte, compCodec codecType, pool *pool.BucketedPool[byte]) (*bytes.Buffer, []byte, error) {
+	var (
+		decodedBufSize int
+		err            error
+	)
+	switch compCodec {
+	case codecSnappy:
+		decodedBufSize, err = s2.DecodedLen(src)
+		if err != nil {
+			return nil, nil, err
+		}
+
+	default:
+		// Make a guess at the output size.
+		decodedBufSize = len(src) * 2
+	}
+	buf := pool.Get(decodedBufSize)[:0]
+
+	return bytes.NewBuffer(buf), buf, nil
+}
+
+func (d *decompressor) copyDecodedBuffer(decoded []byte, compCodec codecType, pool *pool.BucketedPool[byte]) []byte {
+	if compCodec == codecSnappy {
+		// We already know the actual size of the decoded buffer before decompression,
+		// so there's no need to copy the buffer.
+		return decoded
+	}
+	out := pool.Get(len(decoded))
+	return append(out[:0], decoded...)
+}
+
 var xerialPfx = []byte{130, 83, 78, 65, 80, 80, 89, 0}
 
 var errMalformedXerial = errors.New("malformed xerial framing")
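The rewritten decompress splits buffer management in two: getDecodedBuffer takes a slice from the bucketed pool, sized exactly via s2.DecodedLen for snappy and by a 2x guess for the other codecs, and copyDecodedBuffer copies the result into a pool-sized slice for every codec except snappy, whose pool buffer is handed to the caller as-is (which is why the deferred pool.Put skips snappy). A self-contained sketch of the guessed-size path, with stub get/put standing in for the bucketed pool:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// get and put stand in for the PR's pool.BucketedPool[byte]; any pool that
// hands out capacity-sized byte slices fits here.
func get(n int) []byte { return make([]byte, 0, n) }
func put(b []byte)     {}

func gunzipPooled(src []byte) ([]byte, error) {
	buf := get(2 * len(src)) // guessed output size, as in getDecodedBuffer
	out := bytes.NewBuffer(buf)
	defer put(buf) // recycle the guess buffer; out may have grown past it

	zr, err := gzip.NewReader(bytes.NewReader(src))
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(out, zr); err != nil {
		return nil, err
	}
	// The copyDecodedBuffer step: move the result into a pool-sized slice the
	// caller can later Put back, instead of aliasing bytes.Buffer's array.
	dst := get(len(out.Bytes()))
	return append(dst[:0], out.Bytes()...), nil
}

func main() {
	var zipped bytes.Buffer
	zw := gzip.NewWriter(&zipped)
	zw.Write([]byte("hello hello hello"))
	zw.Close()

	plain, err := gunzipPooled(zipped.Bytes())
	fmt.Println(string(plain), err) // hello hello hello <nil>
}
```

If the 2x guess is too small, bytes.Buffer grows with an ordinary allocation; the final copy then targets a slice sized to the real output, and only the original guess buffer is recycled.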
8 changes: 6 additions & 2 deletions pkg/kgo/compression_test.go
@@ -10,6 +10,8 @@ import (
 	"testing"
 
 	"github.com/pierrec/lz4/v4"
+
+	"github.com/twmb/franz-go/pkg/kgo/internal/pool"
 )
 
 // Regression test for #778.
@@ -78,6 +80,8 @@ func TestCompressDecompress(t *testing.T) {
 		randStr(1 << 8),
 	}
 
+	buffPool := pool.NewBucketedPool(1, 1<<16, 2, func(int) []byte { return make([]byte, 1<<16) })
+
 	var wg sync.WaitGroup
 	for _, produceVersion := range []int16{
 		0, 7,
@@ -110,7 +114,7 @@ func TestCompressDecompress(t *testing.T) {
 				w.Reset()
 
 				got, used := c.compress(w, in, produceVersion)
-				got, err := d.decompress(got, byte(used))
+				got, err := d.decompress(got, byte(used), buffPool)
 				if err != nil {
 					t.Errorf("unexpected decompress err: %v", err)
 					return
@@ -156,7 +160,7 @@ func BenchmarkDecompress(b *testing.B) {
 		b.Run(fmt.Sprint(codec), func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				d := newDecompressor()
-				d.decompress(w.Bytes(), byte(codec))
+				d.decompress(w.Bytes(), byte(codec), pool.NewBucketedPool(1, 1<<16, 2, func(int) []byte { return make([]byte, 1<<16) }))
 			}
 		})
 		byteBuffers.Put(w)
22 changes: 22 additions & 0 deletions pkg/kgo/config.go
@@ -16,6 +16,8 @@ import (
 	"github.com/twmb/franz-go/pkg/kmsg"
 	"github.com/twmb/franz-go/pkg/kversion"
 	"github.com/twmb/franz-go/pkg/sasl"
+
+	"github.com/twmb/franz-go/pkg/kgo/internal/pool"
 )
 
 // Opt is an option to configure a client.
@@ -151,6 +153,9 @@ type cfg struct {
 	partitions map[string]map[int32]Offset // partitions to directly consume from
 	regex      bool
 
+	recordsPool          *recordsPool
+	decompressBufferPool *pool.BucketedPool[byte]
+
 	////////////////////////////
 	// CONSUMER GROUP SECTION //
 	////////////////////////////
@@ -389,6 +394,11 @@ func (cfg *cfg) validate() error {
 	}
 	cfg.hooks = processedHooks
 
+	// Assume a 2x compression ratio.
+	maxDecompressedBatchSize := int(cfg.maxBytes.load()) * 2
+	cfg.decompressBufferPool = pool.NewBucketedPool[byte](4096, maxDecompressedBatchSize, 2, func(sz int) []byte {
+		return make([]byte, sz)
+	})
 	return nil
 }

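The buckets built here double from 4096 bytes up to twice the configured fetch maxBytes. A quick sketch (not part of the change) of the ladder this yields, assuming franz-go's default 50 MiB maxBytes:

```go
package main

import "fmt"

func main() {
	// Assumes kgo's default maxBytes of 50 MiB; any configured value works the same way.
	maxDecompressedBatchSize := (50 << 20) * 2

	for size := 4096; size <= maxDecompressedBatchSize; size *= 2 {
		fmt.Println(size)
	}
	// Prints 4096, 8192, ..., 67108864. Requests larger than the top bucket
	// fall through to a plain make and are never pooled.
}
```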
@@ -1347,6 +1357,18 @@ func ConsumeRegex() ConsumerOpt {
 	return consumerOpt{func(cfg *cfg) { cfg.regex = true }}
 }
 
+// EnableRecordsPool sets the client to obtain the *kgo.Record objects from a pool,
+// in order to minimize the number of allocations.
+//
+// By enabling this option, the records returned by PollFetches/PollRecords
+// can be sent back to the pool via the ReuseRecords method in order to be recycled.
+//
+// This option is particularly useful for use cases where the volume of generated records is very high,
+// as the extra GC overhead can negatively impact performance.
+func EnableRecordsPool() ConsumerOpt {
+	return consumerOpt{func(cfg *cfg) { cfg.recordsPool = newRecordsPool() }}
+}
+
 // DisableFetchSessions sets the client to not use fetch sessions (Kafka 1.0+).
 //
 // A "fetch session" is a way to reduce bandwidth for fetch requests &
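For context, a hypothetical consumer wired up with the new option might look like this. EnableRecordsPool appears in the diff above; ReuseRecords is only mentioned in the doc comment, so its exact signature here is an assumption:

```go
package main

import (
	"context"

	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),
		kgo.ConsumeTopics("events"),
		kgo.EnableRecordsPool(), // records come from, and return to, a pool
	)
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	for {
		fetches := cl.PollFetches(context.Background())
		recs := fetches.Records()
		process(recs)
		cl.ReuseRecords(recs) // assumed shape: hand records back for recycling
	}
}

// process must not retain the records past this call once they are reused.
func process(recs []*kgo.Record) {}
```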
94 changes: 94 additions & 0 deletions pkg/kgo/internal/pool/bucketed_pool.go
@@ -0,0 +1,94 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pool

import (
	"sync"
)

// BucketedPool is a bucketed pool for variably sized slices.
type BucketedPool[T any] struct {
	buckets []sync.Pool
	sizes   []int
	// make is the function used to create an empty slice when none exist yet.
	make func(int) []T
}

// NewBucketedPool returns a new BucketedPool with size buckets for minSize to maxSize
// increasing by the given factor.
func NewBucketedPool[T any](minSize, maxSize int, factor float64, makeFunc func(int) []T) *BucketedPool[T] {
	if minSize < 1 {
		panic("invalid minimum pool size")
	}
	if maxSize < 1 {
		panic("invalid maximum pool size")
	}
	if factor < 1 {
		panic("invalid factor")
	}

	var sizes []int

	for s := minSize; s <= maxSize; s = int(float64(s) * factor) {
		sizes = append(sizes, s)
	}

	p := &BucketedPool[T]{
		buckets: make([]sync.Pool, len(sizes)),
		sizes:   sizes,
		make:    makeFunc,
	}
	return p
}

// Get returns a new slice with capacity greater than or equal to size.
func (p *BucketedPool[T]) Get(size int) []T {
	for i, bktSize := range p.sizes {
		if size > bktSize {
			continue
		}
		buff := p.buckets[i].Get()
		if buff == nil {
			buff = p.make(bktSize)
		}
		return buff.([]T)
	}
	return p.make(size)
}

// Put adds a slice to the right bucket in the pool.
// If the slice does not belong to any bucket in the pool, it is ignored.
func (p *BucketedPool[T]) Put(s []T) {
	sCap := cap(s)
	if sCap < p.sizes[0] {
		return
	}

	for i, size := range p.sizes {
		if sCap > size {
			continue
		}

		if sCap == size {
			// Buffer is exactly the minimum size for this bucket. Add it to this bucket.
			p.buckets[i].Put(s)
		} else {
			// Buffer belongs in the previous bucket.
			p.buckets[i-1].Put(s)
		}
		return
	}
}


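The Get/Put semantics are easiest to see with concrete sizes. A sketch in Example-test form (the package is internal, so this would live next to the pool):

```go
package pool

import "fmt"

func ExampleBucketedPool() {
	p := NewBucketedPool(4, 64, 2, func(sz int) []byte { return make([]byte, 0, sz) })
	// Bucket sizes: 4, 8, 16, 32, 64.

	b := p.Get(10) // rounds up to the 16-byte bucket
	fmt.Println(cap(b) >= 16)

	p.Put(b)                  // cap 16 matches a bucket exactly: recycled there
	p.Put(make([]byte, 0, 3)) // below the smallest bucket: silently dropped

	big := p.Get(100) // larger than every bucket: plain make, never pooled
	fmt.Println(cap(big) >= 100)

	// Output:
	// true
	// true
}
```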
68 changes: 68 additions & 0 deletions pkg/kgo/internal/pool/bucketed_pool_test.go
@@ -0,0 +1,68 @@
// SPDX-License-Identifier: Apache-2.0
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/main/util/pool/pool_test.go
// Provenance-includes-copyright: The Prometheus Authors

package pool

import (
	"testing"
)

func makeFunc(size int) []int {
	return make([]int, 0, size)
}

func TestBucketedPool_HappyPath(t *testing.T) {
	testPool := NewBucketedPool(1, 8, 2, makeFunc)
	cases := []struct {
		size        int
		expectedCap int
	}{
		{
			size:        -1,
			expectedCap: 1,
		},
		{
			size:        3,
			expectedCap: 4,
		},
		{
			size:        10,
			expectedCap: 10,
		},
	}
	for _, c := range cases {
		ret := testPool.Get(c.size)
		if cap(ret) < c.expectedCap {
			t.Fatalf("expected cap >= %d, got %d", c.expectedCap, cap(ret))
		}
		testPool.Put(ret)
	}
}

func TestBucketedPool_SliceNotAlignedToBuckets(t *testing.T) {
	pool := NewBucketedPool(1, 1000, 10, makeFunc)
	pool.Put(make([]int, 0, 2))
	s := pool.Get(3)
	if cap(s) < 3 {
		t.Fatalf("expected cap >= 3, got %d", cap(s))
	}
}

func TestBucketedPool_PutEmptySlice(t *testing.T) {
	pool := NewBucketedPool(1, 1000, 10, makeFunc)
	pool.Put([]int{})
	s := pool.Get(1)
	if cap(s) < 1 {
		t.Fatalf("expected cap >= 1, got %d", cap(s))
	}
}

func TestBucketedPool_PutSliceSmallerThanMinimum(t *testing.T) {
	pool := NewBucketedPool(3, 1000, 10, makeFunc)
	pool.Put([]int{1, 2})
	s := pool.Get(3)
	if cap(s) < 3 {
		t.Fatalf("expected cap >= 3, got %d", cap(s))
	}
}