forked from twmb/franz-go
Backporting fork changes to v1.17.1 (#5)
* fetching: export utilities for decompressing and parsing partition fetch responses

### Background

In grafana/mimir we are working towards making fetch requests ourselves. The primary reason is that individual requests to the Kafka backend are slow, so doing them sequentially per partition becomes the bottleneck in our application. We want to fetch records in parallel to speed up consumption.

One difficulty I met when issuing `FetchRequest`s ourselves is that parsing the response is non-trivial. That's why I'm proposing to export these functions for downstream projects to use.

Alternatively, I can also try contributing the concurrent fetching logic. But I believe that is much more nuanced, with more trade-offs around fetched bytes and latency, so I wasn't sure whether it's a good fit for a general-purpose library. I'm open to discussing this further.

### What this PR does

Moves `(*kgo.cursorOffsetNext).processRespPartition` from being a method to being a standalone function, `kgo.processRespPartition`. Small changes were also necessary to make the interface suitable for public use (like removing the `*broker` parameter).

### Side effects

To minimize the necessary changes and the API surface of the package, I opted to use a single global decompressor for all messages. Previously, there was one decompressor per client, and that decompressor was passed down to `(*cursorOffsetNext).processRespPartition`. My understanding is that sharing pooled readers (lz4, zstd, gzip) shouldn't have a negative impact on performance, because usage patterns do not affect the behaviour of a reader (for example, a consistent size of decompressed data doesn't make the reader more or less efficient). I have not thoroughly verified or tested this; let me know if you think that's important. An alternative is to also export the `decompressor` along with `newDecompressor()` and the auxiliary types for decompression. (A toy sketch of the shared-pool idea follows the change summary below.)

* Restore multiline processV0OuterMessage

* `*kgo.Record` pooling support

  Signed-off-by: Miguel Ángel Ortuño <ortuman@gmail.com>

* Merge pull request #1 from grafana/ortuman/reduce-kgo-record-alloc

  `*kgo.Record` pooling support

* fetching: export utilities for decompressing and parsing partition fetch responses

* Merge pull request #4 from dimitarvdimitrov/dimitar/grafana-master-with-export-partition-parsing-utils

  fetching: export utilities for decompressing and parsing partition fetch responses

* Merge pull request #3 from ortuman/reduce-decompression-buffer-allocations

  Signed-off-by: Miguel Ángel Ortuño <ortuman@gmail.com>

---------

Signed-off-by: Miguel Ángel Ortuño <ortuman@gmail.com>
Co-authored-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>
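To make the background concrete, here is a rough, hypothetical sketch of issuing a raw per-partition `FetchRequest` with `kmsg`, which is the flow the exported parsing utilities are meant to support. The broker address, topic name, and sizing are placeholders, and the exported parser is not called here because its exact exported name and signature live in the diff, not in this sketch:

```go
package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/pkg/kmsg"
)

func main() {
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092")) // placeholder address
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// One request per partition; several of these can run concurrently,
	// which is the whole point of issuing fetches by hand.
	req := kmsg.NewPtrFetchRequest()
	req.MaxWaitMillis = 500
	req.MinBytes = 1
	req.MaxBytes = 16 << 20

	topic := kmsg.NewFetchRequestTopic()
	topic.Topic = "events" // placeholder topic
	part := kmsg.NewFetchRequestTopicPartition()
	part.Partition = 0
	part.FetchOffset = 0
	part.PartitionMaxBytes = 1 << 20
	topic.Partitions = append(topic.Partitions, part)
	req.Topics = append(req.Topics, topic)

	// NOTE: a real implementation must route this to the partition
	// leader (e.g. via cl.Broker(leaderID)); plain cl.Request may land
	// on a broker that does not host the partition.
	resp, err := req.RequestWith(context.Background(), cl)
	if err != nil {
		panic(err)
	}

	// RecordBatches is the raw byte blob whose decompression and
	// parsing this commit exports.
	rp := resp.Topics[0].Partitions[0]
	fmt.Printf("partition %d: err=%d, %d raw batch bytes\n",
		rp.Partition, rp.ErrorCode, len(rp.RecordBatches))
}
```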
1 parent 8b955b4 · commit f8f76a4 · Showing 8 changed files with 473 additions and 78 deletions.
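As promised above, a toy sketch of the shared-pool decompression idea described in the Side effects section, using only the stdlib gzip reader. Names and scope are illustrative; the fork's actual decompressor also covers lz4, zstd, and snappy:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"sync"
)

// gzipReaders is a single process-wide pool of gzip readers, mirroring
// the commit's "one global decompressor" approach instead of one
// decompressor per client.
var gzipReaders = sync.Pool{New: func() any { return new(gzip.Reader) }}

// decompressGzip borrows a pooled reader, resets it onto the compressed
// payload, and returns the inflated bytes.
func decompressGzip(compressed []byte) ([]byte, error) {
	zr := gzipReaders.Get().(*gzip.Reader)
	defer gzipReaders.Put(zr)
	if err := zr.Reset(bytes.NewReader(compressed)); err != nil {
		return nil, err
	}
	return io.ReadAll(zr)
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello kafka"))
	zw.Close()

	out, err := decompressGzip(buf.Bytes())
	fmt.Println(string(out), err)
}
```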
@@ -0,0 +1,94 @@

```go
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pool

import (
	"sync"
)

// BucketedPool is a bucketed pool for variably sized slices.
type BucketedPool[T any] struct {
	buckets []sync.Pool
	sizes   []int
	// make is the function used to create an empty slice when none exist yet.
	make func(int) []T
}

// NewBucketedPool returns a new BucketedPool with size buckets for minSize to maxSize
// increasing by the given factor.
func NewBucketedPool[T any](minSize, maxSize int, factor float64, makeFunc func(int) []T) *BucketedPool[T] {
	if minSize < 1 {
		panic("invalid minimum pool size")
	}
	if maxSize < 1 {
		panic("invalid maximum pool size")
	}
	if factor < 1 {
		panic("invalid factor")
	}

	var sizes []int

	for s := minSize; s <= maxSize; s = int(float64(s) * factor) {
		sizes = append(sizes, s)
	}

	p := &BucketedPool[T]{
		buckets: make([]sync.Pool, len(sizes)),
		sizes:   sizes,
		make:    makeFunc,
	}
	return p
}

// Get returns a new slice with capacity greater than or equal to size.
func (p *BucketedPool[T]) Get(size int) []T {
	for i, bktSize := range p.sizes {
		if size > bktSize {
			continue
		}
		buff := p.buckets[i].Get()
		if buff == nil {
			buff = p.make(bktSize)
		}
		return buff.([]T)
	}
	// size exceeds the largest bucket: allocate directly, unpooled.
	return p.make(size)
}

// Put adds a slice to the right bucket in the pool.
// If the slice does not belong to any bucket in the pool, it is ignored.
func (p *BucketedPool[T]) Put(s []T) {
	sCap := cap(s)
	if sCap < p.sizes[0] {
		return
	}

	for i, size := range p.sizes {
		if sCap > size {
			continue
		}

		if sCap == size {
			// Buffer is exactly the minimum size for this bucket. Add it to this bucket.
			p.buckets[i].Put(s)
		} else {
			// Buffer belongs in previous bucket.
			p.buckets[i-1].Put(s)
		}
		return
	}
}
```
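The commit message's `*kgo.Record` pooling rides on this pool. Below is a minimal sketch of the intended borrow/return cycle, written as hypothetical caller code in the same package; `record` stands in for `kgo.Record`, and the bucket bounds are made up, not taken from the fork:

```go
package pool

// record is a stand-in for kgo.Record; the fork pools []*kgo.Record
// slices, but the reuse pattern is independent of the element type.
type record struct{ key, value []byte }

// recordBuffers hands out record slices from power-of-two buckets.
// These bounds are illustrative only.
var recordBuffers = NewBucketedPool(64, 16384, 2, func(sz int) []*record {
	return make([]*record, 0, sz)
})

// decodeBatch borrows a slice big enough for the incoming batch and fills it.
func decodeBatch(n int) []*record {
	out := recordBuffers.Get(n)[:0] // cap >= n; start empty regardless of pooled length
	for i := 0; i < n; i++ {
		out = append(out, &record{})
	}
	return out
}

// releaseBatch returns a slice once its records are consumed. Truncating
// before Put means the next Get starts from an empty slice.
func releaseBatch(s []*record) {
	recordBuffers.Put(s[:0])
}
```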
@@ -0,0 +1,68 @@

```go
// SPDX-License-Identifier: Apache-2.0
// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/main/util/pool/pool_test.go
// Provenance-includes-copyright: The Prometheus Authors

package pool

import (
	"testing"
)

func makeFunc(size int) []int {
	return make([]int, 0, size)
}

func TestBucketedPool_HappyPath(t *testing.T) {
	testPool := NewBucketedPool(1, 8, 2, makeFunc)
	cases := []struct {
		size        int
		expectedCap int
	}{
		{
			size:        -1,
			expectedCap: 1,
		},
		{
			size:        3,
			expectedCap: 4,
		},
		{
			size:        10,
			expectedCap: 10,
		},
	}
	for _, c := range cases {
		ret := testPool.Get(c.size)
		if cap(ret) < c.expectedCap {
			t.Fatalf("expected cap >= %d, got %d", c.expectedCap, cap(ret))
		}
		testPool.Put(ret)
	}
}

func TestBucketedPool_SliceNotAlignedToBuckets(t *testing.T) {
	pool := NewBucketedPool(1, 1000, 10, makeFunc)
	pool.Put(make([]int, 0, 2))
	s := pool.Get(3)
	if cap(s) < 3 {
		t.Fatalf("expected cap >= 3, got %d", cap(s))
	}
}

func TestBucketedPool_PutEmptySlice(t *testing.T) {
	pool := NewBucketedPool(1, 1000, 10, makeFunc)
	pool.Put([]int{})
	s := pool.Get(1)
	if cap(s) < 1 {
		t.Fatalf("expected cap >= 1, got %d", cap(s))
	}
}

func TestBucketedPool_PutSliceSmallerThanMinimum(t *testing.T) {
	pool := NewBucketedPool(3, 1000, 10, makeFunc)
	pool.Put([]int{1, 2})
	s := pool.Get(3)
	if cap(s) < 3 {
		t.Fatalf("expected cap >= 3, got %d", cap(s))
	}
}
```
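For intuition about the bucket arithmetic the tests exercise, here is a small example function in the same style, assuming it sits alongside the tests above (it additionally needs `fmt` imported):

```go
// ExampleBucketedPool walks the bucket math: minSize=1, maxSize=8,
// factor=2 yields buckets of 1, 2, 4, 8. A request for 3 elements is
// served from the size-4 bucket; a request above maxSize falls through
// to a direct allocation and is never pooled.
func ExampleBucketedPool() {
	p := NewBucketedPool(1, 8, 2, func(sz int) []int { return make([]int, 0, sz) })

	s := p.Get(3)
	fmt.Println(cap(s)) // empty bucket, so make(0, 4) is allocated

	p.Put(s) // cap 4 matches a bucket exactly, so the slice is retained

	big := p.Get(100) // larger than maxSize: allocated directly
	fmt.Println(cap(big) >= 100)

	// Output:
	// 4
	// true
}
```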