Skip to content

Commit

Permalink
Address code review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
ash2k committed Oct 29, 2024
1 parent 1f1760d commit 8987d55
Show file tree
Hide file tree
Showing 2 changed files with 51 additions and 38 deletions.
19 changes: 12 additions & 7 deletions mem/buffer_slice.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ import (
)

const (
	// readAllBufSize is the chunk size ReadAll requests from the BufferPool
	// when the source reader does not implement io.WriterTo.
	// 32 KiB is what io.Copy uses.
	readAllBufSize = 32 * 1024
)

// BufferSlice offers a means to represent data that spans one or more Buffer
Expand Down Expand Up @@ -223,8 +224,8 @@ func (w *writer) Write(p []byte) (n int, err error) {

// NewWriter wraps the given BufferSlice and BufferPool to implement the
// io.Writer interface. Every call to Write copies the contents of the given
// buffer into a new Buffer pulled from the given pool and the Buffer is added to
// the given BufferSlice.
// buffer into a new Buffer pulled from the given pool and the Buffer is
// added to the given BufferSlice.
func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
	// Bundle the destination slice and the pool into a writer value; each
	// Write will copy into a pool-backed Buffer appended to buffers.
	w := &writer{
		buffers: buffers,
		pool:    pool,
	}
	return w
}
Expand All @@ -233,20 +234,24 @@ func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
// A failed call returns a non-nil error and may also return partially read
// buffers. It is the caller's responsibility to free the returned buffers,
// even when ReadAll returns an error.
func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
var result BufferSlice
wt, ok := r.(io.WriterTo)
if ok {
// This is more optimal since wt knows the size of chunks it wants to write and, hence, we can allocate
// buffers of an optimal size to fit them. E.g. might be a single big chunk, and we wouldn't chop it into pieces.
// This is more optimal since wt knows the size of chunks it wants to
// write and, hence, we can allocate buffers of an optimal size to fit
// them. E.g. might be a single big chunk, and we wouldn't chop it
// into pieces.
w := NewWriter(&result, pool)
_, err := wt.WriteTo(w)
return result, err
}
for {
buf := pool.Get(readAllBufSize)
// We asked for 32KiB but may have been given a bigger buffer. Use all of it if that's the case.
// We asked for 32KiB but may have been given a bigger buffer.
// Use all of it if that's the case.
*buf = (*buf)[:cap(*buf)]
usedCap := 0
for {
Expand Down
70 changes: 39 additions & 31 deletions mem/buffer_slice_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,12 +166,13 @@ func (s) TestBufferSlice_Reader(t *testing.T) {
}
}

// TestBufferSlice_ReadAll_Reads exercises ReadAll by having it read various combinations of data, empty reads, and EOF.
func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
testcases := []struct {
name string
reads []readStep
expectedErr string
expectedBufs int
name string
reads []readStep
wantErr string
wantBufs int
}{
{
name: "EOF",
Expand All @@ -191,7 +192,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "data+EOF",
Expand All @@ -201,7 +202,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "0,data+EOF",
Expand All @@ -212,7 +213,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "0,data,EOF",
Expand All @@ -225,7 +226,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "data,data+EOF",
Expand All @@ -238,7 +239,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "error",
Expand All @@ -247,7 +248,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: errors.New("boom"),
},
},
expectedErr: "boom",
wantErr: "boom",
},
{
name: "data+error",
Expand All @@ -257,8 +258,8 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: errors.New("boom"),
},
},
expectedErr: "boom",
expectedBufs: 1,
wantErr: "boom",
wantBufs: 1,
},
{
name: "data,data+error",
Expand All @@ -271,8 +272,8 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: errors.New("boom"),
},
},
expectedErr: "boom",
expectedBufs: 1,
wantErr: "boom",
wantBufs: 1,
},
{
name: "data,data+EOF - whole buf",
Expand All @@ -285,7 +286,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "data,data,EOF - whole buf",
Expand All @@ -300,7 +301,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 1,
wantBufs: 1,
},
{
name: "data,data,EOF - 2 bufs",
Expand All @@ -321,7 +322,7 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
err: io.EOF,
},
},
expectedBufs: 3,
wantBufs: 3,
},
}

Expand All @@ -334,30 +335,30 @@ func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) {
reads: tc.reads,
}
data, err := mem.ReadAll(r, pool)
if tc.expectedErr != "" {
if err == nil || err.Error() != tc.expectedErr {
t.Fatalf("ReadAll() expected error %q, got %q", tc.expectedErr, err)
if tc.wantErr != "" {
if err == nil || err.Error() != tc.wantErr {
t.Fatalf("ReadAll() returned err %v, wanted %q", err, tc.wantErr)
}
} else {
if err != nil {
t.Fatal(err)
}
}
actualData := data.Materialize()
if !bytes.Equal(r.read, actualData) {
t.Fatalf("ReadAll() expected data %q, got %q", r.read, actualData)
gotData := data.Materialize()
if !bytes.Equal(r.read, gotData) {
t.Fatalf("ReadAll() returned data %q, wanted %q", gotData, r.read)
}
if len(data) != tc.expectedBufs {
t.Fatalf("ReadAll() expected %d bufs, got %d", tc.expectedBufs, len(data))
if len(data) != tc.wantBufs {
t.Fatalf("ReadAll() returned %d bufs, wanted %d bufs", len(data), tc.wantBufs)
}
for i := 0; i < len(data)-1; i++ { // all but last should be full buffers
if data[i].Len() != readAllBufSize {
t.Fatalf("ReadAll() expected data length %d, got %d", readAllBufSize, data[i].Len())
t.Fatalf("ReadAll() returned data length %d, wanted %d", data[i].Len(), readAllBufSize)
}
}
data.Free()
if len(pool.allocated) > 0 {
t.Fatalf("expected no allocated buffers, got %d", len(pool.allocated))
t.Fatalf("got %d allocated buffers, wanted none", len(pool.allocated))
}
})
}
Expand Down Expand Up @@ -397,13 +398,13 @@ func (s) TestBufferSlice_ReadAll_WriteTo(t *testing.T) {
t.Fatal(err)
}

actualData := data.Materialize()
if !bytes.Equal(buf, actualData) {
t.Fatalf("ReadAll() expected data %q, got %q", buf, actualData)
gotData := data.Materialize()
if !bytes.Equal(buf, gotData) {
t.Fatalf("ReadAll() = %q, wanted %q", gotData, buf)
}
data.Free()
if len(pool.allocated) > 0 {
t.Fatalf("expected no allocated buffers, got %d", len(pool.allocated))
t.Fatalf("wanted no allocated buffers, got %d", len(pool.allocated))
}
})
}
Expand Down Expand Up @@ -435,11 +436,15 @@ var (
_ mem.BufferPool = (*testPool)(nil)
)

// readStep describes what a single stepReader.Read should do - how much data
// to return and what error to return.
type readStep struct {
	n   int   // number of bytes to report as read in this step
	err error // error to return alongside the read (e.g. io.EOF)
}

// stepReader is an io.Reader implementation that, on each Read call, reads
// the specified amount of data and/or returns the specified error.
// The data read so far is accumulated in the read field.
type stepReader struct {
reads []readStep
read []byte
Expand All @@ -459,6 +464,9 @@ func (s *stepReader) Read(buf []byte) (int, error) {
return read.n, read.err
}

// testPool is an implementation of BufferPool that makes it possible to verify that:
//   - there are matching Put calls for all Get calls.
//   - there are no unexpected Put calls.
type testPool struct {
allocated map[*[]byte]struct{}
}
Expand Down

0 comments on commit 8987d55

Please sign in to comment.