From 5b9637b5e5c1c92c04591077104a796e8bf96818 Mon Sep 17 00:00:00 2001 From: acud <12988138+acud@users.noreply.github.com> Date: Tue, 25 Jan 2022 05:56:59 -0600 Subject: [PATCH] fix(localstore): corrupt reserve size (#2735) Fixes a bug where reserve size bookkeeping was incorrect for Put calls invoked with multiple chunks (variadic arguments): per-chunk reserve size changes are now accumulated and applied to the write batch in one step, alongside the gcSize change. --- pkg/localstore/gc.go | 2 +- pkg/localstore/localstore_test.go | 18 +++- pkg/localstore/mode_put.go | 136 ++++++++++++++++-------------- pkg/localstore/mode_set.go | 31 ++++--- pkg/localstore/reserve.go | 4 +- pkg/localstore/reserve_test.go | 103 ++++++++++++++++++++++ 6 files changed, 215 insertions(+), 79 deletions(-) diff --git a/pkg/localstore/gc.go b/pkg/localstore/gc.go index 6e48aa103cc..e04271d7a82 100644 --- a/pkg/localstore/gc.go +++ b/pkg/localstore/gc.go @@ -374,7 +374,7 @@ func (db *DB) evictReserve() (totalEvicted uint64, done bool, err error) { if err != nil { return 0, false, err } - if reserveSizeStart == target { + if reserveSizeStart <= target { return 0, true, nil } diff --git a/pkg/localstore/localstore_test.go b/pkg/localstore/localstore_test.go index 837efae39b3..e4800b59328 100644 --- a/pkg/localstore/localstore_test.go +++ b/pkg/localstore/localstore_test.go @@ -381,7 +381,7 @@ func newItemsCountTest(i shed.Index, want int) func(t *testing.T) { } } -// newIndexGCSizeTest retruns a test function that validates if DB.gcSize +// newIndexGCSizeTest returns a test function that validates if DB.gcSize // value is the same as the number of items in DB.gcIndex. func newIndexGCSizeTest(db *DB) func(t *testing.T) { return func(t *testing.T) { @@ -405,6 +405,22 @@ func newIndexGCSizeTest(db *DB) func(t *testing.T) { } } +// reserveSizeTest checks that the reserveSize scalar is equal +// to the expected value. +func reserveSizeTest(db *DB, want uint64) func(t *testing.T) { + return func(t *testing.T) { + t.Helper() + + got, err := db.reserveSize.Get() + if err != nil { + t.Fatal(err) + } + if got != want { + t.Errorf("got reserve size %v, want %v", got, want) + } + } +} + // testIndexChunk embeds storageChunk with additional data that is stored // in database. It is used for index values validations. 
type testIndexChunk struct { diff --git a/pkg/localstore/mode_put.go b/pkg/localstore/mode_put.go index 377f4c90fa6..85e6a8e6e0e 100644 --- a/pkg/localstore/mode_put.go +++ b/pkg/localstore/mode_put.go @@ -20,6 +20,7 @@ import ( "context" "encoding/binary" "errors" + "fmt" "time" "github.com/ethersphere/bee/pkg/shed" @@ -63,7 +64,7 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e if len(chs) == 1 && mode != storage.ModePutRequestPin && mode != storage.ModePutUploadPin { has, err := db.retrievalDataIndex.Has(chunkToItem(chs[0])) if err != nil { - return nil, err + return nil, fmt.Errorf("initial has check: %w", err) } if has { return []bool{true}, nil @@ -83,7 +84,10 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e // variables that provide information for operations // to be done after write batch function successfully executes - var gcSizeChange int64 // number to add or subtract from gcSize + var ( + gcSizeChange int64 // number to add or subtract from gcSize + reserveSizeChange int64 // number to add or subtract from reserveSize + ) var triggerPushFeed bool // signal push feed subscriptions to iterate triggerPullFeed := make(map[uint8]struct{}) // signal pull feed subscriptions to iterate @@ -105,12 +109,13 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e item := chunkToItem(ch) pin := mode == storage.ModePutRequestPin // force pin in this mode cache := mode == storage.ModePutRequestCache // force cache - exists, c, err := db.putRequest(batch, binIDs, item, pin, cache) + exists, c, r, err := db.putRequest(batch, binIDs, item, pin, cache) if err != nil { - return nil, err + return nil, fmt.Errorf("put request: %w", err) } exist[i] = exists gcSizeChange += c + reserveSizeChange += r } case storage.ModePutUpload, storage.ModePutUploadPin: @@ -122,7 +127,7 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e item := chunkToItem(ch) exists, c, err := db.putUpload(batch, binIDs, item) if err != nil { - return nil, err + return nil, fmt.Errorf("put upload: %w", err) } exist[i] = exists if !exists { @@ -135,7 +140,7 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e if mode == storage.ModePutUploadPin { c, err = db.setPin(batch, item) if err != nil { - return nil, err + return nil, fmt.Errorf("upload set pin: %w", err) } gcSizeChange += c } @@ -147,9 +152,9 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e exist[i] = true continue } - exists, c, err := db.putSync(batch, binIDs, chunkToItem(ch)) + exists, c, r, err := db.putSync(batch, binIDs, chunkToItem(ch)) if err != nil { - return nil, err + return nil, fmt.Errorf("put sync: %w", err) } exist[i] = exists if !exists { @@ -158,6 +163,7 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e triggerPullFeed[db.po(ch.Address())] = struct{}{} } gcSizeChange += c + reserveSizeChange += r } default: @@ -170,12 +176,17 @@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e err = db.incGCSizeInBatch(batch, gcSizeChange) if err != nil { - return nil, err + return nil, fmt.Errorf("inc gc: %w", err) + } + + err = db.incReserveSizeInBatch(batch, reserveSizeChange) + if err != nil { + return nil, fmt.Errorf("inc reserve: %w", err) } err = db.shed.WriteBatch(batch) if err != nil { - return nil, err + return nil, fmt.Errorf("write batch: %w", err) } for po := range triggerPullFeed { @@ -192,43 +203,41 
@@ func (db *DB) put(mode storage.ModePut, chs ...swarm.Chunk) (exist []bool, err e // - it does not enter the syncpool // The batch can be written to the database. // Provided batch and binID map are updated. -func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item, forcePin, forceCache bool) (exists bool, gcSizeChange int64, err error) { +func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item, forcePin, forceCache bool) (exists bool, gcSizeChange, reserveSizeChange int64, err error) { exists, err = db.retrievalDataIndex.Has(item) if err != nil { - return false, 0, err + return false, 0, 0, err } if exists { - return true, 0, nil + return true, 0, 0, nil } previous, err := db.postageIndexIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { - return false, 0, err + return false, 0, 0, err } } else { if item.Immutable { - return false, 0, ErrOverwrite + return false, 0, 0, ErrOverwrite } // if a chunk is found with the same postage stamp index, // replace it with the new one only if timestamp is later if !later(previous, item) { - return false, 0, nil + return false, 0, 0, nil } gcSizeChange, err = db.setRemove(batch, previous, true) if err != nil { - return false, 0, err + return false, 0, 0, err } radius, err := db.postageRadiusIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { - return false, 0, err + return false, 0, 0, err } } else { if db.po(swarm.NewAddress(item.Address)) >= radius.Radius { - if err := db.incReserveSizeInBatch(batch, -1); err != nil { - return false, 0, err - } + reserveSizeChange-- } } @@ -237,29 +246,29 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she item.StoreTimestamp = now() item.BinID, err = db.incBinID(binIDs, db.po(swarm.NewAddress(item.Address))) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.retrievalDataIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.postageChunksIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.postageIndexIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } item.AccessTimestamp = now() err = db.retrievalAccessIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } - gcSizeChangeNew, err := db.preserveOrCache(batch, item, forcePin, forceCache) + gcSizeChangeNew, reserveSizeChangeNew, err := db.preserveOrCache(batch, item, forcePin, forceCache) if err != nil { - return false, 0, err + return false, 0, 0, err } if !forceCache { @@ -267,11 +276,11 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she // therefore we'd like to be able to pullsync it err = db.pullIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } } - return false, gcSizeChange + gcSizeChangeNew, nil + return false, gcSizeChange + gcSizeChangeNew, reserveSizeChange + reserveSizeChangeNew, nil } // putUpload adds an Item to the batch by updating required indexes: @@ -281,7 +290,7 @@ func (db *DB) putRequest(batch *leveldb.Batch, binIDs map[uint8]uint64, item she func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item) (exists bool, gcSizeChange int64, err error) { exists, err = db.retrievalDataIndex.Has(item) if err != nil { - return false, 0, err + return false, 0, fmt.Errorf("retrieval has: %w", err) } if 
exists { return true, 0, nil @@ -290,7 +299,7 @@ func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed previous, err := db.postageIndexIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { - return false, 0, err + return false, 0, fmt.Errorf("postage index get: %w", err) } } else { if item.Immutable { @@ -303,14 +312,14 @@ func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed } _, err = db.setRemove(batch, previous, true) if err != nil { - return false, 0, err + return false, 0, fmt.Errorf("same slot remove: %w", err) } } item.StoreTimestamp = now() item.BinID, err = db.incBinID(binIDs, db.po(swarm.NewAddress(item.Address))) if err != nil { - return false, 0, err + return false, 0, fmt.Errorf("inc bin id: %w", err) } err = db.retrievalDataIndex.PutInBatch(batch, item) if err != nil { @@ -339,43 +348,41 @@ func (db *DB) putUpload(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed // - put to indexes: retrieve, pull, gc // The batch can be written to the database. // Provided batch and binID map are updated. -func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item) (exists bool, gcSizeChange int64, err error) { +func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.Item) (exists bool, gcSizeChange, reserveSizeChange int64, err error) { exists, err = db.retrievalDataIndex.Has(item) if err != nil { - return false, 0, err + return false, 0, 0, err } if exists { - return true, 0, nil + return true, 0, 0, nil } previous, err := db.postageIndexIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { - return false, 0, err + return false, 0, 0, err } } else { if item.Immutable { - return false, 0, ErrOverwrite + return false, 0, 0, ErrOverwrite } // if a chunk is found with the same postage stamp index, // replace it with the new one only if timestamp is later if !later(previous, item) { - return false, 0, nil + return false, 0, 0, nil } _, err = db.setRemove(batch, previous, true) if err != nil { - return false, 0, err + return false, 0, 0, err } radius, err := db.postageRadiusIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { - return false, 0, err + return false, 0, 0, err } } else { if db.po(swarm.NewAddress(item.Address)) >= radius.Radius { - if err := db.incReserveSizeInBatch(batch, -1); err != nil { - return false, 0, err - } + reserveSizeChange-- } } } @@ -383,72 +390,71 @@ func (db *DB) putSync(batch *leveldb.Batch, binIDs map[uint8]uint64, item shed.I item.StoreTimestamp = now() item.BinID, err = db.incBinID(binIDs, db.po(swarm.NewAddress(item.Address))) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.retrievalDataIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.pullIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.postageChunksIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } err = db.postageIndexIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } item.AccessTimestamp = now() err = db.retrievalAccessIndex.PutInBatch(batch, item) if err != nil { - return false, 0, err + return false, 0, 0, err } - gcSizeChangeNew, err := db.preserveOrCache(batch, item, false, false) + gcSizeChangeNew, reserveSizeChangeNew, err := db.preserveOrCache(batch, item, false, false) if err != nil { - return false, 
0, err + return false, 0, 0, err } - return false, gcSizeChange + gcSizeChangeNew, nil + return false, gcSizeChange + gcSizeChangeNew, reserveSizeChange + reserveSizeChangeNew, nil } // preserveOrCache is a helper function used to add chunks to either a pinned reserve or gc cache // (the retrieval access index and the gc index) -func (db *DB) preserveOrCache(batch *leveldb.Batch, item shed.Item, forcePin, forceCache bool) (gcSizeChange int64, err error) { +func (db *DB) preserveOrCache(batch *leveldb.Batch, item shed.Item, forcePin, forceCache bool) (gcSizeChange, reserveSizeChange int64, err error) { if !forceCache && (withinRadiusFn(db, item) || forcePin) { if !forcePin { - if err := db.incReserveSizeInBatch(batch, 1); err != nil { - return 0, err - } + reserveSizeChange++ } - return db.setPin(batch, item) + gcSizeChange, err = db.setPin(batch, item) + return gcSizeChange, reserveSizeChange, err } // add new entry to gc index ONLY if it is not present in pinIndex ok, err := db.pinIndex.Has(item) if err != nil { - return 0, err + return 0, 0, err } if ok { - return gcSizeChange, nil + return gcSizeChange, 0, nil } exists, err := db.gcIndex.Has(item) if err != nil && !errors.Is(err, leveldb.ErrNotFound) { - return 0, err + return 0, 0, err } if exists { - return 0, nil + return 0, 0, nil } err = db.gcIndex.PutInBatch(batch, item) if err != nil { - return 0, err + return 0, 0, err } gcSizeChange++ - return gcSizeChange, nil + return gcSizeChange, 0, nil } // incBinID is a helper function for db.put* methods that increments bin id diff --git a/pkg/localstore/mode_set.go b/pkg/localstore/mode_set.go index 8b6a26c4861..9e6fb03ac10 100644 --- a/pkg/localstore/mode_set.go +++ b/pkg/localstore/mode_set.go @@ -56,18 +56,22 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) { // variables that provide information for operations // to be done after write batch function successfully executes - var gcSizeChange int64 // number to add or subtract from gcSize + var ( + gcSizeChange int64 // number to add or subtract from gcSize + reserveSizeChange int64 // number of items to add or subtract from reserveSize + ) triggerPullFeed := make(map[uint8]struct{}) // signal pull feed subscriptions to iterate switch mode { case storage.ModeSetSync: for _, addr := range addrs { - c, err := db.setSync(batch, addr) + c, r, err := db.setSync(batch, addr) if err != nil { return err } gcSizeChange += c + reserveSizeChange += r } case storage.ModeSetRemove: @@ -106,6 +110,11 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) { return err } + err = db.incReserveSizeInBatch(batch, reserveSizeChange) + if err != nil { + return err + } + err = db.shed.WriteBatch(batch) if err != nil { return err @@ -121,7 +130,7 @@ func (db *DB) set(mode storage.ModeSet, addrs ...swarm.Address) (err error) { // from push sync index // - update to gc index happens given item does not exist in pin index // Provided batch is updated. 
-func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange int64, err error) { +func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange, reserveSizeChange int64, err error) { item := addressToItem(addr) // need to get access timestamp here as it is not @@ -137,11 +146,11 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange in // if it is there err = db.pushIndex.DeleteInBatch(batch, item) if err != nil { - return 0, err + return 0, 0, err } - return 0, nil + return 0, 0, nil } - return 0, err + return 0, 0, err } item.StoreTimestamp = i.StoreTimestamp item.BinID = i.BinID @@ -157,7 +166,7 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange in // but this function is called with ModeSetSync db.logger.Debugf("localstore: chunk with address %s not found in push index", addr) } else { - return 0, err + return 0, 0, err } } if err == nil && db.tags != nil && i.Tag != 0 { @@ -169,25 +178,25 @@ func (db *DB) setSync(batch *leveldb.Batch, addr swarm.Address) (gcSizeChange in } else { err = t.Inc(tags.StateSynced) if err != nil { - return 0, err + return 0, 0, err } } } err = db.pushIndex.DeleteInBatch(batch, item) if err != nil { - return 0, err + return 0, 0, err } i1, err := db.retrievalAccessIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { - return 0, err + return 0, 0, err } item.AccessTimestamp = now() err := db.retrievalAccessIndex.PutInBatch(batch, item) if err != nil { - return 0, err + return 0, 0, err } } else { item.AccessTimestamp = i1.AccessTimestamp diff --git a/pkg/localstore/reserve.go b/pkg/localstore/reserve.go index 09f0281565f..f335ff82709 100644 --- a/pkg/localstore/reserve.go +++ b/pkg/localstore/reserve.go @@ -22,13 +22,15 @@ func (db *DB) UnreserveBatch(id []byte, radius uint8) (evicted uint64, err error BatchID: id, } batch = new(leveldb.Batch) - oldRadius = radius + oldRadius uint8 ) + i, err := db.postageRadiusIndex.Get(item) if err != nil { if !errors.Is(err, leveldb.ErrNotFound) { return 0, err } + oldRadius = 0 } else { oldRadius = i.Radius } diff --git a/pkg/localstore/reserve_test.go b/pkg/localstore/reserve_test.go index e12ed890ad4..3b215820d6d 100644 --- a/pkg/localstore/reserve_test.go +++ b/pkg/localstore/reserve_test.go @@ -84,6 +84,8 @@ func TestDB_ReserveGC_AllOutOfRadius(t *testing.T) { t.Run("gc size", newIndexGCSizeTest(db)) + t.Run("reserve size", reserveSizeTest(db, 0)) + // the first synced chunk should be removed t.Run("get the first synced chunk", func(t *testing.T) { _, err := db.Get(context.Background(), storage.ModeGetRequest, addrs[0]) @@ -190,6 +192,8 @@ func TestDB_ReserveGC_AllWithinRadius(t *testing.T) { t.Run("gc size", newIndexGCSizeTest(db)) + t.Run("reserve size", reserveSizeTest(db, 150)) + t.Run("all chunks should be accessible", func(t *testing.T) { for _, a := range addrs { _, err := db.Get(context.Background(), storage.ModeGetRequest, a) @@ -369,6 +373,8 @@ func TestDB_ReserveGC_Unreserve(t *testing.T) { t.Run("gc size", newIndexGCSizeTest(db)) + t.Run("reserve size", reserveSizeTest(db, 90)) + t.Run("first ten unreserved chunks should not be accessible", func(t *testing.T) { for _, a := range addrs[:10] { _, err := db.Get(context.Background(), storage.ModeGetRequest, a) @@ -562,6 +568,8 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) { t.Run("gc size", newIndexGCSizeTest(db)) + t.Run("reserve size", reserveSizeTest(db, 90)) + t.Run("first ten unreserved chunks should not be accessible", func(t *testing.T) { 
for _, a := range addrs[:10] { _, err := db.Get(context.Background(), storage.ModeGetRequest, a) @@ -580,3 +588,98 @@ func TestDB_ReserveGC_EvictMaxPO(t *testing.T) { } }) } + +func TestReserveSize(t *testing.T) { + var ( + chunkCount = 10 + ) + + t.Run("variadic put sync", func(t *testing.T) { + var ( + db = newTestDB(t, &Options{ + Capacity: 100, + ReserveCapacity: 100, + }) + chs []swarm.Chunk + ) + for i := 0; i < chunkCount; i++ { + ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false) + chs = append(chs, ch) + } + _, err := db.Put(context.Background(), storage.ModePutSync, chs...) + if err != nil { + t.Fatal(err) + } + t.Run("reserve size", reserveSizeTest(db, 10)) + }) + + t.Run("variadic put upload then set sync", func(t *testing.T) { + var ( + db = newTestDB(t, &Options{ + Capacity: 100, + ReserveCapacity: 100, + }) + chs []swarm.Chunk + addrs []swarm.Address + ) + for i := 0; i < chunkCount; i++ { + ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false) + chs = append(chs, ch) + addrs = append(addrs, ch.Address()) + } + _, err := db.Put(context.Background(), storage.ModePutUpload, chs...) + if err != nil { + t.Fatal(err) + } + t.Run("reserve size", reserveSizeTest(db, 0)) + + err = db.Set(context.Background(), storage.ModeSetSync, addrs...) + if err != nil { + t.Fatal(err) + } + t.Run("reserve size", reserveSizeTest(db, 10)) + }) + + t.Run("sequential put sync", func(t *testing.T) { + var ( + db = newTestDB(t, &Options{ + Capacity: 100, + ReserveCapacity: 100, + }) + ) + for i := 0; i < chunkCount; i++ { + ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false) + _, err := db.Put(context.Background(), storage.ModePutSync, ch) + if err != nil { + t.Fatal(err) + } + } + t.Run("reserve size", reserveSizeTest(db, 10)) + }) + + t.Run("sequential put upload then set sync", func(t *testing.T) { + var ( + db = newTestDB(t, &Options{ + Capacity: 100, + ReserveCapacity: 100, + }) + chs []swarm.Chunk + ) + for i := 0; i < chunkCount; i++ { + ch := generateTestRandomChunkAt(swarm.NewAddress(db.baseKey), 2).WithBatch(2, 3, 2, false) + chs = append(chs, ch) + _, err := db.Put(context.Background(), storage.ModePutUpload, ch) + if err != nil { + t.Fatal(err) + } + } + t.Run("reserve size", reserveSizeTest(db, 0)) + for _, ch := range chs { + err := db.Set(context.Background(), storage.ModeSetSync, ch.Address()) + if err != nil { + t.Fatal(err) + } + } + t.Run("reserve size", reserveSizeTest(db, 10)) + }) +}
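The core of the patch is an accumulate-then-apply pattern: instead of calling incReserveSizeInBatch once per chunk inside the put loop, each chunk contributes to a reserveSizeChange delta and the batch is updated a single time, next to the existing gcSize update. The self-contained Go sketch below illustrates why repeated per-item in-batch increments can lose updates and how the accumulated delta avoids it. The batch and store types here are hypothetical toy stand-ins, not the real shed/leveldb API; the only assumption is that an in-batch increment reads the last committed value rather than the value already staged in the same batch, which is one plausible way per-chunk increments within a single batch collapse into a single +1.

package main

import "fmt"

// batch stages key/value writes that only take effect on commit
// (hypothetical toy type, loosely mimicking a write batch).
type batch struct {
	staged map[string]uint64
}

// store holds the committed values (hypothetical toy type).
type store struct {
	committed map[string]uint64
}

func newBatch() *batch { return &batch{staged: make(map[string]uint64)} }
func newStore() *store { return &store{committed: make(map[string]uint64)} }

// incInBatch reads the committed value and stages committed+delta.
// Called repeatedly against the same batch, every call overwrites the
// previously staged value, so only one delta survives the commit.
func (s *store) incInBatch(b *batch, key string, delta int64) {
	current := int64(s.committed[key])
	b.staged[key] = uint64(current + delta)
}

// commit applies the staged writes to the committed state.
func (s *store) commit(b *batch) {
	for k, v := range b.staged {
		s.committed[k] = v
	}
}

func main() {
	const key = "reserveSize"

	// Buggy shape: increment once per chunk inside the put loop.
	s1, b1 := newStore(), newBatch()
	for i := 0; i < 10; i++ {
		s1.incInBatch(b1, key, 1) // each call stages committed(0)+1
	}
	s1.commit(b1)
	fmt.Println("per-chunk increments:", s1.committed[key]) // prints 1, not 10

	// Fixed shape (what the patch does): accumulate a delta per chunk
	// and apply it to the batch once before the batch is written.
	s2, b2 := newStore(), newBatch()
	var reserveSizeChange int64
	for i := 0; i < 10; i++ {
		reserveSizeChange++
	}
	s2.incInBatch(b2, key, reserveSizeChange)
	s2.commit(b2)
	fmt.Println("accumulated delta:", s2.committed[key]) // prints 10
}

A side effect of the accumulated delta is that the reserveSize and gcSize updates now have the same shape and land in the same write batch, so a failed WriteBatch leaves both counters untouched.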