From c6240d36e8e9935395f087977f05aaa6f0c71ca2 Mon Sep 17 00:00:00 2001 From: Draco Date: Wed, 29 Oct 2025 15:50:38 -0400 Subject: [PATCH 1/8] feat(blockdb): add lru cache for block entries --- x/blockdb/README.md | 2 +- x/blockdb/config.go | 16 +++++ x/blockdb/database.go | 30 ++++++++-- x/blockdb/entry_cache_test.go | 107 ++++++++++++++++++++++++++++++++++ x/blockdb/readblock_test.go | 2 + x/blockdb/writeblock_test.go | 5 +- 6 files changed, 153 insertions(+), 9 deletions(-) create mode 100644 x/blockdb/entry_cache_test.go diff --git a/x/blockdb/README.md b/x/blockdb/README.md index d57896b50ed8..9f62ffde7b60 100644 --- a/x/blockdb/README.md +++ b/x/blockdb/README.md @@ -10,6 +10,7 @@ BlockDB is a specialized database optimized for blockchain blocks. - **Configurable Durability**: Optional `syncToDisk` mode guarantees immediate recoverability - **Automatic Recovery**: Detects and recovers unindexed blocks after unclean shutdowns - **Block Compression**: zstd compression for block data +- **In-Memory Cache**: LRU cache for recently accessed blocks ## Design @@ -167,7 +168,6 @@ if err != nil { ## TODO -- Implement a block cache for recently accessed blocks - Use a buffered pool to avoid allocations on reads and writes - Add performance benchmarks - Consider supporting missing data files (currently we error if any data files are missing) diff --git a/x/blockdb/config.go b/x/blockdb/config.go index c587b8483d22..4df89c6eef95 100644 --- a/x/blockdb/config.go +++ b/x/blockdb/config.go @@ -11,6 +11,9 @@ const DefaultMaxDataFileSize = 500 * 1024 * 1024 * 1024 // DefaultMaxDataFiles is the default maximum number of data files descriptors cached. const DefaultMaxDataFiles = 10 +// DefaultEntryCacheSize is the default size of the entry cache. +const DefaultEntryCacheSize = 256 + // DatabaseConfig contains configuration parameters for BlockDB. type DatabaseConfig struct { // IndexDir is the directory where the index file is stored. @@ -28,6 +31,9 @@ type DatabaseConfig struct { // MaxDataFiles is the maximum number of data files descriptors cached. MaxDataFiles int + // EntryCacheSize is the size of the entry cache (default: 256). + EntryCacheSize int + // CheckpointInterval defines how frequently (in blocks) the index file header is updated (default: 1024). CheckpointInterval uint64 @@ -43,6 +49,7 @@ func DefaultConfig() DatabaseConfig { MinimumHeight: 0, MaxDataFileSize: DefaultMaxDataFileSize, MaxDataFiles: DefaultMaxDataFiles, + EntryCacheSize: DefaultEntryCacheSize, CheckpointInterval: 1024, SyncToDisk: true, } @@ -91,6 +98,12 @@ func (c DatabaseConfig) WithMaxDataFiles(maxFiles int) DatabaseConfig { return c } +// WithEntryCacheSize returns a copy of the config with EntryCacheSize set to the given value. +func (c DatabaseConfig) WithEntryCacheSize(size int) DatabaseConfig { + c.EntryCacheSize = size + return c +} + // WithCheckpointInterval returns a copy of the config with CheckpointInterval set to the given value. 
func (c DatabaseConfig) WithCheckpointInterval(interval uint64) DatabaseConfig { c.CheckpointInterval = interval @@ -114,5 +127,8 @@ func (c DatabaseConfig) Validate() error { if c.MaxDataFileSize == 0 { return errors.New("MaxDataFileSize must be positive") } + if c.EntryCacheSize < 1 { + return errors.New("EntryCacheSize cannot be less than 1") + } return nil } diff --git a/x/blockdb/database.go b/x/blockdb/database.go index 14d6c70a9c4d..8ac01321c096 100644 --- a/x/blockdb/database.go +++ b/x/blockdb/database.go @@ -12,6 +12,7 @@ import ( "math" "os" "path/filepath" + "slices" "sync" "sync/atomic" @@ -176,6 +177,7 @@ type Database struct { log logging.Logger closed bool fileCache *lru.Cache[int, *os.File] + entryCache *lru.Cache[BlockHeight, BlockData] compressor compression.Compressor // closeMu prevents the database from being closed while in use and prevents @@ -223,6 +225,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { f.Close() } }), + entryCache: lru.NewCache[BlockHeight, BlockData](config.EntryCacheSize), compressor: compressor, } @@ -231,6 +234,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { zap.String("dataDir", config.DataDir), zap.Uint64("maxDataFileSize", config.MaxDataFileSize), zap.Int("maxDataFiles", config.MaxDataFiles), + zap.Int("entryCacheSize", config.EntryCacheSize), ) if err := s.openAndInitializeIndex(); err != nil { @@ -275,6 +279,7 @@ func (s *Database) Close() error { } s.closeFiles() + s.entryCache.Flush() s.log.Info("Block database closed successfully") return err @@ -371,6 +376,7 @@ func (s *Database) Put(height BlockHeight, block BlockData) error { ) return err } + s.entryCache.Put(height, slices.Clone(block)) s.log.Debug("Block written successfully", zap.Uint64("height", height), @@ -385,12 +391,6 @@ func (s *Database) Put(height BlockHeight, block BlockData) error { // It returns database.ErrNotFound if the block does not exist. func (s *Database) readBlockIndex(height BlockHeight) (indexEntry, error) { var entry indexEntry - if s.closed { - s.log.Error("Failed to read block index: database is closed", - zap.Uint64("height", height), - ) - return entry, database.ErrClosed - } // Skip the index entry read if we know the block is past the max height. 
maxHeight := s.maxBlockHeight.Load() @@ -436,6 +436,15 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) { s.closeMu.RLock() defer s.closeMu.RUnlock() + if s.closed { + s.log.Error("Failed Get: database closed", zap.Uint64("height", height)) + return nil, database.ErrClosed + } + + if c, ok := s.entryCache.Get(height); ok { + return slices.Clone(c), nil + } + indexEntry, err := s.readBlockIndex(height) if err != nil { return nil, err @@ -486,6 +495,7 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) { return nil, fmt.Errorf("checksum mismatch: calculated %d, stored %d", calculatedChecksum, bh.Checksum) } + s.entryCache.Put(height, slices.Clone(decompressed)) return decompressed, nil } @@ -494,6 +504,14 @@ func (s *Database) Has(height BlockHeight) (bool, error) { s.closeMu.RLock() defer s.closeMu.RUnlock() + if s.closed { + s.log.Error("Failed Has: database closed", zap.Uint64("height", height)) + return false, database.ErrClosed + } + + if _, ok := s.entryCache.Get(height); ok { + return true, nil + } _, err := s.readBlockIndex(height) if err != nil { if errors.Is(err, database.ErrNotFound) || errors.Is(err, ErrInvalidBlockHeight) { diff --git a/x/blockdb/entry_cache_test.go b/x/blockdb/entry_cache_test.go new file mode 100644 index 000000000000..aac46f7ee541 --- /dev/null +++ b/x/blockdb/entry_cache_test.go @@ -0,0 +1,107 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package blockdb + +import ( + "slices" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCacheOnMiss(t *testing.T) { + db, _ := newTestDatabase(t, DefaultConfig()) + height := uint64(20) + block := randomBlock(t) + require.NoError(t, db.Put(height, block)) + + // Evict the entry from cache to simulate a cache miss + db.entryCache.Evict(height) + + // Read the block - should populate the cache on cache miss + _, err := db.Get(height) + require.NoError(t, err) + + _, ok := db.entryCache.Get(height) + require.True(t, ok) +} + +func TestCacheHas(t *testing.T) { + db, _ := newTestDatabase(t, DefaultConfig()) + height := uint64(30) + block := randomBlock(t) + require.NoError(t, db.Put(height, block)) + + has, err := db.Has(height) + require.NoError(t, err) + require.True(t, has) + + // Verify block is in cache + cached, ok := db.entryCache.Get(height) + require.True(t, ok) + require.Equal(t, block, cached) +} + +func TestCachePutStoresClone(t *testing.T) { + db, _ := newTestDatabase(t, DefaultConfig()) + height := uint64(40) + block := randomBlock(t) + clone := slices.Clone(block) + require.NoError(t, db.Put(height, clone)) + + // Modify the original block after Put + clone[0] = 99 + + // Cache should have the original unmodified data + cached, ok := db.entryCache.Get(height) + require.True(t, ok) + require.Equal(t, block, cached) +} + +func TestCacheGetReturnsClone(t *testing.T) { + db, _ := newTestDatabase(t, DefaultConfig()) + height := uint64(50) + block := randomBlock(t) + require.NoError(t, db.Put(height, block)) + + // Get the block and modify the returned data + data, err := db.Get(height) + require.NoError(t, err) + data[0] = 99 + + // Cache should still have the original unmodified data + cached, ok := db.entryCache.Get(height) + require.True(t, ok) + require.Equal(t, block, cached) + + // Second Get should also return original data + data, err = db.Get(height) + require.NoError(t, err) + require.Equal(t, block, data) +} + +func TestCachePutOverridesSameHeight(t *testing.T) { + db, _ := 
newTestDatabase(t, DefaultConfig()) + height := uint64(60) + b1 := randomBlock(t) + require.NoError(t, db.Put(height, b1)) + + // Verify first block is in cache + cached, ok := db.entryCache.Get(height) + require.True(t, ok) + require.Equal(t, b1, cached) + + // Put second block at same height and verify it overrides the first one + b2 := randomBlock(t) + require.NoError(t, db.Put(height, b2)) + cached, ok = db.entryCache.Get(height) + require.True(t, ok) + require.Equal(t, b2, cached) + require.NotEqual(t, b1, cached) + + // Get should also return the new block + data, err := db.Get(height) + require.NoError(t, err) + require.Equal(t, b2, data) +} diff --git a/x/blockdb/readblock_test.go b/x/blockdb/readblock_test.go index 3e3fc1339bd2..5bf7a156b2e0 100644 --- a/x/blockdb/readblock_test.go +++ b/x/blockdb/readblock_test.go @@ -51,6 +51,7 @@ func TestReadOperations(t *testing.T) { MaxDataFileSize: DefaultMaxDataFileSize, CheckpointInterval: 1024, MaxDataFiles: DefaultMaxDataFileSize, + EntryCacheSize: DefaultEntryCacheSize, }, }, { @@ -69,6 +70,7 @@ func TestReadOperations(t *testing.T) { MaxDataFileSize: DefaultMaxDataFileSize, CheckpointInterval: 1024, MaxDataFiles: DefaultMaxDataFileSize, + EntryCacheSize: DefaultEntryCacheSize, }, wantErr: ErrInvalidBlockHeight, }, diff --git a/x/blockdb/writeblock_test.go b/x/blockdb/writeblock_test.go index 00d5d221ad6a..dd8f66a862bf 100644 --- a/x/blockdb/writeblock_test.go +++ b/x/blockdb/writeblock_test.go @@ -4,6 +4,7 @@ package blockdb import ( + "bytes" "math" "os" "strings" @@ -38,7 +39,7 @@ func TestPutGet(t *testing.T) { { name: "nil block", block: nil, - want: []byte{}, + want: nil, }, } for _, tt := range tests { @@ -49,7 +50,7 @@ func TestPutGet(t *testing.T) { got, err := db.Get(0) require.NoError(t, err) - require.Equal(t, tt.want, got) + require.True(t, bytes.Equal(tt.want, got)) }) } } From e5336fc92d20c74717dcbe2a11ae218a7c542c13 Mon Sep 17 00:00:00 2001 From: Draco Date: Fri, 31 Oct 2025 14:17:56 -0400 Subject: [PATCH 2/8] fix: test to ensure testing cache is used --- x/blockdb/entry_cache_test.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/x/blockdb/entry_cache_test.go b/x/blockdb/entry_cache_test.go index aac46f7ee541..fd5594777ef2 100644 --- a/x/blockdb/entry_cache_test.go +++ b/x/blockdb/entry_cache_test.go @@ -27,20 +27,32 @@ func TestCacheOnMiss(t *testing.T) { require.True(t, ok) } -func TestCacheHas(t *testing.T) { +func TestCacheGet(t *testing.T) { db, _ := newTestDatabase(t, DefaultConfig()) height := uint64(30) block := randomBlock(t) - require.NoError(t, db.Put(height, block)) + // Populate cache directly without writing to database + db.entryCache.Put(height, block) + + // Get should return the block from cache + data, err := db.Get(height) + require.NoError(t, err) + require.Equal(t, block, data) +} + +func TestCacheHas(t *testing.T) { + db, _ := newTestDatabase(t, DefaultConfig()) + height := uint64(40) + block := randomBlock(t) + + // Populate cache directly without writing to database + db.entryCache.Put(height, block) + + // Has should return true from cache even though block is not in database has, err := db.Has(height) require.NoError(t, err) require.True(t, has) - - // Verify block is in cache - cached, ok := db.entryCache.Get(height) - require.True(t, ok) - require.Equal(t, block, cached) } func TestCachePutStoresClone(t *testing.T) { From 225955e483f3acffada05575d9b3aa48d1336795 Mon Sep 17 00:00:00 2001 From: Draco Date: Fri, 31 Oct 2025 16:49:57 -0400 Subject: 
[PATCH 3/8] refactor: entryCache -> blockCache --- x/blockdb/config.go | 20 ++++++++++---------- x/blockdb/database.go | 16 ++++++++-------- x/blockdb/entry_cache_test.go | 16 ++++++++-------- x/blockdb/readblock_test.go | 4 ++-- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/x/blockdb/config.go b/x/blockdb/config.go index 4df89c6eef95..c86ef823d334 100644 --- a/x/blockdb/config.go +++ b/x/blockdb/config.go @@ -11,8 +11,8 @@ const DefaultMaxDataFileSize = 500 * 1024 * 1024 * 1024 // DefaultMaxDataFiles is the default maximum number of data files descriptors cached. const DefaultMaxDataFiles = 10 -// DefaultEntryCacheSize is the default size of the entry cache. -const DefaultEntryCacheSize = 256 +// DefaultBlockCacheSize is the default size of the block cache. +const DefaultBlockCacheSize = 256 // DatabaseConfig contains configuration parameters for BlockDB. type DatabaseConfig struct { @@ -31,8 +31,8 @@ type DatabaseConfig struct { // MaxDataFiles is the maximum number of data files descriptors cached. MaxDataFiles int - // EntryCacheSize is the size of the entry cache (default: 256). - EntryCacheSize int + // BlockCacheSize is the size of the block cache (default: 256). + BlockCacheSize int // CheckpointInterval defines how frequently (in blocks) the index file header is updated (default: 1024). CheckpointInterval uint64 @@ -49,7 +49,7 @@ func DefaultConfig() DatabaseConfig { MinimumHeight: 0, MaxDataFileSize: DefaultMaxDataFileSize, MaxDataFiles: DefaultMaxDataFiles, - EntryCacheSize: DefaultEntryCacheSize, + BlockCacheSize: DefaultBlockCacheSize, CheckpointInterval: 1024, SyncToDisk: true, } @@ -98,9 +98,9 @@ func (c DatabaseConfig) WithMaxDataFiles(maxFiles int) DatabaseConfig { return c } -// WithEntryCacheSize returns a copy of the config with EntryCacheSize set to the given value. -func (c DatabaseConfig) WithEntryCacheSize(size int) DatabaseConfig { - c.EntryCacheSize = size +// WithBlockCacheSize returns a copy of the config with BlockCacheSize set to the given value. 
+func (c DatabaseConfig) WithBlockCacheSize(size int) DatabaseConfig { + c.BlockCacheSize = size return c } @@ -127,8 +127,8 @@ func (c DatabaseConfig) Validate() error { if c.MaxDataFileSize == 0 { return errors.New("MaxDataFileSize must be positive") } - if c.EntryCacheSize < 1 { - return errors.New("EntryCacheSize cannot be less than 1") + if c.BlockCacheSize < 1 { + return errors.New("BlockCacheSize cannot be less than 1") } return nil } diff --git a/x/blockdb/database.go b/x/blockdb/database.go index 8ac01321c096..770c97a5de91 100644 --- a/x/blockdb/database.go +++ b/x/blockdb/database.go @@ -177,7 +177,7 @@ type Database struct { log logging.Logger closed bool fileCache *lru.Cache[int, *os.File] - entryCache *lru.Cache[BlockHeight, BlockData] + blockCache *lru.Cache[BlockHeight, BlockData] compressor compression.Compressor // closeMu prevents the database from being closed while in use and prevents @@ -225,7 +225,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { f.Close() } }), - entryCache: lru.NewCache[BlockHeight, BlockData](config.EntryCacheSize), + blockCache: lru.NewCache[BlockHeight, BlockData](config.BlockCacheSize), compressor: compressor, } @@ -234,7 +234,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { zap.String("dataDir", config.DataDir), zap.Uint64("maxDataFileSize", config.MaxDataFileSize), zap.Int("maxDataFiles", config.MaxDataFiles), - zap.Int("entryCacheSize", config.EntryCacheSize), + zap.Int("blockCacheSize", config.BlockCacheSize), ) if err := s.openAndInitializeIndex(); err != nil { @@ -279,7 +279,7 @@ func (s *Database) Close() error { } s.closeFiles() - s.entryCache.Flush() + s.blockCache.Flush() s.log.Info("Block database closed successfully") return err @@ -376,7 +376,7 @@ func (s *Database) Put(height BlockHeight, block BlockData) error { ) return err } - s.entryCache.Put(height, slices.Clone(block)) + s.blockCache.Put(height, slices.Clone(block)) s.log.Debug("Block written successfully", zap.Uint64("height", height), @@ -441,7 +441,7 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) { return nil, database.ErrClosed } - if c, ok := s.entryCache.Get(height); ok { + if c, ok := s.blockCache.Get(height); ok { return slices.Clone(c), nil } @@ -495,7 +495,7 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) { return nil, fmt.Errorf("checksum mismatch: calculated %d, stored %d", calculatedChecksum, bh.Checksum) } - s.entryCache.Put(height, slices.Clone(decompressed)) + s.blockCache.Put(height, slices.Clone(decompressed)) return decompressed, nil } @@ -509,7 +509,7 @@ func (s *Database) Has(height BlockHeight) (bool, error) { return false, database.ErrClosed } - if _, ok := s.entryCache.Get(height); ok { + if _, ok := s.blockCache.Get(height); ok { return true, nil } _, err := s.readBlockIndex(height) diff --git a/x/blockdb/entry_cache_test.go b/x/blockdb/entry_cache_test.go index fd5594777ef2..d4c48ecee583 100644 --- a/x/blockdb/entry_cache_test.go +++ b/x/blockdb/entry_cache_test.go @@ -17,13 +17,13 @@ func TestCacheOnMiss(t *testing.T) { require.NoError(t, db.Put(height, block)) // Evict the entry from cache to simulate a cache miss - db.entryCache.Evict(height) + db.blockCache.Evict(height) // Read the block - should populate the cache on cache miss _, err := db.Get(height) require.NoError(t, err) - _, ok := db.entryCache.Get(height) + _, ok := db.blockCache.Get(height) require.True(t, ok) } @@ -33,7 +33,7 @@ func TestCacheGet(t *testing.T) { block := randomBlock(t) // 
Populate cache directly without writing to database - db.entryCache.Put(height, block) + db.blockCache.Put(height, block) // Get should return the block from cache data, err := db.Get(height) @@ -47,7 +47,7 @@ func TestCacheHas(t *testing.T) { block := randomBlock(t) // Populate cache directly without writing to database - db.entryCache.Put(height, block) + db.blockCache.Put(height, block) // Has should return true from cache even though block is not in database has, err := db.Has(height) @@ -66,7 +66,7 @@ func TestCachePutStoresClone(t *testing.T) { clone[0] = 99 // Cache should have the original unmodified data - cached, ok := db.entryCache.Get(height) + cached, ok := db.blockCache.Get(height) require.True(t, ok) require.Equal(t, block, cached) } @@ -83,7 +83,7 @@ func TestCacheGetReturnsClone(t *testing.T) { data[0] = 99 // Cache should still have the original unmodified data - cached, ok := db.entryCache.Get(height) + cached, ok := db.blockCache.Get(height) require.True(t, ok) require.Equal(t, block, cached) @@ -100,14 +100,14 @@ func TestCachePutOverridesSameHeight(t *testing.T) { require.NoError(t, db.Put(height, b1)) // Verify first block is in cache - cached, ok := db.entryCache.Get(height) + cached, ok := db.blockCache.Get(height) require.True(t, ok) require.Equal(t, b1, cached) // Put second block at same height and verify it overrides the first one b2 := randomBlock(t) require.NoError(t, db.Put(height, b2)) - cached, ok = db.entryCache.Get(height) + cached, ok = db.blockCache.Get(height) require.True(t, ok) require.Equal(t, b2, cached) require.NotEqual(t, b1, cached) diff --git a/x/blockdb/readblock_test.go b/x/blockdb/readblock_test.go index 5bf7a156b2e0..ee9f061fd586 100644 --- a/x/blockdb/readblock_test.go +++ b/x/blockdb/readblock_test.go @@ -51,7 +51,7 @@ func TestReadOperations(t *testing.T) { MaxDataFileSize: DefaultMaxDataFileSize, CheckpointInterval: 1024, MaxDataFiles: DefaultMaxDataFileSize, - EntryCacheSize: DefaultEntryCacheSize, + BlockCacheSize: DefaultBlockCacheSize, }, }, { @@ -70,7 +70,7 @@ func TestReadOperations(t *testing.T) { MaxDataFileSize: DefaultMaxDataFileSize, CheckpointInterval: 1024, MaxDataFiles: DefaultMaxDataFileSize, - EntryCacheSize: DefaultEntryCacheSize, + BlockCacheSize: DefaultBlockCacheSize, }, wantErr: ErrInvalidBlockHeight, }, From 20b70f1562db90ee4533d115c2dbb2373897c71b Mon Sep 17 00:00:00 2001 From: Draco Date: Mon, 3 Nov 2025 10:54:08 -0500 Subject: [PATCH 4/8] use cache_db wrapper for caching functionality --- x/blockdb/cache_db.go | 79 +++++++++++++++++++ .../{entry_cache_test.go => cache_db_test.go} | 28 +++---- x/blockdb/config.go | 9 +-- x/blockdb/database.go | 30 +++---- x/blockdb/database_test.go | 27 ++++--- x/blockdb/datasplit_test.go | 16 ++-- x/blockdb/helpers_test.go | 27 +++++-- x/blockdb/readblock_test.go | 9 +-- x/blockdb/recovery_test.go | 14 ++-- x/blockdb/writeblock_test.go | 13 +-- 10 files changed, 162 insertions(+), 90 deletions(-) create mode 100644 x/blockdb/cache_db.go rename x/blockdb/{entry_cache_test.go => cache_db_test.go} (81%) diff --git a/x/blockdb/cache_db.go b/x/blockdb/cache_db.go new file mode 100644 index 000000000000..a0a89b65c77c --- /dev/null +++ b/x/blockdb/cache_db.go @@ -0,0 +1,79 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package blockdb + +import ( + "slices" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/cache/lru" + "github.com/ava-labs/avalanchego/database" +) + +var _ database.HeightIndex = (*cacheDB)(nil) + +type cacheDB struct { + db *Database + cache *lru.Cache[BlockHeight, BlockData] +} + +func newCacheDB(db *Database, size uint16) *cacheDB { + return &cacheDB{ + db: db, + cache: lru.NewCache[BlockHeight, BlockData](int(size)), + } +} + +func (c *cacheDB) Get(height BlockHeight) (BlockData, error) { + c.db.closeMu.RLock() + defer c.db.closeMu.RUnlock() + + if c.db.closed { + c.db.log.Error("Failed Get: database closed", zap.Uint64("height", height)) + return nil, database.ErrClosed + } + + if cached, ok := c.cache.Get(height); ok { + return slices.Clone(cached), nil + } + data, err := c.db.getWithoutLock(height) + if err != nil { + return nil, err + } + c.cache.Put(height, slices.Clone(data)) + return data, nil +} + +func (c *cacheDB) Put(height BlockHeight, data BlockData) error { + if err := c.db.Put(height, data); err != nil { + return err + } + + c.cache.Put(height, slices.Clone(data)) + return nil +} + +func (c *cacheDB) Has(height BlockHeight) (bool, error) { + c.db.closeMu.RLock() + defer c.db.closeMu.RUnlock() + + if c.db.closed { + c.db.log.Error("Failed Has: database closed", zap.Uint64("height", height)) + return false, database.ErrClosed + } + + if _, ok := c.cache.Get(height); ok { + return true, nil + } + return c.db.hasWithoutLock(height) +} + +func (c *cacheDB) Close() error { + if err := c.db.Close(); err != nil { + return err + } + c.cache.Flush() + return nil +} diff --git a/x/blockdb/entry_cache_test.go b/x/blockdb/cache_db_test.go similarity index 81% rename from x/blockdb/entry_cache_test.go rename to x/blockdb/cache_db_test.go index d4c48ecee583..da00f21b4434 100644 --- a/x/blockdb/entry_cache_test.go +++ b/x/blockdb/cache_db_test.go @@ -11,29 +11,29 @@ import ( ) func TestCacheOnMiss(t *testing.T) { - db, _ := newTestDatabase(t, DefaultConfig()) + db := newCacheDatabase(t, DefaultConfig()) height := uint64(20) block := randomBlock(t) require.NoError(t, db.Put(height, block)) // Evict the entry from cache to simulate a cache miss - db.blockCache.Evict(height) + db.cache.Evict(height) // Read the block - should populate the cache on cache miss _, err := db.Get(height) require.NoError(t, err) - _, ok := db.blockCache.Get(height) + _, ok := db.cache.Get(height) require.True(t, ok) } func TestCacheGet(t *testing.T) { - db, _ := newTestDatabase(t, DefaultConfig()) + db := newCacheDatabase(t, DefaultConfig()) height := uint64(30) block := randomBlock(t) // Populate cache directly without writing to database - db.blockCache.Put(height, block) + db.cache.Put(height, block) // Get should return the block from cache data, err := db.Get(height) @@ -42,12 +42,12 @@ func TestCacheGet(t *testing.T) { } func TestCacheHas(t *testing.T) { - db, _ := newTestDatabase(t, DefaultConfig()) + db := newCacheDatabase(t, DefaultConfig()) height := uint64(40) block := randomBlock(t) // Populate cache directly without writing to database - db.blockCache.Put(height, block) + db.cache.Put(height, block) // Has should return true from cache even though block is not in database has, err := db.Has(height) @@ -56,7 +56,7 @@ func TestCacheHas(t *testing.T) { } func TestCachePutStoresClone(t *testing.T) { - db, _ := newTestDatabase(t, DefaultConfig()) + db := newCacheDatabase(t, DefaultConfig()) height := uint64(40) block := randomBlock(t) clone := slices.Clone(block) @@ -66,13 +66,13 @@ func 
TestCachePutStoresClone(t *testing.T) { clone[0] = 99 // Cache should have the original unmodified data - cached, ok := db.blockCache.Get(height) + cached, ok := db.cache.Get(height) require.True(t, ok) require.Equal(t, block, cached) } func TestCacheGetReturnsClone(t *testing.T) { - db, _ := newTestDatabase(t, DefaultConfig()) + db := newCacheDatabase(t, DefaultConfig()) height := uint64(50) block := randomBlock(t) require.NoError(t, db.Put(height, block)) @@ -83,7 +83,7 @@ func TestCacheGetReturnsClone(t *testing.T) { data[0] = 99 // Cache should still have the original unmodified data - cached, ok := db.blockCache.Get(height) + cached, ok := db.cache.Get(height) require.True(t, ok) require.Equal(t, block, cached) @@ -94,20 +94,20 @@ func TestCacheGetReturnsClone(t *testing.T) { } func TestCachePutOverridesSameHeight(t *testing.T) { - db, _ := newTestDatabase(t, DefaultConfig()) + db := newCacheDatabase(t, DefaultConfig()) height := uint64(60) b1 := randomBlock(t) require.NoError(t, db.Put(height, b1)) // Verify first block is in cache - cached, ok := db.blockCache.Get(height) + cached, ok := db.cache.Get(height) require.True(t, ok) require.Equal(t, b1, cached) // Put second block at same height and verify it overrides the first one b2 := randomBlock(t) require.NoError(t, db.Put(height, b2)) - cached, ok = db.blockCache.Get(height) + cached, ok = db.cache.Get(height) require.True(t, ok) require.Equal(t, b2, cached) require.NotEqual(t, b1, cached) diff --git a/x/blockdb/config.go b/x/blockdb/config.go index c86ef823d334..252c11a918f5 100644 --- a/x/blockdb/config.go +++ b/x/blockdb/config.go @@ -12,7 +12,7 @@ const DefaultMaxDataFileSize = 500 * 1024 * 1024 * 1024 const DefaultMaxDataFiles = 10 // DefaultBlockCacheSize is the default size of the block cache. -const DefaultBlockCacheSize = 256 +const DefaultBlockCacheSize uint16 = 256 // DatabaseConfig contains configuration parameters for BlockDB. type DatabaseConfig struct { @@ -32,7 +32,7 @@ type DatabaseConfig struct { MaxDataFiles int // BlockCacheSize is the size of the block cache (default: 256). - BlockCacheSize int + BlockCacheSize uint16 // CheckpointInterval defines how frequently (in blocks) the index file header is updated (default: 1024). CheckpointInterval uint64 @@ -99,7 +99,7 @@ func (c DatabaseConfig) WithMaxDataFiles(maxFiles int) DatabaseConfig { } // WithBlockCacheSize returns a copy of the config with BlockCacheSize set to the given value. 
-func (c DatabaseConfig) WithBlockCacheSize(size int) DatabaseConfig { +func (c DatabaseConfig) WithBlockCacheSize(size uint16) DatabaseConfig { c.BlockCacheSize = size return c } @@ -127,8 +127,5 @@ func (c DatabaseConfig) Validate() error { if c.MaxDataFileSize == 0 { return errors.New("MaxDataFileSize must be positive") } - if c.BlockCacheSize < 1 { - return errors.New("BlockCacheSize cannot be less than 1") - } return nil } diff --git a/x/blockdb/database.go b/x/blockdb/database.go index 770c97a5de91..66654c15e493 100644 --- a/x/blockdb/database.go +++ b/x/blockdb/database.go @@ -12,7 +12,6 @@ import ( "math" "os" "path/filepath" - "slices" "sync" "sync/atomic" @@ -177,7 +176,6 @@ type Database struct { log logging.Logger closed bool fileCache *lru.Cache[int, *os.File] - blockCache *lru.Cache[BlockHeight, BlockData] compressor compression.Compressor // closeMu prevents the database from being closed while in use and prevents @@ -199,7 +197,7 @@ type Database struct { // Parameters: // - config: Configuration parameters // - log: Logger instance for structured logging -func New(config DatabaseConfig, log logging.Logger) (*Database, error) { +func New(config DatabaseConfig, log logging.Logger) (database.HeightIndex, error) { if err := config.Validate(); err != nil { return nil, err } @@ -225,7 +223,6 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { f.Close() } }), - blockCache: lru.NewCache[BlockHeight, BlockData](config.BlockCacheSize), compressor: compressor, } @@ -234,7 +231,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { zap.String("dataDir", config.DataDir), zap.Uint64("maxDataFileSize", config.MaxDataFileSize), zap.Int("maxDataFiles", config.MaxDataFiles), - zap.Int("blockCacheSize", config.BlockCacheSize), + zap.Uint16("blockCacheSize", config.BlockCacheSize), ) if err := s.openAndInitializeIndex(); err != nil { @@ -260,6 +257,9 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) { zap.Uint64("maxBlockHeight", maxHeight), ) + if config.BlockCacheSize > 0 { + return newCacheDB(s, config.BlockCacheSize), nil + } return s, nil } @@ -279,7 +279,6 @@ func (s *Database) Close() error { } s.closeFiles() - s.blockCache.Flush() s.log.Info("Block database closed successfully") return err @@ -291,9 +290,7 @@ func (s *Database) Put(height BlockHeight, block BlockData) error { defer s.closeMu.RUnlock() if s.closed { - s.log.Error("Failed to write block: database is closed", - zap.Uint64("height", height), - ) + s.log.Error("Failed Put: database closed", zap.Uint64("height", height)) return database.ErrClosed } @@ -376,7 +373,6 @@ func (s *Database) Put(height BlockHeight, block BlockData) error { ) return err } - s.blockCache.Put(height, slices.Clone(block)) s.log.Debug("Block written successfully", zap.Uint64("height", height), @@ -441,10 +437,10 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) { return nil, database.ErrClosed } - if c, ok := s.blockCache.Get(height); ok { - return slices.Clone(c), nil - } + return s.getWithoutLock(height) +} +func (s *Database) getWithoutLock(height BlockHeight) (BlockData, error) { indexEntry, err := s.readBlockIndex(height) if err != nil { return nil, err @@ -495,7 +491,6 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) { return nil, fmt.Errorf("checksum mismatch: calculated %d, stored %d", calculatedChecksum, bh.Checksum) } - s.blockCache.Put(height, slices.Clone(decompressed)) return decompressed, nil } @@ -509,9 +504,10 @@ func (s *Database) 
Has(height BlockHeight) (bool, error) { return false, database.ErrClosed } - if _, ok := s.blockCache.Get(height); ok { - return true, nil - } + return s.hasWithoutLock(height) +} + +func (s *Database) hasWithoutLock(height BlockHeight) (bool, error) { _, err := s.readBlockIndex(height) if err != nil { if errors.Is(err, database.ErrNotFound) || errors.Is(err, ErrInvalidBlockHeight) { diff --git a/x/blockdb/database_test.go b/x/blockdb/database_test.go index b4af2837cf2a..a85cd253b202 100644 --- a/x/blockdb/database_test.go +++ b/x/blockdb/database_test.go @@ -90,7 +90,7 @@ func TestNew_Params(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - db, err := New(tt.config, nil) + hdb, err := New(tt.config, nil) if tt.wantErr != nil { require.Equal(t, tt.wantErr.Error(), err.Error()) @@ -98,14 +98,17 @@ func TestNew_Params(t *testing.T) { } require.NoError(t, err) - require.NotNil(t, db) + require.NotNil(t, hdb) + db, ok := hdb.(*cacheDB) + require.True(t, ok) + config := db.db.config // Verify the database was created with correct configuration - require.Equal(t, tt.config.MinimumHeight, db.config.MinimumHeight) - require.Equal(t, tt.config.MaxDataFileSize, db.config.MaxDataFileSize) - require.Equal(t, tt.config.MaxDataFiles, db.config.MaxDataFiles) - require.Equal(t, tt.config.CheckpointInterval, db.config.CheckpointInterval) - require.Equal(t, tt.config.SyncToDisk, db.config.SyncToDisk) + require.Equal(t, tt.config.MinimumHeight, config.MinimumHeight) + require.Equal(t, tt.config.MaxDataFileSize, config.MaxDataFileSize) + require.Equal(t, tt.config.MaxDataFiles, config.MaxDataFiles) + require.Equal(t, tt.config.CheckpointInterval, config.CheckpointInterval) + require.Equal(t, tt.config.SyncToDisk, config.SyncToDisk) indexPath := filepath.Join(tt.config.IndexDir, indexFileName) require.FileExists(t, indexPath) @@ -263,9 +266,8 @@ func TestFileCache_Eviction(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - store, cleanup := newTestDatabase(t, tt.config.WithMaxDataFileSize(1024*1.5)) + store := newDatabase(t, tt.config.WithMaxDataFileSize(1024*1.5)) store.compressor = compression.NewNoCompressor() - defer cleanup() // Override the file cache with specified size evictionCount := atomic.Int32{} @@ -346,8 +348,7 @@ func TestMaxDataFiles_CacheLimit(t *testing.T) { WithMaxDataFiles(2). 
// Only allow 2 files in cache WithMaxDataFileSize(1024) // Small file size to force multiple files - store, cleanup := newTestDatabase(t, config) - defer cleanup() + db := newDatabase(t, config) // Create blocks that will span multiple data files // Each block is ~512 bytes, so 2 blocks per file @@ -355,12 +356,12 @@ func TestMaxDataFiles_CacheLimit(t *testing.T) { // Write blocks to force multiple data files for i := range numBlocks { block := fixedSizeBlock(t, 512, uint64(i)) - require.NoError(t, store.Put(uint64(i), block)) + require.NoError(t, db.Put(uint64(i), block)) } // Verify all blocks are still readable despite evictions for i := range numBlocks { - block, err := store.Get(uint64(i)) + block, err := db.Get(uint64(i)) require.NoError(t, err, "failed to read block at height %d after eviction", i) require.Len(t, block, 512, "block size mismatch at height %d", i) } diff --git a/x/blockdb/datasplit_test.go b/x/blockdb/datasplit_test.go index d978fb714299..e005595e1a9d 100644 --- a/x/blockdb/datasplit_test.go +++ b/x/blockdb/datasplit_test.go @@ -17,8 +17,7 @@ import ( func TestDataSplitting(t *testing.T) { // Each data file should have enough space for 2 blocks config := DefaultConfig().WithMaxDataFileSize(1024 * 2.5) - store, cleanup := newTestDatabase(t, config) - defer cleanup() + store := newDatabase(t, config) // Override the compressor so we can have fixed size blocks store.compressor = compression.NewNoCompressor() @@ -54,13 +53,11 @@ func TestDataSplitting(t *testing.T) { // reopen and verify all blocks are readable require.NoError(t, store.Close()) - config = config.WithDataDir(store.config.DataDir).WithIndexDir(store.config.IndexDir) - store, err = New(config, store.log) - require.NoError(t, err) - store.compressor = compression.NewNoCompressor() - defer store.Close() + dir := store.config.DataDir + db := newDatabase(t, config.WithIndexDir(dir).WithDataDir(dir)) + db.compressor = compression.NewNoCompressor() for i := range numBlocks { - readBlock, err := store.Get(uint64(i)) + readBlock, err := db.Get(uint64(i)) require.NoError(t, err) require.Equal(t, blocks[i], readBlock) } @@ -68,8 +65,7 @@ func TestDataSplitting(t *testing.T) { func TestDataSplitting_DeletedFile(t *testing.T) { config := DefaultConfig().WithMaxDataFileSize(1024 * 2.5) - store, cleanup := newTestDatabase(t, config) - defer cleanup() + store := newDatabase(t, config) // create 5 blocks, 1kb each numBlocks := 5 diff --git a/x/blockdb/helpers_test.go b/x/blockdb/helpers_test.go index 578469ed4f5f..32e1f49e9380 100644 --- a/x/blockdb/helpers_test.go +++ b/x/blockdb/helpers_test.go @@ -11,13 +11,22 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/logging" ) -func newTestDatabase(t *testing.T, opts DatabaseConfig) (*Database, func()) { +func newDatabase(t *testing.T, config DatabaseConfig) *Database { t.Helper() + + db := newHeightIndexDatabase(t, config.WithBlockCacheSize(0)) + require.IsType(t, &Database{}, db) + return db.(*Database) +} + +func newHeightIndexDatabase(t *testing.T, config DatabaseConfig) database.HeightIndex { + t.Helper() + dir := t.TempDir() - config := opts if config.IndexDir == "" { config = config.WithIndexDir(dir) } @@ -25,12 +34,16 @@ func newTestDatabase(t *testing.T, opts DatabaseConfig) (*Database, func()) { config = config.WithDataDir(dir) } db, err := New(config, logging.NoLog{}) - require.NoError(t, err, "failed to create database") + require.NoError(t, err) + return db +} - cleanup := func() 
{ - db.Close() - } - return db, cleanup +func newCacheDatabase(t *testing.T, config DatabaseConfig) *cacheDB { + t.Helper() + + db := newHeightIndexDatabase(t, config) + require.IsType(t, &cacheDB{}, db) + return db.(*cacheDB) } // randomBlock generates a random block of size 1KB-50KB. diff --git a/x/blockdb/readblock_test.go b/x/blockdb/readblock_test.go index ee9f061fd586..43dabf9159e9 100644 --- a/x/blockdb/readblock_test.go +++ b/x/blockdb/readblock_test.go @@ -94,8 +94,7 @@ func TestReadOperations(t *testing.T) { config = &defaultConfig } - store, cleanup := newTestDatabase(t, *config) - defer cleanup() + store := newDatabase(t, *config) // Seed database with blocks based on config (unless skipSeed is true) seededBlocks := make(map[uint64][]byte) @@ -139,8 +138,7 @@ func TestReadOperations(t *testing.T) { } func TestReadOperations_Concurrency(t *testing.T) { - store, cleanup := newTestDatabase(t, DefaultConfig()) - defer cleanup() + store := newDatabase(t, DefaultConfig()) // Pre-generate blocks and write them numBlocks := 50 @@ -271,8 +269,7 @@ func TestHasBlock(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - store, cleanup := newTestDatabase(t, DefaultConfig().WithMinimumHeight(minHeight)) - defer cleanup() + store := newDatabase(t, DefaultConfig().WithMinimumHeight(minHeight)) for i := minHeight; i <= minHeight+blocksCount; i++ { if i == gapHeight { diff --git a/x/blockdb/recovery_test.go b/x/blockdb/recovery_test.go index 723aca2358ab..0925d5ad2270 100644 --- a/x/blockdb/recovery_test.go +++ b/x/blockdb/recovery_test.go @@ -197,7 +197,7 @@ func TestRecovery_Success(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - store, _ := newTestDatabase(t, config) + store := newDatabase(t, config) blockHeights := []uint64{0, 1, 3, 6, 2, 8, 4} blocks := make(map[uint64][]byte) @@ -217,17 +217,16 @@ func TestRecovery_Success(t *testing.T) { require.NoError(t, tt.corruptIndex(indexPath, blocks)) // Reopen the database and test recovery - recoveredStore, err := New(config.WithIndexDir(store.config.IndexDir).WithDataDir(store.config.DataDir), store.log) - require.NoError(t, err) - defer recoveredStore.Close() + dir := store.config.DataDir + recoveredDB := newDatabase(t, config.WithIndexDir(dir).WithDataDir(dir)) // Verify blocks are readable for _, height := range blockHeights { - readBlock, err := recoveredStore.Get(height) + readBlock, err := recoveredDB.Get(height) require.NoError(t, err) require.Equal(t, blocks[height], readBlock, "block %d should be the same", height) } - checkDatabaseState(t, recoveredStore, 8) + checkDatabaseState(t, recoveredDB, 8) }) } } @@ -518,11 +517,10 @@ func TestRecovery_CorruptionDetection(t *testing.T) { config = config.WithMaxDataFileSize(*tt.maxDataFileSize) } - store, cleanup := newTestDatabase(t, config) + store := newDatabase(t, config) if tt.disableCompression { store.compressor = compression.NewNoCompressor() } - defer cleanup() // Setup blocks blocks := make([][]byte, len(tt.blockHeights)) diff --git a/x/blockdb/writeblock_test.go b/x/blockdb/writeblock_test.go index dd8f66a862bf..9002506cb225 100644 --- a/x/blockdb/writeblock_test.go +++ b/x/blockdb/writeblock_test.go @@ -44,8 +44,7 @@ func TestPutGet(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - db, cleanup := newTestDatabase(t, DefaultConfig()) - defer cleanup() + db := newDatabase(t, DefaultConfig()) require.NoError(t, db.Put(0, tt.block)) got, err := db.Get(0) @@ -151,9 +150,7 @@ func 
TestPut_MaxHeight(t *testing.T) { if config.CheckpointInterval == 0 { config = DefaultConfig() } - - store, cleanup := newTestDatabase(t, config) - defer cleanup() + store := newDatabase(t, config) blocksWritten := make(map[uint64][]byte) for _, h := range tt.blockHeights { @@ -170,8 +167,7 @@ func TestPut_MaxHeight(t *testing.T) { } func TestWriteBlock_Concurrency(t *testing.T) { - store, cleanup := newTestDatabase(t, DefaultConfig()) - defer cleanup() + store := newDatabase(t, DefaultConfig()) var wg sync.WaitGroup var errors atomic.Int32 @@ -303,11 +299,10 @@ func TestWriteBlock_Errors(t *testing.T) { config = DefaultConfig() } - store, cleanup := newTestDatabase(t, config) + store := newDatabase(t, config) if tt.disableCompression { store.compressor = compression.NewNoCompressor() } - defer cleanup() if tt.setup != nil { tt.setup(store) From afa7427635d7432ea72f7273d5a188618bddc359 Mon Sep 17 00:00:00 2001 From: Draco Date: Tue, 4 Nov 2025 15:51:28 -0500 Subject: [PATCH 5/8] add locking to cached Put --- x/blockdb/cache_db.go | 22 ++++++++++++++++++++-- x/blockdb/cache_db_test.go | 1 - 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/x/blockdb/cache_db.go b/x/blockdb/cache_db.go index a0a89b65c77c..f9ca03d1ae58 100644 --- a/x/blockdb/cache_db.go +++ b/x/blockdb/cache_db.go @@ -5,6 +5,7 @@ package blockdb import ( "slices" + "sync" "go.uber.org/zap" @@ -12,11 +13,17 @@ import ( "github.com/ava-labs/avalanchego/database" ) +// numShards is the number of mutex shards used to reduce lock contention for +// concurrent Put operations. Using 256 shards provides a good balance between +// memory usage (~2KB) and concurrency. +const numShards = 256 + var _ database.HeightIndex = (*cacheDB)(nil) type cacheDB struct { - db *Database - cache *lru.Cache[BlockHeight, BlockData] + db *Database + cache *lru.Cache[BlockHeight, BlockData] + shards [numShards]sync.Mutex } func newCacheDB(db *Database, size uint16) *cacheDB { @@ -46,7 +53,18 @@ func (c *cacheDB) Get(height BlockHeight) (BlockData, error) { return data, nil } +// Put writes block data at the specified height to both the underlying database +// and the cache. +// +// Concurrent calls to Put with the same height are serialized using sharded +// locking to ensure cache consistency with the underlying database. +// This allows concurrent writes to different heights while preventing race +// conditions for writes to the same height. 
func (c *cacheDB) Put(height BlockHeight, data BlockData) error { + shard := &c.shards[height%numShards] + shard.Lock() + defer shard.Unlock() + if err := c.db.Put(height, data); err != nil { return err } diff --git a/x/blockdb/cache_db_test.go b/x/blockdb/cache_db_test.go index da00f21b4434..fceb80e01430 100644 --- a/x/blockdb/cache_db_test.go +++ b/x/blockdb/cache_db_test.go @@ -110,7 +110,6 @@ func TestCachePutOverridesSameHeight(t *testing.T) { cached, ok = db.cache.Get(height) require.True(t, ok) require.Equal(t, b2, cached) - require.NotEqual(t, b1, cached) // Get should also return the new block data, err := db.Get(height) From d666deb7cd37615571567edb9c8a99f35f788086 Mon Sep 17 00:00:00 2001 From: Draco Date: Tue, 4 Nov 2025 16:35:48 -0500 Subject: [PATCH 6/8] acceptable limitation --- x/blockdb/cache_db.go | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/x/blockdb/cache_db.go b/x/blockdb/cache_db.go index f9ca03d1ae58..83d9859c8e4d 100644 --- a/x/blockdb/cache_db.go +++ b/x/blockdb/cache_db.go @@ -5,7 +5,6 @@ package blockdb import ( "slices" - "sync" "go.uber.org/zap" @@ -13,17 +12,17 @@ import ( "github.com/ava-labs/avalanchego/database" ) -// numShards is the number of mutex shards used to reduce lock contention for -// concurrent Put operations. Using 256 shards provides a good balance between -// memory usage (~2KB) and concurrency. -const numShards = 256 - var _ database.HeightIndex = (*cacheDB)(nil) +// cacheDB caches data from the underlying database. +// +// Operations (Get, Has, Put) are not atomic with the underlying database. +// Concurrent writes to the same height can result in cache inconsistencies where +// the cache and database contain different values. This limitation is acceptable +// because concurrent writes to the same height are not an intended use case. type cacheDB struct { - db *Database - cache *lru.Cache[BlockHeight, BlockData] - shards [numShards]sync.Mutex + db *Database + cache *lru.Cache[BlockHeight, BlockData] } func newCacheDB(db *Database, size uint16) *cacheDB { @@ -53,18 +52,7 @@ func (c *cacheDB) Get(height BlockHeight) (BlockData, error) { return data, nil } -// Put writes block data at the specified height to both the underlying database -// and the cache. -// -// Concurrent calls to Put with the same height are serialized using sharded -// locking to ensure cache consistency with the underlying database. -// This allows concurrent writes to different heights while preventing race -// conditions for writes to the same height. func (c *cacheDB) Put(height BlockHeight, data BlockData) error { - shard := &c.shards[height%numShards] - shard.Lock() - defer shard.Unlock() - if err := c.db.Put(height, data); err != nil { return err } From 9264d55b3b35446d9181eee7d2c37840b862c85d Mon Sep 17 00:00:00 2001 From: Draco Date: Wed, 5 Nov 2025 09:04:50 -0500 Subject: [PATCH 7/8] Update doc wording --- x/blockdb/cache_db.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x/blockdb/cache_db.go b/x/blockdb/cache_db.go index 83d9859c8e4d..6ed9e7a4a475 100644 --- a/x/blockdb/cache_db.go +++ b/x/blockdb/cache_db.go @@ -14,10 +14,10 @@ import ( var _ database.HeightIndex = (*cacheDB)(nil) -// cacheDB caches data from the underlying database. +// cacheDB caches data from the underlying [Database]. // // Operations (Get, Has, Put) are not atomic with the underlying database. 
-// Concurrent writes to the same height can result in cache inconsistencies where +// Concurrent access to the same height can result in cache inconsistencies where // the cache and database contain different values. This limitation is acceptable // because concurrent writes to the same height are not an intended use case. type cacheDB struct { From 9792f389b9e04ed02f13290712670510e352dbc4 Mon Sep 17 00:00:00 2001 From: Draco Date: Fri, 7 Nov 2025 12:21:22 -0500 Subject: [PATCH 8/8] clarify docs --- x/blockdb/cache_db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/blockdb/cache_db.go b/x/blockdb/cache_db.go index 6ed9e7a4a475..756016c153fd 100644 --- a/x/blockdb/cache_db.go +++ b/x/blockdb/cache_db.go @@ -17,7 +17,7 @@ var _ database.HeightIndex = (*cacheDB)(nil) // cacheDB caches data from the underlying [Database]. // // Operations (Get, Has, Put) are not atomic with the underlying database. -// Concurrent access to the same height can result in cache inconsistencies where +// Concurrent writes to the same height can result in cache inconsistencies where // the cache and database contain different values. This limitation is acceptable // because concurrent writes to the same height are not an intended use case. type cacheDB struct {