Use FIFO Cache for Accepted Blocks #288

Merged 5 commits on Jul 26, 2023
52 changes: 52 additions & 0 deletions cache/fifo.go
@@ -0,0 +1,52 @@
// Copyright (C) 2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package cache

import (
	"sync"

	"github.com/ava-labs/avalanchego/utils/buffer"
)

type FIFO[K comparable, V any] struct {
	l sync.RWMutex

	buffer buffer.Queue[K]
	m      map[K]V
}

// NewFIFO creates a new First-In-First-Out cache of size [limit].
func NewFIFO[K comparable, V any](limit int) (*FIFO[K, V], error) {
	c := &FIFO[K, V]{
		m: make(map[K]V, limit),
	}
	buf, err := buffer.NewBoundedQueue(limit, c.remove)
	if err != nil {
		return nil, err
	}
	c.buffer = buf
	return c, nil
}

func (f *FIFO[K, V]) Put(key K, val V) {
	f.l.Lock()
	defer f.l.Unlock()

	f.buffer.Push(key) // Insert will remove the oldest [K] if we are at the [limit]
	f.m[key] = val
}

func (f *FIFO[K, V]) Get(key K) (V, bool) {
	f.l.RLock()
	defer f.l.RUnlock()

	v, ok := f.m[key]
	return v, ok
}

// remove is used as the callback in [BoundedBuffer]. It is assumed that the
// [WriteLock] is held when this is accessed.
func (f *FIFO[K, V]) remove(key K) {
	delete(f.m, key)
}
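For context, a minimal usage sketch of this cache (not part of the diff) follows; the string/int type parameters and the limit of 3 are arbitrary. Per the comment in Put, pushing a key once the queue is at its limit evicts the oldest entry through the remove callback, so the map never grows past the limit.

package main

import (
	"fmt"

	"github.com/ava-labs/hypersdk/cache"
)

func main() {
	// Hold at most 3 entries (arbitrary limit for illustration).
	c, err := cache.NewFIFO[string, int](3)
	if err != nil {
		panic(err)
	}
	c.Put("a", 1)
	c.Put("b", 2)
	c.Put("c", 3)
	c.Put("d", 4) // evicts "a", the oldest key

	_, ok := c.Get("a")
	fmt.Println(ok) // false: "a" was evicted
	v, ok := c.Get("d")
	fmt.Println(v, ok) // 4 true
}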
3 changes: 2 additions & 1 deletion config/config.go
@@ -39,7 +39,8 @@ func (c *Config) GetTraceConfig() *trace.Config { return &trace.Config{
func (c *Config) GetStateSyncParallelism() int { return 4 }
func (c *Config) GetStateSyncMinBlocks() uint64 { return 256 }
func (c *Config) GetStateSyncServerDelay() time.Duration { return 0 } // used for testing
func (c *Config) GetBlockLRUSize() int { return 128 }
func (c *Config) GetParsedBlockCacheSize() int { return 128 }
func (c *Config) GetAcceptedBlockCacheSize() int { return 128 }

func (c *Config) GetContinuousProfilerConfig() *profiler.Config {
	return &profiler.Config{Enabled: false}
3 changes: 2 additions & 1 deletion vm/dependencies.go
@@ -36,7 +36,8 @@ type Config interface {
	GetStateSyncParallelism() int
	GetStateSyncMinBlocks() uint64
	GetStateSyncServerDelay() time.Duration
	GetBlockLRUSize() int
	GetParsedBlockCacheSize() int
	GetAcceptedBlockCacheSize() int
	GetContinuousProfilerConfig() *profiler.Config
}

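As a schematic aside (not from this PR), a configuration could supply the two new getters roughly as below; the customConfig name and the 256/512 values are invented, and the embedded Config refers to the interface above, with any default implementation filling in the remaining methods.

// Hypothetical sketch, not part of this PR: override only the new cache sizes.
type customConfig struct {
	Config // an embedded default implementation supplies the other getters
}

func (customConfig) GetParsedBlockCacheSize() int   { return 256 }
func (customConfig) GetAcceptedBlockCacheSize() int { return 512 }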
12 changes: 8 additions & 4 deletions vm/vm.go
@@ -32,6 +32,7 @@ import (
"go.uber.org/zap"

"github.com/ava-labs/hypersdk/builder"
hcache "github.com/ava-labs/hypersdk/cache"
"github.com/ava-labs/hypersdk/chain"
"github.com/ava-labs/hypersdk/emap"
"github.com/ava-labs/hypersdk/gossiper"
@@ -74,7 +75,7 @@ type VM struct {

	// cache block objects to optimize "GetBlockStateless"
	// only put when a block is accepted
	blocks *cache.LRU[ids.ID, *chain.StatelessBlock]
	blocks *hcache.FIFO[ids.ID, *chain.StatelessBlock]

	// We cannot use a map here because we may parse blocks up in the ancestry
	parsedBlocks *cache.LRU[ids.ID, *chain.StatelessBlock]
@@ -214,11 +215,14 @@ func (vm *VM) Initialize(
	// Init channels before initializing other structs
	vm.toEngine = toEngine

	vm.blocks = &cache.LRU[ids.ID, *chain.StatelessBlock]{Size: vm.config.GetBlockLRUSize()}
	vm.parsedBlocks = &cache.LRU[ids.ID, *chain.StatelessBlock]{Size: vm.config.GetParsedBlockCacheSize()}
	vm.verifiedBlocks = make(map[ids.ID]*chain.StatelessBlock)
	vm.blocks, err = hcache.NewFIFO[ids.ID, *chain.StatelessBlock](vm.config.GetAcceptedBlockCacheSize())
	if err != nil {
		return err
	}
	vm.acceptedQueue = make(chan *chain.StatelessBlock, vm.config.GetAcceptorSize())
	vm.acceptorDone = make(chan struct{})
	vm.parsedBlocks = &cache.LRU[ids.ID, *chain.StatelessBlock]{Size: vm.config.GetBlockLRUSize()}
	vm.verifiedBlocks = make(map[ids.ID]*chain.StatelessBlock)

	vm.mempool = mempool.New[*chain.Transaction](
		vm.tracer,
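To make the field comment above ("only put when a block is accepted") concrete, here is a rough sketch of how the accepted-block FIFO might be filled and read; the helper names are assumptions, while the blocks field, Put, and Get come from the code in this PR.

// Illustrative only: the actual Accept path is not part of this diff.
func (vm *VM) markAccepted(blk *chain.StatelessBlock) { // hypothetical helper
	// Cache accepted blocks so later stateless lookups avoid a disk read.
	vm.blocks.Put(blk.ID(), blk)
}

func (vm *VM) cachedAcceptedBlock(id ids.ID) (*chain.StatelessBlock, bool) { // hypothetical helper
	return vm.blocks.Get(id)
}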
5 changes: 3 additions & 2 deletions vm/vm_test.go
@@ -8,13 +8,13 @@ import (
"testing"

ametrics "github.com/ava-labs/avalanchego/api/metrics"
"github.com/ava-labs/avalanchego/cache"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/snow"
"github.com/ava-labs/avalanchego/utils/logging"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"

hcache "github.com/ava-labs/hypersdk/cache"
"github.com/ava-labs/hypersdk/chain"
"github.com/ava-labs/hypersdk/emap"
"github.com/ava-labs/hypersdk/mempool"
@@ -37,13 +37,14 @@ func TestBlockCache(t *testing.T) {
	blkID := blk.ID()

	tracer, _ := trace.New(&trace.Config{Enabled: false})
	bcache, _ := hcache.NewFIFO[ids.ID, *chain.StatelessBlock](3)
	controller := NewMockController(ctrl)
	vm := VM{
		snowCtx: &snow.Context{Log: logging.NoLog{}, Metrics: ametrics.NewOptionalGatherer()},

		tracer: tracer,

		blocks: &cache.LRU[ids.ID, *chain.StatelessBlock]{Size: 3},
		blocks: bcache,
		verifiedBlocks: make(map[ids.ID]*chain.StatelessBlock),
		seen: emap.NewEMap[*chain.Transaction](),
		mempool: mempool.New[*chain.Transaction](tracer, 100, 32, nil),