From 1dcf9e551507c401947be1666313d5dca798683e Mon Sep 17 00:00:00 2001 From: Brindrajsinh-Chauhan Date: Wed, 24 Apr 2024 14:04:20 -0400 Subject: [PATCH] update implementation --- core/blockchain.go | 40 +++++++------- core/blockchain_repair_test.go | 4 +- core/blockchain_sethead_test.go | 2 +- core/blockchain_snapshot_test.go | 2 +- core/state/pruner/pruner.go | 2 +- core/state/snapshot/difflayer.go | 6 +- core/state/snapshot/disklayer_test.go | 8 +-- core/state/snapshot/iterator_test.go | 12 ++-- core/state/snapshot/snapshot.go | 80 ++++++++++++++++----------- core/state/snapshot/snapshot_test.go | 56 +++++++++++-------- core/state/statedb.go | 2 +- eth/backend.go | 22 ++++---- eth/ethconfig/config.go | 56 +++++++++---------- eth/ethconfig/gen_config.go | 20 +++---- triedb/pathdb/database.go | 68 +++++++++++++++++------ triedb/pathdb/layertree.go | 4 +- 16 files changed, 222 insertions(+), 162 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index 2f03b9d97b12..dfb6e6055aa1 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -140,8 +140,8 @@ type CacheConfig struct { StateHistory uint64 // Number of blocks from head whose state histories are reserved. StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top - EnableDiskRootInterval bool // Enable update disk root when time threshold is reached. Disabled by default - DiskRootThreshold time.Duration // Time threshold after which to flush the layers to disk + AllowForceUpdate bool // Enable update disk root when commit threshold is reached. Disabled by default + CommitThreshold int // Number of commits threshold after which to flush the layers to disk SnapshotNoBuild bool // Whether the background generation is allowed SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it @@ -160,9 +160,11 @@ func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config { } if c.StateScheme == rawdb.PathScheme { config.PathDB = &pathdb.Config{ - StateHistory: c.StateHistory, - CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, - DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, + StateHistory: c.StateHistory, + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, + AllowForceUpdate: c.AllowForceUpdate, + CommitThreshold: c.CommitThreshold, } } return config @@ -171,14 +173,14 @@ func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config { // defaultCacheConfig are the default caching values if none are specified by the // user (also used during testing). 
var defaultCacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - StateScheme: rawdb.HashScheme, - EnableDiskRootInterval: false, - DiskRootThreshold: 60 * time.Minute, + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: true, + StateScheme: rawdb.HashScheme, + AllowForceUpdate: false, + CommitThreshold: 128, } // DefaultCacheConfigWithScheme returns a deep copied default cache config with @@ -446,12 +448,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis recover = true } snapconfig := snapshot.Config{ - CacheSize: bc.cacheConfig.SnapshotLimit, - Recovery: recover, - NoBuild: bc.cacheConfig.SnapshotNoBuild, - AsyncBuild: !bc.cacheConfig.SnapshotWait, - EnableDiskRootInterval: bc.cacheConfig.EnableDiskRootInterval, - DiskRootThreshold: bc.cacheConfig.DiskRootThreshold, + CacheSize: bc.cacheConfig.SnapshotLimit, + Recovery: recover, + NoBuild: bc.cacheConfig.SnapshotNoBuild, + AsyncBuild: !bc.cacheConfig.SnapshotWait, + AllowForceUpdate: bc.cacheConfig.AllowForceUpdate, + CommitThreshold: bc.cacheConfig.CommitThreshold, } bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root) } diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index a4761f337b85..49e511f6f221 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1820,7 +1820,7 @@ func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme s t.Fatalf("Failed to flush trie state: %v", err) } if snapshots { - if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { + if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0, false); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) } } @@ -1950,7 +1950,7 @@ func testIssue23496(t *testing.T, scheme string) { if _, err := chain.InsertChain(blocks[1:2]); err != nil { t.Fatalf("Failed to import canonical chain start: %v", err) } - if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil { + if err := chain.snaps.Cap(blocks[1].Root(), 0, false); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) } diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 8b77f9f8b20c..9358ecffb51f 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -2023,7 +2023,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme if tt.commitBlock > 0 { chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false) if snapshots { - if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { + if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0, false); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) } } diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 80f8035df151..f15b5474e005 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -108,7 +108,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Flushing the entire snap tree into the disk, the // relevant (a) snapshot root and (b) snapshot generator // will be persisted atomically. 
- chain.snaps.Cap(blocks[point-1].Root(), 0) + chain.snaps.Cap(blocks[point-1].Root(), 0, false) diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root() if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) { t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot) diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 59c580dacaf1..c5b0ebdaf73e 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -192,7 +192,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta // Pruning is done, now drop the "useless" layers from the snapshot. // Firstly, flushing the target layer into the disk. After that all // diff layers below the target will all be merged into the disk. - if err := snaptree.Cap(root, 0); err != nil { + if err := snaptree.Cap(root, 0, false); err != nil { return err } // Secondly, flushing the snapshot journal into the disk. All diff diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index eabad86f6515..51655ab82bdc 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -77,8 +77,10 @@ var ( bloomAccountHasherOffset = 0 bloomStorageHasherOffset = 0 - // Setting a minimum to prevent very low input from user - minTimeThreshold = 1 * time.Minute + // Count for number of commits before force disk update + // after the first 128 layers, the 129 layers would be committed + // to disk. + defaultCommitThreshold = 128 ) func init() { diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index 168458c40519..d9286046e848 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -133,7 +133,7 @@ func TestDiskMerge(t *testing.T) { }); err != nil { t.Fatalf("failed to update snapshot tree: %v", err) } - if err := snaps.Cap(diffRoot, 0); err != nil { + if err := snaps.Cap(diffRoot, 0, false); err != nil { t.Fatalf("failed to flatten snapshot tree: %v", err) } // Retrieve all the data through the disk layer and validate it @@ -356,7 +356,7 @@ func TestDiskPartialMerge(t *testing.T) { }); err != nil { t.Fatalf("test %d: failed to update snapshot tree: %v", i, err) } - if err := snaps.Cap(diffRoot, 0); err != nil { + if err := snaps.Cap(diffRoot, 0, false); err != nil { t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err) } // Retrieve all the data through the disk layer and validate it @@ -467,7 +467,7 @@ func TestDiskGeneratorPersistence(t *testing.T) { }, nil); err != nil { t.Fatalf("failed to update snapshot tree: %v", err) } - if err := snaps.Cap(diffRoot, 0); err != nil { + if err := snaps.Cap(diffRoot, 0, false); err != nil { t.Fatalf("failed to flatten snapshot tree: %v", err) } blob := rawdb.ReadSnapshotGenerator(db) @@ -489,7 +489,7 @@ func TestDiskGeneratorPersistence(t *testing.T) { } diskLayer := snaps.layers[snaps.diskRoot()].(*diskLayer) diskLayer.genMarker = nil // Construction finished - if err := snaps.Cap(diffTwoRoot, 0); err != nil { + if err := snaps.Cap(diffTwoRoot, 0, false); err != nil { t.Fatalf("failed to flatten snapshot tree: %v", err) } blob = rawdb.ReadSnapshotGenerator(db) diff --git a/core/state/snapshot/iterator_test.go b/core/state/snapshot/iterator_test.go index 54614427a5cf..3f5fcce98209 100644 --- a/core/state/snapshot/iterator_test.go +++ b/core/state/snapshot/iterator_test.go @@ -248,7 +248,7 @@ func TestAccountIteratorTraversal(t *testing.T) { aggregatorMemoryLimit = limit }() 
aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x04"), 2) + snaps.Cap(common.HexToHash("0x04"), 2, false) verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) it, _ = snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) @@ -296,7 +296,7 @@ func TestStorageIteratorTraversal(t *testing.T) { aggregatorMemoryLimit = limit }() aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x04"), 2) + snaps.Cap(common.HexToHash("0x04"), 2, false) verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage) it, _ = snaps.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) @@ -384,7 +384,7 @@ func TestAccountIteratorTraversalValues(t *testing.T) { aggregatorMemoryLimit = limit }() aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x09"), 2) + snaps.Cap(common.HexToHash("0x09"), 2, false) it, _ = snaps.AccountIterator(common.HexToHash("0x09"), common.Hash{}) for it.Next() { @@ -483,7 +483,7 @@ func TestStorageIteratorTraversalValues(t *testing.T) { aggregatorMemoryLimit = limit }() aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x09"), 2) + snaps.Cap(common.HexToHash("0x09"), 2, false) it, _ = snaps.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{}) for it.Next() { @@ -541,7 +541,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) { aggregatorMemoryLimit = limit }() aggregatorMemoryLimit = 0 // Force pushing the bottom-most layer into disk - snaps.Cap(common.HexToHash("0x80"), 2) + snaps.Cap(common.HexToHash("0x80"), 2, false) verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount) @@ -580,7 +580,7 @@ func TestAccountIteratorFlattening(t *testing.T) { it, _ := snaps.AccountIterator(common.HexToHash("0x04"), common.Hash{}) defer it.Release() - if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { + if err := snaps.Cap(common.HexToHash("0x04"), 1, false); err != nil { t.Fatalf("failed to flatten snapshot stack: %v", err) } //verifyIterator(t, 7, it) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 1bade13077b9..27423e8a7e88 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" "sync" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -151,12 +150,23 @@ type snapshot interface { // Config includes the configurations for snapshots. 
type Config struct { - CacheSize int // Megabytes permitted to use for read caches - Recovery bool // Indicator that the snapshots is in the recovery mode - NoBuild bool // Indicator that the snapshots generation is disallowed - AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously - EnableDiskRootInterval bool // The disk root is allowed to update after a time threshold - DiskRootThreshold time.Duration // The threshold to update disk root + CacheSize int // Megabytes permitted to use for read caches + Recovery bool // Indicator that the snapshots is in the recovery mode + NoBuild bool // Indicator that the snapshots generation is disallowed + AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously + AllowForceUpdate bool // Enable forcing snap root generation on a commit count + CommitThreshold int // Number of commit after which to attempt snap root update +} + +// sanitize checks the provided user configurations and changes anything that's +// unreasonable or unworkable. +func (c *Config) sanitize() Config { + conf := *c + + if conf.CommitThreshold == 0 { + conf.CommitThreshold = defaultCommitThreshold + } + return conf } // Tree is an Ethereum state snapshot tree. It consists of one persistent base @@ -169,12 +179,12 @@ type Config struct { // storage data to avoid expensive multi-level trie lookups; and to allow sorted, // cheap iteration of the account/storage tries for sync aid. type Tree struct { - config Config // Snapshots configurations - diskdb ethdb.KeyValueStore // Persistent database to store the snapshot - triedb *triedb.Database // In-memory cache to access the trie through - layers map[common.Hash]snapshot // Collection of all known layers - lock sync.RWMutex - baseTime time.Time // Reference to calculate the time threshold + config Config // Snapshots configurations + diskdb ethdb.KeyValueStore // Persistent database to store the snapshot + triedb *triedb.Database // In-memory cache to access the trie through + layers map[common.Hash]snapshot // Collection of all known layers + lock sync.RWMutex + commitCounter int // Counter for number of commits // Test hooks onFlatten func() // Hook invoked when the bottom most diff layers are flattened @@ -199,17 +209,13 @@ type Tree struct { func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash) (*Tree, error) { // Create a new, empty snapshot tree snap := &Tree{ - config: config, - diskdb: diskdb, - triedb: triedb, - layers: make(map[common.Hash]snapshot), - baseTime: time.Now(), + config: config, + diskdb: diskdb, + triedb: triedb, + layers: make(map[common.Hash]snapshot), } - // If user provided threshold smaller than minimum, set to minimum - if config.DiskRootThreshold < minTimeThreshold { - config.DiskRootThreshold = minTimeThreshold - } + config = config.sanitize() // Attempt to load a previously persisted snapshot and rebuild one if failed head, disabled, err := loadSnapshot(diskdb, triedb, root, config.CacheSize, config.Recovery, config.NoBuild) @@ -393,7 +399,7 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m // which may or may not overflow and cascade to disk. Since this last layer's // survival is only known *after* capping, we need to omit it from the count if // we want to ensure that *at least* the requested number of diff layers remain. 
-func (t *Tree) Cap(root common.Hash, layers int) error {
+func (t *Tree) Cap(root common.Hash, layers int, force bool) error {
 	// Retrieve the head snapshot to cap from
 	snap := t.Snapshot(root)
 	if snap == nil {
@@ -427,7 +433,7 @@ func (t *Tree) Cap(root common.Hash, layers int) error {
 		t.layers = map[common.Hash]snapshot{base.root: base}
 		return nil
 	}
-	persisted := t.cap(diff, layers)
+	persisted := t.cap(diff, layers, force)
 
 	// Remove any layer that is stale or links into a stale layer
 	children := make(map[common.Hash][]common.Hash)
@@ -477,7 +483,7 @@
 // which may or may not overflow and cascade to disk. Since this last layer's
 // survival is only known *after* capping, we need to omit it from the count if
 // we want to ensure that *at least* the requested number of diff layers remain.
-func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
+func (t *Tree) cap(diff *diffLayer, layers int, force bool) *diskLayer {
 	// Dive until we run out of layers or reach the persistent database
 	for i := 0; i < layers-1; i++ {
 		// If we still have diff layers below, continue down
@@ -511,7 +517,7 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
 			t.onFlatten()
 		}
 		diff.parent = flattened
-		if (flattened.memory < aggregatorMemoryLimit) && !t.isPastThreshold() {
+		if (flattened.memory < aggregatorMemoryLimit) && !force {
 			// Accumulator layer is smaller than the limit, so we can abort, unless
 			// there's a snapshot being generated currently. In that case, the trie
 			// will move from underneath the generator so we **must** merge all the
@@ -530,9 +536,6 @@ func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
 	base := diffToDisk(bottom)
 	bottom.lock.RUnlock()
 
-	// Reset the time reference for next update
-	t.baseTime = time.Now()
-
 	t.layers[base.root] = base
 	diff.parent = base
 	return base
@@ -900,8 +903,19 @@ func (t *Tree) Size() (diffs common.StorageSize, buf common.StorageSize) {
 	return size, 0
 }
 
-// Check if time threshold is enabled and if the time elapsed is more than the threshold.
-// if false, we can abort else we proceed to update the disk root -func (t *Tree) isPastThreshold() bool { - return (t.config.EnableDiskRootInterval && (time.Since(t.baseTime) > t.config.DiskRootThreshold)) +// Checks the config to compare if count of commits is above threshold +func (t *Tree) CompareThreshold() bool { + if !t.config.AllowForceUpdate { + return false + } + + log.Debug("Snapshot Commit counters", "counter", t.commitCounter, "threshold", t.config.CommitThreshold) + if t.commitCounter > t.config.CommitThreshold { + t.commitCounter = 0 + return true + } + + t.commitCounter++ + + return false } diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index 69aeed24e3cd..f31785ebd01d 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -114,7 +114,7 @@ func TestDiskLayerExternalInvalidationFullFlatten(t *testing.T) { t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 2) } // Commit the diff layer onto the disk and ensure it's persisted - if err := snaps.Cap(common.HexToHash("0x02"), 0); err != nil { + if err := snaps.Cap(common.HexToHash("0x02"), 0, false); err != nil { t.Fatalf("failed to merge diff layer onto disk: %v", err) } // Since the base layer was modified, ensure that data retrievals on the external reference fail @@ -164,7 +164,7 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) { defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit) aggregatorMemoryLimit = 0 - if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil { + if err := snaps.Cap(common.HexToHash("0x03"), 1, false); err != nil { t.Fatalf("failed to merge accumulator onto disk: %v", err) } // Since the base layer was modified, ensure that data retrievals on the external reference fail @@ -219,14 +219,14 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { // Doing a Cap operation with many allowed layers should be a no-op exp := len(snaps.layers) - if err := snaps.Cap(common.HexToHash("0x04"), 2000); err != nil { + if err := snaps.Cap(common.HexToHash("0x04"), 2000, false); err != nil { t.Fatalf("failed to flatten diff layer into accumulator: %v", err) } if got := len(snaps.layers); got != exp { t.Errorf("layers modified, got %d exp %d", got, exp) } // Flatten the diff layer into the bottom accumulator - if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { + if err := snaps.Cap(common.HexToHash("0x04"), 1, false); err != nil { t.Fatalf("failed to flatten diff layer into accumulator: %v", err) } // Since the accumulator diff layer was modified, ensure that data retrievals on the external reference fail @@ -297,11 +297,11 @@ func TestPostCapBasicDataAccess(t *testing.T) { t.Error(err) } // Cap to a bad root should fail - if err := snaps.Cap(common.HexToHash("0x1337"), 0); err == nil { + if err := snaps.Cap(common.HexToHash("0x1337"), 0, false); err == nil { t.Errorf("expected error, got none") } // Now, merge the a-chain - snaps.Cap(common.HexToHash("0xa3"), 0) + snaps.Cap(common.HexToHash("0xa3"), 0, false) // At this point, a2 got merged into a1. Thus, a1 is now modified, and as a1 is // the parent of b2, b2 should no longer be able to iterate into parent. @@ -325,7 +325,7 @@ func TestPostCapBasicDataAccess(t *testing.T) { } // Now, merge it again, just for fun. 
It should now error, since a3 // is a disk layer - if err := snaps.Cap(common.HexToHash("0xa3"), 0); err == nil { + if err := snaps.Cap(common.HexToHash("0xa3"), 0, false); err == nil { t.Error("expected error capping the disk layer, got none") } } @@ -357,10 +357,8 @@ func TestForceSnapRootCaps(t *testing.T) { layers: map[common.Hash]snapshot{ base.root: base, }, - baseTime: time.Now(), config: Config{ - EnableDiskRootInterval: true, - DiskRootThreshold: 30 * time.Minute, + CommitThreshold: 10, }, } @@ -377,7 +375,7 @@ func TestForceSnapRootCaps(t *testing.T) { } // Now capping without time threshold reached - if err := snaps.Cap(head, 128); err != nil { + if err := snaps.Cap(head, 128, false); err != nil { t.Error("Error while capping layers", err) } @@ -393,12 +391,8 @@ func TestForceSnapRootCaps(t *testing.T) { t.Errorf("Unexpected number of layers after flatten - count: %d, expected: 130", newLayers) } - // Setting baseTime 100 min behind to trigger disk update - snaps.baseTime = time.Now().Add(time.Duration(-100) * time.Minute) - - // Check if forceSnapshot disabled, time threshold should not trigger snapshot - snaps.config.EnableDiskRootInterval = false - if err := snaps.Cap(head, 128); err != nil { + // Check if forceSnapshot disabled, disk root should not update + if err := snaps.Cap(head, 128, false); err != nil { t.Error("Error while capping layers", err) } @@ -408,9 +402,8 @@ func TestForceSnapRootCaps(t *testing.T) { t.Errorf("Disk root should not have updated at this point - actual: %s, expected: %s", firstDiskRoot, base.root) } - // Re-enable forceSnapshot, time threshold should trigger snapshot - snaps.config.EnableDiskRootInterval = true - if err := snaps.Cap(head, 128); err != nil { + // Re-enable forceSnapshot, disk root should update now + if err := snaps.Cap(head, 128, true); err != nil { t.Error("Error while capping layers", err) } @@ -425,6 +418,23 @@ func TestForceSnapRootCaps(t *testing.T) { if newLayers := len(snaps.layers); newLayers != 129 { t.Errorf("Unexpected number of layers after flatten - count: %d, expected: 130", newLayers) } + + // validate compareThreshold method when enabled + snaps.commitCounter = 5 + if compare := snaps.CompareThreshold(); compare { + t.Errorf("Incorrect CompareThreshold return - actual: %t, expected: false", compare) + } + + snaps.commitCounter = 200 + if compare := snaps.CompareThreshold(); !compare { + t.Errorf("Incorrect CompareThreshold return - actual: %t, expected: true", compare) + } + + // validate compareThreshold method when disabled + snaps.config.AllowForceUpdate = false + if compare := snaps.CompareThreshold(); compare { + t.Errorf("Incorrect CompareThreshold return - actual: %t, expected: false", compare) + } } // TestSnaphots tests the functionality for retrieving the snapshot @@ -462,7 +472,7 @@ func TestSnaphots(t *testing.T) { head = makeRoot(uint64(i + 2)) snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil) last = head - snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk) + snaps.Cap(head, 128, false) // 130 layers (128 diffs + 1 accumulator + 1 disk) } var cases = []struct { headRoot common.Hash @@ -497,7 +507,7 @@ func TestSnaphots(t *testing.T) { defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit) aggregatorMemoryLimit = 0 - snaps.Cap(head, 128) // 129 (128 diffs + 1 overflown accumulator + 1 disk) + snaps.Cap(head, 128, false) // 129 (128 diffs + 1 overflown accumulator + 1 disk) cases = []struct { headRoot common.Hash @@ -576,7 +586,7 @@ 
func TestReadStateDuringFlattening(t *testing.T) { } } // Cap the snap tree, which will mark the bottom-most layer as stale. - snaps.Cap(common.HexToHash("0xa3"), 1) + snaps.Cap(common.HexToHash("0xa3"), 1, false) select { case account := <-result: if account == nil { diff --git a/core/state/statedb.go b/core/state/statedb.go index ac37d4ceeb9a..d25086d0e58f 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1276,7 +1276,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er // - head layer is paired with HEAD state // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - if err := s.snaps.Cap(root, TriesInMemory); err != nil { + if err := s.snaps.Cap(root, TriesInMemory, s.snaps.CompareThreshold()); err != nil { log.Warn("Failed to cap snapshot tree", "root", root, "layers", TriesInMemory, "err", err) } } diff --git a/eth/backend.go b/eth/backend.go index 7297cf321fbd..84a5381f2873 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -190,17 +190,17 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { EnablePreimageRecording: config.EnablePreimageRecording, } cacheConfig = &core.CacheConfig{ - TrieCleanLimit: config.TrieCleanCache, - TrieCleanNoPrefetch: config.NoPrefetch, - TrieDirtyLimit: config.TrieDirtyCache, - TrieDirtyDisabled: config.NoPruning, - TrieTimeLimit: config.TrieTimeout, - SnapshotLimit: config.SnapshotCache, - Preimages: config.Preimages, - StateHistory: config.StateHistory, - StateScheme: scheme, - EnableDiskRootInterval: config.EnableDiskRootInterval, - DiskRootThreshold: config.DiskRootThreshold, + TrieCleanLimit: config.TrieCleanCache, + TrieCleanNoPrefetch: config.NoPrefetch, + TrieDirtyLimit: config.TrieDirtyCache, + TrieDirtyDisabled: config.NoPruning, + TrieTimeLimit: config.TrieTimeout, + SnapshotLimit: config.SnapshotCache, + Preimages: config.Preimages, + StateHistory: config.StateHistory, + StateScheme: scheme, + AllowForceUpdate: config.AllowForceUpdate, + CommitThreshold: config.CommitThreshold, } ) if config.VMTrace != "" { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index c56b532986a1..ca32ec90c296 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -48,27 +48,27 @@ var FullNodeGPO = gasprice.Config{ // Defaults contains default settings for use on the Ethereum main net. 
var Defaults = Config{ - SyncMode: downloader.SnapSync, - NetworkId: 0, // enable auto configuration of networkID == chainID - TxLookupLimit: 2350000, - TransactionHistory: 2350000, - StateHistory: params.FullImmutabilityThreshold, - LightPeers: 100, - DatabaseCache: 512, - TrieCleanCache: 154, - TrieDirtyCache: 256, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 102, - EnableDiskRootInterval: false, - DiskRootThreshold: 60 * time.Minute, - FilterLogCacheSize: 32, - Miner: miner.DefaultConfig, - TxPool: legacypool.DefaultConfig, - BlobPool: blobpool.DefaultConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether + SyncMode: downloader.SnapSync, + NetworkId: 0, // enable auto configuration of networkID == chainID + TxLookupLimit: 2350000, + TransactionHistory: 2350000, + StateHistory: params.FullImmutabilityThreshold, + LightPeers: 100, + DatabaseCache: 512, + TrieCleanCache: 154, + TrieDirtyCache: 256, + TrieTimeout: 60 * time.Minute, + SnapshotCache: 102, + AllowForceUpdate: false, + CommitThreshold: 128, + FilterLogCacheSize: 32, + Miner: miner.DefaultConfig, + TxPool: legacypool.DefaultConfig, + BlobPool: blobpool.DefaultConfig, + RPCGasCap: 50000000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -121,13 +121,13 @@ type Config struct { DatabaseCache int DatabaseFreezer string - TrieCleanCache int - TrieDirtyCache int - TrieTimeout time.Duration - SnapshotCache int - Preimages bool - EnableDiskRootInterval bool - DiskRootThreshold time.Duration + TrieCleanCache int + TrieDirtyCache int + TrieTimeout time.Duration + SnapshotCache int + Preimages bool + AllowForceUpdate bool + CommitThreshold int // This is the number of blocks for which logs will be cached in the filter system. 
FilterLogCacheSize int diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index bb131b65507e..8e7a355d273a 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -44,8 +44,8 @@ func (c Config) MarshalTOML() (interface{}, error) { TrieTimeout time.Duration SnapshotCache int Preimages bool - EnableDiskRootInterval bool - DiskRootThreshold time.Duration + AllowForceUpdate bool + CommitThreshold int FilterLogCacheSize int Miner miner.Config TxPool legacypool.Config @@ -89,8 +89,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.TrieTimeout = c.TrieTimeout enc.SnapshotCache = c.SnapshotCache enc.Preimages = c.Preimages - enc.EnableDiskRootInterval = c.EnableDiskRootInterval - enc.DiskRootThreshold = c.DiskRootThreshold + enc.AllowForceUpdate = c.AllowForceUpdate + enc.CommitThreshold = c.CommitThreshold enc.FilterLogCacheSize = c.FilterLogCacheSize enc.Miner = c.Miner enc.TxPool = c.TxPool @@ -138,8 +138,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { TrieTimeout *time.Duration SnapshotCache *int Preimages *bool - EnableDiskRootInterval *bool - DiskRootThreshold *time.Duration + AllowForceUpdate *bool + CommitThreshold *int FilterLogCacheSize *int Miner *miner.Config TxPool *legacypool.Config @@ -240,11 +240,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.Preimages != nil { c.Preimages = *dec.Preimages } - if dec.EnableDiskRootInterval != nil { - c.EnableDiskRootInterval = *dec.EnableDiskRootInterval + if dec.AllowForceUpdate != nil { + c.AllowForceUpdate = *dec.AllowForceUpdate } - if dec.DiskRootThreshold != nil { - c.DiskRootThreshold = *dec.DiskRootThreshold + if dec.CommitThreshold != nil { + c.CommitThreshold = *dec.CommitThreshold } if dec.FilterLogCacheSize != nil { c.FilterLogCacheSize = *dec.FilterLogCacheSize diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go index 05a28aa1efa6..f3269bb571e6 100644 --- a/triedb/pathdb/database.go +++ b/triedb/pathdb/database.go @@ -50,6 +50,12 @@ const ( // Do not increase the buffer size arbitrarily, otherwise the system // pause time will increase when the database writes happen. DefaultBufferSize = 64 * 1024 * 1024 + + // Count for number of commits before force disk update + // after the first 128 layers, the 129th layer would be + // flattened and committed to disk forming the default + // 128 diff layers + 1 disk layer + defaultCommitThreshold = 128 ) var ( @@ -90,10 +96,12 @@ type layer interface { // Config contains the settings for database. type Config struct { - StateHistory uint64 // Number of recent blocks to maintain state history for - CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes - DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes - ReadOnly bool // Flag whether the database is opened in read only mode. + StateHistory uint64 // Number of recent blocks to maintain state history for + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes + DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes + ReadOnly bool // Flag whether the database is opened in read only mode. 
+ AllowForceUpdate bool // Enable forcing snap root generation on a commit count + CommitThreshold int // Number of commit after which to attempt root update } // sanitize checks the provided user configurations and changes anything that's @@ -104,14 +112,20 @@ func (c *Config) sanitize() *Config { log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.DirtyCacheSize), "updated", common.StorageSize(maxBufferSize)) conf.DirtyCacheSize = maxBufferSize } + + if conf.CommitThreshold == 0 { + log.Warn("Sanitizing invalid commit threshold to default", "defaultThreshold", defaultCommitThreshold) + conf.CommitThreshold = defaultCommitThreshold + } return &conf } // Defaults contains default settings for Ethereum mainnet. var Defaults = &Config{ - StateHistory: params.FullImmutabilityThreshold, - CleanCacheSize: defaultCleanSize, - DirtyCacheSize: DefaultBufferSize, + StateHistory: params.FullImmutabilityThreshold, + CleanCacheSize: defaultCleanSize, + DirtyCacheSize: DefaultBufferSize, + AllowForceUpdate: false, } // ReadOnly is the config in order to open database in read only mode. @@ -132,15 +146,16 @@ type Database struct { // readOnly is the flag whether the mutation is allowed to be applied. // It will be set automatically when the database is journaled during // the shutdown to reject all following unexpected mutations. - readOnly bool // Flag if database is opened in read only mode - waitSync bool // Flag if database is deactivated due to initial state sync - isVerkle bool // Flag if database is used for verkle tree - bufferSize int // Memory allowance (in bytes) for caching dirty nodes - config *Config // Configuration for database - diskdb ethdb.Database // Persistent storage for matured trie nodes - tree *layerTree // The group for all known layers - freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests - lock sync.RWMutex // Lock to prevent mutations from happening at the same time + readOnly bool // Flag if database is opened in read only mode + waitSync bool // Flag if database is deactivated due to initial state sync + isVerkle bool // Flag if database is used for verkle tree + bufferSize int // Memory allowance (in bytes) for caching dirty nodes + config *Config // Configuration for database + diskdb ethdb.Database // Persistent storage for matured trie nodes + tree *layerTree // The group for all known layers + freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests + lock sync.RWMutex // Lock to prevent mutations from happening at the same time + commitCounter int // Counter for number of commits } // New attempts to load an already existing layer from a persistent key-value @@ -250,7 +265,7 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6 // - head-1 layer is paired with HEAD-1 state // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state // - head-128 layer(disk layer) is paired with HEAD-128 state - return db.tree.cap(root, maxDiffLayers) + return db.tree.cap(root, maxDiffLayers, db.compareThreshold()) } // Commit traverses downwards the layer tree from a specified layer with the @@ -265,7 +280,7 @@ func (db *Database) Commit(root common.Hash, report bool) error { if err := db.modifyAllowed(); err != nil { return err } - return db.tree.cap(root, 0) + return db.tree.cap(root, 0, false) } // Disable deactivates the database and invalidates all available state layers @@ -524,3 +539,20 @@ func (db *Database) 
StorageHistory(address common.Address, slot common.Hash, sta
 func (db *Database) HistoryRange() (uint64, uint64, error) {
 	return historyRange(db.freezer)
 }
+
+// compareThreshold reports whether enough commits have accumulated to force a disk-layer update.
+func (db *Database) compareThreshold() bool {
+	if !db.config.AllowForceUpdate {
+		return false
+	}
+
+	log.Debug("PathDB Commit counters", "counter", db.commitCounter, "threshold", db.config.CommitThreshold)
+	if db.commitCounter > db.config.CommitThreshold {
+		db.commitCounter = 0
+		return true
+	}
+
+	db.commitCounter++
+
+	return false
+}
diff --git a/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go
index d314779910e9..3f9178767d05 100644
--- a/triedb/pathdb/layertree.go
+++ b/triedb/pathdb/layertree.go
@@ -111,7 +111,7 @@ func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint6
 
 // cap traverses downwards the diff tree until the number of allowed diff layers
 // are crossed. All diffs beyond the permitted number are flattened downwards.
-func (tree *layerTree) cap(root common.Hash, layers int) error {
+func (tree *layerTree) cap(root common.Hash, layers int, force bool) error {
 	// Retrieve the head layer to cap from
 	root = types.TrieRootHash(root)
 	l := tree.get(root)
@@ -156,7 +156,7 @@ func (tree *layerTree) cap(root common.Hash, layers int) error {
 			// parent is linked correctly.
 			diff.lock.Lock()
 
-			base, err := parent.persist(false)
+			base, err := parent.persist(force)
 			if err != nil {
 				diff.lock.Unlock()
 				return err
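
The heart of this patch is the commit-count gate shared by Tree.CompareThreshold (core/state/snapshot) and Database.compareThreshold (triedb/pathdb): when AllowForceUpdate is set, each state commit advances a counter, and once it exceeds CommitThreshold (defaulted to 128 by the sanitize helpers) the corresponding Cap/cap call is forced to flatten the bottom diff layers onto disk and the counter resets. A minimal standalone sketch of that behaviour follows; the commitGate type, its field names, and the main function are illustrative only and not part of the patch.

package main

import "fmt"

// commitGate mirrors the gating logic above: enabled plays the role of
// AllowForceUpdate and threshold the role of CommitThreshold.
type commitGate struct {
	enabled   bool
	threshold int
	counter   int
}

// shouldForce reports whether a forced disk-root flush is due. The counter
// only advances on calls that do not force, and it resets once the threshold
// has been exceeded.
func (g *commitGate) shouldForce() bool {
	if !g.enabled {
		return false
	}
	if g.counter > g.threshold {
		g.counter = 0
		return true
	}
	g.counter++
	return false
}

func main() {
	// With a threshold of 4, the first forced flush happens on the sixth call:
	// the counter must exceed, not merely reach, the threshold.
	g := &commitGate{enabled: true, threshold: 4}
	for i := 1; i <= 12; i++ {
		fmt.Printf("commit %2d -> force flush: %v\n", i, g.shouldForce())
	}
}

In the patch itself this decision is taken in StateDB.Commit, which passes s.snaps.CompareThreshold() as the new force argument to Tree.Cap, and in pathdb's Database.Update, which passes db.compareThreshold() to layerTree.cap.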